hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
46d581da8a6ddd9ea39ed380d9c10f363330f6a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mem_trs_test2(int * input, int size)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid < size)
printf("tid : %d , gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
} | 46d581da8a6ddd9ea39ed380d9c10f363330f6a7.cu | #include "includes.h"
__global__ void mem_trs_test2(int * input, int size)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid < size)
printf("tid : %d , gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
} |
09fe27499aaab66732277dc15bf7e965d00df398.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:REPAIR_ERROR
//--blockDim=2048 --gridDim=64
struct s {
float *p;
};
__global__ void foo(s q) {
__requires_fresh_array(q.p);
q.p[0] = threadIdx.x;
}
| 09fe27499aaab66732277dc15bf7e965d00df398.cu | //xfail:REPAIR_ERROR
//--blockDim=2048 --gridDim=64
struct s {
float *p;
};
__global__ void foo(s q) {
__requires_fresh_array(q.p);
q.p[0] = threadIdx.x;
}
|
132257cb5eab4d205f82c25628f64f851d6c510a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include <hip/hip_vector_types.h>
#include <driver_functions.h>
#include <cuda_gl_interop.h>
#include "../common/cutil.h"
GLuint g_screenPixelBuffer;
struct cudaGraphicsResource* g_cudaScreenPixelBuffer = 0;
void (*g_renderCallback)(void);
unsigned int g_width;
unsigned int g_height;
void idleCallback();
void reshapeCallback(int width, int height);
void displayCallback();
void createBuffers();
void releaseBuffers();
void setupGlApp(unsigned int width, unsigned int height){
int argc = 0;
char** args = 0;
g_width = width;
g_height = height;
glutInit(&argc, args);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutInitWindowSize(g_width, g_height);
glutCreateWindow("GL Screen");
glutDisplayFunc(displayCallback);
glutReshapeFunc(reshapeCallback);
glutIdleFunc(idleCallback);
glewInit();
createBuffers();
atexit(releaseBuffers);
}
void runGlApp(void (*renderCallback)(void))
{
g_renderCallback = renderCallback;
glutMainLoop();
hipDeviceReset();
}
void lockTarget(unsigned int** pTarget){
CUDA_SAFE_CALL(hipGraphicsMapResources(1, &g_cudaScreenPixelBuffer, 0));
size_t num_bytes;
CUDA_SAFE_CALL(hipGraphicsResourceGetMappedPointer( (void**)pTarget, &num_bytes, g_cudaScreenPixelBuffer));
}
void unlockTarget(unsigned int* pTarget){
CUDA_SAFE_CALL(hipGraphicsUnmapResources(1, &g_cudaScreenPixelBuffer, 0));
}
void reshapeCallback(int width, int height){
glViewport(0,0,width,height);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
}
void displayCallback()
{
g_renderCallback();
glClear(GL_COLOR_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
glRasterPos2i(0,0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, g_screenPixelBuffer);
glDrawPixels(g_width, g_height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glutSwapBuffers();
glutReportErrors();
}
void idleCallback(){
glutPostRedisplay();
}
void createBuffers(){
// All of this sets up the pixel buffer object
glGenBuffersARB(1, &g_screenPixelBuffer);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, g_screenPixelBuffer);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB,
g_width * g_height * sizeof(GLubyte) * 4,
0,
GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// And this then allows CUDA/GL interop on that object
CUDA_SAFE_CALL(hipGraphicsGLRegisterBuffer(&g_cudaScreenPixelBuffer, g_screenPixelBuffer, hipGraphicsMapFlagsWriteDiscard));
}
void releaseBuffers(){
hipGraphicsUnregisterResource(g_cudaScreenPixelBuffer);
glDeleteBuffersARB(1, &g_screenPixelBuffer);
}
| 132257cb5eab4d205f82c25628f64f851d6c510a.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include <vector_types.h>
#include <driver_functions.h>
#include <cuda_gl_interop.h>
#include "../common/cutil.h"
GLuint g_screenPixelBuffer;
struct cudaGraphicsResource* g_cudaScreenPixelBuffer = 0;
void (*g_renderCallback)(void);
unsigned int g_width;
unsigned int g_height;
void idleCallback();
void reshapeCallback(int width, int height);
void displayCallback();
void createBuffers();
void releaseBuffers();
void setupGlApp(unsigned int width, unsigned int height){
int argc = 0;
char** args = 0;
g_width = width;
g_height = height;
glutInit(&argc, args);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE);
glutInitWindowSize(g_width, g_height);
glutCreateWindow("GL Screen");
glutDisplayFunc(displayCallback);
glutReshapeFunc(reshapeCallback);
glutIdleFunc(idleCallback);
glewInit();
createBuffers();
atexit(releaseBuffers);
}
void runGlApp(void (*renderCallback)(void))
{
g_renderCallback = renderCallback;
glutMainLoop();
cudaThreadExit();
}
void lockTarget(unsigned int** pTarget){
CUDA_SAFE_CALL(cudaGraphicsMapResources(1, &g_cudaScreenPixelBuffer, 0));
size_t num_bytes;
CUDA_SAFE_CALL(cudaGraphicsResourceGetMappedPointer( (void**)pTarget, &num_bytes, g_cudaScreenPixelBuffer));
}
void unlockTarget(unsigned int* pTarget){
CUDA_SAFE_CALL(cudaGraphicsUnmapResources(1, &g_cudaScreenPixelBuffer, 0));
}
void reshapeCallback(int width, int height){
glViewport(0,0,width,height);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
}
void displayCallback()
{
g_renderCallback();
glClear(GL_COLOR_BUFFER_BIT);
glDisable(GL_DEPTH_TEST);
glRasterPos2i(0,0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, g_screenPixelBuffer);
glDrawPixels(g_width, g_height, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
glutSwapBuffers();
glutReportErrors();
}
void idleCallback(){
glutPostRedisplay();
}
void createBuffers(){
// All of this sets up the pixel buffer object
glGenBuffersARB(1, &g_screenPixelBuffer);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, g_screenPixelBuffer);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB,
g_width * g_height * sizeof(GLubyte) * 4,
0,
GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
// And this then allows CUDA/GL interop on that object
CUDA_SAFE_CALL(cudaGraphicsGLRegisterBuffer(&g_cudaScreenPixelBuffer, g_screenPixelBuffer, cudaGraphicsMapFlagsWriteDiscard));
}
void releaseBuffers(){
cudaGraphicsUnregisterResource(g_cudaScreenPixelBuffer);
glDeleteBuffersARB(1, &g_screenPixelBuffer);
}
|
cbe5412f8304d8279cfce68d895e80b5bbdb9c85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <thrust/device_vector.h>
namespace at {
namespace native {
namespace {
// The maximum block size in CUDA
constexpr int MAX_BLOCK_SIZE = 1024;
/* This code computes the sum of the weights in two-steps:
1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces`
2) Each partial-sum from 1) are summed and scatter into `grad_weight`
Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
#ifdef __HIP_PLATFORM_HCC__
constexpr int WARP_SIZE = 64;
#else
constexpr int WARP_SIZE = 32;
#endif
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
__global__
void krn_partials_per_segment(int64_t *ret, const int64_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
__global__
void krn_partial_segment_offset(
int64_t *ret,
const int64_t *partials_per_segment,
const int64_t *partials_per_segment_offset,
const int64_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
int64_t idx = partials_per_segment_offset[id];
const int64_t num_partials = partials_per_segment[id];
const int64_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t>
__global__ void compute_grad_weight_bags(
int64_t *indices, scalar_t *gradOutput,
int64_t *offset2bag, int64_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const int64_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
int64_t* segment_offsets, int64_t num_of_segments, scalar_t *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t>
__global__ void compute_grad_weight(
int64_t *indices,
scalar_t *gradOutput,
int64_t *count,
ptrdiff_t numel,
int64_t stride,
int64_t* segment_offsets,
int64_t num_of_segments,
scalar_t *grad_weight_per_segment,
int padding_idx,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
if (idx_begin == padding_idx) {
return;
}
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int64_t target_row = indices[idx];
if (target_row != padding_idx) {
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t>
__global__ void sum_and_scatter(
int64_t *input, scalar_t *gradWeight, int64_t stride,
int64_t* segment_offsets, int64_t num_of_segments, const scalar_t *grad_weight_per_segment,
const int64_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
const int weightRow = input[segment_offsets[id]] * stride;
gradWeight[weightRow + startFeature] = weight;
}
} // anon namespace
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool scale_grad_by_freq,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
thrust::device_vector<int64_t> segment_offsets(numel);
int64_t num_of_segments;
{
auto sorted_indices_dev = thrust::device_ptr<int64_t>(sorted_indices.data<int64_t>());
auto dummy = at::empty_like(sorted_indices);
auto dummy_dev = thrust::device_ptr<int64_t>(dummy.data<int64_t>());
auto ends = thrust::unique_by_key_copy(
policy,
sorted_indices_dev,
sorted_indices_dev + numel,
thrust::make_counting_iterator(0),
dummy_dev,
thrust::raw_pointer_cast(segment_offsets.data()));
num_of_segments = thrust::get<0>(ends) - dummy_dev;
}
// We split the segments up into sizes of `NROWS_PER_THREAD`
// Compute the number partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
thrust::device_vector<int64_t> partials_per_segment(num_of_segments);
{
hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
thrust::raw_pointer_cast(partials_per_segment.data()),
thrust::raw_pointer_cast(segment_offsets.data()),
num_of_segments,
numel);
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
thrust::device_vector<int64_t> partials_per_segment_offset(num_of_segments);
thrust::exclusive_scan(
policy,
partials_per_segment.begin(),
partials_per_segment.end(),
partials_per_segment_offset.begin());
// The total number of partial-segments is the sum of `partials_per_segment_offset`
const int num_of_partial_segments = partials_per_segment[num_of_segments-1] +
partials_per_segment_offset[num_of_segments-1];
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
thrust::device_vector<int64_t> partial_segment_offset(num_of_partial_segments);
{
hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream,
thrust::raw_pointer_cast(partial_segment_offset.data()),
thrust::raw_pointer_cast(partials_per_segment.data()),
thrust::raw_pointer_cast(partials_per_segment_offset.data()),
thrust::raw_pointer_cast(segment_offsets.data()),
num_of_segments);
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, grad.options());
const int stride_warped = ceil_div(stride, WARP_SIZE)*WARP_SIZE;
const int block = ::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
offset2bag.data<int64_t>(),
count.defined() ? count.data<int64_t>() : nullptr, numel, stride,
mode_mean, bag_size.data<int64_t>(),
per_sample_weights.defined() ? per_sample_weights.data<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
thrust::raw_pointer_cast(partial_segment_offset.data()),
num_of_partial_segments, grad_weight_per_segment.data<scalar_t>(),
stride_warped);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream,
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
numel, stride,
thrust::raw_pointer_cast(partial_segment_offset.data()),
num_of_partial_segments,
grad_weight_per_segment.data<scalar_t>(),
padding_idx,
stride_warped);
});
}
THCudaCheck(hipGetLastError());
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_sum_and_scatter", [&] {
hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream,
sorted_indices.data<int64_t>(),
grad_weight.data<scalar_t>(),
stride,
thrust::raw_pointer_cast(segment_offsets.data()),
num_of_segments, grad_weight_per_segment.data<scalar_t>(),
thrust::raw_pointer_cast(partials_per_segment_offset.data()),
num_of_partial_segments, stride_warped);
});
THCudaCheck(hipGetLastError());
return grad_weight;
}
}}
| cbe5412f8304d8279cfce68d895e80b5bbdb9c85.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <thrust/device_vector.h>
namespace at {
namespace native {
namespace {
// The maximum block size in CUDA
constexpr int MAX_BLOCK_SIZE = 1024;
/* This code computes the sum of the weights in two-steps:
1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces`
2) Each partial-sum from 1) are summed and scatter into `grad_weight`
Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the
kernel execution. If it is high, the size of the thread blocks will be
too small to achieve good occupancy. Similarly, a very low value will
make the size of the thread blocks in the final sum in step 2) too small.
*/
constexpr int NROWS_PER_THREAD = 10;
#ifdef __HIP_PLATFORM_HCC__
constexpr int WARP_SIZE = 64;
#else
constexpr int WARP_SIZE = 32;
#endif
// Fast ceil division (no overflow checking)
__host__ __device__ __forceinline__
int64_t ceil_div(int64_t x, int64_t y) {
return (x + y - 1) / y;
}
__global__
void krn_partials_per_segment(int64_t *ret, const int64_t *segment_offsets,
int64_t num_of_segments, int64_t numel) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
const int64_t idx_start = segment_offsets[id];
const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
const int64_t size = idx_end - idx_start;
ret[id] = ceil_div(size, NROWS_PER_THREAD);
}
}
__global__
void krn_partial_segment_offset(
int64_t *ret,
const int64_t *partials_per_segment,
const int64_t *partials_per_segment_offset,
const int64_t *segment_offsets,
int64_t num_of_segments) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id < num_of_segments) {
int64_t idx = partials_per_segment_offset[id];
const int64_t num_partials = partials_per_segment[id];
const int64_t segment_offset = segment_offsets[id];
for (int64_t i=0; i<num_partials; ++i) {
ret[idx++] = segment_offset + i * NROWS_PER_THREAD;
}
}
}
template <typename scalar_t>
__global__ void compute_grad_weight_bags(
int64_t *indices, scalar_t *gradOutput,
int64_t *offset2bag, int64_t *count, ptrdiff_t numel,
int64_t stride, int mode_mean, const int64_t *bag_size,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride,
int64_t* segment_offsets, int64_t num_of_segments, scalar_t *grad_weight_per_segment,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int origRow = indices[idx];
const int seq_number = offset2bag[origRow];
const int gradOutputRow = seq_number * stride;
acc_type<scalar_t, true> scale = count ? 1.0 / count[idx] : 1.0;
if (per_sample_weights) {
scale *= per_sample_weights[origRow * per_sample_weights_stride];
}
acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature];
if (mode_mean) {
gradient /= bag_size[seq_number];
}
weight += gradient * scale;
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
template <typename scalar_t>
__global__ void compute_grad_weight(
int64_t *indices,
scalar_t *gradOutput,
int64_t *count,
ptrdiff_t numel,
int64_t stride,
int64_t* segment_offsets,
int64_t num_of_segments,
scalar_t *grad_weight_per_segment,
int padding_idx,
const int64_t stride_warped) {
using accscalar_t = acc_type<scalar_t, true>;
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_offsets[id];
const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1];
if (idx_begin == padding_idx) {
return;
}
accscalar_t weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
const int64_t target_row = indices[idx];
if (target_row != padding_idx) {
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
weight += gradOutput[target_row * stride + startFeature] * scale;
}
}
grad_weight_per_segment[id * stride + startFeature] = weight;
}
// This kernel assumes that all input tensors are contiguous.
template <typename scalar_t>
__global__ void sum_and_scatter(
int64_t *input, scalar_t *gradWeight, int64_t stride,
int64_t* segment_offsets, int64_t num_of_segments, const scalar_t *grad_weight_per_segment,
const int64_t *segment_sizes_offsets, int64_t num_of_partial_segments,
const int64_t stride_warped) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int id = gid / stride_warped;
const int startFeature = gid % stride_warped;
if (startFeature >= stride) {
return;
}
if (id >= num_of_segments) {
return;
}
const int idx_begin = segment_sizes_offsets[id];
const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1];
acc_type<scalar_t, true> weight = 0;
for (int idx=idx_begin; idx < idx_end; ++idx) {
weight += grad_weight_per_segment[idx*stride + startFeature];
}
const int weightRow = input[segment_offsets[id]] * stride;
gradWeight[weightRow + startFeature] = weight;
}
} // anon namespace
Tensor embedding_backward_cuda_kernel(
const Tensor &grad,
const Tensor &orig_indices,
const Tensor &sorted_indices,
const Tensor &count,
int64_t num_weights,
int padding_idx,
bool scale_grad_by_freq,
bool mode_mean,
const Tensor &offset2bag,
const Tensor &bag_size,
const Tensor &per_sample_weights) {
auto stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
const ptrdiff_t numel = sorted_indices.numel();
auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options());
const int64_t stride = grad_weight.stride(0);
// Compute the number of segments and their start position so that we do not have to
// spawn a warp per index. In this context, a segment is a number of rows that should
// be summarized.
// Unit: index in `sorted_indices` and `orig_indices`
thrust::device_vector<int64_t> segment_offsets(numel);
int64_t num_of_segments;
{
auto sorted_indices_dev = thrust::device_ptr<int64_t>(sorted_indices.data<int64_t>());
auto dummy = at::empty_like(sorted_indices);
auto dummy_dev = thrust::device_ptr<int64_t>(dummy.data<int64_t>());
auto ends = thrust::unique_by_key_copy(
policy,
sorted_indices_dev,
sorted_indices_dev + numel,
thrust::make_counting_iterator(0),
dummy_dev,
thrust::raw_pointer_cast(segment_offsets.data()));
num_of_segments = thrust::get<0>(ends) - dummy_dev;
}
// We split the segments up into sizes of `NROWS_PER_THREAD`
// Compute the number partial-segments per segment (some partial-segments
// may not be the full `NROWS_PER_THREAD` number of rows)
thrust::device_vector<int64_t> partials_per_segment(num_of_segments);
{
krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
thrust::raw_pointer_cast(partials_per_segment.data()),
thrust::raw_pointer_cast(segment_offsets.data()),
num_of_segments,
numel);
}
// In order to compute `partial_segment_offset`, which is the start index
// of each partial-segment in `sorted_indices`, we need to compute the
// start position of each _segment_ in `partial_segment_offset`.
// Unit: index in `partial_segment_offset`
thrust::device_vector<int64_t> partials_per_segment_offset(num_of_segments);
thrust::exclusive_scan(
policy,
partials_per_segment.begin(),
partials_per_segment.end(),
partials_per_segment_offset.begin());
// The total number of partial-segments is the sum of `partials_per_segment_offset`
const int num_of_partial_segments = partials_per_segment[num_of_segments-1] +
partials_per_segment_offset[num_of_segments-1];
// Now we can compute the start position of each partial-segment
// Unit: index in `sorted_indices` and `orig_indices`
thrust::device_vector<int64_t> partial_segment_offset(num_of_partial_segments);
{
krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> (
thrust::raw_pointer_cast(partial_segment_offset.data()),
thrust::raw_pointer_cast(partials_per_segment.data()),
thrust::raw_pointer_cast(partials_per_segment_offset.data()),
thrust::raw_pointer_cast(segment_offsets.data()),
num_of_segments);
}
auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, grad.options());
const int stride_warped = ceil_div(stride, WARP_SIZE)*WARP_SIZE;
const int block = std::min(stride_warped, MAX_BLOCK_SIZE);
const int grid = ceil_div(num_of_partial_segments*stride_warped, block);
// Compute the sum of each partial-segment and handle bags
if (offset2bag.defined()) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
offset2bag.data<int64_t>(),
count.defined() ? count.data<int64_t>() : nullptr, numel, stride,
mode_mean, bag_size.data<int64_t>(),
per_sample_weights.defined() ? per_sample_weights.data<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0,
thrust::raw_pointer_cast(partial_segment_offset.data()),
num_of_partial_segments, grad_weight_per_segment.data<scalar_t>(),
stride_warped);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] {
compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>(
orig_indices.data<int64_t>(),
grad.data<scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
numel, stride,
thrust::raw_pointer_cast(partial_segment_offset.data()),
num_of_partial_segments,
grad_weight_per_segment.data<scalar_t>(),
padding_idx,
stride_warped);
});
}
THCudaCheck(cudaGetLastError());
// Finally, we sum all the partial-sums and scatter them
// into `grad_weight`.
const int grid2 = ceil_div(num_of_segments*stride_warped, block);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_sum_and_scatter", [&] {
sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
grad_weight.data<scalar_t>(),
stride,
thrust::raw_pointer_cast(segment_offsets.data()),
num_of_segments, grad_weight_per_segment.data<scalar_t>(),
thrust::raw_pointer_cast(partials_per_segment_offset.data()),
num_of_partial_segments, stride_warped);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
}}
|
dd12b6c8e887b6b98b9abbefe43944d11efd2f52.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <time.h>
#include <sys/time.h>
#include <vector>
#include <string>
#include <iostream>
#include <gtest/gtest.h>
#include <utilities/legacy/error_utils.hpp>
#include <cuspatial/hausdorff.hpp>
#include <tests/utilities/legacy/column_wrapper.cuh>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
struct HausdorffToy : public GdfTest
{
};
TEST_F(HausdorffToy, hausdorfftest)
{
std::cout<<"in HausdorffToy"<<std::endl;
cudf::test::column_wrapper<double> point_x_wrapp{0,-8,6};
cudf::test::column_wrapper<double> point_y_wrapp{0,-8,6};
cudf::test::column_wrapper<uint32_t> cnt_wrapp{1,2};
gdf_column dist=cuspatial::directed_hausdorff_distance(
*(point_x_wrapp.get()), *(point_y_wrapp.get()),*(cnt_wrapp.get()));
double *h_dist=new double[dist.size];
CUDA_TRY(hipMemcpy(h_dist, dist.data, dist.size*sizeof(double), hipMemcpyDeviceToHost));
CUDF_EXPECTS(h_dist[0]==0&&h_dist[3]==0,"distance between the same trajectoriy pair should be 0");
std::cout<<"dist(0,1)="<<h_dist[1]<<std::endl;
std::cout<<"dist(1,0)="<<h_dist[2]<<std::endl;
delete[] h_dist;
}
| dd12b6c8e887b6b98b9abbefe43944d11efd2f52.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <time.h>
#include <sys/time.h>
#include <vector>
#include <string>
#include <iostream>
#include <gtest/gtest.h>
#include <utilities/legacy/error_utils.hpp>
#include <cuspatial/hausdorff.hpp>
#include <tests/utilities/legacy/column_wrapper.cuh>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
struct HausdorffToy : public GdfTest
{
};
TEST_F(HausdorffToy, hausdorfftest)
{
std::cout<<"in HausdorffToy"<<std::endl;
cudf::test::column_wrapper<double> point_x_wrapp{0,-8,6};
cudf::test::column_wrapper<double> point_y_wrapp{0,-8,6};
cudf::test::column_wrapper<uint32_t> cnt_wrapp{1,2};
gdf_column dist=cuspatial::directed_hausdorff_distance(
*(point_x_wrapp.get()), *(point_y_wrapp.get()),*(cnt_wrapp.get()));
double *h_dist=new double[dist.size];
CUDA_TRY(cudaMemcpy(h_dist, dist.data, dist.size*sizeof(double), cudaMemcpyDeviceToHost));
CUDF_EXPECTS(h_dist[0]==0&&h_dist[3]==0,"distance between the same trajectoriy pair should be 0");
std::cout<<"dist(0,1)="<<h_dist[1]<<std::endl;
std::cout<<"dist(1,0)="<<h_dist[2]<<std::endl;
delete[] h_dist;
}
|
e3689697c5cd319f58530af3774c2c258fbfb45f.hip | // !!! This is a file automatically generated by hipify!!!
#include "SimpleEigentrustGPU.h"
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <rocblas.h>
bool SimpleEigentrustGPU::hasConverged(double * trust_vec_next, double * trust_vec_orig)
{
std::vector<Peer>::size_type m = getPeers().size();
thrust::device_vector<double> d_v(m);
square<double> unary_op;
thrust::plus<double> binary_op;
double init = 0;
thrust::device_ptr<double> trust_vec_next_ptr = thrust::device_pointer_cast(trust_vec_next);
thrust::device_ptr<double> trust_vec_orig_ptr = thrust::device_pointer_cast(trust_vec_orig);
thrust::transform(trust_vec_next_ptr, trust_vec_next_ptr + m, trust_vec_orig_ptr, d_v.begin(), thrust::minus<double>());
double norm = std::sqrt(thrust::transform_reduce(d_v.begin(), d_v.end(), unary_op, init, binary_op));
return norm < getError() ? true : false;
}
void SimpleEigentrustGPU::computeEigentrust(double * C, double * e, double * y)
{
std::vector<Peer>::size_type m = getPeers().size();
double alpha = 1;
double beta = 0;
hipblasHandle_t handle;
hipblasCreate(&handle);
do{
hipblasDgemv(handle, HIPBLAS_OP_T, m, m, &alpha, C, m, e, 1, &beta, y, 1);
double * tmp = e;
e = y;
y = tmp;
} while (!hasConverged(e, y));
hipblasDestroy(handle);
setTrustValues(e);
}
void SimpleEigentrustGPU::setTrustValues(double * dev_trust_vector)
{
thrust::device_ptr<double> dev_trust_vec_ptr = thrust::device_pointer_cast(dev_trust_vector);
thrust::device_vector<double> d_trust_vector(dev_trust_vec_ptr, dev_trust_vec_ptr + getPeers().size());
thrust::host_vector<double> host_trust_vector = d_trust_vector;
for (auto i = getPeers().begin(); i != getPeers().end(); i++)
{
i->setTrustValue(*(host_trust_vector.begin() + i->getId()));
}
} | e3689697c5cd319f58530af3774c2c258fbfb45f.cu | #include "SimpleEigentrustGPU.h"
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <cublas_v2.h>
bool SimpleEigentrustGPU::hasConverged(double * trust_vec_next, double * trust_vec_orig)
{
std::vector<Peer>::size_type m = getPeers().size();
thrust::device_vector<double> d_v(m);
square<double> unary_op;
thrust::plus<double> binary_op;
double init = 0;
thrust::device_ptr<double> trust_vec_next_ptr = thrust::device_pointer_cast(trust_vec_next);
thrust::device_ptr<double> trust_vec_orig_ptr = thrust::device_pointer_cast(trust_vec_orig);
thrust::transform(trust_vec_next_ptr, trust_vec_next_ptr + m, trust_vec_orig_ptr, d_v.begin(), thrust::minus<double>());
double norm = std::sqrt(thrust::transform_reduce(d_v.begin(), d_v.end(), unary_op, init, binary_op));
return norm < getError() ? true : false;
}
void SimpleEigentrustGPU::computeEigentrust(double * C, double * e, double * y)
{
std::vector<Peer>::size_type m = getPeers().size();
double alpha = 1;
double beta = 0;
cublasHandle_t handle;
cublasCreate(&handle);
do{
cublasDgemv(handle, CUBLAS_OP_T, m, m, &alpha, C, m, e, 1, &beta, y, 1);
double * tmp = e;
e = y;
y = tmp;
} while (!hasConverged(e, y));
cublasDestroy(handle);
setTrustValues(e);
}
void SimpleEigentrustGPU::setTrustValues(double * dev_trust_vector)
{
thrust::device_ptr<double> dev_trust_vec_ptr = thrust::device_pointer_cast(dev_trust_vector);
thrust::device_vector<double> d_trust_vector(dev_trust_vec_ptr, dev_trust_vec_ptr + getPeers().size());
thrust::host_vector<double> host_trust_vector = d_trust_vector;
for (auto i = getPeers().begin(); i != getPeers().end(); i++)
{
i->setTrustValue(*(host_trust_vector.begin() + i->getId()));
}
} |
8c4465d05fad14731231081b358d60f97a2bbd66.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
int main( void ) {
hipDeviceProp_t prop;
int dev;
HANDLE_ERROR( hipGetDevice( &dev ) );
printf( "ID of current CUDA device: %d\n", dev );
memset( &prop, 0, sizeof( hipDeviceProp_t ) );
prop.major = 1;
prop.minor = 3;
HANDLE_ERROR( hipChooseDevice( &dev, &prop ) );
printf( "ID of CUDA device closest to revision 1.3: %d\n", dev );
HANDLE_ERROR( hipSetDevice( dev ) );
}
| 8c4465d05fad14731231081b358d60f97a2bbd66.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
int main( void ) {
cudaDeviceProp prop;
int dev;
HANDLE_ERROR( cudaGetDevice( &dev ) );
printf( "ID of current CUDA device: %d\n", dev );
memset( &prop, 0, sizeof( cudaDeviceProp ) );
prop.major = 1;
prop.minor = 3;
HANDLE_ERROR( cudaChooseDevice( &dev, &prop ) );
printf( "ID of CUDA device closest to revision 1.3: %d\n", dev );
HANDLE_ERROR( cudaSetDevice( dev ) );
}
|
17241ece037dce70f35bfafad32699be3b3db84a.hip | // !!! This is a file automatically generated by hipify!!!
#include "gather_vector_kernel.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "yuzu/domain/analyses/ReducedNumericalModel.hpp"
#include "yuzu/domain/elements/ElementGeometry.hpp"
#include "yuzu/foundation/blas/ColumnVector.hpp"
#include "yuzu/foundation/memory/RelativePointer.hpp"
#include "yuzu/foundation/memory/pointer.hpp"
#include "yuzu/utils/kernel_utils.hpp"
#include "yuzu/common/gpu.hpp"
namespace afm = axis::foundation::memory;
namespace ay = axis::yuzu;
namespace ayda = axis::yuzu::domain::analyses;
namespace ayde = axis::yuzu::domain::elements;
namespace ayfb = axis::yuzu::foundation::blas;
namespace ayfm = axis::yuzu::foundation::memory;
__global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK)
RunGatherVectorKernel(uint64 numThreadsToUse,
uint64 startIndex, ayfm::RelativePointer vectorPtr,
ayfm::RelativePointer modelPtr)
{
using axis::yabsref;
using axis::yabsptr;
uint64 threadIndex =
ay::GetThreadIndex(gridDim, blockIdx, blockDim, threadIdx, startIndex);
if (!ay::IsActiveThread(threadIndex, numThreadsToUse)) return;
ayfb::ColumnVector& vector = yabsref<ayfb::ColumnVector>(vectorPtr);
const ayda::ReducedNumericalModel& model =
yabsref<ayda::ReducedNumericalModel>(modelPtr);
size_type nodeCount = model.GetNodeCount();
if (threadIndex >= nodeCount) return;
const ayde::Node& node = model.GetNode(threadIndex);
int elementCount = node.GetConnectedElementCount();
int dofCount = node.GetDofCount();
// clear entire vector
for (int dofIdx = 0; dofIdx < dofCount; ++dofIdx)
{
const ayde::DoF& dof = node.GetDoF(dofIdx);
vector(dof.GetId()) = 0;
}
for (int eIdx = 0; eIdx < elementCount; ++eIdx)
{
ayde::FiniteElement& e = node.GetConnectedElement(eIdx);
ayde::ElementGeometry& g = e.Geometry();
const real *elementBucket = model.GetElementOutputBucket(e.GetInternalId());
int localNodeIdx = g.GetNodeIndex(node);
for (int dofIdx = 0; dofIdx < dofCount; ++dofIdx)
{
const ayde::DoF& dof = node.GetDoF(dofIdx);
vector(dof.GetId()) += elementBucket[localNodeIdx*dofCount + dofIdx];
}
}
}
void axis::application::executors::gpu::commands::kernels::RunGatherVectorOnGPU(
uint64 numThreadsToUse, uint64 startIndex, const axis::Dimension3D& gridDim,
const axis::Dimension3D& blockDim, void * streamPtr,
afm::RelativePointer& vectorPtr, afm::RelativePointer& modelPtr )
{
dim3 grid, block;
grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z;
block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z;
hipLaunchKernelGGL(( RunGatherVectorKernel), dim3(grid), dim3(block), 0, (hipStream_t)streamPtr,
numThreadsToUse, startIndex,
reinterpret_cast<ayfm::RelativePointer&>(vectorPtr),
reinterpret_cast<ayfm::RelativePointer&>(modelPtr));
}
| 17241ece037dce70f35bfafad32699be3b3db84a.cu | #include "gather_vector_kernel.hpp"
#include <cuda.h>
#include <cuda_runtime.h>
#include "yuzu/domain/analyses/ReducedNumericalModel.hpp"
#include "yuzu/domain/elements/ElementGeometry.hpp"
#include "yuzu/foundation/blas/ColumnVector.hpp"
#include "yuzu/foundation/memory/RelativePointer.hpp"
#include "yuzu/foundation/memory/pointer.hpp"
#include "yuzu/utils/kernel_utils.hpp"
#include "yuzu/common/gpu.hpp"
namespace afm = axis::foundation::memory;
namespace ay = axis::yuzu;
namespace ayda = axis::yuzu::domain::analyses;
namespace ayde = axis::yuzu::domain::elements;
namespace ayfb = axis::yuzu::foundation::blas;
namespace ayfm = axis::yuzu::foundation::memory;
__global__ void __launch_bounds__(AXIS_YUZU_MAX_THREADS_PER_BLOCK)
RunGatherVectorKernel(uint64 numThreadsToUse,
uint64 startIndex, ayfm::RelativePointer vectorPtr,
ayfm::RelativePointer modelPtr)
{
using axis::yabsref;
using axis::yabsptr;
uint64 threadIndex =
ay::GetThreadIndex(gridDim, blockIdx, blockDim, threadIdx, startIndex);
if (!ay::IsActiveThread(threadIndex, numThreadsToUse)) return;
ayfb::ColumnVector& vector = yabsref<ayfb::ColumnVector>(vectorPtr);
const ayda::ReducedNumericalModel& model =
yabsref<ayda::ReducedNumericalModel>(modelPtr);
size_type nodeCount = model.GetNodeCount();
if (threadIndex >= nodeCount) return;
const ayde::Node& node = model.GetNode(threadIndex);
int elementCount = node.GetConnectedElementCount();
int dofCount = node.GetDofCount();
// clear entire vector
for (int dofIdx = 0; dofIdx < dofCount; ++dofIdx)
{
const ayde::DoF& dof = node.GetDoF(dofIdx);
vector(dof.GetId()) = 0;
}
for (int eIdx = 0; eIdx < elementCount; ++eIdx)
{
ayde::FiniteElement& e = node.GetConnectedElement(eIdx);
ayde::ElementGeometry& g = e.Geometry();
const real *elementBucket = model.GetElementOutputBucket(e.GetInternalId());
int localNodeIdx = g.GetNodeIndex(node);
for (int dofIdx = 0; dofIdx < dofCount; ++dofIdx)
{
const ayde::DoF& dof = node.GetDoF(dofIdx);
vector(dof.GetId()) += elementBucket[localNodeIdx*dofCount + dofIdx];
}
}
}
void axis::application::executors::gpu::commands::kernels::RunGatherVectorOnGPU(
uint64 numThreadsToUse, uint64 startIndex, const axis::Dimension3D& gridDim,
const axis::Dimension3D& blockDim, void * streamPtr,
afm::RelativePointer& vectorPtr, afm::RelativePointer& modelPtr )
{
dim3 grid, block;
grid.x = gridDim.X; grid.y = gridDim.Y; grid.z = gridDim.Z;
block.x = blockDim.X; block.y = blockDim.Y; block.z = blockDim.Z;
RunGatherVectorKernel<<<grid, block, 0, (cudaStream_t)streamPtr>>>(
numThreadsToUse, startIndex,
reinterpret_cast<ayfm::RelativePointer&>(vectorPtr),
reinterpret_cast<ayfm::RelativePointer&>(modelPtr));
}
|
9002e9be2ad061c02792e2929f8f475564cc5e1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
__global__ void kernel(char *dbText, char *pattern, char *result, int patternLength)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
bool flag = false;
for (int j = 0; j < patternLength; j++){
if (dbText[i + j] != pattern[j]){
flag = true;
break;
}
}
if (flag){
result[i] = 0;
}
else{
result[i] = 1;
}
}
int main(){
return 0;
}
| 9002e9be2ad061c02792e2929f8f475564cc5e1b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
__global__ void kernel(char *dbText, char *pattern, char *result, int patternLength)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
bool flag = false;
for (int j = 0; j < patternLength; j++){
if (dbText[i + j] != pattern[j]){
flag = true;
break;
}
}
if (flag){
result[i] = 0;
}
else{
result[i] = 1;
}
}
int main(){
return 0;
}
|
d5a5b1624496c644402c4cd78ec0c63c38a6594b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
im2col_gpu(data_im, channels, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, 1, 1, data_col);
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
col2im_gpu(data_col, channels, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, 1, 1, data_im);
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
} // namespace caffe
| d5a5b1624496c644402c4cd78ec0c63c38a6594b.cu | #include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
Dtype* data_col) {
im2col_gpu(data_im, channels, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, 1, 1, data_col);
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, Dtype* data_im) {
col2im_gpu(data_col, channels, height, width, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, 1, 1, data_im);
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, double* data_im);
} // namespace caffe
|
e73ec5d8ebf9d4f9ee50b6d6ac234b540db0db17.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
struct GlobalConstants {
SceneName sceneName;
int numCircles;
float* position;
float* velocity;
float* color;
float* radius;
int imageWidth;
int imageHeight;
float* imageData;
};
// Global variable that is in scope, but read-only, for all cuda
// kernels. The __constant__ modifier designates this variable will
// be stored in special "constant" memory on the GPU. (we didn't talk
// about this type of memory in class, but constant memory is a fast
// place to put read-only variables).
__constant__ GlobalConstants cuConstRendererParams;
// read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene)
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// color ramp table needed for the color ramp lookup shader
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Clear the image, setting the image to the white-gray gradation that
// is used in the snowflake image
__global__ void kernelClearImageSnowflake() {
int imageX = blockIdx.x * blockDim.x + threadIdx.x;
int imageY = blockIdx.y * blockDim.y + threadIdx.y;
int width = cuConstRendererParams.imageWidth;
int height = cuConstRendererParams.imageHeight;
if (imageX >= width || imageY >= height)
return;
int offset = 4 * (imageY * width + imageX);
float shade = .4f + .45f * static_cast<float>(height-imageY) / height;
float4 value = make_float4(shade, shade, shade, 1.f);
// write to global memory: As an optimization, I use a float4
// store, that results in more efficient code than if I coded this
// up as four seperate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value;
}
// kernelClearImage -- (CUDA device code)
//
// Reset every pixel of the target image to the constant color (r,g,b,a).
__global__ void kernelClearImage(float r, float g, float b, float a) {

    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    // threads falling outside the image do nothing
    if (px >= w || py >= h)
        return;

    // one float4 store is more efficient than four scalar fp32 stores
    int offset = 4 * (py * w + px);
    *(float4*)(&cuConstRendererParams.imageData[offset]) = make_float4(r, g, b, a);
}
// kernelAdvanceFireWorks
//
// Advance the firework animation one time step.  Each thread updates
// one spark: sparks drift outward from their parent firework center
// and are respawned on the firework's rim once they travel farther
// than maxDist from that center.  Firework-center circles (the first
// NUM_FIREWORKS circles) are left untouched.
__global__ void kernelAdvanceFireWorks() {
    const float dt = 1.f / 60.f;
    const float pi = 3.14159;
    const float maxDist = 0.25f;   // spark respawn distance (normalized coords)

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;
    float* radius = cuConstRendererParams.radius;

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;
    if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
        return;
    }

    // determine the fire-work center/spark indices
    int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;   // parent firework index
    int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;  // spark index within firework

    int index3i = 3 * fIdx;                            // float3 offset of the center
    int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
    int index3j = 3 * sIdx;                            // float3 offset of this spark

    float cx = position[index3i];
    float cy = position[index3i+1];

    // advance the spark along its velocity
    position[index3j] += velocity[index3j] * dt;
    position[index3j+1] += velocity[index3j+1] * dt;

    // spark position after the update
    float sx = position[index3j];
    float sy = position[index3j+1];

    // vector from the firework center to the spark
    float cxsx = sx - cx;
    float cysy = sy - cy;

    // distance of the spark from its firework
    float dist = sqrt(cxsx * cxsx + cysy * cysy);
    if (dist > maxDist) { // restore to starting position
        // deterministic starting position on the fire-work's rim,
        // evenly spaced by spark index
        float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
        float sinA = sin(angle);
        float cosA = cos(angle);
        float x = cosA * radius[fIdx];
        float y = sinA * radius[fIdx];

        position[index3j] = position[index3i] + x;
        position[index3j+1] = position[index3i+1] + y;
        position[index3j+2] = 0.0f;

        // travel radially outward at scaled unit speed
        velocity[index3j] = cosA/5.0;
        velocity[index3j+1] = sinA/5.0;
        velocity[index3j+2] = 0.0f;
    }
}
// kernelAdvanceHypnosis
//
// Grow each circle's radius a little every frame; once a circle's
// radius exceeds the cutoff it snaps back to a small starting radius.
__global__ void kernelAdvanceHypnosis() {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= cuConstRendererParams.numCircles)
        return;

    const float kCutoff = 0.5f;          // radius at which a circle resets
    float* rad = cuConstRendererParams.radius;

    if (rad[i] > kCutoff)
        rad[i] = 0.02f;                  // snap back to the small starting radius
    else
        rad[i] += 0.01f;                 // otherwise keep growing
}
// kernelAdvanceBouncingBalls
//
// Advance the bouncing-ball animation one time step.  Each thread
// updates one ball's vertical position and velocity under gravity,
// with a lossy bounce at y == 0.  Once a ball has settled (changes in
// position and velocity fall below epsilon) it is pinned at rest.
__global__ void kernelAdvanceBouncingBalls() {
    const float dt = 1.f / 60.f;
    const float kGravity = -2.8f; // sorry Newton
    const float kDragCoeff = -0.8f; // negative: reverses and damps velocity on bounce
    const float epsilon = 0.001f;   // settle threshold

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;

    int index3 = 3 * index;
    // snapshot the y components before updating
    float oldVelocity = velocity[index3+1];
    float oldPosition = position[index3+1];

    if (oldVelocity == 0.f && oldPosition == 0.f) { // ball already at rest
        return;
    }

    if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce: reverse and damp
        velocity[index3+1] *= kDragCoeff;
    }

    // update velocity: v = u + at (only along y-axis)
    velocity[index3+1] += kGravity * dt;

    // update positions (only along y-axis)
    position[index3+1] += velocity[index3+1] * dt;

    if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
        && oldPosition < 0.0f
        && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
        velocity[index3+1] = 0.f;
        position[index3+1] = 0.f;
    }
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step.  Each thread
// updates one flake's position and velocity under gravity, drag, and
// procedural noise.  A flake that leaves the left, right, or bottom
// edge of the screen is respawned above the top edge with a
// pseudorandom horizontal position and velocity.
__global__ void kernelAdvanceSnowflake() {

    int index = blockIdx.x * blockDim.x + threadIdx.x;

    if (index >= cuConstRendererParams.numCircles)
        return;

    const float dt = 1.f / 60.f;
    const float kGravity = -1.8f; // sorry Newton
    const float kDragCoeff = 2.f;

    int index3 = 3 * index;

    float* positionPtr = &cuConstRendererParams.position[index3];
    float* velocityPtr = &cuConstRendererParams.velocity[index3];

    // loads from global memory (single float3 loads for efficiency)
    float3 position = *((float3*)positionPtr);
    float3 velocity = *((float3*)velocityPtr);

    // hack to make farther circles move more slowly, giving the
    // illusion of parallax
    float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp

    // add some noise to the motion to make the snow flutter
    float3 noiseInput;
    noiseInput.x = 10.f * position.x;
    noiseInput.y = 10.f * position.y;
    noiseInput.z = 255.f * position.z;
    float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
    noiseForce.x *= 7.5f;
    noiseForce.y *= 5.f;

    // drag opposes the current velocity
    float2 dragForce;
    dragForce.x = -1.f * kDragCoeff * velocity.x;
    dragForce.y = -1.f * kDragCoeff * velocity.y;

    // update positions
    position.x += velocity.x * dt;
    position.y += velocity.y * dt;

    // update velocities
    // NOTE(review): the x update uses dragForce.y rather than
    // dragForce.x -- looks like a typo, but it is kept as-is because
    // changing it would alter the rendered animation; confirm against
    // the reference renderer before fixing.
    velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
    velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;

    float radius = cuConstRendererParams.radius[index];

    // if the snowflake has moved off the left, right or bottom of
    // the screen, place it back at the top and give it a
    // pseudorandom x position and velocity.
    if ( (position.y + radius < 0.f) ||
         (position.x + radius) < -0.f ||
         (position.x - radius) > 1.f)
    {
        noiseInput.x = 255.f * position.x;
        noiseInput.y = 255.f * position.y;
        noiseInput.z = 255.f * position.z;
        noiseForce = cudaVec2CellNoise(noiseInput, index);

        position.x = .5f + .5f * noiseForce.x;
        position.y = 1.35f + radius;

        // restart from 0 vertical velocity. Choose a
        // pseudo-random horizontal velocity.
        velocity.x = 2.f * noiseForce.y;
        velocity.y = 0.f;
    }

    // store updated positions and velocities to global memory
    *((float3*)positionPtr) = position;
    *((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// Compute the contribution of circle `circleIndex` to the pixel whose
// normalized center is `pixelCenter`, and blend it into the image at
// `imagePtr`.  `p` is the circle's center (p.z is its depth, used only
// by the snowflake shading path).  Called from kernelRenderCircles().
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {

    float diffX = p.x - pixelCenter.x;
    float diffY = p.y - pixelCenter.y;
    float pixelDist = diffX * diffX + diffY * diffY;   // squared distance

    float rad = cuConstRendererParams.radius[circleIndex];
    float maxDist = rad * rad;

    // circle does not contribute to this pixel
    if (pixelDist > maxDist)
        return;

    float3 rgb;
    float alpha;

    // Snowflake scenes shade by distance from the circle center and by
    // depth; every other scene uses the circle's stored color with a
    // fixed 50% alpha.  This conditional evaluates the same way for
    // all threads, so its divergence cost is low.
    if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {

        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;

        // use explicit single-precision math (sqrtf/expf) to avoid any
        // accidental double-precision promotion
        float normPixelDist = sqrtf(pixelDist) / rad;
        rgb = lookupColor(normPixelDist);

        float maxAlpha = .6f + .4f * (1.f-p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
        alpha = maxAlpha * expf(-1.f * falloffScale * normPixelDist * normPixelDist);

    } else {
        // simple: each circle has an assigned color
        int index3 = 3 * circleIndex;
        rgb = *(float3*)&(cuConstRendererParams.color[index3]);
        alpha = .5f;
    }

    float oneMinusAlpha = 1.f - alpha;

    // BEGIN SHOULD-BE-ATOMIC REGION
    // This read-modify-write of the framebuffer is not atomic: if two
    // threads shade the same pixel concurrently the blend is racy.
    float4 existingColor = *imagePtr;
    float4 newColor;
    newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
    newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
    newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
    newColor.w = alpha + existingColor.w;

    // global memory write
    *imagePtr = newColor;
    // END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Render circle `index` into the tile of the image owned by this
// thread block.  The image is partitioned into a gridDim.x x gridDim.y
// grid of tiles; a block first conservatively tests the circle against
// its tile's bounding box and exits early when the circle cannot
// overlap it.  Otherwise the block's threads stride over the tile's
// pixels and blend the circle in via shadePixel().
//
// NOTE(review): screenMinX/screenMinY/screenMaxX/screenMaxY are
// accepted but never used here -- presumably leftovers from an earlier
// per-circle bounding-box approach; confirm before removing them.
// NOTE(review): each tile covers imageWidth/gridDim.x (integer
// division) columns, so rightmost/bottom pixels are skipped when the
// image size is not a multiple of the grid size -- verify intent.
// Framebuffer updates are not synchronized, so overlapping circles
// rendered concurrently may blend incorrectly.
__global__ void kernelRenderCircles(int index, int imageWidth, int imageHeight, int screenMinX, int screenMinY, int screenMaxX, int screenMaxY) {
    //TODO: convert short to int
    float invWidth = 1.f / imageWidth;
    float invHeight = 1.f / imageHeight;
    int index3 = 3 * index;
    float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
    float rad = cuConstRendererParams.radius[index];

    // conservative circle-vs-tile test; tile bounds are computed in
    // normalized [0,1] coordinates from the block indices
    int circleInBox = circleInBoxConservative(p.x, p.y, rad,
        static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
        static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));

    if(circleInBox == 0) { return; }

    // threads of the block cooperatively cover the tile's pixels
    for(int tid_x = threadIdx.x ; tid_x < (imageWidth/gridDim.x); tid_x +=blockDim.x) {
        for(int tid_y = threadIdx.y ; tid_y < (imageHeight/gridDim.y); tid_y +=blockDim.y) {
            int x = blockIdx.x*(imageWidth/gridDim.x) + tid_x;
            int y = blockIdx.y*(imageHeight/gridDim.y) + tid_y;
            float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
            float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                                 invHeight * (static_cast<float>(y) + 0.5f));
            shadePixel(index, pixelCenterNorm, p, imgPtr);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Construct an empty renderer: no image, no circles, and all host and
// device buffer pointers cleared.  Real initialization happens later
// in allocOutputImage(), loadScene(), and setup().
CudaRenderer::CudaRenderer() {
    numCircles = 0;
    image = NULL;

    // host-side scene buffers
    position = NULL;
    velocity = NULL;
    color    = NULL;
    radius   = NULL;

    // device-side mirrors of the scene buffers
    cudaDevicePosition  = NULL;
    cudaDeviceVelocity  = NULL;
    cudaDeviceColor     = NULL;
    cudaDeviceRadius    = NULL;
    cudaDeviceImageData = NULL;
}
// Release the host- and device-side buffers owned by the renderer.
CudaRenderer::~CudaRenderer() {

    delete image;   // deleting a NULL pointer is a harmless no-op

    // the host scene arrays are allocated together, so one check suffices
    if (position != NULL) {
        delete [] position;
        delete [] velocity;
        delete [] color;
        delete [] radius;
    }

    // likewise for the device buffers allocated in setup()
    if (cudaDevicePosition != NULL) {
        hipFree(cudaDevicePosition);
        hipFree(cudaDeviceVelocity);
        hipFree(cudaDeviceColor);
        hipFree(cudaDeviceRadius);
        hipFree(cudaDeviceImageData);
    }
}
// getImage --
//
// Copy the rendered frame from device memory into the host-side Image
// object, then hand that Image to the caller.
const Image*
CudaRenderer::getImage() {

    printf("Copying image data from device\n");

    size_t imageBytes = sizeof(float) * 4 * image->width * image->height;
    hipMemcpy(image->data, cudaDeviceImageData, imageBytes, hipMemcpyDeviceToHost);

    return image;
}
// loadScene --
//
// Record the scene to render and load its circle data (positions,
// velocities, colors, radii) into host memory.  The device-side
// copies are made later, in setup().
void
CudaRenderer::loadScene(SceneName scene) {
    sceneName = scene;
    loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
// setup --
//
// One-time GPU initialization: print the available CUDA devices,
// allocate device buffers for the (already loaded) scene, copy the
// host scene data over, and publish scene parameters plus noise and
// color-ramp lookup tables to constant memory.
void
CudaRenderer::setup() {

    int deviceCount = 0;
    std::string name;
    hipError_t err = hipGetDeviceCount(&deviceCount);
    // previously the error code was stored but never inspected
    if (err != hipSuccess) {
        fprintf(stderr, "Error querying CUDA device count: %s\n",
                hipGetErrorString(err));
    }

    printf("---------------------------------------------------------\n");
    printf("Initializing CUDA for CudaRenderer\n");
    printf("Found %d CUDA devices\n", deviceCount);

    for (int i=0; i<deviceCount; i++) {
        hipDeviceProp_t deviceProps;
        hipGetDeviceProperties(&deviceProps, i);
        name = deviceProps.name;

        printf("Device %d: %s\n", i, deviceProps.name);
        printf(" SMs: %d\n", deviceProps.multiProcessorCount);
        printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
    }
    printf("---------------------------------------------------------\n");

    // By this time the scene should be loaded.  Copy all the key data
    // structures into device memory so they are accessible to CUDA
    // kernels.
    hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
    hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
    hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
    hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
    hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);

    hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
    hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
    hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice);
    hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice);

    // Publish the scene parameters to constant memory.  Constant
    // memory is a cached, read-only region that is fast when all
    // threads read the same address; global memory would also work.
    GlobalConstants params;
    params.sceneName = sceneName;
    params.numCircles = numCircles;
    params.imageWidth = image->width;
    params.imageHeight = image->height;
    params.position = cudaDevicePosition;
    params.velocity = cudaDeviceVelocity;
    params.color = cudaDeviceColor;
    params.radius = cudaDeviceRadius;
    params.imageData = cudaDeviceImageData;

    // note: fixes a mangled "&params" token in the previous version
    hipMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants));

    // Copy over the noise lookup tables so noise can be evaluated on
    // the GPU (used by the snowflake animation).
    int* permX;
    int* permY;
    float* value1D;
    getNoiseTables(&permX, &permY, &value1D);
    hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
    hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
    hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);

    // Last, copy over the color table used by the shading function
    // for circles in the snowflake demo.
    float lookupTable[COLOR_MAP_SIZE][3] = {
        {1.f, 1.f, 1.f},
        {1.f, 1.f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, 0.8f, 1.f},
    };

    hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// (Re)allocate the image the renderer draws into, releasing any
// previously allocated image first to avoid a memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {

    delete image;   // safe when image is still NULL
    image = new Image(width, height);
}
// clearImage --
//
// Clear the renderer's target image.  Snowflake scenes clear to the
// white-gray gradient; every other scene clears to opaque white.
// Blocks until the device has finished the clear.
void
CudaRenderer::clearImage() {

    // 16x16 = 256 threads per block is a healthy number
    dim3 blockDim(16, 16, 1);
    dim3 gridDim(
        (image->width + blockDim.x - 1) / blockDim.x,
        (image->height + blockDim.y - 1) / blockDim.y);

    if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
        hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else {
        hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f);
    }

    // wait for the clear to complete before returning
    hipDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step by launching the per-scene
// update kernel (one thread per circle).  Scenes without an entry
// below are static.  Blocks until the update completes.
void
CudaRenderer::advanceAnimation() {
     // 256 threads per block is a healthy number
    dim3 blockDim(256, 1);
    dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);

    // dispatch to the animation kernel for the active scene
    if (sceneName == SNOWFLAKES) {
        hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == BOUNCING_BALLS) {
        hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == HYPNOSIS) {
        hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, );
    } else if (sceneName == FIREWORKS) {
        hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, );
    }
    // wait for the update to complete before returning
    hipDeviceSynchronize();
}
// render --
//
// Draw every circle into the output image.  One kernel is launched
// per circle; each launch partitions the image into an 8x8 grid of
// tiles, and blocks whose tile the circle cannot touch exit
// immediately (see kernelRenderCircles).
void
CudaRenderer::render() {

    for (int i = 0; i < numCircles; i++) {

        // read position and radius
        int index3 = 3 * i;
        float3 p = *(float3*)(&position[index3]);
        float rad = radius[i];

        // compute the bounding box of the circle in integer screen
        // coordinates, clamped to the screen edges.  NOTE: the kernel
        // currently ignores these bounds; they are kept so the kernel
        // interface is unchanged.
        short imageWidth = image->width;
        short imageHeight = image->height;
        short minX = static_cast<short>(imageWidth * (p.x - rad));
        short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1;
        short minY = static_cast<short>(imageHeight * (p.y - rad));
        short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1;

        short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
        short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
        short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
        short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;

        dim3 blockDim(16, 16);
        dim3 gridDim(8, 8); // each block works on one tile of the image

        hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, i, imageWidth, imageHeight, screenMinX, screenMinY, screenMaxX, screenMaxY);
    }

    // Launches issued to the same (default) stream execute in order,
    // so one synchronize after the loop suffices and avoids a host /
    // device round trip per circle.  (Also removed a leftover debug
    // printf that ran once per circle.)
    gpuErrchk(hipDeviceSynchronize());
}
| e73ec5d8ebf9d4f9ee50b6d6ac234b540db0db17.cu | #include <string>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "circleBoxTest.cu_inl"
#include "cudaRenderer.h"
#include "image.h"
#include "noise.h"
#include "sceneLoader.h"
#include "util.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic (and, by default, exit) when a CUDA API
// call returns an error.  Use via the gpuErrchk() macro above so the
// file and line of the failing call are reported.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////

// Scene parameters shared with every kernel through constant memory.
// All pointers refer to device (global-memory) buffers allocated in
// CudaRenderer::setup().
struct GlobalConstants {

    SceneName sceneName;   // which scene is being rendered

    int numCircles;        // number of circles in the scene
    float* position;       // 3 floats per circle (x, y, depth)
    float* velocity;       // 3 floats per circle
    float* color;          // 3 floats per circle (r, g, b)
    float* radius;         // 1 float per circle

    int imageWidth;        // framebuffer dimensions, in pixels
    int imageHeight;
    float* imageData;      // 4 floats (RGBA) per pixel
};
// Scene parameters, in scope (read-only) for all CUDA kernels in this
// file.  The __constant__ qualifier places them in the GPU's cached,
// broadcast-friendly constant memory; they are written from the host
// in CudaRenderer::setup().
__constant__ GlobalConstants cuConstRendererParams;
// Read-only lookup tables used to quickly compute noise (needed by
// advanceAnimation for the snowflake scene).
__constant__ int cuConstNoiseYPermutationTable[256];
__constant__ int cuConstNoiseXPermutationTable[256];
__constant__ float cuConstNoise1DValueTable[256];
// Color ramp table needed for the color ramp lookup shader.
#define COLOR_MAP_SIZE 5
__constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3];
// including parts of the CUDA code from external files to keep this
// file simpler and to seperate code that should not be modified
#include "noiseCuda.cu_inl"
#include "lookupColor.cu_inl"
// kernelClearImageSnowflake -- (CUDA device code)
//
// Reset every pixel to the vertical white-to-gray gradient used as the
// background of the snowflake scenes.  One thread handles one pixel.
__global__ void kernelClearImageSnowflake() {

    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    // threads falling outside the image do nothing
    if (px >= w || py >= h)
        return;

    // gradient: brighter toward the top of the image
    float shade = .4f + .45f * static_cast<float>(h - py) / h;

    // one float4 store is more efficient than four scalar fp32 stores
    int offset = 4 * (py * w + px);
    *(float4*)(&cuConstRendererParams.imageData[offset]) =
        make_float4(shade, shade, shade, 1.f);
}
// kernelClearImage -- (CUDA device code)
//
// Reset every pixel of the target image to the constant color (r,g,b,a).
__global__ void kernelClearImage(float r, float g, float b, float a) {

    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;

    int w = cuConstRendererParams.imageWidth;
    int h = cuConstRendererParams.imageHeight;

    // threads falling outside the image do nothing
    if (px >= w || py >= h)
        return;

    // one float4 store is more efficient than four scalar fp32 stores
    int offset = 4 * (py * w + px);
    *(float4*)(&cuConstRendererParams.imageData[offset]) = make_float4(r, g, b, a);
}
// kernelAdvanceFireWorks
//
// Advance the firework animation one time step.  Each thread updates
// one spark: sparks drift outward from their parent firework center
// and are respawned on the firework's rim once they travel farther
// than maxDist from that center.  Firework-center circles (the first
// NUM_FIREWORKS circles) are left untouched.
__global__ void kernelAdvanceFireWorks() {
    const float dt = 1.f / 60.f;
    const float pi = 3.14159;
    const float maxDist = 0.25f;   // spark respawn distance (normalized coords)

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;
    float* radius = cuConstRendererParams.radius;

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;
    if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update
        return;
    }

    // determine the fire-work center/spark indices
    int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS;   // parent firework index
    int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS;  // spark index within firework

    int index3i = 3 * fIdx;                            // float3 offset of the center
    int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx;
    int index3j = 3 * sIdx;                            // float3 offset of this spark

    float cx = position[index3i];
    float cy = position[index3i+1];

    // advance the spark along its velocity
    position[index3j] += velocity[index3j] * dt;
    position[index3j+1] += velocity[index3j+1] * dt;

    // spark position after the update
    float sx = position[index3j];
    float sy = position[index3j+1];

    // vector from the firework center to the spark
    float cxsx = sx - cx;
    float cysy = sy - cy;

    // distance of the spark from its firework
    float dist = sqrt(cxsx * cxsx + cysy * cysy);
    if (dist > maxDist) { // restore to starting position
        // deterministic starting position on the fire-work's rim,
        // evenly spaced by spark index
        float angle = (sfIdx * 2 * pi)/NUM_SPARKS;
        float sinA = sin(angle);
        float cosA = cos(angle);
        float x = cosA * radius[fIdx];
        float y = sinA * radius[fIdx];

        position[index3j] = position[index3i] + x;
        position[index3j+1] = position[index3i+1] + y;
        position[index3j+2] = 0.0f;

        // travel radially outward at scaled unit speed
        velocity[index3j] = cosA/5.0;
        velocity[index3j+1] = sinA/5.0;
        velocity[index3j+2] = 0.0f;
    }
}
// kernelAdvanceHypnosis
//
// Grow each circle's radius a little every frame; once a circle's
// radius exceeds the cutoff it snaps back to a small starting radius.
__global__ void kernelAdvanceHypnosis() {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= cuConstRendererParams.numCircles)
        return;

    const float kCutoff = 0.5f;          // radius at which a circle resets
    float* rad = cuConstRendererParams.radius;

    if (rad[i] > kCutoff)
        rad[i] = 0.02f;                  // snap back to the small starting radius
    else
        rad[i] += 0.01f;                 // otherwise keep growing
}
// kernelAdvanceBouncingBalls
//
// Advance the bouncing-ball animation one time step.  Each thread
// updates one ball's vertical position and velocity under gravity,
// with a lossy bounce at y == 0.  Once a ball has settled (changes in
// position and velocity fall below epsilon) it is pinned at rest.
__global__ void kernelAdvanceBouncingBalls() {
    const float dt = 1.f / 60.f;
    const float kGravity = -2.8f; // sorry Newton
    const float kDragCoeff = -0.8f; // negative: reverses and damps velocity on bounce
    const float epsilon = 0.001f;   // settle threshold

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= cuConstRendererParams.numCircles)
        return;

    float* velocity = cuConstRendererParams.velocity;
    float* position = cuConstRendererParams.position;

    int index3 = 3 * index;
    // snapshot the y components before updating
    float oldVelocity = velocity[index3+1];
    float oldPosition = position[index3+1];

    if (oldVelocity == 0.f && oldPosition == 0.f) { // ball already at rest
        return;
    }

    if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce: reverse and damp
        velocity[index3+1] *= kDragCoeff;
    }

    // update velocity: v = u + at (only along y-axis)
    velocity[index3+1] += kGravity * dt;

    // update positions (only along y-axis)
    position[index3+1] += velocity[index3+1] * dt;

    if (fabsf(velocity[index3+1] - oldVelocity) < epsilon
        && oldPosition < 0.0f
        && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball
        velocity[index3+1] = 0.f;
        position[index3+1] = 0.f;
    }
}
// kernelAdvanceSnowflake -- (CUDA device code)
//
// Move the snowflake animation forward one time step.  Each thread
// updates one flake's position and velocity under gravity, drag, and
// procedural noise.  A flake that leaves the left, right, or bottom
// edge of the screen is respawned above the top edge with a
// pseudorandom horizontal position and velocity.
__global__ void kernelAdvanceSnowflake() {

    int index = blockIdx.x * blockDim.x + threadIdx.x;

    if (index >= cuConstRendererParams.numCircles)
        return;

    const float dt = 1.f / 60.f;
    const float kGravity = -1.8f; // sorry Newton
    const float kDragCoeff = 2.f;

    int index3 = 3 * index;

    float* positionPtr = &cuConstRendererParams.position[index3];
    float* velocityPtr = &cuConstRendererParams.velocity[index3];

    // loads from global memory (single float3 loads for efficiency)
    float3 position = *((float3*)positionPtr);
    float3 velocity = *((float3*)velocityPtr);

    // hack to make farther circles move more slowly, giving the
    // illusion of parallax
    float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp

    // add some noise to the motion to make the snow flutter
    float3 noiseInput;
    noiseInput.x = 10.f * position.x;
    noiseInput.y = 10.f * position.y;
    noiseInput.z = 255.f * position.z;
    float2 noiseForce = cudaVec2CellNoise(noiseInput, index);
    noiseForce.x *= 7.5f;
    noiseForce.y *= 5.f;

    // drag opposes the current velocity
    float2 dragForce;
    dragForce.x = -1.f * kDragCoeff * velocity.x;
    dragForce.y = -1.f * kDragCoeff * velocity.y;

    // update positions
    position.x += velocity.x * dt;
    position.y += velocity.y * dt;

    // update velocities
    // NOTE(review): the x update uses dragForce.y rather than
    // dragForce.x -- looks like a typo, but it is kept as-is because
    // changing it would alter the rendered animation; confirm against
    // the reference renderer before fixing.
    velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt;
    velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt;

    float radius = cuConstRendererParams.radius[index];

    // if the snowflake has moved off the left, right or bottom of
    // the screen, place it back at the top and give it a
    // pseudorandom x position and velocity.
    if ( (position.y + radius < 0.f) ||
         (position.x + radius) < -0.f ||
         (position.x - radius) > 1.f)
    {
        noiseInput.x = 255.f * position.x;
        noiseInput.y = 255.f * position.y;
        noiseInput.z = 255.f * position.z;
        noiseForce = cudaVec2CellNoise(noiseInput, index);

        position.x = .5f + .5f * noiseForce.x;
        position.y = 1.35f + radius;

        // restart from 0 vertical velocity. Choose a
        // pseudo-random horizontal velocity.
        velocity.x = 2.f * noiseForce.y;
        velocity.y = 0.f;
    }

    // store updated positions and velocities to global memory
    *((float3*)positionPtr) = position;
    *((float3*)velocityPtr) = velocity;
}
// shadePixel -- (CUDA device code)
//
// Compute the contribution of circle `circleIndex` to the pixel whose
// normalized center is `pixelCenter`, and blend it into the image at
// `imagePtr`.  `p` is the circle's center (p.z is its depth, used only
// by the snowflake shading path).  Called from kernelRenderCircles().
__device__ __inline__ void
shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) {

    float diffX = p.x - pixelCenter.x;
    float diffY = p.y - pixelCenter.y;
    float pixelDist = diffX * diffX + diffY * diffY;   // squared distance

    float rad = cuConstRendererParams.radius[circleIndex];
    float maxDist = rad * rad;

    // circle does not contribute to this pixel
    if (pixelDist > maxDist)
        return;

    float3 rgb;
    float alpha;

    // Snowflake scenes shade by distance from the circle center and by
    // depth; every other scene uses the circle's stored color with a
    // fixed 50% alpha.  This conditional evaluates the same way for
    // all threads, so its divergence cost is low.
    if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) {

        const float kCircleMaxAlpha = .5f;
        const float falloffScale = 4.f;

        // use explicit single-precision math (sqrtf/expf) to avoid any
        // accidental double-precision promotion
        float normPixelDist = sqrtf(pixelDist) / rad;
        rgb = lookupColor(normPixelDist);

        float maxAlpha = .6f + .4f * (1.f-p.z);
        maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value
        alpha = maxAlpha * expf(-1.f * falloffScale * normPixelDist * normPixelDist);

    } else {
        // simple: each circle has an assigned color
        int index3 = 3 * circleIndex;
        rgb = *(float3*)&(cuConstRendererParams.color[index3]);
        alpha = .5f;
    }

    float oneMinusAlpha = 1.f - alpha;

    // BEGIN SHOULD-BE-ATOMIC REGION
    // This read-modify-write of the framebuffer is not atomic: if two
    // threads shade the same pixel concurrently the blend is racy.
    float4 existingColor = *imagePtr;
    float4 newColor;
    newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x;
    newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y;
    newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z;
    newColor.w = alpha + existingColor.w;

    // global memory write
    *imagePtr = newColor;
    // END SHOULD-BE-ATOMIC REGION
}
// kernelRenderCircles -- (CUDA device code)
//
// Render circle `index` into the tile of the image owned by this
// thread block.  The image is partitioned into a gridDim.x x gridDim.y
// grid of tiles; a block first conservatively tests the circle against
// its tile's bounding box and exits early when the circle cannot
// overlap it.  Otherwise the block's threads stride over the tile's
// pixels and blend the circle in via shadePixel().
//
// NOTE(review): screenMinX/screenMinY/screenMaxX/screenMaxY are
// accepted but never used here -- presumably leftovers from an earlier
// per-circle bounding-box approach; confirm before removing them.
// NOTE(review): each tile covers imageWidth/gridDim.x (integer
// division) columns, so rightmost/bottom pixels are skipped when the
// image size is not a multiple of the grid size -- verify intent.
// Framebuffer updates are not synchronized, so overlapping circles
// rendered concurrently may blend incorrectly.
__global__ void kernelRenderCircles(int index, int imageWidth, int imageHeight, int screenMinX, int screenMinY, int screenMaxX, int screenMaxY) {
    //TODO: convert short to int
    float invWidth = 1.f / imageWidth;
    float invHeight = 1.f / imageHeight;
    int index3 = 3 * index;
    float3 p = *(float3*)(&cuConstRendererParams.position[index3]);
    float rad = cuConstRendererParams.radius[index];

    // conservative circle-vs-tile test; tile bounds are computed in
    // normalized [0,1] coordinates from the block indices
    int circleInBox = circleInBoxConservative(p.x, p.y, rad,
        static_cast<float>(1.f/gridDim.x)*blockIdx.x, static_cast<float>(1.f/gridDim.x)*(blockIdx.x+1),
        static_cast<float>(1.f/gridDim.y)*(blockIdx.y+1), static_cast<float>(1.f/gridDim.y)*(blockIdx.y));

    if(circleInBox == 0) { return; }

    // threads of the block cooperatively cover the tile's pixels
    for(int tid_x = threadIdx.x ; tid_x < (imageWidth/gridDim.x); tid_x +=blockDim.x) {
        for(int tid_y = threadIdx.y ; tid_y < (imageHeight/gridDim.y); tid_y +=blockDim.y) {
            int x = blockIdx.x*(imageWidth/gridDim.x) + tid_x;
            int y = blockIdx.y*(imageHeight/gridDim.y) + tid_y;
            float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (y * imageWidth + x)]);
            float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(x) + 0.5f),
                                                 invHeight * (static_cast<float>(y) + 0.5f));
            shadePixel(index, pixelCenterNorm, p, imgPtr);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////
// Construct an empty renderer: no scene loaded and no buffers allocated.
// Every host- and device-side pointer starts as NULL so the destructor can
// unconditionally test-and-free.
CudaRenderer::CudaRenderer()
    : image(NULL),
      numCircles(0),
      position(NULL),
      velocity(NULL),
      color(NULL),
      radius(NULL),
      cudaDevicePosition(NULL),
      cudaDeviceVelocity(NULL),
      cudaDeviceColor(NULL),
      cudaDeviceRadius(NULL),
      cudaDeviceImageData(NULL) {
}
// Destructor: release the output image, the host-side scene arrays, and the
// device-side buffers. One representative pointer is tested per group; this
// presumably holds because each group is allocated together (host arrays by
// loadCircleScene, device buffers by setup) -- verify if allocation sites
// change.
CudaRenderer::~CudaRenderer() {
    if (image) {
        delete image;
    }
    if (position) {
        delete [] position;
        delete [] velocity;
        delete [] color;
        delete [] radius;
    }
    if (cudaDevicePosition) {
        cudaFree(cudaDevicePosition);
        cudaFree(cudaDeviceVelocity);
        cudaFree(cudaDeviceColor);
        cudaFree(cudaDeviceRadius);
        cudaFree(cudaDeviceImageData);
    }
}
// Return the rendered image after copying its pixel data back from device
// memory. The blocking cudaMemcpy also serves as a synchronization point
// with any outstanding work on the default stream.
const Image*
CudaRenderer::getImage() {
    // need to copy contents of the rendered image from device memory
    // before we expose the Image object to the caller
    printf("Copying image data from device\n");
    cudaMemcpy(image->data,
               cudaDeviceImageData,
               sizeof(float) * 4 * image->width * image->height,   // RGBA floats
               cudaMemcpyDeviceToHost);
    return image;
}
// Load the requested scene into the host-side arrays (position, velocity,
// color, radius); loadCircleScene presumably also sets numCircles via its
// reference parameter -- confirm against its declaration. Device-side
// copies are made later, in setup().
void
CudaRenderer::loadScene(SceneName scene) {
    sceneName = scene;
    loadCircleScene(sceneName, numCircles, position, velocity, color, radius);
}
// One-time device initialization: report the available CUDA devices, copy
// the loaded scene into device global memory, and publish the renderer
// parameters plus the noise/color lookup tables to constant memory.
// Must be called after loadScene() and allocOutputImage().
void
CudaRenderer::setup() {
    int deviceCount = 0;
    std::string name;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);   // NOTE(review): err is never checked
    printf("---------------------------------------------------------\n");
    printf("Initializing CUDA for CudaRenderer\n");
    printf("Found %d CUDA devices\n", deviceCount);
    for (int i=0; i<deviceCount; i++) {
        cudaDeviceProp deviceProps;
        cudaGetDeviceProperties(&deviceProps, i);
        name = deviceProps.name;
        printf("Device %d: %s\n", i, deviceProps.name);
        printf(" SMs: %d\n", deviceProps.multiProcessorCount);
        printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
    }
    printf("---------------------------------------------------------\n");
    // By this time the scene should be loaded. Now copy all the key
    // data structures into device memory so they are accessible to
    // CUDA kernels
    //
    // See the CUDA Programmer's Guide for descriptions of
    // cudaMalloc and cudaMemcpy
    cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles);
    cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles);
    cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles);
    cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles);
    cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height);
    cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice);
    // Initialize parameters in constant memory. We didn't talk about
    // constant memory in class, but the use of read-only constant
    // memory here is an optimization over just sticking these values
    // in device global memory. NVIDIA GPUs have a few special tricks
    // for optimizing access to constant memory. Using global memory
    // here would have worked just as well. See the Programmer's
    // Guide for more information about constant memory.
    GlobalConstants params;
    params.sceneName = sceneName;
    params.numCircles = numCircles;
    params.imageWidth = image->width;
    params.imageHeight = image->height;
    params.position = cudaDevicePosition;
    params.velocity = cudaDeviceVelocity;
    params.color = cudaDeviceColor;
    params.radius = cudaDeviceRadius;
    params.imageData = cudaDeviceImageData;
    cudaMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants));
    // also need to copy over the noise lookup tables, so we can
    // implement noise on the GPU
    int* permX;
    int* permY;
    float* value1D;
    getNoiseTables(&permX, &permY, &value1D);
    cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256);
    cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256);
    cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256);
    // last, copy over the color table that's used by the shading
    // function for circles in the snowflake demo
    float lookupTable[COLOR_MAP_SIZE][3] = {
        {1.f, 1.f, 1.f},
        {1.f, 1.f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, .9f, 1.f},
        {.8f, 0.8f, 1.f},
    };
    cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE);
}
// allocOutputImage --
//
// Allocate buffer the renderer will render into. Check status of
// image first to avoid memory leak.
void
CudaRenderer::allocOutputImage(int width, int height) {
    // Release any previously allocated image before creating the new one;
    // `delete` on a NULL pointer is a no-op, so no guard is required
    // (the constructor initializes image to NULL).
    delete image;
    image = new Image(width, height);
}
// clearImage --
//
// Clear's the renderer's target image. The state of the image after
// the clear depends on the scene being rendered.
void
CudaRenderer::clearImage() {
    // 16x16 = 256 threads per block is a healthy number
    dim3 blockDim(16, 16, 1);
    // Ceil-divide so the grid also covers partial tiles at the image edges.
    dim3 gridDim(
        (image->width + blockDim.x - 1) / blockDim.x,
        (image->height + blockDim.y - 1) / blockDim.y);
    // Snowflake scenes use a dedicated clear kernel; every other scene
    // clears to opaque white (1,1,1,1).
    if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) {
        kernelClearImageSnowflake<<<gridDim, blockDim>>>();
    } else {
        kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f);
    }
    // Block until the clear has finished before the caller renders into it.
    cudaDeviceSynchronize();
}
// advanceAnimation --
//
// Advance the simulation one time step. Updates all circle positions
// and velocities
void
CudaRenderer::advanceAnimation() {
    // 256 threads per block is a healthy number
    dim3 blockDim(256, 1);
    // One thread per circle, rounded up.
    dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x);
    // Only the scenes listed below animate; anything else is static and
    // this call is a no-op apart from the synchronization.
    if (sceneName == SNOWFLAKES) {
        kernelAdvanceSnowflake<<<gridDim, blockDim>>>();
    } else if (sceneName == BOUNCING_BALLS) {
        kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>();
    } else if (sceneName == HYPNOSIS) {
        kernelAdvanceHypnosis<<<gridDim, blockDim>>>();
    } else if (sceneName == FIREWORKS) {
        kernelAdvanceFireWorks<<<gridDim, blockDim>>>();
    }
    cudaDeviceSynchronize();
}
// Render all circles into the output image, in circle order.
// Correctness requires circle i to be composited over circles 0..i-1, so
// each circle gets its own kernel launch followed by a device-wide
// synchronization before the next one starts (serial, but order-correct).
// Fixes: removed a leftover per-circle debug printf ("APARNA"); bounding-box
// arithmetic now uses int instead of short (resolving the kernel's TODO and
// avoiding short overflow for large images).
void
CudaRenderer::render() {
    for (int i = 0; i < numCircles; i++) {
        // read position and radius of circle i
        int index3 = 3 * i;
        float3 p = *(float3*)(&position[index3]);
        float rad = radius[i];
        // compute the bounding box of the circle. The bound is in integer
        // screen coordinates, so it's clamped to the edges of the screen.
        int imageWidth = image->width;
        int imageHeight = image->height;
        int minX = static_cast<int>(imageWidth * (p.x - rad));
        int maxX = static_cast<int>(imageWidth * (p.x + rad)) + 1;
        int minY = static_cast<int>(imageHeight * (p.y - rad));
        int maxY = static_cast<int>(imageHeight * (p.y + rad)) + 1;
        // clamp to [0, imageWidth] and [0, imageHeight]
        int screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0;
        int screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0;
        int screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0;
        int screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? maxY : imageHeight) : 0;
        // 16x16 threads per block; an 8x8 grid where each block owns one
        // tile of the image (the kernel early-outs on tiles the circle
        // cannot touch).
        dim3 blockDim(16, 16);
        dim3 gridDim(8, 8);
        kernelRenderCircles<<<gridDim, blockDim>>>(i, imageWidth, imageHeight, screenMinX, screenMinY, screenMaxX, screenMaxY);
        gpuErrchk(cudaDeviceSynchronize());
    }
}
|
93937d4102e05dd23a71cf7ff4013c475595c36e.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2016, Cranfield University
// All rights reserved
// Author: Salvatore Filippone
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
const int VERSION=8;
// Reference CPU reduction: left-to-right sum of x[0..n-1].
// Returns 0.0 for n <= 0.
double Reduction(int n, const double* x) {
    double acc = 0.0;
    for (int idx = 0; idx < n; ++idx) {
        acc += x[idx];
    }
    return acc;
}
// Memory management for device side
const int THREAD_BLOCK = 256; // Must be a power of 2 >= 64
const int BLOCKS_PER_MP = 32; // Sufficiently large for memory transaction hiding
int max_blocks=0; // Blocks in a grid (derived from the SM count)
int red_sz=0; // Size of reduction buffer (second-level partials)
// Lazily allocated workspace shared by all reductions (see reduce_alloc_wrk):
// device first-level partials, device second-level results, host copy.
double *o_data=NULL, *d_res_data=NULL, *h_res_data=NULL;
// Cached device properties, fetched once on first use.
static struct hipDeviceProp_t *prop=NULL;
// Lazily allocate the global reduction workspace: cache the device
// properties, size the grid from the multiprocessor count, and allocate the
// device partial-sum buffers plus the matching host buffer. Safe to call
// repeatedly -- each resource is created only on first use.
void reduce_alloc_wrk()
{
    int mpCnt;
    if (prop == NULL) {
        if ((prop=(struct hipDeviceProp_t *) malloc(sizeof(struct hipDeviceProp_t)))==NULL) {
            fprintf(stderr,"CUDA Error gpuInit3: not malloced prop\n");
            return;
        }
        hipSetDevice(0); // BEWARE: you may have more than one device
        hipGetDeviceProperties(prop,0);
    }
    if (max_blocks == 0) {
        mpCnt = prop->multiProcessorCount;
        // Enough blocks to keep every multiprocessor busy.
        max_blocks = mpCnt*BLOCKS_PER_MP;
        // Enough to do the second-level reduction
        red_sz = (max_blocks+THREAD_BLOCK-1)/THREAD_BLOCK;
        //std::cerr << mpCnt << ' '<<max_blocks << ' '<<THREAD_BLOCK<< std::endl;
    }
    // NOTE(review): allocation return codes are not checked.
    if (o_data == NULL) hipMalloc(&o_data,max_blocks*sizeof(double));
    if (d_res_data == NULL) hipMalloc(&d_res_data,(red_sz)*sizeof(double));
    if (h_res_data == NULL) h_res_data = (double *)malloc((red_sz)*sizeof(double));
}
// Fully unrolled final reduction for the first warp: folds sdata[0..63]
// down into sdata[0]. Requires blockDim.x >= 64 (it reads sdata[tid + 32]).
// The volatile qualifier forces each shared-memory store to be visible to
// the other lanes; the pattern assumes warp-synchronous execution with no
// explicit barrier. NOTE(review): on architectures with independent thread
// scheduling this classic idiom needs __syncwarp()/shuffle *_sync variants.
__device__ void warpReduce(volatile double *sdata, int tid) {
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}
// First-level reduction kernel. Each thread grid-strides over g_idata and
// accumulates a private partial sum in shared memory; the block then runs a
// tree reduction. THD must equal the launch block size (power of 2 >= 64)
// so the THD guards below fold away at compile time.
// NOTE: the block result is *added* into g_odata[blockIdx.x], so the caller
// must zero g_odata beforehand (gpu_reduce does this with hipMemset).
// Dynamic shared memory required: blockDim.x * sizeof(double).
template <unsigned int THD> __global__ void reduce(int n, double *g_idata, double *g_odata) {
    extern __shared__ double sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int gridSize = blockDim.x * gridDim.x;
    sdata[tid] = 0.0;
    // Grid-stride accumulation handles n larger than the total thread count.
    while (i<n) {
        sdata[tid] += g_idata[i] ;
        i += gridSize;
    }
    __syncthreads();
    // do reduction in shared mem (tree reduction, one barrier per level)
    if (THD >= 1024){ if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); }
    if (THD >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (THD >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (THD >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
    // Final 64 -> 1 step runs warp-synchronously (see warpReduce).
    if (tid < 32) warpReduce(sdata,tid);
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] += sdata[0];
}
// Launch one reduction pass: sum n values of g_idata into per-block
// partials accumulated into g_odata. The grid is capped at max_blocks;
// the kernel's grid-stride loop covers the remainder.
void do_gpu_reduce(int n, double *g_idata, double *g_odata)
{
    const int shmem_size = THREAD_BLOCK*sizeof(double);   // one double per thread
    int nblocks = ((n + THREAD_BLOCK - 1) / THREAD_BLOCK);
    if (nblocks > max_blocks) nblocks = max_blocks;
    hipLaunchKernelGGL(( reduce<THREAD_BLOCK>), dim3(nblocks),dim3(THREAD_BLOCK),shmem_size,0, n,g_idata,g_odata);
    return;
}
// Reduce the n-element device vector d_v to a single double.
// Two kernel passes (n -> max_blocks partials -> red_sz partials), then the
// final red_sz values are copied back and summed on the host.
double gpu_reduce(int n, double *d_v)
{
    reduce_alloc_wrk();
    // The kernel accumulates with +=, so both partial buffers must be zeroed.
    hipMemset((void *) o_data, 0, max_blocks*sizeof(double));
    hipMemset((void *)d_res_data,0,(red_sz)*sizeof(double));
    do_gpu_reduce(n, d_v, o_data);
    do_gpu_reduce(max_blocks,o_data,d_res_data);
    // Blocking copy; also synchronizes with the kernels above.
    // NOTE(review): err is stored but never checked.
    hipError_t err = hipMemcpy(h_res_data, d_res_data,
                               red_sz*sizeof(double), hipMemcpyDeviceToHost);
    return(Reduction(red_sz,h_res_data));
}
// Uniform pseudo-random double in [0, 1]. Note the upper bound is
// inclusive: rand() may return RAND_MAX itself, yielding exactly 1.0.
double rand_double() {
    return rand() / (double)RAND_MAX;
}
// Driver: reduce a random N-element double vector on CPU and GPU, compare
// the results, and report timings plus effective memory bandwidth.
// Usage: <prog> N
int main(int argc, char** argv) {
    if (argc < 2) {
        std::cerr << "Usage: " <<argv[0] << " N" << std::endl;
        exit(1);
    }
    int N = atoi(argv[1]);
    double bdwdth;
    double *h_x=(double *) malloc(N*sizeof(double));
    double *d_x;
    srand(time(0));
    // Fill the host vector with random values in [0, 1].
    for (int i=0; i<N; i++)
        h_x[i]=rand_double();
    // NOTE(review): err is assigned but never checked.
    hipError_t err=hipMalloc((void **)&d_x,(N*sizeof(double)));
    err = hipMemcpy(d_x, h_x, N*sizeof(double), hipMemcpyHostToDevice);
    reduce_alloc_wrk();
    StopWatchInterface* timer = 0;
    sdkCreateTimer(&timer);
    std::cout << "Testing reduction algorithm " << VERSION << " on a DOUBLE vector of size: " << N << std::endl;
    // Calculate the reduction on the host.
    timer->start();
    double cpu_sum = Reduction(N, h_x);
    timer->stop();
    std::cout << "CPU reduction: " << cpu_sum
              << " " << timer->getTime() << " ms. " << std::endl;
    // ------------ GPU reduction
    timer->reset();
    timer->start();
    double gpu_sum = gpu_reduce(N,d_x);
    timer->stop();
    // Effective bandwidth: bytes read once / elapsed ms, scaled to GB/s.
    bdwdth = ((double)N*sizeof(double))/timer->getTime();
    bdwdth *= 1.e-6;
    std::cout << "GPU reduction: " << gpu_sum
              << " " << timer->getTime() << " ms. " << std::endl;
    // NOTE(review): `abs` may bind to the integer overload here; fabs/std::fabs
    // would be safer for a double argument -- confirm which headers win.
    std::cout << "Relative difference: " << abs(gpu_sum-cpu_sum)/gpu_sum << std::endl;
    std::cout << "Measured bandwidth: " << bdwdth << " GB/s" << std::endl;
    // ------------------------------- Cleaning up ------------------------------ //
    // NOTE(review): h_x and d_x are never freed explicitly; hipDeviceReset
    // reclaims the device allocation, the host buffer leaks until exit.
    delete timer;
    checkCudaErrors(hipDeviceReset());
    return 0;
}
| 93937d4102e05dd23a71cf7ff4013c475595c36e.cu | // Copyright 2016, Cranfield University
// All rights reserved
// Author: Salvatore Filippone
#include <iostream>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
const int VERSION=8;
// Sequential CPU reduction used both as the reference result and as the
// final host-side pass over the GPU partial sums. Returns 0.0 when n <= 0.
double Reduction(int n, const double* x) {
    const double* stop = x + n;
    double total = 0.0;
    while (x < stop) {
        total += *x++;
    }
    return total;
}
// Memory management for device side
const int THREAD_BLOCK = 256; // Must be a power of 2 >= 64
const int BLOCKS_PER_MP = 32; // Sufficiently large for memory transaction hiding
int max_blocks=0; // Blocks in a grid (derived from the SM count)
int red_sz=0; // Size of reduction buffer (second-level partials)
// Lazily allocated workspace shared by all reductions (see reduce_alloc_wrk):
// device first-level partials, device second-level results, host copy.
double *o_data=NULL, *d_res_data=NULL, *h_res_data=NULL;
// Cached device properties, fetched once on first use.
static struct cudaDeviceProp *prop=NULL;
// Lazily allocate the global reduction workspace: cache the device
// properties, size the grid from the multiprocessor count, and allocate the
// device partial-sum buffers plus the matching host buffer. Safe to call
// repeatedly -- each resource is created only on first use.
void reduce_alloc_wrk()
{
    int mpCnt;
    if (prop == NULL) {
        if ((prop=(struct cudaDeviceProp *) malloc(sizeof(struct cudaDeviceProp)))==NULL) {
            fprintf(stderr,"CUDA Error gpuInit3: not malloced prop\n");
            return;
        }
        cudaSetDevice(0); // BEWARE: you may have more than one device
        cudaGetDeviceProperties(prop,0);
    }
    if (max_blocks == 0) {
        mpCnt = prop->multiProcessorCount;
        // Enough blocks to keep every multiprocessor busy.
        max_blocks = mpCnt*BLOCKS_PER_MP;
        // Enough to do the second-level reduction
        red_sz = (max_blocks+THREAD_BLOCK-1)/THREAD_BLOCK;
        //std::cerr << mpCnt << ' '<<max_blocks << ' '<<THREAD_BLOCK<< std::endl;
    }
    // NOTE(review): allocation return codes are not checked.
    if (o_data == NULL) cudaMalloc(&o_data,max_blocks*sizeof(double));
    if (d_res_data == NULL) cudaMalloc(&d_res_data,(red_sz)*sizeof(double));
    if (h_res_data == NULL) h_res_data = (double *)malloc((red_sz)*sizeof(double));
}
// Fully unrolled final reduction for the first warp: folds sdata[0..63]
// down into sdata[0]. Requires blockDim.x >= 64 (it reads sdata[tid + 32]).
// The volatile qualifier keeps each shared-memory store visible to the
// other lanes; the pattern relies on warp-synchronous execution with no
// explicit barrier. NOTE(review): Volta+ independent thread scheduling
// requires __syncwarp()/shuffle *_sync variants for this idiom.
__device__ void warpReduce(volatile double *sdata, int tid) {
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}
// First-level reduction kernel. Each thread grid-strides over g_idata and
// accumulates a private partial sum in shared memory; the block then runs a
// tree reduction. THD must equal the launch block size (power of 2 >= 64)
// so the THD guards below fold away at compile time.
// NOTE: the block result is *added* into g_odata[blockIdx.x], so the caller
// must zero g_odata beforehand (gpu_reduce does this with cudaMemset).
// Dynamic shared memory required: blockDim.x * sizeof(double).
template <unsigned int THD> __global__ void reduce(int n, double *g_idata, double *g_odata) {
    extern __shared__ double sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int gridSize = blockDim.x * gridDim.x;
    sdata[tid] = 0.0;
    // Grid-stride accumulation handles n larger than the total thread count.
    while (i<n) {
        sdata[tid] += g_idata[i] ;
        i += gridSize;
    }
    __syncthreads();
    // do reduction in shared mem (tree reduction, one barrier per level)
    if (THD >= 1024){ if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); }
    if (THD >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (THD >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (THD >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
    // Final 64 -> 1 step runs warp-synchronously (see warpReduce).
    if (tid < 32) warpReduce(sdata,tid);
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] += sdata[0];
}
// Launch one reduction pass: sum n values of g_idata into per-block
// partials accumulated into g_odata. The grid is capped at max_blocks;
// the kernel's grid-stride loop covers the remainder.
void do_gpu_reduce(int n, double *g_idata, double *g_odata)
{
    const int shmem_size = THREAD_BLOCK*sizeof(double);   // one double per thread
    int nblocks = ((n + THREAD_BLOCK - 1) / THREAD_BLOCK);
    if (nblocks > max_blocks) nblocks = max_blocks;
    reduce<THREAD_BLOCK><<<nblocks,THREAD_BLOCK,shmem_size,0>>>(n,g_idata,g_odata);
    return;
}
// Reduce the n-element device vector d_v to a single double.
// Two kernel passes (n -> max_blocks partials -> red_sz partials), then the
// final red_sz values are copied back and summed on the host.
double gpu_reduce(int n, double *d_v)
{
    reduce_alloc_wrk();
    // The kernel accumulates with +=, so both partial buffers must be zeroed.
    cudaMemset((void *) o_data, 0, max_blocks*sizeof(double));
    cudaMemset((void *)d_res_data,0,(red_sz)*sizeof(double));
    do_gpu_reduce(n, d_v, o_data);
    do_gpu_reduce(max_blocks,o_data,d_res_data);
    // Blocking copy; also synchronizes with the kernels above.
    // NOTE(review): err is stored but never checked.
    cudaError_t err = cudaMemcpy(h_res_data, d_res_data,
                                 red_sz*sizeof(double), cudaMemcpyDeviceToHost);
    return(Reduction(red_sz,h_res_data));
}
// Draw a pseudo-random double uniformly from [0, 1]. The upper bound is
// inclusive because rand() can return RAND_MAX itself.
double rand_double() {
    double r = (double)rand();
    return r / (double)RAND_MAX;
}
// Driver: reduce a random N-element double vector on CPU and GPU, compare
// the results, and report timings plus effective memory bandwidth.
// Usage: <prog> N
int main(int argc, char** argv) {
    if (argc < 2) {
        std::cerr << "Usage: " <<argv[0] << " N" << std::endl;
        exit(1);
    }
    int N = atoi(argv[1]);
    double bdwdth;
    double *h_x=(double *) malloc(N*sizeof(double));
    double *d_x;
    srand(time(0));
    // Fill the host vector with random values in [0, 1].
    for (int i=0; i<N; i++)
        h_x[i]=rand_double();
    // NOTE(review): err is assigned but never checked.
    cudaError_t err=cudaMalloc((void **)&d_x,(N*sizeof(double)));
    err = cudaMemcpy(d_x, h_x, N*sizeof(double), cudaMemcpyHostToDevice);
    reduce_alloc_wrk();
    StopWatchInterface* timer = 0;
    sdkCreateTimer(&timer);
    std::cout << "Testing reduction algorithm " << VERSION << " on a DOUBLE vector of size: " << N << std::endl;
    // Calculate the reduction on the host.
    timer->start();
    double cpu_sum = Reduction(N, h_x);
    timer->stop();
    std::cout << "CPU reduction: " << cpu_sum
              << " " << timer->getTime() << " ms. " << std::endl;
    // ------------ GPU reduction
    timer->reset();
    timer->start();
    double gpu_sum = gpu_reduce(N,d_x);
    timer->stop();
    // Effective bandwidth: bytes read once / elapsed ms, scaled to GB/s.
    bdwdth = ((double)N*sizeof(double))/timer->getTime();
    bdwdth *= 1.e-6;
    std::cout << "GPU reduction: " << gpu_sum
              << " " << timer->getTime() << " ms. " << std::endl;
    // NOTE(review): `abs` may bind to the integer overload here; fabs/std::fabs
    // would be safer for a double argument -- confirm which headers win.
    std::cout << "Relative difference: " << abs(gpu_sum-cpu_sum)/gpu_sum << std::endl;
    std::cout << "Measured bandwidth: " << bdwdth << " GB/s" << std::endl;
    // ------------------------------- Cleaning up ------------------------------ //
    // NOTE(review): h_x and d_x are never freed explicitly; cudaDeviceReset
    // reclaims the device allocation, the host buffer leaks until exit.
    delete timer;
    checkCudaErrors(cudaDeviceReset());
    return 0;
}
|
14ef12e9d4c73ef4d6b710d1953a65ece360453e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014-2015 NVIDIA Corporation. All rights reserved.
*
* Sample CUPTI app to demonstrate the usage of unified memory counter profiling
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cupti.h>
// Evaluate a CUPTI call; on failure print the failing expression and the
// CUPTI error string, then exit. A "legacy profiler not supported" error
// exits with status 0 so the sample counts as waived rather than failed.
#define CUPTI_CALL(call)                                                      \
do {                                                                          \
    CUptiResult _status = call;                                               \
    if (_status != CUPTI_SUCCESS) {                                           \
      const char *errstr;                                                     \
      cuptiGetResultString(_status, &errstr);                                 \
      fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n",    \
              __FILE__, __LINE__, #call, errstr);                             \
      if(_status == CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED)                \
          exit(0);                                                            \
      else                                                                    \
          exit(-1);                                                           \
    }                                                                         \
} while (0)
// Check a driver-style HIP call; reports the numeric error code and aborts
// the process on failure.
#define DRIVER_API_CALL(apiFuncCall)                                          \
do {                                                                          \
    hipError_t _status = apiFuncCall;                                         \
    if (_status != hipSuccess) {                                              \
        fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n",  \
                __FILE__, __LINE__, #apiFuncCall, _status);                   \
        exit(-1);                                                             \
    }                                                                         \
} while (0)
// Check a runtime HIP call; reports the human-readable error string and
// aborts the process on failure.
#define RUNTIME_API_CALL(apiFuncCall)                                         \
do {                                                                          \
    hipError_t _status = apiFuncCall;                                         \
    if (_status != hipSuccess) {                                              \
        fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n",  \
                __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status));\
        exit(-1);                                                             \
    }                                                                         \
} while (0)
// Activity buffers: 8 KB each, aligned to ALIGN_SIZE bytes.
#define BUF_SIZE (8 * 1024)
#define ALIGN_SIZE (8)
// Round `buffer` up to the next `align` boundary (align: power of two).
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// Map a unified-memory counter kind to a printable label for the activity
// dump; any kind this sample does not track maps to "<unknown>".
static const char *
getUvmCounterKindString(CUpti_ActivityUnifiedMemoryCounterKind kind)
{
    if (kind == CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD) {
        return "BYTES_TRANSFER_HTOD";
    }
    if (kind == CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH) {
        return "BYTES_TRANSFER_DTOH";
    }
    return "<unknown>";
}
// Pretty-print a single CUPTI activity record. Only unified-memory counter
// records are decoded; any other record kind prints "<unknown>".
static void
printActivity(CUpti_Activity *record)
{
    switch (record->kind)
    {
    case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER:
        {
            CUpti_ActivityUnifiedMemoryCounter2 *uvm = (CUpti_ActivityUnifiedMemoryCounter2 *)record;
            // start/end are presumably CUPTI timestamps; value is the byte
            // count for the transfer kind; srcId/dstId are the endpoints.
            printf("UNIFIED_MEMORY_COUNTER [ %llu %llu ] kind=%s value=%llu src %u dst %u\n",
                   (unsigned long long)(uvm->start),
                   (unsigned long long)(uvm->end),
                   getUvmCounterKindString(uvm->counterKind),
                   (unsigned long long)uvm->value,
                   uvm->srcId,
                   uvm->dstId);
            break;
        }
    default:
        printf(" <unknown>\n");
        break;
    }
}
// CUPTI callback: supply an activity-record buffer.
// Allocates BUF_SIZE bytes plus alignment slack, returns a pointer aligned
// to ALIGN_SIZE, and leaves *maxNumRecords = 0 so CUPTI fills the buffer
// with as many records as fit.
static void CUPTIAPI
bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords)
{
    uint8_t *rawBuffer;
    *size = BUF_SIZE;
    rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE);
    // Check the allocation itself before doing pointer arithmetic on it
    // (the original code aligned first and tested the aligned pointer).
    if (rawBuffer == NULL) {
        printf("Error: out of memory\n");
        exit(-1);
    }
    *buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE);
    *maxNumRecords = 0;
}
// CUPTI callback: consume a completed activity buffer.
// Walks the records with cuptiActivityGetNextRecord until the buffer is
// exhausted (CUPTI_ERROR_MAX_LIMIT_REACHED), printing each one, then
// reports any records CUPTI dropped for this context/stream and frees the
// buffer handed out by bufferRequested.
// NOTE(review): free() receives the *aligned* pointer; this is only safe
// while malloc's result is already ALIGN_SIZE-aligned so no adjustment
// ever happens -- confirm on the target platform.
static void CUPTIAPI
bufferCompleted(hipCtx_t ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize)
{
    CUptiResult status;
    CUpti_Activity *record = NULL;
    do {
        status = cuptiActivityGetNextRecord(buffer, validSize, &record);
        if (status == CUPTI_SUCCESS) {
            printActivity(record);
        }
        else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
            // End of the valid region of this buffer.
            break;
        }
        else {
            CUPTI_CALL(status);
        }
    } while (1);
    // report any records dropped from the queue
    size_t dropped;
    CUPTI_CALL(cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped));
    if (dropped != 0) {
        printf("Dropped %u activity records\n", (unsigned int)dropped);
    }
    free(buffer);
}
// Verify that every T-sized element in the first `size` bytes of `data`
// equals expectedVal; print diagnostics and stop scanning at the first
// mismatch. `loc` labels where the check runs ("CPU"/"GPU") in the output.
// Compiled for both host and device so the same check runs on either side.
template<class T>
__host__ __device__ void checkData(const char *loc, T *data, int size, int expectedVal) {
    int i;
    for (i = 0; i < size / (int)sizeof(T); i++) {
        if (data[i] != expectedVal) {
            printf("Mismatch found on %s\n", loc);
            printf("Address 0x%p, Observed = 0x%x Expected = 0x%x\n", data+i, data[i], expectedVal);
            break;
        }
    }
}
// Fill the first `size` bytes of `data`, viewed as elements of T, with
// writeVal. Usable from both host and device code.
template<class T>
__host__ __device__ void writeData(T *data, int size, int writeVal) {
    const int count = size / (int)sizeof(T);
    for (int k = 0; k < count; ++k) {
        data[k] = writeVal;
    }
}
// Kernel side of the managed-memory round trip (launched with a single
// thread): verify the buffer still holds expectedVal after the host wrote
// it, then overwrite every element with -expectedVal for the host check.
__global__ void testKernel(int *data, int size, int expectedVal)
{
    checkData("GPU", data, size, expectedVal);
    writeData(data, size, -expectedVal);
}
// Demonstrate CUPTI unified-memory counter profiling: enable HtoD/DtoH byte
// counters, touch a managed allocation from both CPU and GPU, then flush
// and print the collected activity records. Unsupported configurations are
// reported as waived (exit 0) rather than failures.
int main(int argc, char **argv)
{
    CUptiResult res;
    int deviceCount;
    int *data = NULL;
    int size = 64*1024; // 64 KB
    int i = 123;        // sentinel value written by the host, negated by the GPU
    CUpti_ActivityUnifiedMemoryCounterConfig config[2];
    DRIVER_API_CALL(hipInit(0));
    DRIVER_API_CALL(hipGetDeviceCount(&deviceCount));
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        exit(-1);
    }
    // register cupti activity buffer callbacks
    CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted));
    // configure unified memory counters: HtoD and DtoH byte counts on device 0
    config[0].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE;
    config[0].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD;
    config[0].deviceId = 0;
    config[0].enable = 1;
    config[1].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE;
    config[1].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH;
    config[1].deviceId = 0;
    config[1].enable = 1;
    res = cuptiActivityConfigureUnifiedMemoryCounter(config, 2);
    // Platforms/devices without UM profiling support waive the test.
    if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED) {
        printf("Test is waived, unified memory is not supported on the underlying platform.\n");
        return 0;
    }
    else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE) {
        printf("Test is waived, unified memory is not supported on the device.\n");
        return 0;
    }
    else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES) {
        printf("Test is waived, unified memory is not supported on the non-P2P multi-gpu setup.\n");
        return 0;
    }
    else {
        CUPTI_CALL(res);
    }
    // enable unified memory counter activity
    CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER));
    // allocate unified memory
    printf("Allocation size in bytes %d\n", size);
    RUNTIME_API_CALL(hipMallocManaged(&data, size));
    // CPU access: fill with the sentinel (migrates pages to the host)
    writeData(data, size, i);
    // kernel launch: GPU verifies the sentinel, then negates it
    hipLaunchKernelGGL(( testKernel), dim3(1),dim3(1), 0, 0, data, size, i);
    RUNTIME_API_CALL(hipDeviceSynchronize());
    // CPU access: verify the GPU's write migrated back
    checkData("CPU", data, size, -i);
    // free unified memory
    RUNTIME_API_CALL(hipFree(data));
    // Force all buffered activity records through bufferCompleted.
    CUPTI_CALL(cuptiActivityFlushAll(0));
    // disable unified memory counter activity
    CUPTI_CALL(cuptiActivityDisable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER));
    hipDeviceReset();
    return 0;
}
| 14ef12e9d4c73ef4d6b710d1953a65ece360453e.cu | /*
* Copyright 2014-2015 NVIDIA Corporation. All rights reserved.
*
* Sample CUPTI app to demonstrate the usage of unified memory counter profiling
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cupti.h>
// Evaluate a CUPTI call; on failure print the failing expression and the
// CUPTI error string, then exit. A "legacy profiler not supported" error
// exits with status 0 so the sample counts as waived rather than failed.
#define CUPTI_CALL(call)                                                      \
do {                                                                          \
    CUptiResult _status = call;                                               \
    if (_status != CUPTI_SUCCESS) {                                           \
      const char *errstr;                                                     \
      cuptiGetResultString(_status, &errstr);                                 \
      fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n",    \
              __FILE__, __LINE__, #call, errstr);                             \
      if(_status == CUPTI_ERROR_LEGACY_PROFILER_NOT_SUPPORTED)                \
          exit(0);                                                            \
      else                                                                    \
          exit(-1);                                                           \
    }                                                                         \
} while (0)
// Check a CUDA driver API call; reports the numeric CUresult code and
// aborts the process on failure.
#define DRIVER_API_CALL(apiFuncCall)                                          \
do {                                                                          \
    CUresult _status = apiFuncCall;                                           \
    if (_status != CUDA_SUCCESS) {                                            \
        fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n",  \
                __FILE__, __LINE__, #apiFuncCall, _status);                   \
        exit(-1);                                                             \
    }                                                                         \
} while (0)
// Check a CUDA runtime call; reports the human-readable error string and
// aborts the process on failure.
#define RUNTIME_API_CALL(apiFuncCall)                                         \
do {                                                                          \
    cudaError_t _status = apiFuncCall;                                        \
    if (_status != cudaSuccess) {                                             \
        fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n",  \
                __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status));\
        exit(-1);                                                             \
    }                                                                         \
} while (0)
// Activity buffers: 8 KB each, aligned to ALIGN_SIZE bytes.
#define BUF_SIZE (8 * 1024)
#define ALIGN_SIZE (8)
// Round `buffer` up to the next `align` boundary (align: power of two).
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
// Translate a unified-memory counter kind into a printable label for the
// activity dump; kinds this sample does not track map to "<unknown>".
static const char *
getUvmCounterKindString(CUpti_ActivityUnifiedMemoryCounterKind kind)
{
    switch (kind)
    {
    case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD:
        return "BYTES_TRANSFER_HTOD";
    case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH:
        return "BYTES_TRANSFER_DTOH";
    default:
        break;
    }
    return "<unknown>";
}
// Pretty-print a single CUPTI activity record. Only unified-memory counter
// records are decoded; any other record kind prints "<unknown>".
static void
printActivity(CUpti_Activity *record)
{
    switch (record->kind)
    {
    case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER:
        {
            CUpti_ActivityUnifiedMemoryCounter2 *uvm = (CUpti_ActivityUnifiedMemoryCounter2 *)record;
            // start/end are presumably CUPTI timestamps; value is the byte
            // count for the transfer kind; srcId/dstId are the endpoints.
            printf("UNIFIED_MEMORY_COUNTER [ %llu %llu ] kind=%s value=%llu src %u dst %u\n",
                   (unsigned long long)(uvm->start),
                   (unsigned long long)(uvm->end),
                   getUvmCounterKindString(uvm->counterKind),
                   (unsigned long long)uvm->value,
                   uvm->srcId,
                   uvm->dstId);
            break;
        }
    default:
        printf(" <unknown>\n");
        break;
    }
}
// CUPTI callback: supply an activity-record buffer.
// Allocates BUF_SIZE bytes plus alignment slack, returns a pointer aligned
// to ALIGN_SIZE, and leaves *maxNumRecords = 0 so CUPTI fills the buffer
// with as many records as fit.
static void CUPTIAPI
bufferRequested(uint8_t **buffer, size_t *size, size_t *maxNumRecords)
{
    uint8_t *rawBuffer;
    *size = BUF_SIZE;
    rawBuffer = (uint8_t *)malloc(*size + ALIGN_SIZE);
    // Check the allocation itself before doing pointer arithmetic on it
    // (the original code aligned first and tested the aligned pointer).
    if (rawBuffer == NULL) {
        printf("Error: out of memory\n");
        exit(-1);
    }
    *buffer = ALIGN_BUFFER(rawBuffer, ALIGN_SIZE);
    *maxNumRecords = 0;
}
// CUPTI callback: consume a completed activity buffer.
// Walks the records with cuptiActivityGetNextRecord until the buffer is
// exhausted (CUPTI_ERROR_MAX_LIMIT_REACHED), printing each one, then
// reports any records CUPTI dropped for this context/stream and frees the
// buffer handed out by bufferRequested.
// NOTE(review): free() receives the *aligned* pointer; this is only safe
// while malloc's result is already ALIGN_SIZE-aligned so no adjustment
// ever happens -- confirm on the target platform.
static void CUPTIAPI
bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, size_t size, size_t validSize)
{
    CUptiResult status;
    CUpti_Activity *record = NULL;
    do {
        status = cuptiActivityGetNextRecord(buffer, validSize, &record);
        if (status == CUPTI_SUCCESS) {
            printActivity(record);
        }
        else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
            // End of the valid region of this buffer.
            break;
        }
        else {
            CUPTI_CALL(status);
        }
    } while (1);
    // report any records dropped from the queue
    size_t dropped;
    CUPTI_CALL(cuptiActivityGetNumDroppedRecords(ctx, streamId, &dropped));
    if (dropped != 0) {
        printf("Dropped %u activity records\n", (unsigned int)dropped);
    }
    free(buffer);
}
// Verify that every T-sized element in the first `size` bytes of `data`
// equals expectedVal; print diagnostics and stop scanning at the first
// mismatch. `loc` labels where the check runs ("CPU"/"GPU") in the output.
// Compiled for both host and device so the same check runs on either side.
template<class T>
__host__ __device__ void checkData(const char *loc, T *data, int size, int expectedVal) {
    int i;
    for (i = 0; i < size / (int)sizeof(T); i++) {
        if (data[i] != expectedVal) {
            printf("Mismatch found on %s\n", loc);
            printf("Address 0x%p, Observed = 0x%x Expected = 0x%x\n", data+i, data[i], expectedVal);
            break;
        }
    }
}
// Fill the first `size` bytes of `data`, viewed as elements of T, with
// writeVal. Usable from both host and device code.
template<class T>
__host__ __device__ void writeData(T *data, int size, int writeVal) {
    int i;
    for (i = 0; i < size / (int)sizeof(T); i++) {
        data[i] = writeVal;
    }
}
// Kernel side of the managed-memory round trip (launched <<<1,1>>> by
// main): verify the buffer still holds expectedVal after the host wrote it,
// then overwrite every element with -expectedVal for the host-side check.
__global__ void testKernel(int *data, int size, int expectedVal)
{
    checkData("GPU", data, size, expectedVal);
    writeData(data, size, -expectedVal);
}
// Demonstrate CUPTI unified-memory counter profiling: enable HtoD/DtoH byte
// counters, touch a managed allocation from both CPU and GPU, then flush
// and print the collected activity records. Unsupported configurations are
// reported as waived (exit 0) rather than failures.
int main(int argc, char **argv)
{
    CUptiResult res;
    int deviceCount;
    int *data = NULL;
    int size = 64*1024; // 64 KB
    int i = 123;        // sentinel value written by the host, negated by the GPU
    CUpti_ActivityUnifiedMemoryCounterConfig config[2];
    DRIVER_API_CALL(cuInit(0));
    DRIVER_API_CALL(cuDeviceGetCount(&deviceCount));
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        exit(-1);
    }
    // register cupti activity buffer callbacks
    CUPTI_CALL(cuptiActivityRegisterCallbacks(bufferRequested, bufferCompleted));
    // configure unified memory counters: HtoD and DtoH byte counts on device 0
    config[0].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE;
    config[0].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD;
    config[0].deviceId = 0;
    config[0].enable = 1;
    config[1].scope = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_SCOPE_PROCESS_SINGLE_DEVICE;
    config[1].kind = CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH;
    config[1].deviceId = 0;
    config[1].enable = 1;
    res = cuptiActivityConfigureUnifiedMemoryCounter(config, 2);
    // Platforms/devices without UM profiling support waive the test.
    if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED) {
        printf("Test is waived, unified memory is not supported on the underlying platform.\n");
        return 0;
    }
    else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_DEVICE) {
        printf("Test is waived, unified memory is not supported on the device.\n");
        return 0;
    }
    else if (res == CUPTI_ERROR_UM_PROFILING_NOT_SUPPORTED_ON_NON_P2P_DEVICES) {
        printf("Test is waived, unified memory is not supported on the non-P2P multi-gpu setup.\n");
        return 0;
    }
    else {
        CUPTI_CALL(res);
    }
    // enable unified memory counter activity
    CUPTI_CALL(cuptiActivityEnable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER));
    // allocate unified memory
    printf("Allocation size in bytes %d\n", size);
    RUNTIME_API_CALL(cudaMallocManaged(&data, size));
    // CPU access: fill with the sentinel (migrates pages to the host)
    writeData(data, size, i);
    // kernel launch: GPU verifies the sentinel, then negates it
    testKernel<<<1,1>>>(data, size, i);
    RUNTIME_API_CALL(cudaDeviceSynchronize());
    // CPU access: verify the GPU's write migrated back
    checkData("CPU", data, size, -i);
    // free unified memory
    RUNTIME_API_CALL(cudaFree(data));
    // Force all buffered activity records through bufferCompleted.
    CUPTI_CALL(cuptiActivityFlushAll(0));
    // disable unified memory counter activity
    CUPTI_CALL(cuptiActivityDisable(CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER));
    cudaDeviceReset();
    return 0;
}
|
dccfc8c972524c6db6f030d39a48b931a7d2b491.hip | // !!! This is a file automatically generated by hipify!!!
/*
* AssignmentEngine.cu
* heuristic CUDA
*
* Created by Roberto Roverso on 25/08/09.
* Copyright 2009 Peerialism. All rights reserved.
*
*/
// includes, system
#include <stdio.h>
#include <assert.h>
//#include <jni.h>
#include "Global.h"
#ifdef MAC
#include "sys/malloc.h" // mac os x
#else
#include "malloc.h" // linux, windows
#endif
#include <stdlib.h>
#include <iostream>
#include <string>
using namespace std;
//CUDA imports
#include <hip/hip_runtime.h>
#include <cutil_inline.h>
#include <hip/hip_runtime_api.h>
#include <cutil.h>
#ifdef CUDPP
#include <cudpp/cudpp.h>
#endif CUDPP
// Include C files
#include "AssignmentEngine.h"
#include "Generator.h"
// include kernels
#include "BestDiffKernelShared.cu"
#include "BestDiffKernelGlobal.cu"
#include "InitAssignmentKernel.cu"
#define BLOCK_SIZE 16
#define DEFAULT_MULTI 8
// Aggregate metrics for one heuristic test run (see runTest/runHeuristic).
// Field meanings inferred from names -- confirm against the implementation.
typedef struct {
    float happiness;    // presumably the objective value of the final assignment
    float time;         // elapsed run time
    float memoryTimer;  // time attributed to memory transfers -- TODO confirm units
} TestResult;
// CUDA related
void checkCUDAError(const char *msg);
void listCudaDevice();
// CUDA Kernels
__global__ void initialAssignment(float* diffs, int* bestChanges, AijMatrix A,
int* persons, int* objects, bool* bannedSwitches,
bool* clearedBannedSwitches, int* reset, int* blockSize);
__global__ void bestDiff(float* diffs, int* bestChanges, AijMatrix A,
int* persons, int* objects, bool* bannedSwitches,
bool* clearedBannedSwitches, int* reset, int blockSize);
__global__ void
findMax(float* diffs, int* bestChanges, AijMatrix A, int* persons,
int* objects, bool* bannedSwitches, bool* clearedBannedSwitches,
int* reset, int blockSize);
__global__ void
findMaxShared(float* diffs, int* bestChanges, AijMatrix A, int* persons,
int* objects, bool* bannedSwitches, bool* clearedBannedSwitches,
int* reset, int blockSize, float* resDiffs);
__global__ void
bestDiffShared(float* diffs, int* bestChanges, AijMatrix A, int* persons,
int* objects, bool* bannedSwitches, bool* clearedBannedSwitches,
int* reset, int blockSize, float* resDiffs);
__global__ void calculateHappiness(AijMatrix A, int* persons,
int numberOfPersons);
// Host
TestResult runTest(int, int);
void hostInit(float** aijMatrix, int numberOfPersons, int numberOfObjects);
void gpuInit(float** aijMatrix, int numberOfPersons, int numberOfObjects);
void gpuInit2();
void fail(string&);
TestResult runHeuristic(float** aijMatrix, int numberOfPersons,
int numberOfObjects);
void smartInitialAssignment();
void smartInitialAssignmentGPU();
void enhanceBySwitching();
void evaluateDifferences();
void sortDifferencesGPU();
int isFeasible();
void gpuTerninate();
//Utility functions
void printH(float** aijMatrix, int numberOfPersons, int numberOfObjects);
void printG(float* aijMatrix, int numberOfPersons, int numberOfObjects);
// Constants
int initializationTimeLim = 10; // time lim for the initialization in seconds ...
float negInf = -9999;
// Variables on Host
float** aijMatrix;
int* persons;
int* objects;
bool* bannedSwitches;
bool* clearedBannedSwitches;
Difference* differences;
Difference* differences_temp;
Difference* columnDifferences;
Difference* rowDifferences;
Difference emptyDiff;
int numberOfPersons, numberOfObjects;
int* reset;
float* h_Diffs;
int* h_bestChanges;
// Variables on GPU
unsigned int tAijMSizeB;
unsigned int tPersonsSizeB;
unsigned int tObjectSizeB;
unsigned int tbestChangesB;
unsigned int tDiffsSizeB;
unsigned int tBannedSwitches;
unsigned int tClearedBannedSwitches;
unsigned int indexSize;
AijMatrix d_aijM;
int* d_pers;
int* d_objs;
Difference d_emptyDiff;
float* d_Diffs;
int* d_bestChanges;
int* d_reset;
bool* d_bannedSwitches;
bool* d_clearedBannedSwitches;
int* d_blockSize;
int* h_index;
int* d_index;
float* d_DiffResults;
unsigned int freeMemDevice, totalMemDevice;
#ifdef CUDPP
CUDPPHandle scanplan = 0;
#endif
// Run option flags
bool useGenerator = false;
bool runCpu = false;
bool runGpu = true;
bool assignmentGpu = false;
bool assignmentCpu = true;
bool assignmentOnly = false;
bool niOut = false;
bool pOut = false;
bool pResInit = false;
bool pResAss = false;
bool pDbg = false;
bool pTimer = false;
bool mTests = false;
bool sGPU = false;
bool sortP = false;
bool sdk = false;
bool strictSrt = true;
// Timers for benchmarking
float tEvaluateDiff = 0.0f;
float tSorting = 0.0f;
float tSwitching = 0.0f;
float tMemory = 0.0f;
int iterations = 0;
int seed = 7;
int minMult = 0;
int blockSize = BLOCK_SIZE;
int maxMult = 10;
int multi = DEFAULT_MULTI;
// Converts a byte count to whole kibibytes (truncating integer division).
static unsigned long inKB(unsigned long bytes) {
	const unsigned long bytesPerKB = 1024UL;
	return bytes / bytesPerKB;
}
// Converts a byte count to whole mebibytes (truncating integer division).
static unsigned long inMB(unsigned long bytes) {
	const unsigned long bytesPerMB = 1024UL * 1024UL;
	return bytes / bytesPerMB;
}
// Prints a capability/memory summary for one device and aborts the whole
// process when the device cannot map host memory (a hard requirement here).
//
// Parameters:
//   dev   - device ordinal/handle; also passed as the index to
//           hipGetDeviceProperties.
//   free  - free device memory in bytes (NOTE: shadows ::free inside this
//           function body).
//   total - total device memory in bytes.
static void printStats(hipDevice_t dev, unsigned long free, unsigned long total) {
	// Compile-time guard: mapped (zero-copy) host memory needs CUDART >= 2.2.
#if CUDART_VERSION < 2020
#error "This CUDART version does not support mapped memory!\n"
#endif
	hipDeviceProp_t deviceProp;
	// Return status intentionally ignored (see the commented check below).
	hipGetDeviceProperties(&deviceProp, dev);
	printf("Chosen GPU Device %d: \"%s\"\n", dev, deviceProp.name);
	printf(" Number of multiprocessors: %d\n",
			deviceProp.multiProcessorCount);
	// Cores estimated as 8 per SM — accurate only for the GPU generations
	// this code targets; NOTE(review): wrong on newer architectures.
	printf(" Number of cores: %d\n", 8
			* deviceProp.multiProcessorCount);
	printf(" Clock rate: %.2f GHz\n",
			deviceProp.clockRate * 1e-6f);
	printf(" Can Map Host Memory: %s \n",
			(deviceProp.canMapHostMemory) ? "true" : "false");
	printf(" Free Mem: %lu bytes (%lu KB) (%lu MB)\n", free,
			inKB(free), inMB(free));
	printf(" Total Mem: %lu bytes (%lu KB) (%lu MB)\n", total, inKB(
			total), inMB(total));
	//checkCUDAError("hipGetDeviceProperties");
	// Mapped host memory is required by the heuristic; bail out otherwise.
	if (!deviceProp.canMapHostMemory)
	{
		fprintf(stderr, "Device %d cannot map host memory!\n", 0);
		exit( EXIT_FAILURE);
	}
	// printf("%f%% free, %f%% used\n", 100.0 * free / (double) total, 100.0
	// * (total - free) / (double) total);
}
// Main
// Program entry point.
//
// Parses command-line switches, enumerates the available GPU devices, then
// runs the assignment heuristic for one problem size (or a range of sizes
// when -mT is given) and prints one result line per size.
//
// Fix over the original: value-consuming switches (-b, -m, -seed, -mT) are
// bounds-checked against argc before their argument(s) are read; the
// original indexed argv[i + 1] / argv[i + 2] unconditionally, which is
// undefined behavior when the value is missing.
int main(int argc, char** argv) {
	// ---- command-line parsing ------------------------------------------
	for (int i = 1; i < argc; i++)
	{
		if (strcmp(argv[i], "-l") == 0) // list CUDA devices and exit
		{
			listCudaDevice();
			return 0;
		}
		if (strcmp(argv[i], "-c") == 0) // run the heuristic on the CPU only
		{
			runCpu = true;
			runGpu = false;
		}
		if (strcmp(argv[i], "-ni") == 0) // non-interactive (quiet) output
		{
			niOut = true;
		}
		if (strcmp(argv[i], "-t") == 0) // print per-phase timers
		{
			pTimer = true;
		}
		if (strcmp(argv[i], "-ag") == 0) // initial assignment on the GPU
		{
			assignmentGpu = true;
			assignmentCpu = false;
		}
		if (strcmp(argv[i], "-cg") == 0) // CPU heuristic alongside GPU
		{
			runCpu = true;
		}
		if (strcmp(argv[i], "-b") == 0 && i + 1 < argc) // CUDA block size
		{
			blockSize = atoi(argv[i + 1]);
		}
		if (strcmp(argv[i], "-ao") == 0) // initial assignment only
		{
			assignmentOnly = true;
		}
		if (strcmp(argv[i], "-ssd") == 0) // disable strict sort tie-break
		{
			strictSrt = false;
		}
		if (strcmp(argv[i], "-m") == 0 && i + 1 < argc) // size multiplier
		{
			multi = atoi(argv[i + 1]);
		}
		if (strcmp(argv[i], "-p") == 0) // print final assignment
		{
			pOut = true;
		}
		if (strcmp(argv[i], "-ri") == 0) // print initialization result
		{
			pResInit = true;
		}
		if (strcmp(argv[i], "-ra") == 0) // print assignment result
		{
			pResAss = true;
		}
		if (strcmp(argv[i], "-d") == 0) // debug output
		{
			pDbg = true;
		}
		if (strcmp(argv[i], "-gen") == 0) // use the Euclidean generator
		{
			useGenerator = true;
		}
		if (strcmp(argv[i], "-sg") == 0) // sort differences on the GPU
		{
			sGPU = true;
		}
		if (strcmp(argv[i], "-so") == 0) // print sort input/output
		{
			sortP = true;
		}
		if (strcmp(argv[i], "-sdk") == 0) // use shared-memory kernels
		{
			sdk = true;
		}
		if (strcmp(argv[i], "-seed") == 0 && i + 1 < argc) // generator seed
		{
			seed = atoi(argv[i + 1]);
		}
		if (strcmp(argv[i], "-mT") == 0 && i + 2 < argc) // multi-size tests
		{
			mTests = true;
			minMult = atoi(argv[i + 1]);
			maxMult = atoi(argv[i + 2]);
		}
	}
	// ---- device discovery ----------------------------------------------
	int GPU_N;
	cutilSafeCall(hipGetDeviceCount(&GPU_N));
	if (!niOut)
	{
		printf("CUDA-capable device count: %i\n", GPU_N);
	}
	for (int i = 0; i < GPU_N; i++)
	{
		// A driver-API context is created per device so cuMemGetInfo works;
		// the last created context stays current for the rest of the run.
		hipDevice_t device;
		hipDeviceGet(&device, i);
		hipCtx_t ctx;
		hipCtxCreate(&ctx, 0, device);
		hipError_t res = cuMemGetInfo(&freeMemDevice, &totalMemDevice);
		if (!niOut)
		{
			printStats(i, freeMemDevice, totalMemDevice);
		}
	}
	// ---- run the benchmark for one size or a range of sizes ------------
	if (!mTests)
	{
		minMult = multi;
		maxMult = multi + 1;
	}
	for (int var = minMult; var < maxMult; ++var)
	{
		// Problem sizes are always multiples of the block size (square).
		numberOfPersons = blockSize * var;
		numberOfObjects = blockSize * var;
		TestResult r = runTest(numberOfPersons, numberOfObjects);
		if (!assignmentOnly)
			printf("%d, %f, %f, %d, %f, %f, %f, %f, %f, %f, %f, C=%d\n",
					numberOfPersons, r.happiness, r.time, iterations,
					tEvaluateDiff, (tEvaluateDiff / iterations), tSorting,
					(tSorting / iterations), tSwitching, (tSwitching
							/ iterations), (r.memoryTimer), isFeasible());
	}
	return 0;
}
// Checks that the current person->object / object->person assignment is a
// valid matching: no person holds more than one object and no object is
// held by more than one person.
//
// Returns 1 when feasible, 0 otherwise.
//
// Fixes over the original:
//  - unassigned slots (value -1) are skipped instead of indexing the count
//    arrays at -1 (out-of-bounds write);
//  - the two scratch arrays are freed on every return path (both leaked on
//    each call before).
int isFeasible() {
	int* numAssignedPersons = (int *) malloc(numberOfPersons * sizeof(int));
	int* numAssignedObjects = (int *) malloc(numberOfObjects * sizeof(int));
	int feasible = 1;
	int i;
	for (i = 0; i < numberOfPersons; i++)
		numAssignedPersons[i] = 0;
	for (i = 0; i < numberOfObjects; i++)
		numAssignedObjects[i] = 0;
	// Count how many persons claim each object.
	for (i = 0; i < numberOfPersons; i++)
	{
		int assignedObject = persons[i];
		if (assignedObject >= 0)
			numAssignedObjects[assignedObject] = numAssignedObjects[assignedObject]
					+ 1;
	}
	// Count how many objects claim each person.
	for (i = 0; i < numberOfObjects; i++)
	{
		int assignedPerson = objects[i];
		if (assignedPerson >= 0)
			numAssignedPersons[assignedPerson] = numAssignedPersons[assignedPerson]
					+ 1;
	}
	// Any count above 1 means a double assignment -> infeasible.
	for (i = 0; i < numberOfPersons && feasible; i++)
		if (numAssignedPersons[i] > 1)
			feasible = 0;
	for (i = 0; i < numberOfObjects && feasible; i++)
		if (numAssignedObjects[i] > 1)
			feasible = 0;
	free(numAssignedPersons);
	free(numAssignedObjects);
	return feasible;
}
// Builds an Aij benefit matrix of the requested size and runs the heuristic
// on it, returning the happiness/timing statistics.
//
// When -gen is given the matrix comes from the Euclidean generator (seeded
// with the global `seed`); otherwise it is filled with random() % 3000.
// NOTE: the parameters shadow the globals of the same name (callers pass
// the globals in, so both views agree).
// NOTE(review): the matrix is never freed, so every call in -mT mode leaks
// one matrix — confirm whether that is acceptable for this benchmark.
TestResult runTest(int numberOfPersons, int numberOfObjects) {
	if (useGenerator)
	{
		if (!niOut)
		{
			printf("Using Euclidean Generator\n");
		}
		aijMatrix = genMatrix(numberOfPersons, numberOfObjects,seed);
	} else
	{
		if (!niOut)
		{
			printf("Using Random Generator\n");
		}
		// For testing purpose only
		int C = 3000;
		// One block of row pointers plus one contiguous payload block; each
		// row pointer is aimed into the payload at its row offset.
		aijMatrix = (float **) malloc(numberOfPersons * sizeof(float *));
		float *aijPtr = (float *) malloc(numberOfPersons * numberOfObjects
				* sizeof(float));
		for (int i = 0; i < numberOfPersons; i++)
		{
			aijMatrix[i] = aijPtr + (i * numberOfObjects);
		}
		for (int i = 0; i < numberOfPersons; i++)
		{
			for (int j = 0; j < numberOfObjects; j++)
			{
				// Benefit values in [0, C); random() is not re-seeded here.
				aijMatrix[i][j] = random() % C;
			}
		}
	}
	// (dead experiment kept from the original: an optional fixed initial
	// assignment was built here and passed to the heuristic)
	// int *initialAssignment;
	// initialAssignment = (int *) malloc(numberOfPersons * sizeof(int));
	// int len = 0;
	// if (len > 0)
	// {
	// for (int i = 0; i < numberOfPersons; i++)
	// {
	// initialAssignment[i] = i;
	// }
	// }
	//RUN
	TestResult r = runHeuristic(aijMatrix, numberOfPersons, numberOfObjects);
	return r;
}
/**
* Initialize structure on host memory
*/
// Allocates and initializes all host-side heuristic state for a problem of
// numberOfPersons x numberOfObjects: assignment vectors (-1 == unassigned),
// the per-row and per-column best-difference caches, the combined
// difference list, and the banned-switch bookkeeping.
//
// NOTE(review): bannedSwitches is allocated with numberOfPersons *
// numberOfPersons entries but indexed with stride numberOfObjects below
// (and elsewhere in this file); this is only safe because the program
// always runs square instances (numberOfPersons == numberOfObjects) —
// confirm before ever allowing rectangular inputs.
void hostInit(float** aijMatrix, int numberOfPersons, int numberOfObjects) {
	// Sentinel value meaning "no difference recorded".
	emptyDiff.index = -1;
	emptyDiff.myAssigned = -1;
	emptyDiff.bestChangeAssigned = -1;
	bannedSwitches = (bool *) malloc(numberOfPersons * numberOfPersons
			* sizeof(bool));
	int i, j;
	clearedBannedSwitches = (bool *) malloc(numberOfPersons * sizeof(bool));
	persons = (int *) malloc(numberOfPersons * sizeof(int));
	differences = (Difference *) malloc((numberOfPersons + numberOfObjects)
			* sizeof(Difference));
	rowDifferences
			= (Difference *) malloc(numberOfPersons * sizeof(Difference));
	for (i = 0; i < numberOfPersons; i++)
	{
		clearedBannedSwitches[i] = false;
		persons[i] = -1; // person i starts unassigned
		for (j = 0; j < numberOfPersons; j++)
			bannedSwitches[i * numberOfObjects + j] = false;
		rowDifferences[i] = emptyDiff;
		differences[i] = emptyDiff;
	}
	columnDifferences = (Difference *) malloc(numberOfObjects
			* sizeof(Difference));
	objects = (int *) malloc(numberOfObjects * sizeof(int));
	for (i = 0; i < numberOfObjects; i++)
	{
		objects[i] = -1; // object i starts unassigned
		columnDifferences[i] = emptyDiff;
		// Column entries live after the row entries in the shared list.
		differences[numberOfPersons + i] = emptyDiff;
	}
	// printH(aijMatrix, numberOfPersons, numberOfObjects);
}
/**
* Initialize structure on video memory and upload
*/
// Allocates the device-side mirrors of the host state and uploads the
// initial data.
//
// Fixes over the original:
//  - d_reset is allocated (and first uploaded) with sizeof(int): every
//    later hipMemcpy of the flag uses sizeof(int), but the buffer used to
//    be allocated with sizeof(bool) (1 byte), so those 4-byte copies went
//    out of bounds on the device;
//  - reset[0] is zeroed before it is copied (it was uploaded
//    uninitialized);
//  - the persons/objects transfer sizes use sizeof(int) to match the real
//    element type (same byte count as the old sizeof(float) in practice);
//  - the flattened matrix is indexed with the row stride numberOfObjects
//    (the matrix width); the old numberOfPersons stride only worked because
//    the instances are square;
//  - the temporary host staging copy of the matrix is freed.
void gpuInit(float** aijMatrix, int numberOfPersons, int numberOfObjects) {
	// GPU memory management: compute every transfer size up front.
	tPersonsSizeB = sizeof(int) * numberOfPersons;
	tObjectSizeB = sizeof(int) * numberOfObjects;
	tbestChangesB = sizeof(int) * (numberOfPersons + numberOfObjects);
	tDiffsSizeB = sizeof(float) * (numberOfPersons + numberOfObjects);
	int resetB = sizeof(int); // the reset flag is an int on both sides
	tClearedBannedSwitches = sizeof(bool) * numberOfPersons;
	unsigned int tAijMSize = numberOfPersons * numberOfObjects;
	tAijMSizeB = sizeof(float) * tAijMSize;
	tBannedSwitches = sizeof(bool) * tAijMSize;
	int totalBonGpu = tPersonsSizeB + tObjectSizeB + tAijMSizeB + tDiffsSizeB
			+ tBannedSwitches + tClearedBannedSwitches;
	if (!niOut)
	{
		printf("Memory used on GPU: %d Bytes\n", totalBonGpu);
	}
	if (totalBonGpu > freeMemDevice)
	{
		printf("Warning: not enough memory available on GPU: %d Bytes\n",
				freeMemDevice);
	}
	// Flatten the row-pointer matrix into one contiguous buffer for upload.
	float* h_aijM = (float*) malloc(tAijMSizeB);
	for (int i = 0; i < numberOfPersons; i++)
	{
		for (int j = 0; j < numberOfObjects; j++)
		{
			h_aijM[i * numberOfObjects + j] = aijMatrix[i][j];
		}
	}
	d_aijM.height = numberOfPersons;
	d_aijM.width = numberOfObjects;
	// Initialize all diffs to "no difference" before uploading.
	h_Diffs = (float *) malloc(tDiffsSizeB);
	h_bestChanges = (int*) malloc(tbestChangesB);
	for (int k = 0; k < numberOfPersons + numberOfObjects; k++)
	{
		h_Diffs[k] = negInf;
		h_bestChanges[k] = -1;
	}
	//aijMatrix
	cutilSafeCall(hipMalloc((void**) &d_aijM.els, tAijMSizeB));
	//persons
	cutilSafeCall(hipMalloc((void**) &d_pers, tPersonsSizeB));
	//objects
	cutilSafeCall(hipMalloc((void**) &d_objs, tObjectSizeB));
	// Reset flag
	cutilSafeCall(hipMalloc((void**) &d_reset, resetB));
	// Banned switches
	cutilSafeCall(hipMalloc((void**) &d_bannedSwitches, tBannedSwitches));
	// Cleared banned switches
	cutilSafeCall(hipMalloc((void**) &d_clearedBannedSwitches,
			tClearedBannedSwitches));
	// Block size
	cutilSafeCall(hipMalloc((void**) &d_blockSize, sizeof(int)));
	// Copy host memory to device.
	cutilSafeCall(hipMemcpy(d_aijM.els, h_aijM, tAijMSizeB,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_pers, persons, tPersonsSizeB,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_objs, objects, tObjectSizeB,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_bannedSwitches, bannedSwitches, tBannedSwitches,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_clearedBannedSwitches, clearedBannedSwitches,
			tClearedBannedSwitches, hipMemcpyHostToDevice));
	int blockS[1];
	blockS[0] = blockSize;
	cutilSafeCall(hipMemcpy(d_blockSize, blockS, sizeof(int),
			hipMemcpyHostToDevice));
	reset = (int *) malloc(sizeof(int));
	reset[0] = 0; // was uploaded uninitialized before
	cutilSafeCall(hipMemcpy(d_reset, reset, resetB, hipMemcpyHostToDevice));
	// Device memory for the per-row results (diff values).
	cutilSafeCall(hipMalloc((void**) &d_Diffs, tDiffsSizeB));
	// Device memory for the per-row results (best-change columns).
	cutilSafeCall(hipMalloc((void**) &d_bestChanges, tbestChangesB));
	cutilSafeCall(hipMemcpy(d_Diffs, h_Diffs, tDiffsSizeB,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_bestChanges, h_bestChanges, tbestChangesB,
			hipMemcpyHostToDevice));
	if (sdk)
	{
		// Room for the per-cell differences of the shared-memory kernel.
		cutilSafeCall(hipMalloc((void**) &d_DiffResults, tAijMSizeB));
	}
	// The matrix now lives on the device; the staging buffer can go.
	free(h_aijM);
	cuMemGetInfo(&freeMemDevice, &totalMemDevice);
	if (!niOut)
		printf("Memory after allocation: %f%% free, %f%% used\n", 100.0
				* freeMemDevice / (double) totalMemDevice, 100.0
				* (totalMemDevice - freeMemDevice) / (double) totalMemDevice);
}
// Releases every device allocation made in gpuInit.
// (The misspelled name "Terninate" is kept: it is part of the interface
// used elsewhere in this file.)
//
// Fixes over the original: d_blockSize and (in -sdk mode) d_DiffResults
// were never freed.
void gpuTerninate() {
	hipFree(d_aijM.els);
	hipFree(d_pers);
	hipFree(d_objs);
	hipFree(d_Diffs);
	hipFree(d_bestChanges);
	hipFree(d_bannedSwitches);
	hipFree(d_clearedBannedSwitches);
	hipFree(d_reset);
	hipFree(d_blockSize);
	if (sdk)
		hipFree(d_DiffResults); // only allocated when -sdk is set
}
unsigned int memoryTimer = 0;
// Runs the full heuristic: host initialization, initial assignment (CPU
// and/or GPU), then the enhance-by-switching phase, returning the final
// happiness and timing statistics.
//
// Fixes over the original:
//  - the result struct is zero-initialized, so its fields are defined even
//    in -ao (assignment-only) mode, where the enhancement phase that fills
//    them is skipped (it used to be returned partially uninitialized);
//  - the redundant second cutCreateTimer on the already-created `timer`
//    inside the CPU-assignment branch (which leaked the first handle) was
//    removed.
TestResult runHeuristic(float** aijMatrix, int numberOfPersons,
		int numberOfObjects) {
	//Init
	hostInit(aijMatrix, numberOfPersons, numberOfObjects);
	if (!niOut)
	{
		printf("Entities %d\n", numberOfPersons);
		printf("Block Size %d\n", blockSize);
	}
	unsigned int timer = 0;
	cutilCheckError(cutCreateTimer(&timer));
	cutilCheckError(cutCreateTimer(&memoryTimer));
	if (assignmentGpu)
	{
		gpuInit(aijMatrix, numberOfPersons, numberOfObjects);
		if (!niOut)
		{
			printf("-----GPU Assignment------\n");
		}
		smartInitialAssignmentGPU();
	}
	if (assignmentCpu)
	{
		cutilCheckError(cutStartTimer(timer));
		if (!niOut)
		{
			printf("-----CPU Assignment------\n");
		}
		smartInitialAssignment();
		cutilCheckError(cutStopTimer(timer));
		if (!niOut)
		{
			printf("Done Assignment\n");
			printf("Processing time: %f (ms)\n", cutGetTimerValue(timer));
		}
	}
	if (pResInit)
	{
		// Dump the initial assignment: row, assigned column, benefit.
		for (int i = 0; i < numberOfPersons; i++)
		{
			printf("aij ");
			printf("%d ", i);
			printf("%d ", persons[i]);
			printf("%f \n", aijMatrix[i][persons[i]]);
		}
	}
	TestResult r = { 0.0f, 0.0f, 0.0f };
	if (!assignmentOnly)
	{
		if (!niOut)
		{
			printf("Enhance By Switching ");
		}
		if (runGpu)
		{
			if (!niOut)
			{
				printf("on GPU\n");
			}
			// The GPU state may already exist from the assignment phase.
			if (!assignmentGpu)
			{
				gpuInit(aijMatrix, numberOfPersons, numberOfObjects);
			}
		} else
		{
			if (!niOut)
			{
				printf("on CPU\n");
			}
		}
		cutilCheckError(cutResetTimer(timer));
		cutilCheckError(cutStartTimer(timer));
		// Enhance by switching
		enhanceBySwitching();
		cutilCheckError(cutStopTimer(timer));
		float happiness = calculateTotalHappiness();
		float tValue = cutGetTimerValue(timer);
		if (!niOut)
		{
			printf("Done Enhancing\n");
			printf("Processing time: %f (ms)\n", tValue);
		}
		if (pResAss)
		{
			// Dump the final assignment: row, assigned column, benefit.
			for (int i = 0; i < numberOfPersons; i++)
			{
				printf("aij ");
				printf("%d ", i);
				printf("%d ", persons[i]);
				printf("%f \n", aijMatrix[i][persons[i]]);
			}
		}
		r.time = tValue;
		r.happiness = happiness;
		float v = cutGetTimerValue(memoryTimer);
		r.memoryTimer = v;
	}
	if (runGpu || assignmentGpu)
	{
		if (!niOut && !assignmentOnly)
		{
			printf("Cleaning GPU state\n");
		}
		gpuTerninate();
	}
	return r;
}
// Secondary GPU setup used only by the enhancement phase: re-uploads the
// banned-switch state and, when built with CUDPP, creates the key-value
// radix-sort plan plus the host/device index buffers that
// sortDifferencesGPU() needs.
void gpuInit2() {
	cutilCheckError(cutStartTimer(memoryTimer));
	cutilSafeCall(hipMemcpy(d_bannedSwitches, bannedSwitches, tBannedSwitches,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_clearedBannedSwitches, clearedBannedSwitches,
			tClearedBannedSwitches, hipMemcpyHostToDevice));
	cutilCheckError(cutStopTimer(memoryTimer));
#ifdef CUDPP
	// One float-key / int-value radix-sort plan sized for the full
	// person+object difference list.
	int size = numberOfPersons + numberOfObjects;
	CUDPPConfiguration config;
	config.algorithm = CUDPP_SORT_RADIX;
	config.datatype = CUDPP_FLOAT;
	config.options = CUDPP_OPTION_KEY_VALUE_PAIRS;
	CUDPPResult result = cudppPlan(&scanplan, config, size, 1, 0);
	if (CUDPP_SUCCESS != result)
	{
		string eMsg = "Error creating CUDPPPlan";
		fail(eMsg);
	}
	indexSize = (size) * sizeof(int);
	h_index = (int*) malloc(indexSize);
	cutilSafeCall(hipMalloc((void**) &d_index, indexSize));
	// NOTE(review): h_index, d_index, scanplan and differences_temp are
	// never released — confirm whether teardown should free them.
	differences_temp = (Difference *) malloc(
			(numberOfPersons + numberOfObjects) * sizeof(Difference));
#endif
}
// Computes, for every person (row), the best objective improvement
// obtainable by switching its assigned object — either on the GPU
// (bestDiff/findMax kernels, or their shared-memory -sdk variants) or via
// the CPU fallback — and folds the results into the host-side difference
// lists.
//
// Fix over the original: the hipify conversion had fused the closing brace
// of the `if (!niOut)` debug-print block into the kernel-launch macro
// (`hipLaunchKernelGGL((}bestDiffShared...`), which does not compile and
// would have launched the kernel only in verbose mode; the brace is
// restored and the kernel is now launched unconditionally, matching the
// non-sdk path.
void evaluateDifferences() {
	if (runGpu)
	{
		if (pDbg)
		{
			printf("Eval Diff Phase Start\n");
		}
		dim3 threads;
		dim3 grid;
		if (!sdk)
		{
			// 1-D launch: one thread per person (row).
			threads.x = blockSize;
			threads.y = 1;
			int s = numberOfPersons / blockSize + (numberOfPersons % blockSize
					== 0 ? 0 : 1);
			grid.x = s;
			grid.y = 1;
		} else
		{
			// 2-D launch: one thread per matrix cell for the shared kernel.
			threads.x = blockSize;
			threads.y = blockSize;
			grid.x = numberOfObjects / threads.x;
			grid.y = numberOfPersons / threads.y;
		}
		// Upload the current assignment and banned-switch state.
		cutilCheckError(cutStartTimer(memoryTimer));
		cutilSafeCall(hipMemcpy(d_pers, persons, tPersonsSizeB,
				hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpy(d_objs, objects, tObjectSizeB,
				hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpy(d_bannedSwitches, bannedSwitches,
				tBannedSwitches, hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpy(d_clearedBannedSwitches,
				clearedBannedSwitches, tClearedBannedSwitches,
				hipMemcpyHostToDevice));
		cutilCheckError(cutStopTimer(memoryTimer));
		if (pDbg)
		{
			printf("Pre-MemCpy Over\n");
		}
		reset[0] = 1;
		cutilSafeCall(hipMemcpy(d_reset, reset, sizeof(int),
				hipMemcpyHostToDevice));
		unsigned int timerEDSub = 0;
		cutilCheckError(cutCreateTimer(&timerEDSub));
		unsigned int timerEDSubb = 0;
		cutilCheckError(cutCreateTimer(&timerEDSubb));
		if (sdk)
		{
			cutilCheckError(cutStartTimer(timerEDSub));
			if (!niOut)
			{
				printf("use shared kernel\n");
				printf("GRID %d %d TH %d %d \n", grid.y, grid.x, threads.x,
						threads.y);
			}
			// Per-cell differences into d_DiffResults.
			hipLaunchKernelGGL(bestDiffShared, dim3(grid), dim3(threads), 0, 0,
					d_Diffs, d_bestChanges, d_aijM, d_pers, d_objs,
					d_bannedSwitches, d_clearedBannedSwitches, d_reset,
					blockSize, d_DiffResults);
			CUT_CHECK_ERROR("bestDiff");
			cutilCheckError(cutStopTimer(timerEDSub));
			if (pTimer)
				printf("EDSub time: %f (ms)\n", cutGetTimerValue(timerEDSub));
			cutilCheckError(cutStartTimer(timerEDSubb));
			// Row-wise max reduction over the per-cell results (1-D launch).
			threads.x = blockSize;
			threads.y = 1;
			int s = numberOfPersons / blockSize + (numberOfPersons % blockSize
					== 0 ? 0 : 1);
			grid.x = s;
			grid.y = 1;
			hipLaunchKernelGGL(findMaxShared, dim3(grid), dim3(threads), 0, 0,
					d_Diffs, d_bestChanges, d_aijM, d_pers, d_objs,
					d_bannedSwitches, d_clearedBannedSwitches, d_reset,
					blockSize, d_DiffResults);
			cutilCheckError(cutStopTimer(timerEDSubb));
			if (pTimer)
				printf("EDSubb time: %f (ms)\n", cutGetTimerValue(timerEDSubb));
		} else
		{
			cutilCheckError(cutStartTimer(timerEDSub));
			hipLaunchKernelGGL(bestDiff, dim3(grid), dim3(threads), 0, 0,
					d_Diffs, d_bestChanges, d_aijM, d_pers, d_objs,
					d_bannedSwitches, d_clearedBannedSwitches, d_reset,
					blockSize);
			cutilCheckError(cutStopTimer(timerEDSub));
			if (pTimer)
				printf("EDSub time: %f (ms)\n", cutGetTimerValue(timerEDSub));
			cutilCheckError(cutStartTimer(timerEDSubb));
			hipLaunchKernelGGL(findMax, dim3(grid), dim3(threads), 0, 0,
					d_Diffs, d_bestChanges, d_aijM, d_pers, d_objs,
					d_bannedSwitches, d_clearedBannedSwitches, d_reset,
					blockSize);
			cutilCheckError(cutStopTimer(timerEDSubb));
			if (pTimer)
				printf("EDSubb time: %f (ms)\n", cutGetTimerValue(timerEDSubb));
			CUT_CHECK_ERROR("bestDiff");
		}
		hipDeviceSynchronize();
		// Download the per-row results.
		cutilCheckError(cutStartTimer(memoryTimer));
		cutilSafeCall(hipMemcpy(h_Diffs, d_Diffs, tDiffsSizeB,
				hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpy(h_bestChanges, d_bestChanges, tbestChangesB,
				hipMemcpyDeviceToHost));
		cutilCheckError(cutStopTimer(memoryTimer));
		if (pDbg)
		{
			printf("Post-MemCpy Over\n");
		}
		// Fold the kernel results into the host-side Difference lists;
		// negInf marks rows with no improvement.
		for (int i = 0; i < numberOfPersons; i++)
		{
			float maxDiff = h_Diffs[i];
			int bestChangeCol = h_bestChanges[i];
			Difference curDiff;
			if (maxDiff != negInf)
			{
				curDiff.index = i;
				curDiff.bestChange = bestChangeCol;
				curDiff.type = 0; // 0 == row difference
				curDiff.myAssigned = persons[i];
				curDiff.bestChangeAssigned = objects[bestChangeCol];
				curDiff.value = maxDiff;
				differences[i] = curDiff;
				rowDifferences[i] = curDiff;
			}
		}
	} else
	{
		// CPU fallback: scan every row directly.
		for (int l = 0; l < numberOfPersons; l++)
		{
			// Find the best objective improvement
			addRowBestDifference(l, 0);
		}
	}
}
// Sorts the combined difference list in descending value order on the GPU
// using CUDPP's key-value radix sort: keys are the difference values,
// values are the original indices; the sorted (ascending) index array is
// then read back in reverse. When strictSrt is on, runs of equal values
// are additionally re-ordered with the host comparator to make the order
// deterministic. Compiled out entirely without CUDPP.
void sortDifferencesGPU() {
#ifdef CUDPP
	int size = numberOfPersons + numberOfObjects;
	// Build key (value) and payload (index) arrays; empty slots sink to the
	// bottom via the negInf sentinel key.
	for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
	{
		h_index[var] = var;
		if (differences[var].index == -1)
			h_Diffs[var] = negInf;
		else
			h_Diffs[var] = differences[var].value;
	}
	if (pDbg)
	{
		printf("Pre-sort\n");
	}
	cutilCheckError(cutStartTimer(memoryTimer));
	cutilSafeCall(hipMemcpy(d_Diffs, h_Diffs, tDiffsSizeB,
			hipMemcpyHostToDevice));
	cutilSafeCall(hipMemcpy(d_index, h_index, indexSize,
			hipMemcpyHostToDevice));
	cutilCheckError(cutStopTimer(memoryTimer));
	// Radix sort over all 32 key bits (ascending by value).
	CUDPPResult res = cudppSort(scanplan, d_Diffs, d_index, 32, size);
	if (CUDPP_SUCCESS != res)
	{
		string eMsg = "Error sorting on GPU\n";
		fail(eMsg);
	}
	CUT_CHECK_ERROR("cudppRadixSort");
	if (pDbg)
	{
		printf("Post-sort\n");
	}
	cutilCheckError(cutStartTimer(memoryTimer));
	cutilSafeCall(hipMemcpy(h_index, d_index, indexSize,
			hipMemcpyDeviceToHost));
	cutilCheckError(cutStopTimer(memoryTimer));
	// Permute into descending order by reading the index array backwards.
	for (int curEntry = 0; curEntry < size; curEntry++)
	{
		differences_temp[curEntry] = differences[h_index[size - 1 - curEntry]];
	}
	for (int curEntry = 0; curEntry < size; curEntry++)
	{
		differences[curEntry] = differences_temp[curEntry];
	}
	if(strictSrt)
	{
		if(!niOut)
			printf("Strict srt");
		// Re-sort each run of equal (non-zero) values with the host
		// comparator so ties break deterministically like the CPU path.
		for (int varT = 0; varT < size; varT++)
		{
			if (varT == size - 1)
			{
				break;
			}
			float cv = differences[varT].value;
			if (cv != 0.0f)
			{
				int count = 0;
				for (int var1 = varT + 1; var1 < size; ++var1)
				{
					if (differences[var1].value == cv)
					{
						count++;
					} else
						break;
				}
				if (count > 0)
				{
					// NOTE: variable-length array — a GNU extension in C++.
					Difference ds[count+1];
					int inc = varT + count; // NOTE(review): unused
					for (int N = 0; N < count + 1; ++N)
					{
						ds[N] = differences[varT + N];
					}
					qsort(ds, count + 1, sizeof(Difference), compare_differences);
					for (int N = 0; N < count + 1; ++N)
					{
						differences[varT + N] = ds[N];
					}
					varT = varT + count; // skip past the handled run
				}
			}
		}
	}
	if (pDbg)
	{
		printf("Post-red\n");
	}
#endif
}
// Reports a fatal error message on stdout and terminates the process with
// exit code -1.
void fail(string& msg) {
	cout << msg;
	exit(-1);
}
// Runs the initial assignment entirely on the GPU: repeatedly launches the
// bestDiff kernel (per-row best improvement) followed by the
// one-thread-per-block initialAssignment kernel until the device stops
// requesting another pass (the reset flag stays 0), then downloads the
// resulting assignment and bookkeeping state.
//
// Fix over the original: the two reset-flag uploads copied sizeof(bool)
// (1 byte) even though d_reset and the host `reset` buffer are ints and
// the download below copies sizeof(int); all flag copies now consistently
// use sizeof(int).
void smartInitialAssignmentGPU() {
	unsigned int timerAGPU = 0;
	cutilCheckError(cutCreateTimer(&timerAGPU));
	cutilCheckError(cutStartTimer(timerAGPU));
	// Ceiling division: enough blocks to cover every person.
	int s = numberOfPersons / blockSize + (numberOfPersons % blockSize == 0 ? 0
			: 1);
	dim3 grid(s);
	reset[0] = 1;
	hipMemcpy(d_Diffs, h_Diffs, tDiffsSizeB, hipMemcpyHostToDevice);
	hipMemcpy(d_bestChanges, h_bestChanges, tbestChangesB,
			hipMemcpyHostToDevice);
	unsigned int timer = 0;
	cutilCheckError(cutCreateTimer(&timer));
	cutilCheckError(cutResetTimer(timer));
	reset[0] = 1;
	while (reset[0] == 1)
	{
		cutilCheckError(cutResetTimer(timer));
		cutilCheckError(cutStartTimer(timer));
		reset[0] = 0;
		cutilSafeCall(hipMemcpy(d_reset, reset, sizeof(int),
				hipMemcpyHostToDevice));
		hipLaunchKernelGGL(bestDiff, dim3(grid), dim3(blockSize), 0, 0,
				d_Diffs, d_bestChanges, d_aijM, d_pers, d_objs,
				d_bannedSwitches, d_clearedBannedSwitches, d_reset,
				blockSize);
		hipDeviceSynchronize();
		cutilCheckError(cutStopTimer(timer));
		if (pTimer)
			printf("BD time: %f (ms)\n", cutGetTimerValue(timer));
		cutilCheckError(cutResetTimer(timer));
		cutilCheckError(cutStartTimer(timer));
		// Clear the flag again before the assignment step so only that
		// step's request for another pass survives.
		reset[0] = 0;
		cutilSafeCall(hipMemcpy(d_reset, reset, sizeof(int),
				hipMemcpyHostToDevice));
		cutilCheckError(cutStopTimer(timer));
		cutilCheckError(cutResetTimer(timer));
		cutilCheckError(cutStartTimer(timer));
		// One thread per block: the assignment step is serial per block.
		hipLaunchKernelGGL(initialAssignment, dim3(grid), dim3(1), 0, 0,
				d_Diffs, d_bestChanges, d_aijM, d_pers, d_objs,
				d_bannedSwitches, d_clearedBannedSwitches, d_reset,
				d_blockSize);
		hipDeviceSynchronize();
		cutilCheckError(cutStopTimer(timer));
		if (pTimer)
			printf("IA time: %f (ms)\n", cutGetTimerValue(timer));
		cutilCheckError(cutResetTimer(timer));
		cutilCheckError(cutStartTimer(timer));
		// Read back whether any block requested another pass.
		hipMemcpy(reset, d_reset, sizeof(int), hipMemcpyDeviceToHost);
		cutilCheckError(cutStopTimer(timer));
		if (pTimer)
			printf("CR time: %f (ms)\n", cutGetTimerValue(timer));
	}
	// Download the final assignment and bookkeeping state.
	unsigned int timerMcpy = 0;
	cutilCheckError(cutCreateTimer(&timerMcpy));
	cutilCheckError(cutStartTimer(timerMcpy));
	hipMemcpy(h_Diffs, d_Diffs, tDiffsSizeB, hipMemcpyDeviceToHost);
	hipMemcpy(h_bestChanges, d_bestChanges, tbestChangesB,
			hipMemcpyDeviceToHost);
	cutilSafeCall(hipMemcpy(persons, d_pers, tPersonsSizeB,
			hipMemcpyDeviceToHost));
	cutilSafeCall(hipMemcpy(objects, d_objs, tObjectSizeB,
			hipMemcpyDeviceToHost));
	cutilSafeCall(hipMemcpy(bannedSwitches, d_bannedSwitches, tBannedSwitches,
			hipMemcpyDeviceToHost));
	cutilSafeCall(hipMemcpy(clearedBannedSwitches, d_clearedBannedSwitches,
			tClearedBannedSwitches, hipMemcpyDeviceToHost));
	cutilCheckError(cutStopTimer(timerMcpy));
	if (pTimer)
		printf("MCR time: %f (ms)\n", cutGetTimerValue(timerMcpy));
	cutilCheckError(cutStopTimer(timerAGPU));
	printf("Assignment time: %f (ms)\n", cutGetTimerValue(timerAGPU));
	if (pOut)
	{
		for (int k = 0; k < numberOfPersons; k++)
		{
			printf("P%d O%d ->%f \n", k, persons[k], h_Diffs[k]);
		}
	}
}
// Iteratively improves the initial assignment by swapping pairs of
// assignments ("switching") until total happiness stops improving.
//
// Each iteration: (1) evaluateDifferences() computes the best possible
// improvement per row, (2) the difference list is sorted best-first (CPU
// qsort or CUDPP radix sort on the GPU), (3) the sorted list is drained,
// applying every switch whose improvement still holds after the switches
// performed earlier in the same drain.
//
// Fix over the original: the CPU qsort sorted numberOfPersons +
// numberOfPersons elements instead of the actual list length
// numberOfPersons + numberOfObjects (harmless only because this program
// always runs square instances).
//
// NOTE(review): the drain condition `differences[0].index > 1` also skips
// valid-looking entries with index 0 or 1 — confirm whether `> -1` (i.e.
// "non-empty") was intended.
void enhanceBySwitching() {
	float newTotalHappiness, oldTotalHappiness;
	if (sGPU)
	{
		if (!niOut)
			printf("Sorting on GPU\n");
		gpuInit2();
	} else
	{
		if (!niOut)
			printf("Sorting on CPU \n");
	}
	unsigned int timerED = 0;
	cutilCheckError(cutCreateTimer(&timerED));
	unsigned int timerSort = 0;
	cutilCheckError(cutCreateTimer(&timerSort));
	unsigned int timerProc = 0;
	cutilCheckError(cutCreateTimer(&timerProc));
	int count = 0;
	while (1)
	{
		if (!niOut)
		{
			printf("\nIteration %d\n", count++);
		}
		iterations++;
		oldTotalHappiness = calculateTotalHappiness();
		// ---- phase 1: evaluate per-row differences ----
		cutilCheckError(cutStartTimer(timerED));
		evaluateDifferences();
		cutilCheckError(cutStopTimer(timerED));
		if (pTimer)
			printf("ED time: %f (ms)\n", cutGetTimerValue(timerED));
		tEvaluateDiff += cutGetTimerValue(timerED);
		cutilCheckError(cutResetTimer(timerED));
		int switchedRows[2];
		int switchedColumns[2];
		if (sortP)
		{
			printf("Before sorting\n");
			int count2 = 0;
			for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
			{
				count2++;
				printf("\t %d i%d->%f", var, differences[var].index,
						differences[var].value);
				if (count2 == 8)
				{
					printf("\n");
					count2 = 0;
				}
			}
		}
		// ---- phase 2: sort the difference list, best first ----
		cutilCheckError(cutStartTimer(timerSort));
		if (sGPU)
		{
			sortDifferencesGPU();
		} else
		{
			// Full list length: rows plus columns (the original sorted
			// numberOfPersons + numberOfPersons entries).
			qsort(differences, numberOfPersons + numberOfObjects,
					sizeof(Difference), compare_differences);
		}
		if (sortP)
		{
			printf("After sorting\n");
			int count2 = 0;
			for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
			{
				count2++;
				printf("\t %d i%d->%f", var, differences[var].index,
						differences[var].value);
				if (count2 == 8)
				{
					printf("\n");
					count2 = 0;
				}
			}
		}
		cutilCheckError(cutStopTimer(timerSort));
		if (pTimer)
			printf("Srt time: %f (ms)\n", cutGetTimerValue(timerSort));
		tSorting += cutGetTimerValue(timerSort);
		cutilCheckError(cutResetTimer(timerSort));
		// ---- phase 3: drain the sorted list, applying valid switches ----
		int rdC = 0;
		cutilCheckError(cutStartTimer(timerProc));
		while (differences[0].index > 1)
		{
			rdC++;
			Difference myDiff = differences[0];
			int row1, row2, col1, col2;
			// Recompute the improvement for the head entry: switches applied
			// earlier in this drain may have invalidated it (second-order
			// effects).
			float diffCheck;
			if (myDiff.type == 0)
			{ // row difference: switch along columns
				row1 = myDiff.index; // row of the difference
				col1 = persons[row1]; // column currently assigned to row1
				col2 = myDiff.bestChange; // column of the best cell
				row2 = objects[col2]; // row currently holding col2
				if (col1 != myDiff.myAssigned || row2
						!= myDiff.bestChangeAssigned)
				{
					diffCheck = -1.0; // stale: the assignment moved under us
				} else if (row2 == -1)
				{
					// Target column is free: one-sided move.
					diffCheck = aijMatrix[row1][col2] - aijMatrix[row1][col1];
				} else
				{
					// Two-sided swap: gain of the new cells minus the old.
					diffCheck = aijMatrix[row1][col2] + aijMatrix[row2][col1]
							- (aijMatrix[row1][col1] + aijMatrix[row2][col2]);
				}
			} else
			{ // column difference: switch along rows
				col1 = myDiff.index; // column of the difference
				row1 = objects[col1]; // row currently assigned to col1
				row2 = myDiff.bestChange; // row of the best cell
				col2 = persons[row2]; // column currently held by row2
				if (row1 != myDiff.myAssigned || col2
						!= myDiff.bestChangeAssigned)
					diffCheck = -1.0f; // stale entry
				else
					diffCheck = aijMatrix[row1][col2] + aijMatrix[row2][col1]
							- (aijMatrix[row1][col1] + aijMatrix[row2][col2]);
			}
			// Drop entries that no longer improve the objective.
			if (diffCheck <= 0)
			{
				if (myDiff.type == 0)
					rowDifferences[myDiff.index] = emptyDiff;
				else
					columnDifferences[myDiff.index] = emptyDiff;
				removeDifference(0);
				continue;
			}
			// Apply the switch.
			persons[row1] = col2;
			if (row2 != -1)
			{
				persons[row2] = col1;
			}
			objects[col1] = row2;
			objects[col2] = row1;
			// Refresh the cached best differences of the touched rows and
			// columns: drop their stale entries from the sorted list, then
			// recompute.
			switchedRows[0] = row1;
			switchedRows[1] = row2;
			switchedColumns[0] = col1;
			switchedColumns[1] = col2;
			int i;
			for (i = 0; i < 2; i++)
			{
				if (columnDifferences[switchedColumns[i]].index != -1)
				{
					Difference toRemove = columnDifferences[switchedColumns[i]];
					int z;
					for (z = 1; z < numberOfObjects + numberOfPersons; z++)
					{
						Difference toCheck = differences[z];
						if (toCheck.index == -1)
							break; // rest of the list is empty
						if (toCheck.index == toRemove.index && toCheck.type
								== toRemove.type)
						{
							removeDifference(z);
							break;
						}
					}
					columnDifferences[switchedColumns[i]] = emptyDiff;
				}
				addColBestDifference(switchedColumns[i], 1);
			}
			for (i = 0; i < 2; i++)
			{
				if (rowDifferences[switchedRows[i]].index != -1)
				{
					Difference toRemove = rowDifferences[switchedRows[i]];
					int z;
					for (z = 1; z < numberOfObjects + numberOfPersons; z++)
					{
						Difference toCheck = differences[z];
						if (toCheck.index == -1)
							break; // rest of the list is empty
						if (toCheck.index == toRemove.index && toCheck.type
								== toRemove.type)
						{
							removeDifference(z);
							break;
						}
					}
					rowDifferences[switchedRows[i]] = emptyDiff;
				}
				addRowBestDifference(switchedRows[i], 1);
			}
		}
		cutilCheckError(cutStopTimer(timerProc));
		if (pTimer)
			printf("PC time for %d rounds: %f (ms)\n", rdC, cutGetTimerValue(
					timerProc));
		tSwitching += cutGetTimerValue(timerProc);
		cutilCheckError(cutResetTimer(timerProc));
		// Converged: no switch changed the objective this iteration.
		newTotalHappiness = calculateTotalHappiness();
		if (newTotalHappiness == oldTotalHappiness)
		{
			if (!niOut)
			{
				printf("Finished\n");
			}
			break;
		}
	}
}
/*
 * Greedy initial assignment. For each person, repeatedly assigns the
 * currently displaced person (curRow) to the best-valued object reported by
 * getRowBestDifference(), displacing that object's previous owner, until the
 * displacement chain ends (curRow == -1).
 *
 * Reads/writes globals: persons, objects, bannedSwitches, numberOfPersons,
 * numberOfObjects.
 */
void smartInitialAssignment() {
    int row1, curRow, col2, i;
    for (i = 0; i < numberOfPersons; i++)
    {
        curRow = i;
        while (curRow != -1)
        {
            Difference myDiff = getRowBestDifference(curRow);
            if (myDiff.index == -1)
            {
                // BUG FIX: the original spun forever here — when no best
                // difference exists, curRow was never updated, so the loop
                // condition stayed true. Leave this person unassigned and
                // move on to the next chain.
                break;
            }
            row1 = myDiff.index; // row of the difference
            col2 = myDiff.bestChange; // best column for that row
            curRow = objects[col2]; // previous owner, now displaced (or -1)
            persons[row1] = col2;
            objects[col2] = row1;
            if (curRow != -1)
            {
                persons[curRow] = -1;
                // forbid the displaced person from immediately stealing back
                bannedSwitches[row1 * numberOfObjects + curRow] = true;
            }
            // if (pRes)
            // printf("P%d O%d ->%f \n", row1, col2, myDiff.value);
        }
    }
}
//void smartInitialAssignmentWithInitial(int initialAssignmet[])
//{
// int row1, curRow, col2, i;
// // time_t start = time(NULL);
// for (i = 0; i < numberOfPersons; i++)
// {
// if (initialAssignmet[i] != -1 && objects[initialAssignmet[i]] == -1
// && aijMatrix[i][initialAssignmet[i]] != negInf)
// {
// persons[i] = initialAssignmet[i];
// objects[initialAssignmet[i]] = i;
// } else
// {
// curRow = i;
// while (curRow != -1)
// {
// Difference myDiff = getRowBestDifference(curRow);
// if (myDiff.index != -1)
// {
// row1 = myDiff.index; // index of row of the
// // difference
// col2 = myDiff.bestChange; // index of column of the
// // best cell in the row of
// // difference
// curRow = objects[col2]; // index of row of the chosen in
// // the column of the best cell
// // in the difference row
// persons[row1] = col2;
// objects[col2] = row1;
// if (curRow != -1)
// {
// persons[curRow] = -1;
// bannedSwitches[row1][curRow] = 1;
// }
// }
// }
// // if (time(NULL) - start > initializationTimeLim) {
// // fastInitialAssignment();
// // break;
// // }
// }
// }
//}
//void addRowBestDifference(int rowId, int sort)
//{
// Difference myDifference = getRowBestDifference(rowId);
// if (myDifference.index != -1)
// {
// myDifference.myAssigned = persons[rowId];
// myDifference.bestChangeAssigned = objects[myDifference.bestChange];
// if (sort == 0)
// {
// differences[rowId] = myDifference;
// } else
// {
// addSortedDifference(myDifference);
// }
// rowDifferences[rowId] = myDifference;
// }
//}
//JNIEXPORT jobjectArray JNICALL Java_se_peertv_opto_ImprovedHeuristic_JniHeuristic_heuristic
//(JNIEnv *env, jobject self,
// jobjectArray aijMatrixCp, int numberOfPersonsCp, int numberOfObjectsCp, jintArray initialAssignments) {
// // for test ...
// int C = 300;
// numberOfPersons = 100;
// numberOfObjects = 100;
// emptyDiff.index = -1;
// float *aijPtr;
// aijPtr = malloc(numberOfPersons * numberOfObjects * sizeof(float));
// int *bannedSwitchesPtr;
// bannedSwitchesPtr = malloc(numberOfPersons * numberOfObjects * sizeof(int));
// aijMatrix = malloc(numberOfPersons * sizeof(float *));
// bannedSwitches = malloc(numberOfPersons * sizeof(int *));
// int i,j;
//
// for (i = 0; i < numberOfPersons; i++) {
// aijMatrix[i] = aijPtr + (i*numberOfObjects);
// bannedSwitches[i] = bannedSwitchesPtr + (i*numberOfObjects);
// }
//
// for (i = 0; i < numberOfPersons; i++) {
// for (j = 0; j < numberOfObjects; j++) {
// aijMatrix[i][j] = random() % C;
// //printf("j has the value %f and is stored at %p\n", aijMatrix[i][j], (void *)&aijMatrix[i][j]);
// //printf("%f jjj ",aijMatrix[i][j]);
// }
// }s
// int *initialAssignment;
// initialAssignment = malloc(numberOfPersons * sizeof(int));
// int len = 0;
// if (len > 0) {
// for(i = 0; i < numberOfPersons; i++)
// {
// initialAssignment[i] = i;
// }
// }
// clearedBannedSwitches = malloc(numberOfPersons * sizeof(int));
// persons = malloc(numberOfPersons * sizeof(int));
// differences = malloc((numberOfPersons + numberOfObjects) * sizeof(Difference));
// rowDifferences = malloc(numberOfPersons * sizeof(Difference));
// for (i = 0; i < numberOfPersons; i++) {
// clearedBannedSwitches[i] = 0;
// persons[i] = -1;
// for (j = 0; j < numberOfPersons; j++)
// bannedSwitches[i][j] = -1;
// rowDifferences[i] = emptyDiff;
// differences[i] = emptyDiff;
// }
//
// columnDifferences = malloc(numberOfObjects * sizeof(Difference));
// objects = malloc(numberOfObjects * sizeof (int));
// for (i = 0; i < numberOfObjects; i++) {
// objects[i] = -1;
// columnDifferences[i] = emptyDiff;
// differences[numberOfPersons + i] = emptyDiff;
// }
//
// if (len > 0)
// smartInitialAssignmentWithInitial(initialAssignment);
// else
// smartInitialAssignment();
// enhanceBySwitching();
// for (i = 0; i < numberOfPersons; i++) {
// printf("aij ");
// printf("%d ",i);
// printf("%d ",persons[i]);
// printf("%f \n",aijMatrix[i][persons[i]]);
// }
// return 0;
// jintArray r= (*env)->NewIntArray(env, numberOfPersons);
// //for (i = 0; i < numberOfPersons; i++)
// // (*env)->SetObjectArrayElement(env, r,i,persons[i]);
// return r;
//}
void printH(float** aijMatrix, int numberOfPersons, int numberOfObjects) {
    // Dump the host-side happiness matrix, one matrix row per output line.
    for (int row = 0; row < numberOfPersons; ++row)
    {
        const float* rowPtr = aijMatrix[row];
        for (int col = 0; col < numberOfObjects; ++col)
        {
            printf("%f ", rowPtr[col]);
        }
        printf("\n");
    }
}
void printG(float* h_A, int numberOfPersons, int numberOfObjects) {
    // Dump a flattened row-major matrix (as staged for / copied from the GPU).
    // NOTE(review): the row stride used here is numberOfPersons, matching the
    // packing in gpuInit(); this is only correct for square instances —
    // confirm before allowing numberOfPersons != numberOfObjects.
    for (int row = 0; row < numberOfPersons; ++row)
    {
        for (int col = 0; col < numberOfObjects; ++col)
        {
            printf("%f ", h_A[row * numberOfPersons + col]);
        }
        printf("\n");
    }
}
/*
 * Enumerates all CUDA/HIP-capable devices and prints their capabilities.
 * Invoked from main() when the "-l" flag is given; purely informational.
 */
void listCudaDevice() {
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    // This function call returns 0 if there are no CUDA capable devices.
    if (deviceCount == 0)
        printf("There is no device supporting CUDA\n");
    int dev;
    for (dev = 0; dev < deviceCount; ++dev)
    {
        hipDeviceProp_t deviceProp;
        hipGetDeviceProperties(&deviceProp, dev);
        if (dev == 0)
        {
            // This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
            if (deviceProp.major == 9999 && deviceProp.minor == 9999)
                printf("There is no device supporting CUDA.\n");
            else if (deviceCount == 1)
                printf("There is 1 device supporting CUDA\n");
            else
                printf("There are %d devices supporting CUDA\n", deviceCount);
        }
        printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
        printf(" CUDA Capability Major revision number: %d\n",
                deviceProp.major);
        printf(" CUDA Capability Minor revision number: %d\n",
                deviceProp.minor);
        // BUG FIX: totalGlobalMem (and the other byte counts below) are
        // size_t fields; printing them with %u is undefined behaviour on
        // 64-bit builds. Use %zu.
        printf(" Total amount of global memory: %zu bytes\n",
                deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
        printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
        printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount);
#endif
        printf(" Total amount of constant memory: %zu bytes\n",
                deviceProp.totalConstMem);
        printf(" Total amount of shared memory per block: %zu bytes\n",
                deviceProp.sharedMemPerBlock);
        printf(" Total number of registers available per block: %d\n",
                deviceProp.regsPerBlock);
        printf(" Warp size: %d\n",
                deviceProp.warpSize);
        printf(" Maximum number of threads per block: %d\n",
                deviceProp.maxThreadsPerBlock);
        printf(
                " Maximum sizes of each dimension of a block: %d x %d x %d\n",
                deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
                deviceProp.maxThreadsDim[2]);
        printf(
                " Maximum sizes of each dimension of a grid: %d x %d x %d\n",
                deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
                deviceProp.maxGridSize[2]);
        printf(" Maximum memory pitch: %zu bytes\n",
                deviceProp.memPitch);
        // printf(" Texture alignment: %u bytes\n",
        // deviceProp.textureAlignment);
        printf(" Clock rate: %.2f GHz\n",
                deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
        printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No");
#endif
#if CUDART_VERSION >= 2020
        printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
        printf(" Integrated: %s\n", deviceProp.integrated ? "Yes" : "No");
        printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
        printf(" Compute mode: %s\n", deviceProp.computeMode == hipComputeModeDefault ?
                "Default (multiple host threads can use this device simultaneously)" :
                deviceProp.computeMode == hipComputeModeExclusive ?
                "Exclusive (only one host thread at a time can use this device)" :
                deviceProp.computeMode == hipComputeModeProhibited ?
                "Prohibited (no host thread can use this device)" :
                "Unknown");
#endif
    }
}
/*
 * Aborts the process if any preceding HIP/CUDA call left a sticky error,
 * printing `msg` as context for the failure.
 */
void checkCUDAError(const char *msg) {
    hipError_t status = hipGetLastError();
    if (status != hipSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
        exit(-1);
    }
}
| dccfc8c972524c6db6f030d39a48b931a7d2b491.cu | /*
* AssignmentEngine.cu
* heuristic CUDA
*
* Created by Roberto Roverso on 25/08/09.
* Copyright 2009 Peerialism. All rights reserved.
*
*/
// includes, system
#include <stdio.h>
#include <assert.h>
//#include <jni.h>
#include "Global.h"
#ifdef MAC
#include "sys/malloc.h" // mac os x
#else
#include "malloc.h" // linux, windows
#endif
#include <stdlib.h>
#include <iostream>
#include <string>
using namespace std;
//CUDA imports
#include <cuda.h>
#include <cutil_inline.h>
#include <cuda_runtime_api.h>
#include <cutil.h>
#ifdef CUDPP
#include <cudpp/cudpp.h>
#endif CUDPP
// Include C files
#include "AssignmentEngine.h"
#include "Generator.h"
// include kernels
#include "BestDiffKernelShared.cu"
#include "BestDiffKernelGlobal.cu"
#include "InitAssignmentKernel.cu"
#define BLOCK_SIZE 16
#define DEFAULT_MULTI 8
// Result of one heuristic run, produced by runHeuristic() and reported
// as one CSV line by main().
typedef struct {
    float happiness;   // total happiness of the final assignment (calculateTotalHappiness)
    float time;        // wall-clock time of the enhancement phase, in ms
    float memoryTimer; // accumulated host<->device memcpy time, in ms
} TestResult;
// CUDA related
void checkCUDAError(const char *msg);
void listCudaDevice();
// CUDA Kernels
__global__ void initialAssignment(float* diffs, int* bestChanges, AijMatrix A,
int* persons, int* objects, bool* bannedSwitches,
bool* clearedBannedSwitches, int* reset, int* blockSize);
__global__ void bestDiff(float* diffs, int* bestChanges, AijMatrix A,
int* persons, int* objects, bool* bannedSwitches,
bool* clearedBannedSwitches, int* reset, int blockSize);
__global__ void
findMax(float* diffs, int* bestChanges, AijMatrix A, int* persons,
int* objects, bool* bannedSwitches, bool* clearedBannedSwitches,
int* reset, int blockSize);
__global__ void
findMaxShared(float* diffs, int* bestChanges, AijMatrix A, int* persons,
int* objects, bool* bannedSwitches, bool* clearedBannedSwitches,
int* reset, int blockSize, float* resDiffs);
__global__ void
bestDiffShared(float* diffs, int* bestChanges, AijMatrix A, int* persons,
int* objects, bool* bannedSwitches, bool* clearedBannedSwitches,
int* reset, int blockSize, float* resDiffs);
__global__ void calculateHappiness(AijMatrix A, int* persons,
int numberOfPersons);
// Host
TestResult runTest(int, int);
void hostInit(float** aijMatrix, int numberOfPersons, int numberOfObjects);
void gpuInit(float** aijMatrix, int numberOfPersons, int numberOfObjects);
void gpuInit2();
void fail(string&);
TestResult runHeuristic(float** aijMatrix, int numberOfPersons,
int numberOfObjects);
void smartInitialAssignment();
void smartInitialAssignmentGPU();
void enhanceBySwitching();
void evaluateDifferences();
void sortDifferencesGPU();
int isFeasible();
void gpuTerninate();
//Utility functions
void printH(float** aijMatrix, int numberOfPersons, int numberOfObjects);
void printG(float* aijMatrix, int numberOfPersons, int numberOfObjects);
// Constants
int initializationTimeLim = 10; // time lim for the initialization in seconds ...
float negInf = -9999;
// Variables on Host
float** aijMatrix;
int* persons;
int* objects;
bool* bannedSwitches;
bool* clearedBannedSwitches;
Difference* differences;
Difference* differences_temp;
Difference* columnDifferences;
Difference* rowDifferences;
Difference emptyDiff;
int numberOfPersons, numberOfObjects;
int* reset;
float* h_Diffs;
int* h_bestChanges;
// Variables on GPU
unsigned int tAijMSizeB;
unsigned int tPersonsSizeB;
unsigned int tObjectSizeB;
unsigned int tbestChangesB;
unsigned int tDiffsSizeB;
unsigned int tBannedSwitches;
unsigned int tClearedBannedSwitches;
unsigned int indexSize;
AijMatrix d_aijM;
int* d_pers;
int* d_objs;
Difference d_emptyDiff;
float* d_Diffs;
int* d_bestChanges;
int* d_reset;
bool* d_bannedSwitches;
bool* d_clearedBannedSwitches;
int* d_blockSize;
int* h_index;
int* d_index;
float* d_DiffResults;
unsigned int freeMemDevice, totalMemDevice;
#ifdef CUDPP
CUDPPHandle scanplan = 0;
#endif
// Run option flags
bool useGenerator = false;
bool runCpu = false;
bool runGpu = true;
bool assignmentGpu = false;
bool assignmentCpu = true;
bool assignmentOnly = false;
bool niOut = false;
bool pOut = false;
bool pResInit = false;
bool pResAss = false;
bool pDbg = false;
bool pTimer = false;
bool mTests = false;
bool sGPU = false;
bool sortP = false;
bool sdk = false;
bool strictSrt = true;
// Timers for benchmarking
float tEvaluateDiff = 0.0f;
float tSorting = 0.0f;
float tSwitching = 0.0f;
float tMemory = 0.0f;
int iterations = 0;
int seed = 7;
int minMult = 0;
int blockSize = BLOCK_SIZE;
int maxMult = 10;
int multi = DEFAULT_MULTI;
// Convert a byte count to whole kibibytes (truncating).
static unsigned long inKB(unsigned long bytes) {
    return bytes >> 10;
}
// Convert a byte count to whole mebibytes (truncating).
static unsigned long inMB(unsigned long bytes) {
    return bytes >> 20;
}
/*
 * Prints capability and memory statistics for GPU `dev` and aborts if the
 * device cannot map host memory. `free`/`total` are byte counts previously
 * obtained via cuMemGetInfo() in main().
 */
static void printStats(CUdevice dev, unsigned long free, unsigned long total) {
#if CUDART_VERSION < 2020
#error "This CUDART version does not support mapped memory!\n"
#endif
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Chosen GPU Device %d: \"%s\"\n", dev, deviceProp.name);
    printf(" Number of multiprocessors: %d\n",
            deviceProp.multiProcessorCount);
    printf(" Number of cores: %d\n", 8
            * deviceProp.multiProcessorCount);
    printf(" Clock rate: %.2f GHz\n",
            deviceProp.clockRate * 1e-6f);
    printf(" Can Map Host Memory: %s \n",
            (deviceProp.canMapHostMemory) ? "true" : "false");
    printf(" Free Mem: %lu bytes (%lu KB) (%lu MB)\n", free,
            inKB(free), inMB(free));
    printf(" Total Mem: %lu bytes (%lu KB) (%lu MB)\n", total, inKB(
            total), inMB(total));
    //checkCUDAError("cudaGetDeviceProperties");
    if (!deviceProp.canMapHostMemory)
    {
        // BUG FIX: the original printed a hard-coded device id 0 here
        // instead of the device actually being inspected.
        fprintf(stderr, "Device %d cannot map host memory!\n", dev);
        exit( EXIT_FAILURE);
    }
    // printf("%f%% free, %f%% used\n", 100.0 * free / (double) total, 100.0
    // * (total - free) / (double) total);
}
// Main
/*
 * Entry point. Parses command-line flags, prints per-device info, then runs
 * the assignment heuristic for one problem size (or a sweep of sizes with
 * -mT) and emits one CSV result line per size.
 */
int main(int argc, char** argv) {
    for (int i = 1; i < argc; i++)
    {
        if (strcmp(argv[i], "-l") == 0) // list CUDA devices and exit
        {
            listCudaDevice();
            return 0;
        }
        if (strcmp(argv[i], "-c") == 0) // run heuristic on CPU only
        {
            runCpu = true;
            runGpu = false;
        }
        if (strcmp(argv[i], "-ni") == 0) // non-interactive (quiet) output
        {
            niOut = true;
        }
        if (strcmp(argv[i], "-t") == 0) // print per-phase timers
        {
            pTimer = true;
        }
        if (strcmp(argv[i], "-ag") == 0) // initial assignment on GPU
        {
            assignmentGpu = true;
            assignmentCpu = false;
        }
        if (strcmp(argv[i], "-cg") == 0) // CPU run in addition to GPU
        {
            runCpu = true;
        }
        if (strcmp(argv[i], "-b") == 0) // CUDA block size
        {
            blockSize = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-ao") == 0) // stop after initial assignment
        {
            assignmentOnly = true;
        }
        if (strcmp(argv[i], "-ssd") == 0) // disable strict sorting
        {
            strictSrt = false;
        }
        if (strcmp(argv[i], "-m") == 0) // problem-size multiplier
        {
            multi = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-p") == 0) // set pOut option
        {
            pOut = true;
        }
        if (strcmp(argv[i], "-ri") == 0) // print initial assignment result
        {
            pResInit = true;
        }
        if (strcmp(argv[i], "-ra") == 0) // print final assignment result
        {
            pResAss = true;
        }
        if (strcmp(argv[i], "-d") == 0) // debug output
        {
            pDbg = true;
        }
        if (strcmp(argv[i], "-gen") == 0) // use Euclidean matrix generator
        {
            useGenerator = true;
        }
        if (strcmp(argv[i], "-sg") == 0) // set sGPU option (GPU sort path)
        {
            sGPU = true;
        }
        if (strcmp(argv[i], "-so") == 0) // set sortP option
        {
            sortP = true;
        }
        if (strcmp(argv[i], "-sdk") == 0) // use shared-memory kernels
        {
            sdk = true;
        }
        if (strcmp(argv[i], "-seed") == 0) // generator RNG seed
        {
            seed = atoi(argv[i + 1]);
        }
        if (strcmp(argv[i], "-mT") == 0) // sweep sizes over multiplier range
        {
            mTests = true;
            minMult = atoi(argv[i + 1]);
            maxMult = atoi(argv[i + 2]);
        }
    }
    // // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    // if (cutCheckCmdLineFlag(argc, (const char**) argv, "device"))
    // cutilDeviceInit(argc, argv);
    // else
    int GPU_N;
    cutilSafeCall(cudaGetDeviceCount(&GPU_N));
    if (!niOut)
    {
        printf("CUDA-capable device count: %i\n", GPU_N);
    }
    // Create a driver-API context per device and query its memory;
    // freeMemDevice/totalMemDevice end up holding the last device's values.
    for (int i = 0; i < GPU_N; i++)
    {
        CUdevice device;
        cuDeviceGet(&device, i);
        CUcontext ctx;
        cuCtxCreate(&ctx, 0, device);
        CUresult res = cuMemGetInfo(&freeMemDevice, &totalMemDevice);
        if (!niOut)
        {
            printStats(i, freeMemDevice, totalMemDevice);
        }
    }
    /*
     * Without -mT run exactly one size: blockSize * multi.
     */
    if (!mTests)
    {
        minMult = multi;
        maxMult = multi + 1;
    }
    for (int var = minMult; var < maxMult; ++var)
    {
        numberOfPersons = blockSize * var;
        numberOfObjects = blockSize * var;
        TestResult r = runTest(numberOfPersons, numberOfObjects);
        if (!assignmentOnly)
            printf("%d, %f, %f, %d, %f, %f, %f, %f, %f, %f, %f, C=%d\n",
                    numberOfPersons, r.happiness, r.time, iterations,
                    tEvaluateDiff, (tEvaluateDiff / iterations), tSorting,
                    (tSorting / iterations), tSwitching, (tSwitching
                            / iterations), (r.memoryTimer), isFeasible());
    }
    return 0;
}
/*
 * Feasibility check on the global assignment maps: returns 1 iff no object
 * is claimed by more than one person and no person owns more than one
 * object. Entries of -1 mean "unassigned" and are ignored.
 */
int isFeasible() {
    // calloc gives zero-initialised counters in one step
    int* numAssignedPersons = (int *) calloc(numberOfPersons, sizeof(int));
    int* numAssignedObjects = (int *) calloc(numberOfObjects, sizeof(int));
    int feasible = 1;
    int i;
    for (i = 0; i < numberOfPersons; i++)
    {
        int assignedObject = persons[i];
        // BUG FIX: the original indexed with -1 for unassigned persons
        if (assignedObject != -1)
            numAssignedObjects[assignedObject] = numAssignedObjects[assignedObject]
                    + 1;
    }
    for (i = 0; i < numberOfObjects; i++)
    {
        int assignedPerson = objects[i];
        if (assignedPerson != -1)
            numAssignedPersons[assignedPerson] = numAssignedPersons[assignedPerson]
                    + 1;
    }
    for (i = 0; i < numberOfPersons && feasible; i++)
        if (numAssignedPersons[i] > 1)
            feasible = 0;
    for (i = 0; i < numberOfObjects && feasible; i++)
        if (numAssignedObjects[i] > 1)
            feasible = 0;
    // BUG FIX: the original leaked both arrays (early returns, no free)
    free(numAssignedPersons);
    free(numAssignedObjects);
    return feasible;
}
/*
 * Builds the aij (happiness) matrix for one problem size — via the
 * Euclidean generator with -gen, otherwise uniform random values in
 * [0, 3000) — and runs the full heuristic on it.
 *
 * The parameters intentionally shadow the identically named globals
 * (main() assigns the globals before calling).
 * NOTE(review): the matrix allocated here is never freed, so a -mT sweep
 * leaks one matrix per size — confirm before long sweeps.
 */
TestResult runTest(int numberOfPersons, int numberOfObjects) {
    if (useGenerator)
    {
        if (!niOut)
        {
            printf("Using Euclidean Generator\n");
        }
        aijMatrix = genMatrix(numberOfPersons, numberOfObjects,seed);
    } else
    {
        if (!niOut)
        {
            printf("Using Random Generator\n");
        }
        // For testing purpose only
        int C = 3000;
        // one contiguous buffer plus per-row pointers into it
        aijMatrix = (float **) malloc(numberOfPersons * sizeof(float *));
        float *aijPtr = (float *) malloc(numberOfPersons * numberOfObjects
                * sizeof(float));
        for (int i = 0; i < numberOfPersons; i++)
        {
            aijMatrix[i] = aijPtr + (i * numberOfObjects);
        }
        for (int i = 0; i < numberOfPersons; i++)
        {
            for (int j = 0; j < numberOfObjects; j++)
            {
                aijMatrix[i][j] = random() % C;
            }
        }
    }
    // int *initialAssignment;
    // initialAssignment = (int *) malloc(numberOfPersons * sizeof(int));
    // int len = 0;
    // if (len > 0)
    // {
    // for (int i = 0; i < numberOfPersons; i++)
    // {
    // initialAssignment[i] = i;
    // }
    // }
    //RUN
    TestResult r = runHeuristic(aijMatrix, numberOfPersons, numberOfObjects);
    return r;
}
/**
 * Initialize structure on host memory
 *
 * Allocates and resets all host-side state for one run: the assignment
 * maps (persons/objects, -1 = unassigned), the banned-switch matrix, and
 * the Difference records used by the enhancement phase. The aijMatrix
 * parameter is currently unused here (only referenced by the commented-out
 * printH call).
 *
 * NOTE(review): bannedSwitches is allocated as persons x persons but
 * indexed with stride numberOfObjects below — out of bounds whenever
 * numberOfObjects > numberOfPersons; confirm only square instances are
 * used (main() always sets both sizes equal).
 */
void hostInit(float** aijMatrix, int numberOfPersons, int numberOfObjects) {
    // sentinel Difference meaning "no difference recorded"
    emptyDiff.index = -1;
    emptyDiff.myAssigned = -1;
    emptyDiff.bestChangeAssigned = -1;
    bannedSwitches = (bool *) malloc(numberOfPersons * numberOfPersons
            * sizeof(bool));
    int i, j;
    clearedBannedSwitches = (bool *) malloc(numberOfPersons * sizeof(bool));
    persons = (int *) malloc(numberOfPersons * sizeof(int));
    differences = (Difference *) malloc((numberOfPersons + numberOfObjects)
            * sizeof(Difference));
    rowDifferences
            = (Difference *) malloc(numberOfPersons * sizeof(Difference));
    for (i = 0; i < numberOfPersons; i++)
    {
        clearedBannedSwitches[i] = false;
        persons[i] = -1; // nobody assigned yet
        for (j = 0; j < numberOfPersons; j++)
            bannedSwitches[i * numberOfObjects + j] = false;
        rowDifferences[i] = emptyDiff;
        differences[i] = emptyDiff;
    }
    columnDifferences = (Difference *) malloc(numberOfObjects
            * sizeof(Difference));
    objects = (int *) malloc(numberOfObjects * sizeof(int));
    for (i = 0; i < numberOfObjects; i++)
    {
        objects[i] = -1; // no owner yet
        columnDifferences[i] = emptyDiff;
        differences[numberOfPersons + i] = emptyDiff;
    }
    // printH(aijMatrix, numberOfPersons, numberOfObjects);
}
/**
 * Initialize structure on video memory and upload
 *
 * Flattens the host matrix into row-major storage, allocates every device
 * buffer used by the kernels and uploads the initial assignment state.
 * Transfer sizes (t*SizeB) are kept in globals because later memcpys in
 * evaluateDifferences()/gpuInit2() reuse them.
 */
void gpuInit(float** aijMatrix, int numberOfPersons, int numberOfObjects) {
    // persons/objects are int arrays; size their transfers with sizeof(int)
    // (the original used sizeof(float), which only worked because both
    // types are 4 bytes on supported platforms).
    tPersonsSizeB = sizeof(int) * numberOfPersons;
    tObjectSizeB = sizeof(int) * numberOfObjects;
    tbestChangesB = sizeof(int) * (numberOfPersons + numberOfObjects);
    tDiffsSizeB = sizeof(float) * (numberOfPersons + numberOfObjects);
    // BUG FIX: d_reset holds an int and evaluateDifferences() copies
    // sizeof(int) bytes into it; the original allocated only sizeof(bool)
    // (1 byte), overflowing the device buffer.
    int resetB = sizeof(int);
    tClearedBannedSwitches = sizeof(bool) * numberOfPersons;
    unsigned int tAijMSize = numberOfPersons * numberOfObjects;
    tAijMSizeB = sizeof(float) * tAijMSize;
    tBannedSwitches = sizeof(bool) * tAijMSize;
    int totalBonGpu = tPersonsSizeB + tObjectSizeB + tAijMSizeB + tDiffsSizeB
            + tBannedSwitches + tClearedBannedSwitches;
    if (!niOut)
    {
        printf("Memory used on GPU: %d Bytes\n", totalBonGpu);
    }
    if (totalBonGpu > freeMemDevice)
    {
        printf("Warning: not enough memory available on GPU: %d Bytes\n",
                freeMemDevice);
    }
    // Flatten the row-pointer matrix into one contiguous staging buffer.
    // NOTE(review): row stride is numberOfPersons, correct only for square
    // instances — confirm before allowing persons != objects.
    float* h_aijM = (float*) malloc(tAijMSizeB);
    for (int i = 0; i < numberOfPersons; i++)
    {
        for (int j = 0; j < numberOfObjects; j++)
        {
            h_aijM[i * numberOfPersons + j] = aijMatrix[i][j];
        }
    }
    d_aijM.height = numberOfPersons;
    d_aijM.width = numberOfObjects;
    // Init all the diffs to null before uploading
    h_Diffs = (float *) malloc(tDiffsSizeB);
    h_bestChanges = (int*) malloc(tbestChangesB);
    for (int k = 0; k < numberOfPersons + numberOfObjects; k++)
    {
        h_Diffs[k] = negInf;
        h_bestChanges[k] = -1;
    }
    //aijMatrix
    cutilSafeCall(cudaMalloc((void**) &d_aijM.els, tAijMSizeB));
    //persons
    cutilSafeCall(cudaMalloc((void**) &d_pers, tPersonsSizeB));
    //object
    cutilSafeCall(cudaMalloc((void**) &d_objs, tObjectSizeB));
    // Reset flag
    cutilSafeCall(cudaMalloc((void**) &d_reset, resetB));
    // Banned Switches
    cutilSafeCall(cudaMalloc((void**) &d_bannedSwitches, tBannedSwitches));
    // Cleared Banned Switches
    cutilSafeCall(cudaMalloc((void**) &d_clearedBannedSwitches,
            tClearedBannedSwitches));
    // BlockSize
    cutilSafeCall(cudaMalloc((void**) &d_blockSize, sizeof(int)));
    // copy host memory to device
    cutilSafeCall(cudaMemcpy(d_aijM.els, h_aijM, tAijMSizeB,
            cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_pers, persons, tPersonsSizeB,
            cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_objs, objects, tObjectSizeB,
            cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_bannedSwitches, bannedSwitches, tBannedSwitches,
            cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_clearedBannedSwitches, clearedBannedSwitches,
            tClearedBannedSwitches, cudaMemcpyHostToDevice));
    int blockS[1];
    blockS[0] = blockSize;
    cutilSafeCall(cudaMemcpy(d_blockSize, blockS, sizeof(int),
            cudaMemcpyHostToDevice));
    reset = (int *) malloc(sizeof(int));
    // BUG FIX: initialise the flag before uploading it (the original
    // copied uninitialised malloc'd memory to the device).
    reset[0] = 0;
    cutilSafeCall(cudaMemcpy(d_reset, reset, resetB, cudaMemcpyHostToDevice));
    // allocate device memory for result (diff values)
    cutilSafeCall(cudaMalloc((void**) &d_Diffs, tDiffsSizeB));
    // allocate device memory for result (bestchange values)
    cutilSafeCall(cudaMalloc((void**) &d_bestChanges, tbestChangesB));
    cutilSafeCall(cudaMemcpy(d_Diffs, h_Diffs, tDiffsSizeB,
            cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_bestChanges, h_bestChanges, tbestChangesB,
            cudaMemcpyHostToDevice));
    if (sdk)
    {
        //ALLOCATE ROOM FOR THE RESULTING DIFFERENCES
        cutilSafeCall(cudaMalloc((void**) &d_DiffResults, tAijMSizeB));
    }
    // BUG FIX: the flattened staging copy is no longer needed once it has
    // been uploaded (cudaMemcpy is synchronous); the original leaked it.
    free(h_aijM);
    cuMemGetInfo(&freeMemDevice, &totalMemDevice);
    if (!niOut)
        printf("Memory after allocation: %f%% free, %f%% used\n", 100.0
                * freeMemDevice / (double) totalMemDevice, 100.0
                * (totalMemDevice - freeMemDevice) / (double) totalMemDevice);
}
void gpuTerninate() {
    // Release every device-side buffer allocated by gpuInit().
    // NOTE(review): d_DiffResults (allocated only on the -sdk path) and
    // d_blockSize are not freed here, matching the original — confirm.
    void* deviceBuffers[] = { d_aijM.els, d_pers, d_objs, d_Diffs,
            d_bestChanges, d_bannedSwitches, d_clearedBannedSwitches,
            d_reset };
    for (unsigned int i = 0; i < sizeof(deviceBuffers)
            / sizeof(deviceBuffers[0]); ++i)
    {
        cudaFree(deviceBuffers[i]);
    }
}
unsigned int memoryTimer = 0;
/*
 * Runs one full heuristic pass over the global problem instance:
 * host-side init, initial assignment (CPU and/or GPU depending on
 * assignmentCpu/assignmentGpu), the optional enhancement phase, and GPU
 * cleanup. Returns timings and the final happiness.
 *
 * NOTE: when assignmentOnly is set, the returned TestResult is never
 * written — main() only reads it when !assignmentOnly.
 */
TestResult runHeuristic(float** aijMatrix, int numberOfPersons,
        int numberOfObjects) {
    //Init
    hostInit(aijMatrix, numberOfPersons, numberOfObjects);
    // if (len > 0)
    // smartInitialAssignmentWithInitial( initialAssignment);
    // else
    // smartInitialAssignment();
    // enhanceBySwitching();
    if (!niOut)
    {
        printf("Entities %d\n", numberOfPersons);
        printf("Block Size %d\n", blockSize);
    }
    unsigned int timer = 0;
    cutilCheckError(cutCreateTimer(&timer));
    cutilCheckError(cutCreateTimer(&memoryTimer));
    if (assignmentGpu)
    {
        // GPU assignment needs all device buffers up front
        gpuInit(aijMatrix, numberOfPersons, numberOfObjects);
        if (!niOut)
        {
            printf("-----GPU Assignment------\n");
        }
        smartInitialAssignmentGPU();
    }
    if (assignmentCpu)
    {
        cutilCheckError(cutCreateTimer(&timer));
        cutilCheckError(cutStartTimer(timer));
        if (!niOut)
        {
            printf("-----CPU Assignment------\n");
        }
        smartInitialAssignment();
        cutilCheckError(cutStopTimer(timer));
        if (!niOut)
        {
            printf("Done Assignment\n");
            printf("Processing time: %f (ms)\n", cutGetTimerValue(timer));
        }
    }
    if (pResInit)
    {
        // dump person -> object mapping after the initial phase
        for (int i = 0; i < numberOfPersons; i++)
        {
            printf("aij ");
            printf("%d ", i);
            printf("%d ", persons[i]);
            printf("%f \n", aijMatrix[i][persons[i]]);
        }
    }
    TestResult r;
    if (!assignmentOnly)
    {
        if (!niOut)
        {
            printf("Enhance By Switching ");
        }
        if (runGpu)
        {
            if (!niOut)
            {
                printf("on GPU\n");
            }
            if (!assignmentGpu)
            {
                // device buffers were not set up by the assignment phase
                gpuInit(aijMatrix, numberOfPersons, numberOfObjects);
            }
        } else
        {
            if (!niOut)
            {
                printf("on CPU\n");
            }
        }
        cutilCheckError(cutResetTimer(timer));
        cutilCheckError(cutStartTimer(timer));
        // Enhance by switching
        enhanceBySwitching();
        cutilCheckError(cutStopTimer(timer));
        float happiness = calculateTotalHappiness();
        float tValue = cutGetTimerValue(timer);
        if (!niOut)
        {
            printf("Done Enhancing\n");
            printf("Processing time: %f (ms)\n", tValue);
        }
        // cutilCheckError(cutStopTimer(timer));
        // printf("Processing time: %f (ms)\n", cutGetTimerValue(timer));
        if (pResAss)
        {
            for (int i = 0; i < numberOfPersons; i++)
            {
                printf("aij ");
                printf("%d ", i);
                printf("%d ", persons[i]);
                printf("%f \n", aijMatrix[i][persons[i]]);
            }
        }
        r.time = tValue;
        r.happiness = happiness;
        float v = cutGetTimerValue(memoryTimer);
        r.memoryTimer = v;
    }
    if (runGpu || assignmentGpu)
    {
        if (!niOut && !assignmentOnly)
        {
            printf("Cleaning GPU state\n");
        }
        gpuTerninate();
    }
    return r;
}
/*
 * Second-stage GPU init used by the enhancement phase: re-uploads the
 * banned-switch state and, when CUDPP is compiled in, creates the radix
 * sort plan plus the index/scratch buffers used by sortDifferencesGPU().
 */
void gpuInit2() {
    cutilCheckError(cutStartTimer(memoryTimer));
    cutilSafeCall(cudaMemcpy(d_bannedSwitches, bannedSwitches, tBannedSwitches,
            cudaMemcpyHostToDevice));
    cutilSafeCall(cudaMemcpy(d_clearedBannedSwitches, clearedBannedSwitches,
            tClearedBannedSwitches, cudaMemcpyHostToDevice));
    cutilCheckError(cutStopTimer(memoryTimer));
#ifdef CUDPP
    // key/value radix sort over all row+column differences
    int size = numberOfPersons + numberOfObjects;
    CUDPPConfiguration config;
    config.algorithm = CUDPP_SORT_RADIX;
    config.datatype = CUDPP_FLOAT;
    config.options = CUDPP_OPTION_KEY_VALUE_PAIRS;
    CUDPPResult result = cudppPlan(&scanplan, config, size, 1, 0);
    if (CUDPP_SUCCESS != result)
    {
        string eMsg = "Error creating CUDPPPlan";
        fail(eMsg);
    }
    indexSize = (size) * sizeof(int);
    h_index = (int*) malloc(indexSize);
    cutilSafeCall(cudaMalloc((void**) &d_index, indexSize));
    // free(h_Diffs);
    // tDiffsSizeB = (numberOfPersons + numberOfObjects) * sizeof(float);
    // h_Diffs = (float*) malloc(tDiffsSizeB);
    // for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
    // {
    // h_Diffs[var] = 0.0f;
    // }
    // cutilSafeCall(cudaMalloc((void**) &d_Diffs, tDiffsSizeB));
    //
    // cutilSafeCall(cudaMemcpy(d_Diffs, h_Diffs, tDiffsSizeB,
    // cudaMemcpyHostToDevice));
    differences_temp = (Difference *) malloc(
            (numberOfPersons + numberOfObjects) * sizeof(Difference));
#endif
}
/*
 * One round of difference evaluation.
 * GPU path: uploads the current assignment state, launches the per-row
 * best-difference kernels (shared-memory variants when -sdk is set),
 * downloads the per-row results and rebuilds the host-side Difference
 * records in differences[]/rowDifferences[].
 * CPU path: recomputes each row's best difference directly via
 * addRowBestDifference().
 */
void evaluateDifferences() {
    if (runGpu)
    {
        if (pDbg)
        {
            printf("Eval Diff Phase Start\n");
        }
        dim3 threads;
        dim3 grid;
        if (!sdk)
        {
            // 1D launch: one thread per person row, ceil-div grid
            threads.x = blockSize;
            threads.y = 1;
            int s = numberOfPersons / blockSize + (numberOfPersons % blockSize
                    == 0 ? 0 : 1);
            grid.x = s;
            grid.y = 1;
        } else
        {
            // 2D tiled launch for the shared-memory kernel
            threads.x = blockSize;
            threads.y = blockSize;
            grid.x = numberOfObjects / threads.x;
            grid.y = numberOfPersons / threads.y;
        }
        cutilCheckError(cutStartTimer(memoryTimer));
        cutilSafeCall(cudaMemcpy(d_pers, persons, tPersonsSizeB,
                cudaMemcpyHostToDevice));
        cutilSafeCall(cudaMemcpy(d_objs, objects, tObjectSizeB,
                cudaMemcpyHostToDevice));
        cutilSafeCall(cudaMemcpy(d_bannedSwitches, bannedSwitches,
                tBannedSwitches, cudaMemcpyHostToDevice));
        cutilSafeCall(cudaMemcpy(d_clearedBannedSwitches,
                clearedBannedSwitches, tClearedBannedSwitches,
                cudaMemcpyHostToDevice));
        cutilCheckError(cutStopTimer(memoryTimer));
        if (pDbg)
        {
            printf("Pre-MemCpy Over\n");
        }
        reset[0] = 1;
        cutilSafeCall(cudaMemcpy(d_reset, reset, sizeof(int),
                cudaMemcpyHostToDevice));
        unsigned int timerEDSub = 0;
        cutilCheckError(cutCreateTimer(&timerEDSub));
        unsigned int timerEDSubb = 0;
        cutilCheckError(cutCreateTimer(&timerEDSubb));
        if (sdk)
        {
            cutilCheckError(cutStartTimer(timerEDSub));
            if (!niOut)
            {
                printf("use shared kernel\n");
                printf("GRID %d %d TH %d %d \n", grid.y, grid.x, threads.x,
                        threads.y);
            }
            bestDiffShared <<<grid,threads>>>(d_Diffs, d_bestChanges,d_aijM, d_pers, d_objs,d_bannedSwitches,d_clearedBannedSwitches,d_reset,blockSize,d_DiffResults);
            CUT_CHECK_ERROR("bestDiff");
            cutilCheckError(cutStopTimer(timerEDSub));
            if (pTimer)
                printf("EDSub time: %f (ms)\n", cutGetTimerValue(timerEDSub));
            cutilCheckError(cutStartTimer(timerEDSubb));
            // second pass: 1D reduction over the per-tile results
            threads.x = blockSize;
            threads.y = 1;
            int s = numberOfPersons / blockSize + (numberOfPersons % blockSize
                    == 0 ? 0 : 1);
            grid.x = s;
            grid.y = 1;
            findMaxShared <<<grid,threads>>>(d_Diffs, d_bestChanges,d_aijM, d_pers, d_objs,d_bannedSwitches,d_clearedBannedSwitches,d_reset,blockSize,d_DiffResults);
            cutilCheckError(cutStopTimer(timerEDSubb));
            if (pTimer)
                printf("EDSubb time: %f (ms)\n", cutGetTimerValue(timerEDSubb));
        } else
        {
            cutilCheckError(cutStartTimer(timerEDSub));
            bestDiff<<<grid,threads>>>(d_Diffs, d_bestChanges,d_aijM, d_pers, d_objs,d_bannedSwitches,d_clearedBannedSwitches,d_reset,blockSize);
            cutilCheckError(cutStopTimer(timerEDSub));
            if (pTimer)
                printf("EDSub time: %f (ms)\n", cutGetTimerValue(timerEDSub));
            cutilCheckError(cutStartTimer(timerEDSubb));
            findMax<<<grid,threads>>>(d_Diffs, d_bestChanges,d_aijM, d_pers, d_objs,d_bannedSwitches,d_clearedBannedSwitches,d_reset,blockSize);
            cutilCheckError(cutStopTimer(timerEDSubb));
            if (pTimer)
                printf("EDSubb time: %f (ms)\n", cutGetTimerValue(timerEDSubb));
            CUT_CHECK_ERROR("bestDiff");
        }
        cudaThreadSynchronize();
        cutilCheckError(cutStartTimer(memoryTimer));
        cutilSafeCall(cudaMemcpy(h_Diffs, d_Diffs, tDiffsSizeB,
                cudaMemcpyDeviceToHost));
        cutilSafeCall(cudaMemcpy(h_bestChanges, d_bestChanges, tbestChangesB,
                cudaMemcpyDeviceToHost));
        //cutilSafeCall(cudaMemcpy(persons, d_pers, tPersonsSizeB,
        // cudaMemcpyDeviceToHost));
        cutilCheckError(cutStopTimer(memoryTimer));
        if (pDbg)
        {
            printf("Post-MemCpy Over\n");
        }
        // Rebuild host-side Difference records; negInf marks "no diff".
        for (int i = 0; i < numberOfPersons; i++)
        {
            //int myCol = persons[i];
            float maxDiff = h_Diffs[i];
            //printf("bc: %d\n",h_bestChanges[i]);
            int bestChangeCol = h_bestChanges[i];
            Difference curDiff;
            /*if (maxDiff < 0)
            maxDiff = -maxDiff;
            if (maxDiff > 0.1 || myCol == -1)
            {
            if (bestChangeCol == -1)
            {
            if (clearedBannedSwitches[i])
            {
            persons[i] = -1;
            curDiff = emptyDiff;
            } else
            {
            clearedBannedSwitches[i] = true;
            int x;
            for (x = 0; x < numberOfPersons; x++)
            bannedSwitches[i * numberOfObjects + x] = false;
            curDiff = getRowBestDifference(i);
            }
            } else
            {
            if (myCol == -1)
            maxDiff = maxDiff * 1000;
            curDiff.value = maxDiff;
            }
            } else
            {
            curDiff = emptyDiff;
            }*/
            if (maxDiff != negInf)
            {
                curDiff.index = i;
                curDiff.bestChange = bestChangeCol;
                curDiff.type = 0;
                curDiff.myAssigned = persons[i];
                curDiff.bestChangeAssigned = objects[bestChangeCol];
                curDiff.value = maxDiff;
                differences[i] = curDiff;
                rowDifferences[i] = curDiff;
                //printf("ass diff[%d]=%f bc %d bca:%d ma:%d\n",i,maxDiff, bestChangeCol,curDiff.bestChangeAssigned,curDiff.myAssigned);
            }
        }
    } else
    {
        for (int l = 0; l < numberOfPersons; l++)
        {
            // Find the best objective improvement
            addRowBestDifference(l, 0);
        }
    }
}
void sortDifferencesGPU() {
#ifdef CUDPP
int size = numberOfPersons + numberOfObjects;
for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
{
h_index[var] = var;
if (differences[var].index == -1)
h_Diffs[var] = negInf;
else
h_Diffs[var] = differences[var].value;
}
if (pDbg)
{
printf("Pre-sort\n");
}
cutilCheckError(cutStartTimer(memoryTimer));
cutilSafeCall(cudaMemcpy(d_Diffs, h_Diffs, tDiffsSizeB,
cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpy(d_index, h_index, indexSize,
cudaMemcpyHostToDevice));
cutilCheckError(cutStopTimer(memoryTimer));
// Sorting now
CUDPPResult res = cudppSort(scanplan, d_Diffs, d_index, 32, size);
if (CUDPP_SUCCESS != res)
{
string eMsg = "Error sorting on GPU\n";
fail(eMsg);
}
CUT_CHECK_ERROR("cudppRadixSort");
if (pDbg)
{
printf("Post-sort\n");
}
cutilCheckError(cutStartTimer(memoryTimer));
cutilSafeCall(cudaMemcpy(h_index, d_index, indexSize,
cudaMemcpyDeviceToHost));
cutilCheckError(cutStopTimer(memoryTimer));
for (int curEntry = 0; curEntry < size; curEntry++)
{
differences_temp[curEntry] = differences[h_index[size - 1 - curEntry]];
}
for (int curEntry = 0; curEntry < size; curEntry++)
{
differences[curEntry] = differences_temp[curEntry];
}
if(strictSrt)
{
if(!niOut)
printf("Strict srt");
for (int varT = 0; varT < size; varT++)
{
if (varT == size - 1)
{
break;
}
float cv = differences[varT].value;
if (cv != 0.0f)
{
int count = 0;
for (int var1 = varT + 1; var1 < size; ++var1)
{
if (differences[var1].value == cv)
{
count++;
} else
break;
}
if (count > 0)
{
Difference ds[count+1];
int inc = varT + count;
for (int N = 0; N < count + 1; ++N)
{
ds[N] = differences[varT + N];
}
qsort(ds, count + 1, sizeof(Difference), compare_differences);
for (int N = 0; N < count + 1; ++N)
{
differences[varT + N] = ds[N];
}
varT = varT + count;
}
}
}
}
if (pDbg)
{
printf("Post-red\n");
}
#endif
}
void fail(string& s) {
cout << s;
exit(-1);
}
void smartInitialAssignmentGPU() {
unsigned int timerAGPU = 0;
cutilCheckError(cutCreateTimer(&timerAGPU));
cutilCheckError(cutStartTimer(timerAGPU));
int s = numberOfPersons / blockSize + (numberOfPersons % blockSize == 0 ? 0
: 1);
dim3 grid(s);
reset[0] = 1;
cudaMemcpy(d_Diffs, h_Diffs, tDiffsSizeB, cudaMemcpyHostToDevice);
cudaMemcpy(d_bestChanges, h_bestChanges, tbestChangesB,
cudaMemcpyHostToDevice);
unsigned int timer = 0;
cutilCheckError(cutCreateTimer(&timer));
cutilCheckError(cutResetTimer(timer));
reset[0] = 1;
while (reset[0] == 1)
{
cutilCheckError(cutResetTimer(timer));
cutilCheckError(cutStartTimer(timer));
reset[0] = 0;
cutilSafeCall(cudaMemcpy(d_reset, reset, sizeof(bool),
cudaMemcpyHostToDevice));
bestDiff<<<grid,blockSize>>>(d_Diffs,d_bestChanges, d_aijM, d_pers, d_objs,d_bannedSwitches,d_clearedBannedSwitches,d_reset,blockSize);
cudaThreadSynchronize();
cutilCheckError(cutStopTimer(timer));
if (pTimer)
printf("BD time: %f (ms)\n", cutGetTimerValue(timer));
cutilCheckError(cutResetTimer(timer));
cutilCheckError(cutStartTimer(timer));
reset[0] = 0;
cutilSafeCall(cudaMemcpy(d_reset, reset, sizeof(bool),
cudaMemcpyHostToDevice));
cutilCheckError(cutStopTimer(timer));
// if (pTimer)
// printf("CR time: %f (ms)\n", cutGetTimerValue(timer));
//Kernel here
// dim3 dimBlock(1,numberOfPersons);
// dim3 dimGrid(1, 1);
cutilCheckError(cutResetTimer(timer));
cutilCheckError(cutStartTimer(timer));
initialAssignment<<<grid,1>>>(d_Diffs,d_bestChanges,d_aijM, d_pers, d_objs,d_bannedSwitches,d_clearedBannedSwitches,d_reset,d_blockSize);
cudaThreadSynchronize();
cutilCheckError(cutStopTimer(timer));
if (pTimer)
printf("IA time: %f (ms)\n", cutGetTimerValue(timer));
cutilCheckError(cutResetTimer(timer));
cutilCheckError(cutStartTimer(timer));
cudaMemcpy(reset, d_reset, sizeof(int), cudaMemcpyDeviceToHost);
cutilCheckError(cutStopTimer(timer));
if (pTimer)
printf("CR time: %f (ms)\n", cutGetTimerValue(timer));
}
unsigned int timerMcpy = 0;
cutilCheckError(cutCreateTimer(&timerMcpy));
cutilCheckError(cutStartTimer(timerMcpy));
cudaMemcpy(h_Diffs, d_Diffs, tDiffsSizeB, cudaMemcpyDeviceToHost);
cudaMemcpy(h_bestChanges, d_bestChanges, tbestChangesB,
cudaMemcpyDeviceToHost);
cutilSafeCall(cudaMemcpy(persons, d_pers, tPersonsSizeB,
cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(objects, d_objs, tObjectSizeB,
cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(bannedSwitches, d_bannedSwitches, tBannedSwitches,
cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(clearedBannedSwitches, d_clearedBannedSwitches,
tClearedBannedSwitches, cudaMemcpyDeviceToHost));
cutilCheckError(cutStopTimer(timerMcpy));
if (pTimer)
printf("MCR time: %f (ms)\n", cutGetTimerValue(timerMcpy));
cutilCheckError(cutStopTimer(timerAGPU));
printf("Assignment time: %f (ms)\n", cutGetTimerValue(timerAGPU));
if (pOut)
{
for (int k = 0; k < numberOfPersons; k++)
{
printf("P%d O%d ->%f \n", k, persons[k], h_Diffs[k]);
}
}
}
void enhanceBySwitching() {
float newTotalHappiness, oldTotalHappiness;
// int counter = 1;
// time_t start = time(NULL);
if (sGPU)
{
if (!niOut)
printf("Sorting on GPU\n");
gpuInit2();
} else
{
if (!niOut)
printf("Sorting on CPU \n");
}
// int size = numberOfPersons + numberOfObjects;
unsigned int timerED = 0;
cutilCheckError(cutCreateTimer(&timerED));
unsigned int timerSort = 0;
cutilCheckError(cutCreateTimer(&timerSort));
unsigned int timerProc = 0;
cutilCheckError(cutCreateTimer(&timerProc));
int count = 0;
while (1)
{
// if (pDbg)
// {
if (!niOut)
{
printf("\nIteration %d\n", count++);
}
iterations++;
// }
oldTotalHappiness = calculateTotalHappiness();
cutilCheckError(cutStartTimer(timerED));
//--------
evaluateDifferences();
//--------
cutilCheckError(cutStopTimer(timerED));
if (pTimer)
printf("ED time: %f (ms)\n", cutGetTimerValue(timerED));
tEvaluateDiff += cutGetTimerValue(timerED);
cutilCheckError(cutResetTimer(timerED));
int switchedRows[2];
int switchedColumns[2];
if (sortP)
{
printf("Before sorting\n");
int count2 = 0;
for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
{
count2++;
printf("\t %d i%d->%f", var, differences[var].index,
differences[var].value);
if (count2 == 8)
{
printf("\n");
count2 = 0;
}
}
}
cutilCheckError(cutStartTimer(timerSort));
if (sGPU)
{
sortDifferencesGPU();
} else
{
qsort(differences, numberOfPersons + numberOfPersons,
sizeof(Difference), compare_differences);
}
if (sortP)
{
printf("After sorting\n");
int count2 = 0;
for (int var = 0; var < numberOfPersons + numberOfObjects; ++var)
{
count2++;
printf("\t %d i%d->%f", var, differences[var].index,
differences[var].value);
if (count2 == 8)
{
printf("\n");
count2 = 0;
}
}
}
cutilCheckError(cutStopTimer(timerSort));
if (pTimer)
printf("Srt time: %f (ms)\n", cutGetTimerValue(timerSort));
tSorting += cutGetTimerValue(timerSort);
cutilCheckError(cutResetTimer(timerSort));
int rdC = 0;
cutilCheckError(cutStartTimer(timerProc));
while (differences[0].index > 1)
{
//printf("Iteration: %d --> %f\n", rdC,differences[0].value);
rdC++;
// printf("\nDifference %d\n", differences[0].index);
Difference myDiff = differences[0];
int row1, row2, col1, col2;
// Here I need to retrieve the 2 columns and 2 rows that will be
// altered due to switch ...
// to not reUpdate all the differences in the Tree
float diffCheck;
if (myDiff.type == 0)
{ // Found in row..i.e. switching happens
//printf("Switching on row: \n");
// along columns
row1 = myDiff.index; // index of row of the difference
//printf("index: \n");
col1 = persons[row1]; // index of column of the chosen
// cell in the row of difference
col2 = myDiff.bestChange; // index of column of the best
//printf("bc: \n");
// cell in the row of difference
row2 = objects[col2]; // index of row of the chosen in the
// column of the best cell in the
// difference row
//printf("ma %d, bca %d",myDiff.myAssigned,myDiff.bestChangeAssigned);
if (col1 != myDiff.myAssigned || row2
!= myDiff.bestChangeAssigned)
{
diffCheck = -1.0;
} else if (row2 == -1)
{
diffCheck = aijMatrix[row1][col2] - aijMatrix[row1][col1];
} else
{
diffCheck = aijMatrix[row1][col2] + aijMatrix[row2][col1]
- (aijMatrix[row1][col1] + aijMatrix[row2][col2]);
}
} else
{
//printf("Switching on column: \n");
col1 = myDiff.index; // index of column of the difference
row1 = objects[col1]; // index of row of the chosen cell
// in the column of difference
row2 = myDiff.bestChange; // index of row of the best cell
// in the column of difference
col2 = persons[row2]; // index of column of the chosen in
// the row of the best cell in the
// difference column
if (row1 != myDiff.myAssigned || col2
!= myDiff.bestChangeAssigned)
diffCheck = -1.0f;
else
diffCheck = aijMatrix[row1][col2] + aijMatrix[row2][col1]
- (aijMatrix[row1][col1] + aijMatrix[row2][col2]);
}
//printf("DiffCheck: \n");
// We need to check that our previous calculation still holds
// It may not due to second order effects
if (diffCheck <= 0)
{
if (myDiff.type == 0)
rowDifferences[myDiff.index] = emptyDiff;
else
columnDifferences[myDiff.index] = emptyDiff;
removeDifference(0);
continue;
}
// System.out.println("Happiness before switch:
// "+calculateTotalHappiness());
// So now we switch rows and columns
persons[row1] = col2;
if (row2 != -1)
{
// if (col1 == -1)
// bannedSwitches[row1].add(row2);
persons[row2] = col1;
}
// if (col1 != -1)
objects[col1] = row2;
objects[col2] = row1;
// if (col1 == -1 && row2 == -1)
// return;
// System.out.println("Happiness after switch:
// "+calculateTotalHappiness());
// Now we update the modified rows and columns
switchedRows[0] = row1;
switchedRows[1] = row2;
switchedColumns[0] = col1;
switchedColumns[1] = col2;
int i;
for (i = 0; i < 2; i++)
{
if (columnDifferences[switchedColumns[i]].index != -1)
{
Difference toRemove = columnDifferences[switchedColumns[i]];
int z;
for (z = 1; z < numberOfObjects + numberOfPersons; z++)
{
Difference toCheck = differences[z];
if (toCheck.index == -1)
break;
if (toCheck.index == toRemove.index && toCheck.type
== toRemove.type)
{
removeDifference(z);
break;
}
}
columnDifferences[switchedColumns[i]] = emptyDiff;
}
addColBestDifference(switchedColumns[i], 1);
}
for (i = 0; i < 2; i++)
{
if (rowDifferences[switchedRows[i]].index != -1)
{
Difference toRemove = rowDifferences[switchedRows[i]];
int z;
for (z = 1; z < numberOfObjects + numberOfPersons; z++)
{
Difference toCheck = differences[z];
if (toCheck.index == -1)
break;
if (toCheck.index == toRemove.index && toCheck.type
== toRemove.type)
{
removeDifference(z);
break;
}
}
rowDifferences[switchedRows[i]] = emptyDiff;
}
addRowBestDifference(switchedRows[i], 1);
}
// if (time(NULL) - start > initializationTimeLim) {
// break;
// }
}
cutilCheckError(cutStopTimer(timerProc));
if (pTimer)
printf("PC time for %d rounds: %f (ms)\n", rdC, cutGetTimerValue(
timerProc));
tSwitching += cutGetTimerValue(timerProc);
cutilCheckError(cutResetTimer(timerProc));
// System.out.println("Total Happiness " +
// calculateTotalHappiness());
newTotalHappiness = calculateTotalHappiness();
if (newTotalHappiness == oldTotalHappiness)
{
if (!niOut)
{
printf("Finished\n");
}
break;
} // || (SimulableSystem.currentTimeMillis() - start) > Conf.heuristicMaxTime
}
}
void smartInitialAssignment() {
int row1, curRow, col2, i;
for (i = 0; i < numberOfPersons; i++)
{
curRow = i;
while (curRow != -1)
{
Difference myDiff = getRowBestDifference(curRow);
if (myDiff.index != -1)
{
row1 = myDiff.index; // index of row of the difference
col2 = myDiff.bestChange; // index of column of the best
// cell in the row of difference
curRow = objects[col2]; // index of row of the chosen in the
// column of the best cell in the
// difference row
persons[row1] = col2;
objects[col2] = row1;
if (curRow != -1)
{
persons[curRow] = -1;
bannedSwitches[row1 * numberOfObjects + curRow] = true;
}
// if (pRes)
// printf("P%d O%d ->%f \n", row1, col2, myDiff.value);
}
}
}
}
//void smartInitialAssignmentWithInitial(int initialAssignmet[])
//{
// int row1, curRow, col2, i;
// // time_t start = time(NULL);
// for (i = 0; i < numberOfPersons; i++)
// {
// if (initialAssignmet[i] != -1 && objects[initialAssignmet[i]] == -1
// && aijMatrix[i][initialAssignmet[i]] != negInf)
// {
// persons[i] = initialAssignmet[i];
// objects[initialAssignmet[i]] = i;
// } else
// {
// curRow = i;
// while (curRow != -1)
// {
// Difference myDiff = getRowBestDifference(curRow);
// if (myDiff.index != -1)
// {
// row1 = myDiff.index; // index of row of the
// // difference
// col2 = myDiff.bestChange; // index of column of the
// // best cell in the row of
// // difference
// curRow = objects[col2]; // index of row of the chosen in
// // the column of the best cell
// // in the difference row
// persons[row1] = col2;
// objects[col2] = row1;
// if (curRow != -1)
// {
// persons[curRow] = -1;
// bannedSwitches[row1][curRow] = 1;
// }
// }
// }
// // if (time(NULL) - start > initializationTimeLim) {
// // fastInitialAssignment();
// // break;
// // }
// }
// }
//}
//void addRowBestDifference(int rowId, int sort)
//{
// Difference myDifference = getRowBestDifference(rowId);
// if (myDifference.index != -1)
// {
// myDifference.myAssigned = persons[rowId];
// myDifference.bestChangeAssigned = objects[myDifference.bestChange];
// if (sort == 0)
// {
// differences[rowId] = myDifference;
// } else
// {
// addSortedDifference(myDifference);
// }
// rowDifferences[rowId] = myDifference;
// }
//}
//JNIEXPORT jobjectArray JNICALL Java_se_peertv_opto_ImprovedHeuristic_JniHeuristic_heuristic
//(JNIEnv *env, jobject self,
// jobjectArray aijMatrixCp, int numberOfPersonsCp, int numberOfObjectsCp, jintArray initialAssignments) {
// // for test ...
// int C = 300;
// numberOfPersons = 100;
// numberOfObjects = 100;
// emptyDiff.index = -1;
// float *aijPtr;
// aijPtr = malloc(numberOfPersons * numberOfObjects * sizeof(float));
// int *bannedSwitchesPtr;
// bannedSwitchesPtr = malloc(numberOfPersons * numberOfObjects * sizeof(int));
// aijMatrix = malloc(numberOfPersons * sizeof(float *));
// bannedSwitches = malloc(numberOfPersons * sizeof(int *));
// int i,j;
//
// for (i = 0; i < numberOfPersons; i++) {
// aijMatrix[i] = aijPtr + (i*numberOfObjects);
// bannedSwitches[i] = bannedSwitchesPtr + (i*numberOfObjects);
// }
//
// for (i = 0; i < numberOfPersons; i++) {
// for (j = 0; j < numberOfObjects; j++) {
// aijMatrix[i][j] = random() % C;
// //printf("j has the value %f and is stored at %p\n", aijMatrix[i][j], (void *)&aijMatrix[i][j]);
// //printf("%f jjj ",aijMatrix[i][j]);
// }
// }s
// int *initialAssignment;
// initialAssignment = malloc(numberOfPersons * sizeof(int));
// int len = 0;
// if (len > 0) {
// for(i = 0; i < numberOfPersons; i++)
// {
// initialAssignment[i] = i;
// }
// }
// clearedBannedSwitches = malloc(numberOfPersons * sizeof(int));
// persons = malloc(numberOfPersons * sizeof(int));
// differences = malloc((numberOfPersons + numberOfObjects) * sizeof(Difference));
// rowDifferences = malloc(numberOfPersons * sizeof(Difference));
// for (i = 0; i < numberOfPersons; i++) {
// clearedBannedSwitches[i] = 0;
// persons[i] = -1;
// for (j = 0; j < numberOfPersons; j++)
// bannedSwitches[i][j] = -1;
// rowDifferences[i] = emptyDiff;
// differences[i] = emptyDiff;
// }
//
// columnDifferences = malloc(numberOfObjects * sizeof(Difference));
// objects = malloc(numberOfObjects * sizeof (int));
// for (i = 0; i < numberOfObjects; i++) {
// objects[i] = -1;
// columnDifferences[i] = emptyDiff;
// differences[numberOfPersons + i] = emptyDiff;
// }
//
// if (len > 0)
// smartInitialAssignmentWithInitial(initialAssignment);
// else
// smartInitialAssignment();
// enhanceBySwitching();
// for (i = 0; i < numberOfPersons; i++) {
// printf("aij ");
// printf("%d ",i);
// printf("%d ",persons[i]);
// printf("%f \n",aijMatrix[i][persons[i]]);
// }
// return 0;
// jintArray r= (*env)->NewIntArray(env, numberOfPersons);
// //for (i = 0; i < numberOfPersons; i++)
// // (*env)->SetObjectArrayElement(env, r,i,persons[i]);
// return r;
//}
void printH(float** aijMatrix, int numberOfPersons, int numberOfObjects) {
for (int i = 0; i < numberOfPersons; i++)
{
for (int j = 0; j < numberOfObjects; j++)
{
printf("%f ", aijMatrix[i][j]);
//printf("j has the value %f and is stored at %p\n", aijMatrix[i][j], (void *)&aijMatrix[i][j]);
//printf("%f jjj ",aijMatrix[i][j]);
}
printf("\n");
}
}
void printG(float* h_A, int numberOfPersons, int numberOfObjects) {
for (int i = 0; i < numberOfPersons; i++)
{
for (int j = 0; j < numberOfObjects; j++)
{
printf("%f ", h_A[i * numberOfPersons + j]);
//printf("j has the value %f and is stored at %p\n", aijMatrix[i][j], (void *)&aijMatrix[i][j]);
//printf("%f jjj ",aijMatrix[i][j]);
}
printf("\n");
}
}
void listCudaDevice() {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0)
{
// This function call returns 9999 for both major & minor fields, if no CUDA capable devices are present
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" CUDA Capability Major revision number: %d\n",
deviceProp.major);
printf(" CUDA Capability Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %u bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %u bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %u bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(
" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(
" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %u bytes\n",
deviceProp.memPitch);
// printf(" Texture alignment: %u bytes\n",
// deviceProp.textureAlignment);
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No");
#endif
#if CUDART_VERSION >= 2020
printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated: %s\n", deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Compute mode: %s\n", deviceProp.computeMode == cudaComputeModeDefault ?
"Default (multiple host threads can use this device simultaneously)" :
deviceProp.computeMode == cudaComputeModeExclusive ?
"Exclusive (only one host thread at a time can use this device)" :
deviceProp.computeMode == cudaComputeModeProhibited ?
"Prohibited (no host thread can use this device)" :
"Unknown");
#endif
}
}
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(-1);
}
}
|
b914462f246397de4d7ff76458cbb8dd6b798649.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.cu"
#else
#include "../common.h"
void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int nOutputCols,
int nOutputRows)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THCIndex_t *indices_data;
real *output_data;
real *input_data;
THCUNN_argCheck(state, input->nDimension == 3 || input->nDimension == 4, 2, input,
"3D or 4D (batch mode) tensor expected for input, but got: %s");
if (input->nDimension == 3) {
int64_t nInputCols = input->size[2];
int64_t nInputRows = input->size[1];
int64_t nInputPlane = input->size[0];
int64_t istride_d = input->stride[0];
int64_t istride_h = input->stride[1];
int64_t istride_w = input->stride[2];
input_data = THCTensor_(data)(state, input);
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCIndexTensor_(resize4d)(state, indices, 2, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCIndexTensor_(data)(state, indices);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run maxpool kernel
hipLaunchKernelGGL(( adaptivemaxpool) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
istride_h, istride_w, istride_d);
THCudaCheck(hipGetLastError());
} else {
input = THCTensor_(newContiguous)(state, input);
int64_t nInputCols = input->size[3];
int64_t nInputRows = input->size[2];
int64_t nInputPlane = input->size[1];
int64_t nbatch = input->size[0];
int64_t istride_d = input->stride[1];
int64_t istride_h = input->stride[2];
int64_t istride_w = input->stride[3];
input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
THCIndexTensor_(resize5d)(state, indices, 2, nbatch, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCIndexTensor_(data)(state, indices);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run maxpool kernel
hipLaunchKernelGGL(( adaptivemaxpool) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
istride_h, istride_w, istride_d);
THCudaCheck(hipGetLastError());
// clean
THCTensor_(free)(state, input);
}
}
void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices)
{
bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
THCIndex_t *indices_data;
real *gradInput_data;
real *gradOutput_data;
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if (input->nDimension == 3) {
int64_t nInputCols = input->size[2];
int64_t nInputRows = input->size[1];
int64_t nInputPlane = input->size[0];
int64_t nOutputCols = gradOutput->size[2];
int64_t nOutputRows = gradOutput->size[1];
//bool atomic = (nInputCols%nOutputCols != 0) || (nInputRows%nOutputRows != 0);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
indices_data = THCIndexTensor_(data)(state, indices);
gradOutput_data = THCTensor_(data)(state, gradOutput);
gradInput_data = THCTensor_(data)(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
}
else
{
// run updateGradInput kernel
hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
}
THCudaCheck(hipGetLastError());
} else {
int64_t nInputCols = input->size[3];
int64_t nInputRows = input->size[2];
int64_t nInputPlane = input->size[1];
int64_t nbatch = input->size[0];
int64_t nOutputCols = gradOutput->size[3];
int64_t nOutputRows = gradOutput->size[2];
//bool atomic = //(nInputCols%nOutputCols != 0) || (nInputRows%nOutputRows != 0);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
indices_data = THCIndexTensor_(data)(state, indices);
gradOutput_data = THCTensor_(data)(state, gradOutput);
gradInput_data = THCTensor_(data)(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
if(atomic)
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
}
else
{
// run updateGradInput kernel, accumulate gradients atomically
hipLaunchKernelGGL(( adaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
}
THCudaCheck(hipGetLastError());
}
// clean
THCTensor_(free)(state,gradOutput);
}
#endif
| b914462f246397de4d7ff76458cbb8dd6b798649.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.cu"
#else
#include "../common.h"
void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int nOutputCols,
int nOutputRows)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THCIndex_t *indices_data;
real *output_data;
real *input_data;
THCUNN_argCheck(state, input->nDimension == 3 || input->nDimension == 4, 2, input,
"3D or 4D (batch mode) tensor expected for input, but got: %s");
if (input->nDimension == 3) {
int64_t nInputCols = input->size[2];
int64_t nInputRows = input->size[1];
int64_t nInputPlane = input->size[0];
int64_t istride_d = input->stride[0];
int64_t istride_h = input->stride[1];
int64_t istride_w = input->stride[2];
input_data = THCTensor_(data)(state, input);
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCIndexTensor_(resize4d)(state, indices, 2, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCIndexTensor_(data)(state, indices);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run maxpool kernel
adaptivemaxpool <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data,
indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
istride_h, istride_w, istride_d);
THCudaCheck(cudaGetLastError());
} else {
input = THCTensor_(newContiguous)(state, input);
int64_t nInputCols = input->size[3];
int64_t nInputRows = input->size[2];
int64_t nInputPlane = input->size[1];
int64_t nbatch = input->size[0];
int64_t istride_d = input->stride[1];
int64_t istride_h = input->stride[2];
int64_t istride_w = input->stride[3];
input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
THCIndexTensor_(resize5d)(state, indices, 2, nbatch, nInputPlane, nOutputRows, nOutputCols);
indices_data = THCIndexTensor_(data)(state, indices);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run maxpool kernel
adaptivemaxpool <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data,
indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
istride_h, istride_w, istride_d);
THCudaCheck(cudaGetLastError());
// clean
THCTensor_(free)(state, input);
}
}
void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           THCIndexTensor *indices)
{
  // Backward pass of adaptive max pooling: scatters each gradOutput value back
  // to the input location recorded in `indices` during the forward pass.
  // Handles 3D (C x H x W) and 4D (N x C x H x W) inputs.
  //
  // Atomic accumulation is forced on: with adaptive pooling, several output
  // cells can map to the same input element, so the non-atomic kernel can
  // lose updates.
  bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests
  THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);

  THCIndex_t *indices_data;
  real *gradInput_data;
  real *gradOutput_data;

  gradOutput = THCTensor_(newContiguous)(state, gradOutput);

  if (input->nDimension == 3) {
    int64_t nInputCols = input->size[2];
    int64_t nInputRows = input->size[1];
    int64_t nInputPlane = input->size[0];
    int64_t nOutputCols = gradOutput->size[2];
    int64_t nOutputRows = gradOutput->size[1];

    //bool atomic = (nInputCols%nOutputCols != 0) || (nInputRows%nOutputRows != 0);

    THCTensor_(resizeAs)(state, gradInput, input);
    THCTensor_(zero)(state, gradInput);

    indices_data = THCIndexTensor_(data)(state, indices);
    gradOutput_data = THCTensor_(data)(state, gradOutput);
    gradInput_data = THCTensor_(data)(state, gradInput);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane,yblocks);
    dim3 threads(32,8);

    if(atomic)
    {
      // run updateGradInput kernel, accumulate gradients atomically
      atomicadaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
        indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
        nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
    }
    else
    {
      // run updateGradInput kernel (non-atomic variant, matching the 4D path;
      // this branch previously launched the atomic kernel by copy-paste --
      // it is currently unreachable because `atomic` is hardcoded true)
      adaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
        indices_data+nInputPlane*nOutputCols*nOutputRows, indices_data,
        nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
    }
    THCudaCheck(cudaGetLastError());
  } else {
    int64_t nInputCols = input->size[3];
    int64_t nInputRows = input->size[2];
    int64_t nInputPlane = input->size[1];
    int64_t nbatch = input->size[0];
    int64_t nOutputCols = gradOutput->size[3];
    int64_t nOutputRows = gradOutput->size[2];

    //bool atomic = //(nInputCols%nOutputCols != 0) || (nInputRows%nOutputRows != 0);

    THCTensor_(resizeAs)(state, gradInput, input);
    THCTensor_(zero)(state, gradInput);

    indices_data = THCIndexTensor_(data)(state, indices);
    gradOutput_data = THCTensor_(data)(state, gradOutput);
    gradInput_data = THCTensor_(data)(state, gradInput);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane*nbatch,yblocks);
    dim3 threads(32,8);

    if(atomic)
    {
      // run updateGradInput kernel, accumulate gradients atomically
      atomicadaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
        indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
        nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
    }
    else
    {
      // run updateGradInput kernel (no atomics; the old comment claiming
      // atomic accumulation here was wrong)
      adaptivemaxgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data,
        indices_data+nbatch*nInputPlane*nOutputCols*nOutputRows, indices_data,
        nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols);
    }
    THCudaCheck(cudaGetLastError());
  }
  // clean
  THCTensor_(free)(state,gradOutput);
}
#endif
|
812c40fb5805fe4d989dca05a66e06ec0385ccee.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
// Element-wise bitwise AND: self_ = src1 & src2 (in place when self_ == src1).
// Only valid for integer tensor instantiations; floating-point types error out.
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitand is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ &= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 & src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}
// Element-wise bitwise OR: self_ = src1 | src2 (in place when self_ == src1).
// Only valid for integer tensor instantiations; floating-point types error out.
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitor is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ |= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 | src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}
// Element-wise maximum: self = max(src1, src2), computed in place when `self`
// aliases `src1`, otherwise into `self` after resizing it to match `src1`.
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  bool applied;
  if (self == src1) {
    applied = THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>());
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    applied = THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>());
  }
  if (!applied) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
}
// Element-wise minimum: self = min(src1, src2), computed in place when `self`
// aliases `src1`, otherwise into `self` after resizing it to match `src1`.
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  bool applied;
  if (self == src1) {
    applied = THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>());
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    applied = THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>());
  }
  if (!applied) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
}
// Element-wise maximum against a scalar: self = max(src, value).
// In place when self == src, otherwise self is resized to match src.
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));

  if (self == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}

// Element-wise minimum against a scalar: self = min(src, value).
// In place when self == src, otherwise self is resized to match src.
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));

  if (self == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
#if !defined(THC_REAL_IS_BOOL)
// Forwards result<-src named-tensor name propagation to ATen's inference
// helper; split out so the macro body below can stay a single expression.
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
  at::namedinference::propagate_names(result, src);
}

// Generates an element-wise unary op THCTensor_(NAME) that applies CFUNC to
// every element of `src`, in place when self_ == src, otherwise into a
// resized self_. NOTE: comments cannot go inside the macro body -- a //
// comment would swallow the trailing line-continuation backslash.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)           \
  struct Tensor_##NAME##_##REAL##_Op {                                  \
    __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
      *out = CFUNC(*in);                                                \
    }                                                                   \
                                                                        \
    __device__ __forceinline__ void operator()(scalar_t* v) const {     \
      *v = CFUNC(*v);                                                   \
    }                                                                   \
  };                                                                    \
                                                                        \
  void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
    THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));       \
    at::assert_no_internal_overlap(self_);                              \
    if (self_ == src) {                                                 \
      if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
        THArgCheck(false, 2, CUTORCH_DIM_WARNING);                      \
      }                                                                 \
    } else {                                                            \
      THCTensor_(resizeAs)(state, self_, src);                          \
                                                                        \
      if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
        THArgCheck(false, 2, CUTORCH_DIM_WARNING);                      \
      }                                                                 \
    }                                                                   \
                                                                        \
    THCudaCheck(hipGetLastError());                                     \
    propagate_names_if_named_tensor_enabled(self_, src);                \
  }

// Extra indirection so NAME/CFUNC/REAL get macro-expanded before pasting.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
  IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)

// Unary transcendental ops exist only for floating-point instantiations.
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)

IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  exp, THCNumerics<scalar_t>::exp,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  cos, THCNumerics<scalar_t>::cos,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  tan, THCNumerics<scalar_t>::tan,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  erf, THCNumerics<scalar_t>::erf,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc,  Real)

#endif

#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
// Clamp every element of `src` into [min_value, max_value], writing the
// result into `self_` (in place when self_ aliases src).
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
  scalar_t max_value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));

  bool applied;
  if (self_ == src) {
    applied = THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value));
  } else {
    THCTensor_(resizeAs)(state, self_, src);
    applied = THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value));
  }
  if (!applied) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }

  THCudaCheck(hipGetLastError());
}
// Cross product of x and y along `dimension`, written into self.
// Narrowing each tensor to a single slice along `dimension` gives one element
// per cross product; TensorCrossOp then uses the original strides
// (sx, sy, so) to step to the remaining components of each 3-vector.
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));

  int64_t sx = THCTensor_(stride)(state, x, dimension);
  int64_t sy = THCTensor_(stride)(state, y, dimension);
  int64_t so = THCTensor_(stride)(state, self, dimension);
  THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
  THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
  THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
  if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
  // Release the temporary narrowed views.
  THCTensor_(free)(state, nx);
  THCTensor_(free)(state, ny);
  THCTensor_(free)(state, nself);
}
namespace {
// Wraps a raw THCTensor* in an owning intrusive_ptr without stealing the
// caller's reference: incref first, then reclaim, so the refcount stays
// balanced when the wrapper is destroyed.
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
  c10::raw::intrusive_ptr::incref(self);
  return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}
// self_ = src1 + value * src2, delegated to ATen's add_out.
// retainTensorImpl keeps refcounts balanced for the temporary at::Tensor wrappers.
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
  // Half instantiations must pass the alpha scalar as at::Half.
  auto alpha = at::Half(value);
#else
  auto alpha = value;
#endif
  at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}

// self_ = src1 - value * src2, delegated to ATen's sub_out.
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
  // Half instantiations must pass the alpha scalar as at::Half.
  auto alpha = at::Half(value);
#else
  auto alpha = value;
#endif
  at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}

// self_ = src1 * src2 (element-wise), delegated to ATen's mul_out.
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
  at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}

// self_ = src1 / src2 (element-wise), delegated to ATen's div_out.
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
  at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
// Element-wise left shift: self_ = src1 << src2 (in place when self_ == src1).
// Not defined for half tensors.
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
  return THError("clshift not supported for torch.CudaHalfTensor");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ <<= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 << src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}

// Element-wise right shift: self_ = src1 >> src2 (in place when self_ == src1).
// Not defined for half tensors.
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
  return THError("crshift not supported for torch.CudaHalfTensor");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ >>= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 >> src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(hipGetLastError());
#endif
}
// Element-wise remainder via TensorCRemainderOp (in place when self == src1).
// Sign semantics are defined by the op functor, not here.
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}

// Element-wise fmod via TensorCFmodOp (in place when self == src1).
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
#endif
#endif
| 812c40fb5805fe4d989dca05a66e06ec0385ccee.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu"
#else
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
// Element-wise bitwise AND: self_ = src1 & src2 (in place when self_ == src1).
// Integer tensor types only.
void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitand is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ &= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 & src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(cudaGetLastError());
#endif
}

// Element-wise bitwise OR: self_ = src1 | src2 (in place when self_ == src1).
// Integer tensor types only.
void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
  return THError("cbitor is only supported for integer type tensors");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ |= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 | src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(cudaGetLastError());
#endif
}
// Element-wise maximum: self = max(src1, src2); in place when self == src1.
void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}

// Element-wise minimum: self = min(src1, src2); in place when self == src1.
void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}

// self = max(src, value); in place when self == src.
void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));

  if (self == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}

// self = min(src, value); in place when self == src.
void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));

  if (self == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
#if !defined(THC_REAL_IS_BOOL)
// Forwards result<-src named-tensor name propagation to ATen's inference
// helper; split out so the macro body below can stay a single expression.
static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) {
  at::namedinference::propagate_names(result, src);
}

// Generates an element-wise unary op THCTensor_(NAME) that applies CFUNC to
// every element of `src`, in place when self_ == src, otherwise into a
// resized self_. NOTE: comments cannot go inside the macro body -- a //
// comment would swallow the trailing line-continuation backslash.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)           \
  struct Tensor_##NAME##_##REAL##_Op {                                  \
    __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \
      *out = CFUNC(*in);                                                \
    }                                                                   \
                                                                        \
    __device__ __forceinline__ void operator()(scalar_t* v) const {     \
      *v = CFUNC(*v);                                                   \
    }                                                                   \
  };                                                                    \
                                                                        \
  void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \
    THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));       \
    at::assert_no_internal_overlap(self_);                              \
    if (self_ == src) {                                                 \
      if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \
        THArgCheck(false, 2, CUTORCH_DIM_WARNING);                      \
      }                                                                 \
    } else {                                                            \
      THCTensor_(resizeAs)(state, self_, src);                          \
                                                                        \
      if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \
        THArgCheck(false, 2, CUTORCH_DIM_WARNING);                      \
      }                                                                 \
    }                                                                   \
                                                                        \
    THCudaCheck(cudaGetLastError());                                    \
    propagate_names_if_named_tensor_enabled(self_, src);                \
  }

// Extra indirection so NAME/CFUNC/REAL get macro-expanded before pasting.
#define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \
  IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL)

// Unary transcendental ops exist only for floating-point instantiations.
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)

IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  exp, THCNumerics<scalar_t>::exp,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  cos, THCNumerics<scalar_t>::cos,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  tan, THCNumerics<scalar_t>::tan,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh,  Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(  erf, THCNumerics<scalar_t>::erf,   Real)
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc,  Real)

#endif

#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_
#undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC
// Clamp every element of src into [min_value, max_value]; in place when self_ == src.
void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value,
  scalar_t max_value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
  if (self_ == src) {
    if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src);
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(cudaGetLastError());
}
// Cross product of x and y along `dimension`, written into self. Narrowed
// views expose one element per cross product; TensorCrossOp uses the original
// strides (sx, sy, so) to reach the other components of each 3-vector.
void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y));

  int64_t sx = THCTensor_(stride)(state, x, dimension);
  int64_t sy = THCTensor_(stride)(state, y, dimension);
  int64_t so = THCTensor_(stride)(state, self, dimension);
  THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1);
  THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1);
  THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1);
  if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }
  // Release the temporary narrowed views.
  THCTensor_(free)(state, nx);
  THCTensor_(free)(state, ny);
  THCTensor_(free)(state, nself);
}
namespace {
// Wraps a raw THCTensor* in an owning intrusive_ptr without stealing the
// caller's reference: incref first, then reclaim, so refcounts stay balanced.
c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) {
  c10::raw::intrusive_ptr::incref(self);
  return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self);
}
}

// self_ = src1 + value * src2, delegated to ATen's add_out.
void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
  // Half instantiations must pass the alpha scalar as at::Half.
  auto alpha = at::Half(value);
#else
  auto alpha = value;
#endif
  at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}

// self_ = src1 - value * src2, delegated to ATen's sub_out.
void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
#ifdef THC_REAL_IS_HALF
  // Half instantiations must pass the alpha scalar as at::Half.
  auto alpha = at::Half(value);
#else
  auto alpha = value;
#endif
  at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha);
}

// self_ = src1 * src2 (element-wise), delegated to ATen's mul_out.
void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
  at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}

// self_ = src1 / src2 (element-wise), delegated to ATen's div_out.
void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
  auto out = at::Tensor(retainTensorImpl(self_));
  at::div_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)));
}
// Element-wise left shift: self_ = src1 << src2 (in place when self_ == src1).
// Not defined for half tensors.
void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
  return THError("clshift not supported for torch.CudaHalfTensor");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ <<= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 << src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(cudaGetLastError());
#endif
}

// Element-wise right shift: self_ = src1 >> src2 (in place when self_ == src1).
// Not defined for half tensors.
void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2)
{
#if defined(THC_REAL_IS_HALF)
  return THError("crshift not supported for torch.CudaHalfTensor");
#else
  THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 3, "sizes do not match");

  if (self_ == src1) {
    // self_ >>= src2  (the old "self /= src2" comment was a copy-paste leftover)
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self_, src1);

    // self_ = src1 >> src2
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }

  THCudaCheck(cudaGetLastError());
#endif
}
// Element-wise remainder via TensorCRemainderOp (in place when self == src1).
void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}

// Element-wise fmod via TensorCFmodOp (in place when self == src1).
void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2));
  THArgCheck(THCTensor_(nElement)(state, src1) ==
             THCTensor_(nElement)(state, src2), 2, "sizes do not match");

  if (self == src1) {
    if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  } else {
    THCTensor_(resizeAs)(state, self, src1);
    if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) {
      THArgCheck(false, 2, CUTORCH_DIM_WARNING);
    }
  }
}
#endif
#endif
|
a5f4297242120a647da04bdce9b9fee76a1fb36d.hip | // !!! This is a file automatically generated by hipify!!!
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* gpu_errchk.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: talemari <talemari@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/04 15:30:53 by talemari #+# #+# */
/* Updated: 2017/06/05 10:57:14 by talemari ### ########.fr */
/* */
/* ************************************************************************** */
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include "rt.cuh"
/*
** Checks a cuda function return value for errors and exits with an error
** message.
*/
__host__
// Checks a HIP return code; on any error, prints a highlighted
// "GPUassert: <message>" to stderr and exits with that code as the
// process exit status. No-op on hipSuccess.
//
// Fixes vs. the previous version:
//  - removed a leftover debug `write(1, "test\n", 5)` to stdout;
//  - the prefix string is 18 bytes (ESC + "[1;93m" + "GPUassert: "),
//    but only 17 were written, dropping the trailing space;
//  - the error string is only fetched/measured on the error path.
void gpu_errchk(int code)
{
	if (code != hipSuccess)
	{
		const char	*s;
		int			l;

		s = hipGetErrorString((hipError_t)code);
		l = strlen(s);
		write(2, "\e[1;93mGPUassert: ", 18);
		write(2, s, l);
		write(2, "\e[0m\n", 5);
		exit(code);
	}
}
| a5f4297242120a647da04bdce9b9fee76a1fb36d.cu | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* gpu_errchk.cu :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: talemari <talemari@student.42.fr> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2017/06/04 15:30:53 by talemari #+# #+# */
/* Updated: 2017/06/05 10:57:14 by talemari ### ########.fr */
/* */
/* ************************************************************************** */
#include <cuda_runtime.h>
#include <stdlib.h>
#include "rt.cuh"
/*
** Checks a cuda function return value for errors and exits with an error
** message.
*/
__host__
// Checks a CUDA return code; on any error, prints a highlighted
// "GPUassert: <message>" to stderr and exits with that code as the
// process exit status. No-op on cudaSuccess.
//
// Fixes vs. the previous version:
//  - removed a leftover debug `write(1, "test\n", 5)` to stdout;
//  - the prefix string is 18 bytes (ESC + "[1;93m" + "GPUassert: "),
//    but only 17 were written, dropping the trailing space;
//  - the error string is only fetched/measured on the error path.
void gpu_errchk(int code)
{
	if (code != cudaSuccess)
	{
		const char	*s;
		int			l;

		s = cudaGetErrorString((cudaError_t)code);
		l = strlen(s);
		write(2, "\e[1;93mGPUassert: ", 18);
		write(2, s, l);
		write(2, "\e[0m\n", 5);
		exit(code);
	}
}
|
27861666a2743f765e4b816b769da012fbdfaa9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "check.h"
#include "GPU.h"
#include <iostream>
#include <mutex>
using namespace std;
// Result of the one-shot concurrency probe (see maxConcurrentBlockEval).
__device__ int maxConcurrentBlocksVar;
// Set once the probe has completed; volatile so late-starting blocks observe it.
__device__ volatile int maxConcurrentBlockEvalDoneVar;

// Device-side accessor: pointer to the concurrent-block counter.
__device__ int* maxConcurrentBlocks()
{
  return &maxConcurrentBlocksVar;
}

// Device-side accessor: pointer to the done flag (volatility cast away for atomics/plain stores).
__device__ int* maxConcurrentBlockEvalDone()
{
  return (int*)&maxConcurrentBlockEvalDoneVar;
}

// 4 MiB scratch buffer in device global memory that the delay loop streams through.
__device__ volatile float BigData_[1024 * 1024];

__device__ volatile float* BigData()
{
  return ::BigData_;
}
// Busy-work helper: performs roughly ITS multiply-add iterations spread over
// REGS values (intended to live in registers), reading from and writing back
// to the global BigData buffer so the compiler cannot eliminate the loop.
// Used to keep probe blocks resident long enough to be counted.
template<int ITS, int REGS>
class DelayFMADS
{
public:
  __device__ __inline__
  static void delay()
  {
    float values[REGS];
    #pragma unroll
    for(int r = 0; r < REGS; ++r)
      values[r] = BigData()[threadIdx.x + r * 32];

    #pragma unroll
    for(int i = 0; i < (ITS + REGS - 1) / REGS; ++i)
    {
      #pragma unroll
      for(int r = 0; r < REGS; ++r)
        values[r] += values[r] * values[r];
      // Fence keeps the compiler from collapsing iterations.
      __threadfence_block();
    }

    #pragma unroll
    for(int r = 0; r < REGS; ++r)
      BigData()[threadIdx.x + r * 32] = values[r];
  }
};
// Probe kernel: every block that starts before the first block finishes bumps
// maxConcurrentBlocksVar, yielding an empirical count of concurrently resident
// blocks. Blocks that begin after the done flag is set exit immediately.
__global__ void maxConcurrentBlockEval()
{
  if (*maxConcurrentBlockEvalDone() != 0)
    return;
  // One increment per block.
  if (threadIdx.x == 0)
    atomicAdd(maxConcurrentBlocks(), 1);
  // Stay resident long enough for concurrent blocks to be counted.
  DelayFMADS<10000, 32>::delay();
  __syncthreads();
  *maxConcurrentBlockEvalDone() = 1;
  __threadfence();
}
std::mutex gpuMutex;

// Lazily constructs the GPU singleton.
//
// The previous double-checked locking read `gpu` outside the mutex, which is
// a data race (and UB) on the unique_ptr under concurrent first calls. Since
// initialization happens once and later calls only pay for an uncontended
// lock, taking the mutex unconditionally is both correct and cheap.
void GPU::initGPU()
{
	std::lock_guard<std::mutex> lock(gpuMutex);
	if (!gpu.get())
		gpu.reset(new GPU());
}
// True when at least one GPU was detected during singleton construction.
bool GPU::isAvailable()
{
	initGPU();
	return (gpu->ngpus > 0);
}

// Fixed launch block size used by this project (threads per block).
int GPU::getBlockSize()
{
	return 128;
}

// Empirically measured number of concurrently resident blocks (see probe kernel).
int GPU::getBlockCount()
{
	initGPU();
	return gpu->nblocks;
}

// Number of streaming multiprocessors on device 0.
int GPU::getSMCount()
{
	initGPU();
	return gpu->nsms;
}

// Total constant memory on device 0, in bytes.
int GPU::getConstMemSize()
{
	initGPU();
	return gpu->szcmem;
}

// Shared memory per SM on device 0, in bytes.
int GPU::getSharedMemSizePerSM()
{
	initGPU();
	return gpu->szshmem;
}
// Bump allocator over the preallocated device arena.
// Returns NULL if no arena exists or the request does not fit (the check is
// conservative: it reserves room for the post-allocation alignment padding).
// The returned pointer is the current cursor; the cursor is then advanced by
// `size` and rounded up so the *next* allocation is 256-byte aligned.
void* GPU::malloc(size_t size)
{
#define MALLOC_ALIGNMENT 256
	initGPU();
	if (!gpu->gmem) return NULL;
	if (gpu->ptr + size + MALLOC_ALIGNMENT > gpu->gmem + gpu->szgmem)
		return NULL;
	void* result = gpu->ptr;
	gpu->ptr += size;
	// Round the cursor up to the next MALLOC_ALIGNMENT boundary.
	ptrdiff_t alignment = (ptrdiff_t)gpu->ptr % MALLOC_ALIGNMENT;
	if (alignment)
		gpu->ptr += MALLOC_ALIGNMENT - alignment;
	return result;
}
// Reset free memory pointer to the beginning of preallocated buffer.
// This invalidates every pointer previously handed out by GPU::malloc.
void GPU::mfree()
{
	initGPU();
	gpu->ptr = gpu->gmem;
}

// Check whether the specified memory address belongs to GPU memory allocation.
// NOTE(review): the upper bound uses `<=`, so the one-past-the-end address
// gmem + szgmem is reported as allocated -- confirm whether callers rely on that.
bool GPU::isAllocatedOnGPU(void* ptr)
{
	initGPU();
	if (!gpu->gmem) return false;
	if ((ptr >= gpu->gmem) && (ptr <= gpu->gmem + gpu->szgmem))
		return true;
	return false;
}
// Probes the system for GPUs, measures block-level concurrency, caches device
// properties, and preallocates an arena for GPU::malloc.
//
// Fixes vs. the previous version: the local holding the device-count result
// was named `hipError_t`, shadowing the type itself (a hipify artifact), and
// the hipMemGetInfo return value was the only unchecked runtime call.
GPU::GPU() : ngpus(0), gmem(NULL)
{
	// hipErrorNoDevice just means "no GPU present"; any other error is fatal.
	hipError_t err = hipGetDeviceCount(&ngpus);
	if (err != hipErrorNoDevice)
		CUDA_ERR_CHECK(err);
	if (!ngpus) return;

	// Empirically measure how many blocks can be resident at once.
	hipLaunchKernelGGL(( maxConcurrentBlockEval), dim3(1024), dim3(getBlockSize()), 0, 0, );
	CUDA_ERR_CHECK(hipGetLastError());
	CUDA_ERR_CHECK(hipDeviceSynchronize());
	CUDA_ERR_CHECK(hipMemcpyFromSymbol(&nblocks, maxConcurrentBlocksVar, sizeof(int)));

	hipDeviceProp_t props;
	CUDA_ERR_CHECK(hipGetDeviceProperties(&props, 0));
	nsms = props.multiProcessorCount;
	szcmem = props.totalConstMem;
	szshmem = props.sharedMemPerMultiprocessor;

	cout << "Using GPU " << props.name << " : max concurrent blocks = " << nblocks <<
		" : " << min(szshmem, (nsms * szshmem) / nblocks) << "B of shmem per block" << endl;

	// Preallocate 85% of GPU memory to save on costly subsequent allocations.
	size_t available, total;
	CUDA_ERR_CHECK(hipMemGetInfo(&available, &total));
	szgmem = 0.85 * available;
	CUDA_ERR_CHECK(hipMalloc(&gmem, szgmem));
	ptr = gmem;
}
unique_ptr<GPU> GPU::gpu;
| 27861666a2743f765e4b816b769da012fbdfaa9c.cu | #include "check.h"
#include "GPU.h"
#include <iostream>
#include <mutex>
using namespace std;
// Result of the one-shot concurrency probe (see maxConcurrentBlockEval).
__device__ int maxConcurrentBlocksVar;
// Set once the probe has completed; volatile so late-starting blocks observe it.
__device__ volatile int maxConcurrentBlockEvalDoneVar;

// Device-side accessor: pointer to the concurrent-block counter.
__device__ int* maxConcurrentBlocks()
{
  return &maxConcurrentBlocksVar;
}

// Device-side accessor: pointer to the done flag (volatility cast away for atomics/plain stores).
__device__ int* maxConcurrentBlockEvalDone()
{
  return (int*)&maxConcurrentBlockEvalDoneVar;
}

// 4 MiB scratch buffer in device global memory that the delay loop streams through.
__device__ volatile float BigData_[1024 * 1024];

__device__ volatile float* BigData()
{
  return ::BigData_;
}
// Busy-work helper: performs roughly ITS multiply-add iterations spread over
// REGS values (intended to live in registers), reading from and writing back
// to the global BigData buffer so the compiler cannot eliminate the loop.
template<int ITS, int REGS>
class DelayFMADS
{
public:
  __device__ __inline__
  static void delay()
  {
    float values[REGS];
    #pragma unroll
    for(int r = 0; r < REGS; ++r)
      values[r] = BigData()[threadIdx.x + r * 32];

    #pragma unroll
    for(int i = 0; i < (ITS + REGS - 1) / REGS; ++i)
    {
      #pragma unroll
      for(int r = 0; r < REGS; ++r)
        values[r] += values[r] * values[r];
      // Fence keeps the compiler from collapsing iterations.
      __threadfence_block();
    }

    #pragma unroll
    for(int r = 0; r < REGS; ++r)
      BigData()[threadIdx.x + r * 32] = values[r];
  }
};
// Probe kernel: every block that starts before the first block finishes bumps
// maxConcurrentBlocksVar, yielding an empirical count of concurrently resident
// blocks. Blocks that begin after the done flag is set exit immediately.
__global__ void maxConcurrentBlockEval()
{
  if (*maxConcurrentBlockEvalDone() != 0)
    return;
  // One increment per block.
  if (threadIdx.x == 0)
    atomicAdd(maxConcurrentBlocks(), 1);
  // Stay resident long enough for concurrent blocks to be counted.
  DelayFMADS<10000, 32>::delay();
  __syncthreads();
  *maxConcurrentBlockEvalDone() = 1;
  __threadfence();
}
std::mutex gpuMutex;
// Lazily construct the process-wide GPU singleton exactly once.
// NOTE(review): this is double-checked locking over a plain
// unique_ptr read; the unsynchronized first gpu.get() is technically a
// data race under the C++ memory model - confirm it is acceptable on
// the supported platforms or guard with std::call_once.
void GPU::initGPU()
{
    if (!gpu.get())
    {
        gpuMutex.lock();
        if (!gpu.get()) gpu.reset(new GPU());
        gpuMutex.unlock();
    }
}
// True when at least one CUDA-capable device was detected at init time.
bool GPU::isAvailable()
{
    initGPU();
    return (gpu->ngpus > 0);
}

// Fixed thread-block size used by this library's kernel launches.
int GPU::getBlockSize()
{
    return 128;
}

// Number of concurrently resident blocks, measured empirically by the
// maxConcurrentBlockEval probe in the GPU constructor.
int GPU::getBlockCount()
{
    initGPU();
    return gpu->nblocks;
}

// Streaming-multiprocessor count of device 0.
int GPU::getSMCount()
{
    initGPU();
    return gpu->nsms;
}

// Total constant memory of device 0, in bytes.
int GPU::getConstMemSize()
{
    initGPU();
    return gpu->szcmem;
}

// Shared memory per SM of device 0, in bytes.
int GPU::getSharedMemSizePerSM()
{
    initGPU();
    return gpu->szshmem;
}
// Bump-pointer allocation out of the preallocated GPU arena.
// Returns NULL when no arena exists or the request (plus alignment
// slack) would overflow it. Only the *next* allocation start is rounded
// up to MALLOC_ALIGNMENT; returned pointers remain aligned because the
// arena base comes from cudaMalloc and each step preserves alignment.
void* GPU::malloc(size_t size)
{
#define MALLOC_ALIGNMENT 256
    initGPU();
    if (!gpu->gmem) return NULL;
    // Conservative bound: reserves room for the worst-case alignment pad.
    if (gpu->ptr + size + MALLOC_ALIGNMENT > gpu->gmem + gpu->szgmem)
        return NULL;
    void* result = gpu->ptr;
    gpu->ptr += size;
    ptrdiff_t alignment = (ptrdiff_t)gpu->ptr % MALLOC_ALIGNMENT;
    if (alignment)
        gpu->ptr += MALLOC_ALIGNMENT - alignment;
    return result;
}
// Reset free memory pointer to the beginning of preallocated buffer.
// Invalidates every pointer previously handed out by GPU::malloc.
void GPU::mfree()
{
    initGPU();
    gpu->ptr = gpu->gmem;
}
// Check whether the specified memory address belongs to GPU memory allocation
// (i.e. lies inside the preallocated arena managed by GPU::malloc).
bool GPU::isAllocatedOnGPU(void* ptr)
{
    initGPU();
    if (!gpu->gmem) return false;
    if ((ptr >= gpu->gmem) && (ptr <= gpu->gmem + gpu->szgmem))
        return true;
    return false;
}
// Constructor: detects GPUs, empirically measures how many blocks the
// device can keep resident at once, caches device-0 properties, and
// preallocates 85% of the free GPU memory as the arena served by
// GPU::malloc. Fix: cudaMemGetInfo was the only runtime call not routed
// through CUDA_ERR_CHECK, so an error there would silently yield a
// garbage arena size.
GPU::GPU() : ngpus(0), gmem(NULL)
{
    cudaError_t cudaError = cudaGetDeviceCount(&ngpus);
    // cudaErrorNoDevice just means "no GPU present"; anything else is fatal.
    if (cudaError != cudaErrorNoDevice)
        CUDA_ERR_CHECK(cudaError);
    if (!ngpus) return;
    // Launch far more blocks than any device holds at once; the probe
    // kernel counts how many became resident before the first completion.
    maxConcurrentBlockEval<<<1024, getBlockSize()>>>();
    CUDA_ERR_CHECK(cudaGetLastError());
    CUDA_ERR_CHECK(cudaDeviceSynchronize());
    CUDA_ERR_CHECK(cudaMemcpyFromSymbol(&nblocks, maxConcurrentBlocksVar, sizeof(int)));
    cudaDeviceProp props;
    CUDA_ERR_CHECK(cudaGetDeviceProperties(&props, 0));
    nsms = props.multiProcessorCount;
    szcmem = props.totalConstMem;
    szshmem = props.sharedMemPerMultiprocessor;
    cout << "Using GPU " << props.name << " : max concurrent blocks = " << nblocks <<
        " : " << min(szshmem, (nsms * szshmem) / nblocks) << "B of shmem per block" << endl;
    // Preallocate 85% of GPU memory to save on costly subsequent allocations.
    size_t available, total;
    CUDA_ERR_CHECK(cudaMemGetInfo(&available, &total));
    szgmem = 0.85 * available;
    CUDA_ERR_CHECK(cudaMalloc(&gmem, szgmem));
    ptr = gmem;
}
unique_ptr<GPU> GPU::gpu;
|
4963f0e750ad3c1e7c821a6a897bfa923f760987.hip | // !!! This is a file automatically generated by hipify!!!
#include "memBenchmark.h"
#include "termcolor.hpp"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <roctracer/roctx.h>
#include <cmath>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <string>
// Initialize sizes
const int sizeX = 1234;
const int sizeY = 3153;
// Launch configuration bundle: thread-block dimensions plus grid dimensions.
struct DIMS
{
    dim3 dimBlock;
    dim3 dimGrid;
};
// Runtime-API error check: on failure prints file, line, and the error
// string, then terminates the process.
#define CUDA(call) do { \
    hipError_t e = (call); \
    if (e == hipSuccess) break; \
    fprintf(stderr, __FILE__":%d: %s (%d)\n", \
        __LINE__, hipGetErrorString(e), e); \
    exit(1); \
} while (0)
// Ceiling division for unsigned operands, e.g. divup(10, 3) == 4.
// Used to size grids so partial tiles at the matrix edge get a block.
inline unsigned divup(unsigned n, unsigned div)
{
    const unsigned biased = n + div - 1;
    return biased / div;
}
// Print elapsed time and effective bandwidth for a transpose benchmark.
// timeInMilliseconds covers all iterations; each iteration moves the
// sizeX*sizeY matrix twice (one read plus one write), hence the factor 2.
void printResults(double timeInMilliseconds, int iterations)
{
    // print out the time required for the kernel to finish the transpose operation
    double bandwidth = (iterations * 2 * 1000 * (double)(sizeX * sizeY * sizeof(float)))
        / (1000 * 1000 * 1000 * timeInMilliseconds);
    std::cout << "Elapsed Time for " << iterations << " runs = " << round(timeInMilliseconds) << "ms" << std::endl;
    std::cout << termcolor::bold << termcolor::red << termcolor::on_white
        << "Bandwidth (GB/s) = " << std::setprecision(4) << bandwidth
        << termcolor::reset << std::endl;
}
// Check errors: exact element-wise comparison of res against ref.
// Prints the first mismatch (if any) plus a pass/fail banner and
// returns true when all n elements match bit-for-bit.
bool postprocess(const float *ref, const float *res, int n)
{
    bool passed = true;
    for (int i = 0; i < n; i++)
    {
        if (res[i] != ref[i])
        {
            std::cout << "ID: " << i << " \t Res: " << res[i] << " \t Ref: " << ref[i] << std::endl;
            std::cout << termcolor::blink << termcolor::white << termcolor::on_red << "*** FAILED ***" << termcolor::reset << std::endl;
            passed = false;
            break;
        }
    }
    if (passed)
    {
        std::cout << termcolor::green << "Post process check passed!!" << termcolor::reset << std::endl;
    }
    return passed;
}
// Reset the host and device result buffers before a benchmark run.
// Note: hipMemset fills *bytes* with 0xFF, which as a float bit pattern
// is a NaN rather than -1.0f; that is harmless here because every
// element is overwritten by the kernel before comparison.
void preprocess(float *res, float *dev_res, int n)
{
    std::fill(res, res + n, -1);
    hipMemset(dev_res, -1, n * sizeof(float));
}
// Element-wise device-to-device copy of the sizeX x sizeY matrix
// (b = a). One thread per element; launch with a 2D grid covering at
// least sizeX x sizeY threads. Serves as the bandwidth upper bound for
// the transpose kernels below.
__global__ void copyKernel(const float* const a, float* const b)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Global X (column) index
    int j = blockIdx.y * blockDim.y + threadIdx.y; // Global Y (row) index
    // Threads past the matrix edge (grid overshoot) do nothing.
    if (i >= sizeX || j >= sizeY)
    {
        return;
    }
    int index = j * sizeX + i; // Flatten (i, j) into a row-major offset
    // Copy data from A to B
    b[index] = a[index];
}
// Naive transpose: each thread reads A at (i, j) and writes B at (j, i).
// Global reads are coalesced, but adjacent threads write index_out
// values sizeY apart, so the writes scatter across memory.
__global__ void matrixTransposeNaive(const float* const a, float* const b)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Global X (column) index
    int j = blockIdx.y * blockDim.y + threadIdx.y; // Global Y (row) index
    // Threads past the matrix edge do nothing.
    if (i >= sizeX || j >= sizeY)
    {
        return;
    }
    int index_in = j * sizeX + i; // Compute input index (i,j) from matrix A
    int index_out = i * sizeY + j; // Compute output index (j,i) in matrix B = transpose(A)
    // Copy data from A to B using transpose indices
    b[index_out] = a[index_in];
}
// Tiled transpose through shared memory: each block stages a
// BLOCK_SIZE_X x BLOCK_SIZE_Y tile of A, then writes it back transposed
// so both the global reads and the global writes are coalesced.
// The unpadded tile means the column-wise read mat[x][y] below causes
// shared-memory bank conflicts for the 32x32 instantiation; see
// matrixTransposeSharedwBC for the padded variant.
template<int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void matrixTransposeShared(const float* const a, float* const b)
{
    // Shared tile holding one block's worth of the input.
    __shared__ float mat[BLOCK_SIZE_Y][BLOCK_SIZE_X];
    // Compute input and output index
    int bx = blockIdx.x * BLOCK_SIZE_X; // Number of global threads in X before this block
    int by = blockIdx.y * BLOCK_SIZE_Y; // Number of global threads in Y before this block
    int i = bx + threadIdx.x; // Global input x index
    int j = by + threadIdx.y; // Global input y index
    // Blocks are transposed via the indices (ti uses by, tj uses bx);
    // the sub-matrix within a block is transposed through shared memory.
    int ti = by + threadIdx.x; // Global output x index
    int tj = bx + threadIdx.y; // Global output y index
    // Stage the tile, guarding against the ragged matrix edge.
    if (i < sizeX && j < sizeY)
    {
        mat[threadIdx.y][threadIdx.x] = a[j * sizeX + i];
    }
    __syncthreads();
    // Write the transposed tile back, again with a bounds guard.
    if (ti < sizeY && tj < sizeX)
    {
        b[tj * sizeY + ti] = mat[threadIdx.x][threadIdx.y]; // Switch threadIdx.x and threadIdx.y from input read
    }
}
// Same tiled transpose as matrixTransposeShared, except the tile's inner
// dimension is padded by one float so that the column-wise reads map to
// distinct shared-memory banks, removing the bank conflicts.
template<int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void matrixTransposeSharedwBC(const float* const a, float* const b)
{
    // +1 padding on the fastest-varying dimension avoids bank conflicts.
    __shared__ float mat[BLOCK_SIZE_Y][BLOCK_SIZE_X + 1];
    // Compute input and output index
    int bx = blockIdx.x * BLOCK_SIZE_X; // Number of global threads in X before this block
    int by = blockIdx.y * BLOCK_SIZE_Y; // Number of global threads in Y before this block
    int i = bx + threadIdx.x; // Global input x index
    int j = by + threadIdx.y; // Global input y index
    // Blocks are transposed via the indices (ti uses by, tj uses bx);
    // the sub-matrix within a block is transposed through shared memory.
    int ti = by + threadIdx.x; // Global output x index
    int tj = bx + threadIdx.y; // Global output y index
    // Stage the tile, guarding against the ragged matrix edge.
    if (i < sizeX && j < sizeY)
    {
        mat[threadIdx.y][threadIdx.x] = a[j * sizeX + i];
    }
    __syncthreads();
    // Write the transposed tile back, again with a bounds guard.
    if (ti < sizeY && tj < sizeX)
    {
        b[tj * sizeY + ti] = mat[threadIdx.x][threadIdx.y]; // Switch threadIdx.x and threadIdx.y from input read
    }
}
// Tiled transpose with a TILE x SIDE thread block: each thread handles
// TILE/SIDE tile rows via the unrolled k loops, increasing per-thread
// work and instruction-level parallelism. The +1 tile padding avoids
// shared-memory bank conflicts as in matrixTransposeSharedwBC.
template<int TILE, int SIDE>
__global__ void matrixTransposeUnrolled(const float* a, float* b)
{
    // Padded shared tile for one TILE x TILE sub-matrix.
    __shared__ float mat[TILE][TILE + 1];
    // Global coordinates of this thread's first input element.
    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    // Stage the tile, SIDE rows at a time, guarding the ragged edge.
    #pragma unroll
    for (int k = 0; k < TILE; k += SIDE) {
        if (x < sizeX && y + k < sizeY) {
            mat[threadIdx.y + k][threadIdx.x] = a[((y + k) * sizeX) + x];
        }
    }
    __syncthreads();
    // Swap block coordinates for the transposed output position.
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    #pragma unroll
    for (int k = 0; k < TILE; k += SIDE) {
        if (x < sizeY && y + k < sizeX) {
            b[(y + k) * sizeY + x] = mat[threadIdx.x][threadIdx.y + k];
        }
    }
}
// Benchmark driver: measures memcpy bandwidth, a CPU transpose, and four
// GPU kernels (copy, naive transpose, shared-memory transpose, padded
// shared-memory transpose, unrolled transpose) over a sizeX x sizeY
// float matrix, validating each GPU result against a CPU "gold" copy.
// Fixes: the CPU transpose used row stride sizeX instead of sizeY
// (wrong for the non-square 1234x3153 matrix); a_gold/b_gold and the
// timing events were leaked; host<->device copies are now error-checked.
int main(int argc, char *argv[])
{
    //Run Memcpy benchmarks
    nvtxRangeId_t cudaBenchmark = roctxRangeStart("CUDA Memcpy Benchmark");
    memBenchmark();
    roctxRangeStop(cudaBenchmark);
    // Host arrays.
    float* a = new float[sizeX * sizeY];
    float* b = new float[sizeX * sizeY];
    float* a_gold = new float[sizeX * sizeY];
    float* b_gold = new float[sizeX * sizeY];
    // Device arrays
    float *d_a, *d_b;
    // Allocate memory on the device
    CUDA(hipMalloc((void **)&d_a, sizeX * sizeY * sizeof(float)));
    CUDA(hipMalloc((void **)&d_b, sizeX * sizeY * sizeof(float)));
    // Fill matrix A
    for (int i = 0; i < sizeX * sizeY; i++)
        a[i] = (float)i;
    // Copy array contents of A from the host (CPU) to the device (GPU)
    CUDA(hipMemcpy(d_a, a, sizeX * sizeY * sizeof(float), hipMemcpyHostToDevice));
    // Compute "gold" reference standard: a_gold is a copy of A, b_gold
    // is its transpose (sizeX rows of sizeY elements).
    for (int jj = 0; jj < sizeY; jj++)
    {
        for (int ii = 0; ii < sizeX; ii++)
        {
            a_gold[jj * sizeX + ii] = a[jj * sizeX + ii];
            b_gold[ii * sizeY + jj] = a[jj * sizeX + ii];
        }
    }
    std::cout << std::endl;
    hipDeviceSynchronize();
    // Create events for timing
    hipEvent_t start, stop;
    CUDA(hipEventCreate(&start));
    CUDA(hipEventCreate(&stop));
#define CPU_TRANSPOSE
#ifdef CPU_TRANSPOSE
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***CPU Transpose***" << std::endl;
    {
        // start the timer
        nvtxRangeId_t cpuBenchmark = roctxRangeStart("CPU Transpose Benchmark");
        Timer hTimer;
        int iterations = 10;
        for (int k = 0; k < iterations; k++)
        {
            for (int jj = 0; jj < sizeY; jj++)
                for (int ii = 0; ii < sizeX; ii++)
                    // The transposed matrix has sizeY columns per row, so the
                    // row stride is sizeY (was sizeX - wrong for non-square input).
                    b[ii * sizeY + jj] = a[jj * sizeX + ii];
        }
        double time = hTimer.elapsed() * 1000; //ms
        roctxRangeStop(cpuBenchmark);
        printResults(time, iterations);
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
#endif
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Device To Device Copy***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        // 32x32 thread blocks; enough blocks to cover the whole matrix.
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        // start the timer
        nvtxRangeId_t copyKernelBenchmark = roctxRangeStart("Device to Device Copy");
        hipEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            hipLaunchKernelGGL(( copyKernel), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
        }
        // stop the timer
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        roctxRangeStop(copyKernelBenchmark);
        float time = 0.0f;
        hipEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(hipMemcpy(b, d_b, sizeY * sizeX * sizeof(float), hipMemcpyDeviceToHost));
        if (postprocess(a_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Naive Transpose***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        nvtxRangeId_t naiveTransposeBenchmark = roctxRangeStart("Naive Transpose Benchmark");
        hipEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            hipLaunchKernelGGL(( matrixTransposeNaive), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
        }
        // stop the timer
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        roctxRangeStop(naiveTransposeBenchmark);
        float time = 0.0f;
        hipEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(hipMemcpy(b, d_b, sizeY * sizeX * sizeof(float), hipMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Shared Memory Transpose***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        nvtxRangeId_t sharedMemoryTransposeBenchmark = roctxRangeStart("Shared Memory Transpose Benchmark");
        hipEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            hipLaunchKernelGGL(( matrixTransposeShared<32, 32>), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
        }
        // stop the timer
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        roctxRangeStop(sharedMemoryTransposeBenchmark);
        float time = 0.0f;
        hipEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(hipMemcpy(b, d_b, sizeY * sizeX * sizeof(float), hipMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Shared Memory Transpose without Bank Conflicts***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        nvtxRangeId_t sharedMemoryTransposeWBCBenchmark = roctxRangeStart("Shared Memory Transpose Without Bank Conflict Benchmark");
        hipEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            hipLaunchKernelGGL(( matrixTransposeSharedwBC<32, 32>), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
        }
        // stop the timer
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        roctxRangeStop(sharedMemoryTransposeWBCBenchmark);
        float time = 0.0f;
        hipEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(hipMemcpy(b, d_b, sizeY * sizeX * sizeof(float), hipMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Unrolled Loop Transpose***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        // TILE x SIDE threads per block; each thread covers TILE/SIDE rows.
        const int tile = 32;
        const int side = 8;
        DIMS dims;
        dims.dimBlock = dim3(tile, side, 1);
        dims.dimGrid = dim3(divup(sizeX, tile),
                            divup(sizeY, tile),
                            1);
        nvtxRangeId_t unrolledTransposeBenchmark = roctxRangeStart("Unrolled Loop Transpose Benchmark");
        hipEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            hipLaunchKernelGGL(( matrixTransposeUnrolled<tile, side>), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
        }
        // stop the timer
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        roctxRangeStop(unrolledTransposeBenchmark);
        float time = 0.0f;
        hipEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(hipMemcpy(b, d_b, sizeY * sizeX * sizeof(float), hipMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    // free timing events (previously leaked)
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // free device memory
    hipFree(d_a);
    hipFree(d_b);
    // free host memory, including the gold references (previously leaked)
    delete[] a;
    delete[] b;
    delete[] a_gold;
    delete[] b_gold;
    // CUDA Reset for NVProf
    CUDA(hipDeviceReset());
    // successful program termination
    return 0;
}
| 4963f0e750ad3c1e7c821a6a897bfa923f760987.cu | #include "memBenchmark.h"
#include "termcolor.hpp"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <nvToolsExt.h>
#include <cmath>
#include <cstdio>
#include <iomanip>
#include <iostream>
#include <string>
// Initialize sizes
const int sizeX = 1234;
const int sizeY = 3153;
// Launch configuration bundle: thread-block dimensions plus grid dimensions.
struct DIMS
{
    dim3 dimBlock;
    dim3 dimGrid;
};
// Runtime-API error check: on failure prints file, line, and the error
// string, then terminates the process.
#define CUDA(call) do { \
    cudaError_t e = (call); \
    if (e == cudaSuccess) break; \
    fprintf(stderr, __FILE__":%d: %s (%d)\n", \
        __LINE__, cudaGetErrorString(e), e); \
    exit(1); \
} while (0)
// Ceiling division for unsigned operands, e.g. divup(10, 3) == 4.
// Used to size grids so partial tiles at the matrix edge get a block.
inline unsigned divup(unsigned n, unsigned div)
{
    const unsigned biased = n + div - 1;
    return biased / div;
}
// Print elapsed time and effective bandwidth for a transpose benchmark.
// timeInMilliseconds covers all iterations; each iteration moves the
// sizeX*sizeY matrix twice (one read plus one write), hence the factor 2.
void printResults(double timeInMilliseconds, int iterations)
{
    // print out the time required for the kernel to finish the transpose operation
    double bandwidth = (iterations * 2 * 1000 * (double)(sizeX * sizeY * sizeof(float)))
        / (1000 * 1000 * 1000 * timeInMilliseconds);
    std::cout << "Elapsed Time for " << iterations << " runs = " << round(timeInMilliseconds) << "ms" << std::endl;
    std::cout << termcolor::bold << termcolor::red << termcolor::on_white
        << "Bandwidth (GB/s) = " << std::setprecision(4) << bandwidth
        << termcolor::reset << std::endl;
}
// Exact element-wise comparison of res against ref. Reports the first
// mismatch (if any) plus a pass/fail banner; returns true when all n
// elements match bit-for-bit.
bool postprocess(const float *ref, const float *res, int n)
{
    int i = 0;
    while (i < n && res[i] == ref[i])
        ++i;
    const bool passed = (i == n);
    if (!passed)
    {
        std::cout << "ID: " << i << " \t Res: " << res[i] << " \t Ref: " << ref[i] << std::endl;
        std::cout << termcolor::blink << termcolor::white << termcolor::on_red << "*** FAILED ***" << termcolor::reset << std::endl;
    }
    else
    {
        std::cout << termcolor::green << "Post process check passed!!" << termcolor::reset << std::endl;
    }
    return passed;
}
// Reset the host and device result buffers before a benchmark run.
// Note: cudaMemset fills *bytes* with 0xFF, which as a float bit pattern
// is a NaN rather than -1.0f; that is harmless here because every
// element is overwritten by the kernel before comparison.
void preprocess(float *res, float *dev_res, int n)
{
    std::fill(res, res + n, -1);
    cudaMemset(dev_res, -1, n * sizeof(float));
}
// Element-wise device-to-device copy of the sizeX x sizeY matrix
// (b = a). One thread per element; threads past the matrix edge are
// idle. Serves as the bandwidth upper bound for the transpose kernels.
__global__ void copyKernel(const float* const a, float* const b)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < sizeX && row < sizeY)
    {
        const int flat = row * sizeX + col;
        b[flat] = a[flat];
    }
}
// Naive transpose: each thread reads A at (i, j) and writes B at (j, i).
// Global reads are coalesced, but adjacent threads write index_out
// values sizeY apart, so the writes scatter across memory.
__global__ void matrixTransposeNaive(const float* const a, float* const b)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Global X (column) index
    int j = blockIdx.y * blockDim.y + threadIdx.y; // Global Y (row) index
    // Threads past the matrix edge do nothing.
    if (i >= sizeX || j >= sizeY)
    {
        return;
    }
    int index_in = j * sizeX + i; // Compute input index (i,j) from matrix A
    int index_out = i * sizeY + j; // Compute output index (j,i) in matrix B = transpose(A)
    // Copy data from A to B using transpose indices
    b[index_out] = a[index_in];
}
// Tiled transpose through shared memory: each block stages a
// BLOCK_SIZE_X x BLOCK_SIZE_Y tile of A, then writes it back transposed
// so both the global reads and the global writes are coalesced.
// The unpadded tile means the column-wise read mat[x][y] below causes
// shared-memory bank conflicts for the 32x32 instantiation; see
// matrixTransposeSharedwBC for the padded variant.
template<int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void matrixTransposeShared(const float* const a, float* const b)
{
    // Shared tile holding one block's worth of the input.
    __shared__ float mat[BLOCK_SIZE_Y][BLOCK_SIZE_X];
    // Compute input and output index
    int bx = blockIdx.x * BLOCK_SIZE_X; // Number of global threads in X before this block
    int by = blockIdx.y * BLOCK_SIZE_Y; // Number of global threads in Y before this block
    int i = bx + threadIdx.x; // Global input x index
    int j = by + threadIdx.y; // Global input y index
    // Blocks are transposed via the indices (ti uses by, tj uses bx);
    // the sub-matrix within a block is transposed through shared memory.
    int ti = by + threadIdx.x; // Global output x index
    int tj = bx + threadIdx.y; // Global output y index
    // Stage the tile, guarding against the ragged matrix edge.
    if (i < sizeX && j < sizeY)
    {
        mat[threadIdx.y][threadIdx.x] = a[j * sizeX + i];
    }
    __syncthreads();
    // Write the transposed tile back, again with a bounds guard.
    if (ti < sizeY && tj < sizeX)
    {
        b[tj * sizeY + ti] = mat[threadIdx.x][threadIdx.y]; // Switch threadIdx.x and threadIdx.y from input read
    }
}
// Same tiled transpose as matrixTransposeShared, except the tile's inner
// dimension is padded by one float so that the column-wise reads map to
// distinct shared-memory banks, removing the bank conflicts.
template<int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void matrixTransposeSharedwBC(const float* const a, float* const b)
{
    // +1 padding on the fastest-varying dimension avoids bank conflicts.
    __shared__ float mat[BLOCK_SIZE_Y][BLOCK_SIZE_X + 1];
    // Compute input and output index
    int bx = blockIdx.x * BLOCK_SIZE_X; // Number of global threads in X before this block
    int by = blockIdx.y * BLOCK_SIZE_Y; // Number of global threads in Y before this block
    int i = bx + threadIdx.x; // Global input x index
    int j = by + threadIdx.y; // Global input y index
    // Blocks are transposed via the indices (ti uses by, tj uses bx);
    // the sub-matrix within a block is transposed through shared memory.
    int ti = by + threadIdx.x; // Global output x index
    int tj = bx + threadIdx.y; // Global output y index
    // Stage the tile, guarding against the ragged matrix edge.
    if (i < sizeX && j < sizeY)
    {
        mat[threadIdx.y][threadIdx.x] = a[j * sizeX + i];
    }
    __syncthreads();
    // Write the transposed tile back, again with a bounds guard.
    if (ti < sizeY && tj < sizeX)
    {
        b[tj * sizeY + ti] = mat[threadIdx.x][threadIdx.y]; // Switch threadIdx.x and threadIdx.y from input read
    }
}
// Tiled transpose with a TILE x SIDE thread block: each thread handles
// TILE/SIDE tile rows via the unrolled k loops, increasing per-thread
// work and instruction-level parallelism. The +1 tile padding avoids
// shared-memory bank conflicts as in matrixTransposeSharedwBC.
template<int TILE, int SIDE>
__global__ void matrixTransposeUnrolled(const float* a, float* b)
{
    // Padded shared tile for one TILE x TILE sub-matrix.
    __shared__ float mat[TILE][TILE + 1];
    // Global coordinates of this thread's first input element.
    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    // Stage the tile, SIDE rows at a time, guarding the ragged edge.
    #pragma unroll
    for (int k = 0; k < TILE; k += SIDE) {
        if (x < sizeX && y + k < sizeY) {
            mat[threadIdx.y + k][threadIdx.x] = a[((y + k) * sizeX) + x];
        }
    }
    __syncthreads();
    // Swap block coordinates for the transposed output position.
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    #pragma unroll
    for (int k = 0; k < TILE; k += SIDE) {
        if (x < sizeY && y + k < sizeX) {
            b[(y + k) * sizeY + x] = mat[threadIdx.x][threadIdx.y + k];
        }
    }
}
// Benchmark driver: measures memcpy bandwidth, a CPU transpose, and four
// GPU kernels (copy, naive transpose, shared-memory transpose, padded
// shared-memory transpose, unrolled transpose) over a sizeX x sizeY
// float matrix, validating each GPU result against a CPU "gold" copy.
// Fixes: the CPU transpose used row stride sizeX instead of sizeY
// (wrong for the non-square 1234x3153 matrix); a_gold/b_gold and the
// timing events were leaked; host<->device copies are now error-checked.
int main(int argc, char *argv[])
{
    //Run Memcpy benchmarks
    nvtxRangeId_t cudaBenchmark = nvtxRangeStart("CUDA Memcpy Benchmark");
    memBenchmark();
    nvtxRangeEnd(cudaBenchmark);
    // Host arrays.
    float* a = new float[sizeX * sizeY];
    float* b = new float[sizeX * sizeY];
    float* a_gold = new float[sizeX * sizeY];
    float* b_gold = new float[sizeX * sizeY];
    // Device arrays
    float *d_a, *d_b;
    // Allocate memory on the device
    CUDA(cudaMalloc((void **)&d_a, sizeX * sizeY * sizeof(float)));
    CUDA(cudaMalloc((void **)&d_b, sizeX * sizeY * sizeof(float)));
    // Fill matrix A
    for (int i = 0; i < sizeX * sizeY; i++)
        a[i] = (float)i;
    // Copy array contents of A from the host (CPU) to the device (GPU)
    CUDA(cudaMemcpy(d_a, a, sizeX * sizeY * sizeof(float), cudaMemcpyHostToDevice));
    // Compute "gold" reference standard: a_gold is a copy of A, b_gold
    // is its transpose (sizeX rows of sizeY elements).
    for (int jj = 0; jj < sizeY; jj++)
    {
        for (int ii = 0; ii < sizeX; ii++)
        {
            a_gold[jj * sizeX + ii] = a[jj * sizeX + ii];
            b_gold[ii * sizeY + jj] = a[jj * sizeX + ii];
        }
    }
    std::cout << std::endl;
    cudaDeviceSynchronize();
    // Create CUDA events for timing
    cudaEvent_t start, stop;
    CUDA(cudaEventCreate(&start));
    CUDA(cudaEventCreate(&stop));
#define CPU_TRANSPOSE
#ifdef CPU_TRANSPOSE
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***CPU Transpose***" << std::endl;
    {
        // start the timer
        nvtxRangeId_t cpuBenchmark = nvtxRangeStart("CPU Transpose Benchmark");
        Timer hTimer;
        int iterations = 10;
        for (int k = 0; k < iterations; k++)
        {
            for (int jj = 0; jj < sizeY; jj++)
                for (int ii = 0; ii < sizeX; ii++)
                    // The transposed matrix has sizeY columns per row, so the
                    // row stride is sizeY (was sizeX - wrong for non-square input).
                    b[ii * sizeY + jj] = a[jj * sizeX + ii];
        }
        double time = hTimer.elapsed() * 1000; //ms
        nvtxRangeEnd(cpuBenchmark);
        printResults(time, iterations);
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
#endif
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Device To Device Copy***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        // 32x32 thread blocks; enough blocks to cover the whole matrix.
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        // start the timer
        nvtxRangeId_t copyKernelBenchmark = nvtxRangeStart("Device to Device Copy");
        cudaEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            copyKernel<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
        }
        // stop the timer
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        nvtxRangeEnd(copyKernelBenchmark);
        float time = 0.0f;
        cudaEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(cudaMemcpy(b, d_b, sizeY * sizeX * sizeof(float), cudaMemcpyDeviceToHost));
        if (postprocess(a_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Naive Transpose***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        nvtxRangeId_t naiveTransposeBenchmark = nvtxRangeStart("Naive Transpose Benchmark");
        cudaEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            matrixTransposeNaive<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
        }
        // stop the timer
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        nvtxRangeEnd(naiveTransposeBenchmark);
        float time = 0.0f;
        cudaEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(cudaMemcpy(b, d_b, sizeY * sizeX * sizeof(float), cudaMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Shared Memory Transpose***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        nvtxRangeId_t sharedMemoryTransposeBenchmark = nvtxRangeStart("Shared Memory Transpose Benchmark");
        cudaEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            matrixTransposeShared<32, 32><<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
        }
        // stop the timer
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        nvtxRangeEnd(sharedMemoryTransposeBenchmark);
        float time = 0.0f;
        cudaEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(cudaMemcpy(b, d_b, sizeY * sizeX * sizeof(float), cudaMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Shared Memory Transpose without Bank Conflicts***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        DIMS dims;
        dims.dimBlock = dim3(32, 32, 1);
        dims.dimGrid = dim3(divup(sizeX, dims.dimBlock.x),
                            divup(sizeY, dims.dimBlock.y),
                            1);
        nvtxRangeId_t sharedMemoryTransposeWBCBenchmark = nvtxRangeStart("Shared Memory Transpose Without Bank Conflict Benchmark");
        cudaEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            matrixTransposeSharedwBC<32, 32><<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
        }
        // stop the timer
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        nvtxRangeEnd(sharedMemoryTransposeWBCBenchmark);
        float time = 0.0f;
        cudaEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(cudaMemcpy(b, d_b, sizeY * sizeX * sizeof(float), cudaMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    ////////////////////////////////////////////////////////////
    std::cout << "****************************************************" << std::endl;
    std::cout << "***Unrolled Loop Transpose***" << std::endl;
    {
        preprocess(b, d_b, sizeX * sizeY);
        // TILE x SIDE threads per block; each thread covers TILE/SIDE rows.
        const int tile = 32;
        const int side = 8;
        DIMS dims;
        dims.dimBlock = dim3(tile, side, 1);
        dims.dimGrid = dim3(divup(sizeX, tile),
                            divup(sizeY, tile),
                            1);
        nvtxRangeId_t unrolledTransposeBenchmark = nvtxRangeStart("Unrolled Loop Transpose Benchmark");
        cudaEventRecord(start, 0);
        int iterations = 10;
        for (int i = 0; i < iterations; i++)
        {
            // Launch the GPU kernel
            matrixTransposeUnrolled<tile, side><<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
        }
        // stop the timer
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        nvtxRangeEnd(unrolledTransposeBenchmark);
        float time = 0.0f;
        cudaEventElapsedTime(&time, start, stop);
        // copy the answer back to the host (CPU) from the device (GPU)
        CUDA(cudaMemcpy(b, d_b, sizeY * sizeX * sizeof(float), cudaMemcpyDeviceToHost));
        if (postprocess(b_gold, b, sizeX * sizeY))
        {
            printResults(time, iterations);
        }
    }
    std::cout << "****************************************************" << std::endl << std::endl;
    ////////////////////////////////////////////////////////////
    // free timing events (previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    // free host memory, including the gold references (previously leaked)
    delete[] a;
    delete[] b;
    delete[] a_gold;
    delete[] b_gold;
    // CUDA Reset for NVProf
    CUDA(cudaDeviceReset());
    // successful program termination
    return 0;
}
|
865ac5b681addcb527a87a3dcd34102afd386851.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "maths.h"
#include "render.h"
#include "util.h"
#include "disney.h"
#include "bvh.h"
#include <map>
// Device-side view of the scene passed by value to kernels.
// All pointers are device addresses (or texture objects smuggled through
// pointers when USE_TEXTURES is enabled — see Create*Texture below).
struct GPUScene
{
    Primitive* primitives;  // flat array of all primitives
    int numPrimitives;
    Primitive* lights;      // copies of the primitives with lightSamples > 0
    int numLights;
    Sky sky;                // sky model / environment probe
    BVH bvh;                // top-level BVH over primitives
};
#define kBsdfSamples 1.0f
#define kProbeSamples 1.0f
#define kRayEpsilon 0.0001f
#define LAUNCH_BOUNDS __launch_bounds__(256, 4)
// Flatten a 2D grid of 2D blocks into a single linear thread index.
__device__ inline int getGlobalIndex()
{
    const int threadsPerBlock = blockDim.x * blockDim.y;
    const int linearBlock = blockIdx.y * gridDim.x + blockIdx.x;
    const int linearThread = threadIdx.y * blockDim.x + threadIdx.x;
    return linearBlock * threadsPerBlock + linearThread;
}
// Copy an int buffer to the device and, when USE_TEXTURES is enabled, wrap it
// in a linear texture object whose handle is returned through deviceBuffer
// (the 64-bit hipTextureObject_t is cast to an int* so that fetch helpers can
// dispatch on the build flag). Without USE_TEXTURES the raw device pointer is
// returned instead.
// NOTE(review): no error checking on hipMalloc/hipMemcpy/hipCreateTextureObject;
// failures surface later as bad fetches.
void CreateIntTexture(int** deviceBuffer, const int* hostBuffer, int sizeInBytes)
{
    int* buffer;
    hipMalloc(&buffer, sizeInBytes);
    hipMemcpy(buffer, hostBuffer, sizeInBytes, hipMemcpyHostToDevice);
#if USE_TEXTURES
    // create texture object
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeLinear;
    resDesc.res.linear.devPtr = (void*)buffer;
    resDesc.res.linear.desc.f = hipChannelFormatKindSigned;
    resDesc.res.linear.desc.x = 32; // bits per channel
    resDesc.res.linear.sizeInBytes = sizeInBytes;
    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = hipReadModeElementType;
    hipTextureObject_t tex;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    // cast to pointer; the underlying device buffer is intentionally not
    // tracked separately, so it cannot be freed later (leaked by design here)
    *deviceBuffer = (int*)tex;
#else
    *deviceBuffer = buffer;
#endif
}
// Same pattern as CreateIntTexture, but for a float buffer: uploads the host
// data and (with USE_TEXTURES) returns a linear float texture object cast to
// a float*; otherwise returns the raw device pointer.
void CreateFloatTexture(float** deviceBuffer, const float* hostBuffer, int sizeInBytes)
{
    float* buffer;
    hipMalloc(&buffer, sizeInBytes);
    hipMemcpy(buffer, hostBuffer, sizeInBytes, hipMemcpyHostToDevice);
#if USE_TEXTURES
    // create texture object
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeLinear;
    resDesc.res.linear.devPtr = (void*)buffer;
    resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
    resDesc.res.linear.desc.x = 32; // bits per channel
    resDesc.res.linear.sizeInBytes = sizeInBytes;
    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = hipReadModeElementType;
    hipTextureObject_t tex;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    // cast to pointer
    *deviceBuffer = (float*)tex;
#else
    *deviceBuffer = buffer;
#endif
}
// Same pattern as CreateIntTexture, but for 4-component float data (Vec4):
// all four channel widths are set to 32 bits so each texel fetch returns a
// full float4. Used for positions/normals expanded to Vec4 and for BVH nodes.
void CreateVec4Texture(Vec4** deviceBuffer, const Vec4* hostBuffer, int sizeInBytes)
{
    Vec4* buffer;
    hipMalloc(&buffer, sizeInBytes);
    hipMemcpy(buffer, hostBuffer, sizeInBytes, hipMemcpyHostToDevice);
#if USE_TEXTURES
    // create texture object
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeLinear;
    resDesc.res.linear.devPtr = (void*)buffer;
    resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
    resDesc.res.linear.desc.x = 32; // bits per channel
    resDesc.res.linear.desc.y = 32; // bits per channel
    resDesc.res.linear.desc.z = 32; // bits per channel
    resDesc.res.linear.desc.w = 32; // bits per channel
    resDesc.res.linear.sizeInBytes = sizeInBytes;
    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = hipReadModeElementType;
    hipTextureObject_t tex;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    // cast to pointer
    *deviceBuffer = (Vec4*)tex;
#else
    *deviceBuffer = buffer;
#endif
}
// Upload a host mesh to the device. With USE_TEXTURES, positions/normals are
// expanded from Vec3 to Vec4 (w = 1 for points, 0 for vectors) so they can be
// fetched as float4 texels; BVH nodes are always uploaded through the Vec4
// texture path (BVHNode is reinterpreted as Vec4s). The triangle-area CDF is
// a plain device buffer used for light sampling.
MeshGeometry CreateGPUMesh(const MeshGeometry& hostMesh)
{
    const int numVertices = hostMesh.numVertices;
    const int numIndices = hostMesh.numIndices;
    const int numNodes = hostMesh.numNodes;
    MeshGeometry gpuMesh;
#if USE_TEXTURES
    // expand positions out to vec4
    std::vector<Vec4> positions;
    std::vector<Vec4> normals;
    for (int i=0; i < numVertices; ++i)
    {
        positions.push_back(Vec4(hostMesh.positions[i], 1.0f));
        normals.push_back(Vec4(hostMesh.normals[i], 0.0f));
    }
    CreateVec4Texture((Vec4**)&gpuMesh.positions, (Vec4*)&positions[0], sizeof(Vec4)*numVertices);
    CreateVec4Texture((Vec4**)&gpuMesh.normals, (Vec4*)&normals[0], sizeof(Vec4)*numVertices);
#else
    CreateFloatTexture((float**)&gpuMesh.positions, (float*)&hostMesh.positions[0], sizeof(Vec3)*numVertices);
    CreateFloatTexture((float**)&gpuMesh.normals, (float*)&hostMesh.normals[0], sizeof(Vec3)*numVertices);
#endif
    CreateIntTexture((int**)&gpuMesh.indices, (int*)&hostMesh.indices[0], sizeof(int)*numIndices);
    /*
    hipMalloc((Vec3**)&gpuMesh.positions, sizeof(Vec3)*numVertices);
    hipMemcpy((Vec3*)gpuMesh.positions, &hostMesh.positions[0], sizeof(Vec3)*numVertices, hipMemcpyHostToDevice);
    hipMalloc((Vec3**)&gpuMesh.normals, sizeof(Vec3)*numVertices);
    hipMemcpy((Vec3*)gpuMesh.normals, &hostMesh.normals[0], sizeof(Vec3)*numVertices, hipMemcpyHostToDevice);
    hipMalloc((int**)&gpuMesh.indices, sizeof(int)*numIndices);
    hipMemcpy((int*)gpuMesh.indices, &hostMesh.indices[0], sizeof(int)*numIndices, hipMemcpyHostToDevice);
    */
    //hipMalloc((BVHNode**)&gpuMesh.nodes, sizeof(BVHNode)*numNodes);
    //hipMemcpy((BVHNode*)gpuMesh.nodes, &hostMesh.nodes[0], sizeof(BVHNode)*numNodes, hipMemcpyHostToDevice);
    CreateVec4Texture((Vec4**)&gpuMesh.nodes, (Vec4*)&hostMesh.nodes[0], sizeof(BVHNode)*numNodes);
    // one CDF entry per triangle (numIndices/3), used for area-weighted sampling
    hipMalloc((float**)&gpuMesh.cdf, sizeof(float)*numIndices/3);
    hipMemcpy((float*)gpuMesh.cdf, &hostMesh.cdf[0], sizeof(float)*numIndices/3, hipMemcpyHostToDevice);
    gpuMesh.numIndices = numIndices;
    gpuMesh.numVertices = numVertices;
    gpuMesh.numNodes = numNodes;
    gpuMesh.area = hostMesh.area;
    return gpuMesh;
}
// Intentionally empty stub: GPU mesh buffers (and any texture objects wrapping
// them) are never released, so every CreateGPUMesh call leaks device memory.
// Left as-is because with USE_TEXTURES the stored pointers are texture object
// handles, not hipMalloc pointers, and cannot simply be hipFree'd.
void DestroyGPUMesh(const MeshGeometry& m)
{
}
// Upload a host texture's texel data to the device; every other field of the
// descriptor is copied verbatim. The returned Texture's data pointer is a
// device address.
Texture CreateGPUTexture(const Texture& tex)
{
    Texture gpuTex = tex;
    const int texelCount = tex.width*tex.height*tex.depth;
    const size_t bytes = sizeof(float)*texelCount;
    hipMalloc((void**)&gpuTex.data, bytes);
    hipMemcpy(gpuTex.data, tex.data, bytes, hipMemcpyHostToDevice);
    return gpuTex;
}
// Upload the sky to the device. Scalar sky parameters are copied by value;
// if an environment probe is present its pixel data goes through the Vec4
// texture path and the four CDF/PDF tables (per-row X tables sized
// width*height, per-column Y tables sized height) through the float path.
Sky CreateGPUSky(const Sky& sky)
{
    Sky gpuSky = sky;
    // copy probe
    if (sky.probe.valid)
    {
        const int numPixels = sky.probe.width*sky.probe.height;
        // copy pixel data
        CreateVec4Texture((Vec4**)&gpuSky.probe.data, sky.probe.data, numPixels*sizeof(float)*4);
        // copy cdf tables
        CreateFloatTexture((float**)&gpuSky.probe.cdfValuesX, sky.probe.cdfValuesX, numPixels*sizeof(float));
        CreateFloatTexture((float**)&gpuSky.probe.pdfValuesX, sky.probe.pdfValuesX, numPixels*sizeof(float));
        CreateFloatTexture((float**)&gpuSky.probe.cdfValuesY, sky.probe.cdfValuesY, sky.probe.height*sizeof(float));
        CreateFloatTexture((float**)&gpuSky.probe.pdfValuesY, sky.probe.pdfValuesY, sky.probe.height*sizeof(float));
    }
    return gpuSky;
}
// Stub: probe pixel data and CDF/PDF tables uploaded by CreateGPUSky are
// never released (device memory leak, acknowledged by the original todo).
void DestroyGPUSky(const Sky& gpuSky)
{
    if (gpuSky.probe.valid)
    {
        // todo
    }
}
#if 1
// Trace a ray against the scene and return the closest intersection.
//
// Traversal uses a single explicit stack over a two-level BVH: the scene BVH
// over primitives, and, for mesh primitives, the per-mesh triangle BVH. When
// a mesh leaf is reached the ray is transformed into mesh-local space and the
// traversal root is switched to the mesh BVH; a sentinel index of -1 pushed
// below the mesh root marks where traversal must pop back to scene space
// (restoring the scene-space ray and root). The fixed 64-entry stack has no
// overflow check.
//
// Outputs: outT = closest hit distance, outNormal = geometric (or smoothed,
// for meshes) normal flipped to face the incoming ray, outPrimitive =
// optional hit primitive. Returns false if nothing is hit.
inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, const Primitive** RESTRICT outPrimitive)
{
    int stack[64];
    stack[0] = 0;
    unsigned int count = 1;
    // ray in the currently active space (scene space or mesh-local space)
    Vec3 dir, rcpDir;
    Vec3 origin;
    rcpDir.x = 1.0f/rayDir.x;
    rcpDir.y = 1.0f/rayDir.y;
    rcpDir.z = 1.0f/rayDir.z;
    origin = rayOrigin;
    dir = rayDir;
    const BVHNode* RESTRICT root = scene.bvh.nodes;
    MeshGeometry mesh;
    // >= 0 while traversing inside a mesh's local BVH; -1 in scene space
    int primitiveIndex = -1;
    float closestT = FLT_MAX;
    //float closestU;
    float closestV;
    float closestW;
    Vec3 closestNormal;
    int closestPrimitive = -1;
    int closestTri;
    while(count)
    {
        const int nodeIndex = stack[--count];
        if (nodeIndex < 0)
        {
            // reset to scene bvh dir and address
            rcpDir.x = 1.0f/rayDir.x;
            rcpDir.y = 1.0f/rayDir.y;
            rcpDir.z = 1.0f/rayDir.z;
            origin = rayOrigin;
            dir = rayDir;
            root = scene.bvh.nodes;
            primitiveIndex = -1;
            continue;
        }
        BVHNode node = fetchNode(root, nodeIndex);
        int leftIndex = node.leftIndex;
        int rightIndex = node.rightIndex;
        if (node.leaf)
        {
            if (primitiveIndex < 0)
            {
                // scene-level leaf: leftIndex is the primitive index
                const Primitive& p = scene.primitives[leftIndex];
                Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime);
                switch (p.type)
                {
                    case eSphere:
                    {
                        float minT, maxT;
                        Vec3 n;
                        bool hit = IntersectRaySphere(transform.p, p.sphere.radius*transform.s, origin, dir, minT, maxT, &n);
                        if (hit && minT < closestT)
                        {
                            closestT = minT;
                            closestNormal = n;
                            closestPrimitive = leftIndex;
                        }
                        break;
                    }
                    case ePlane:
                    {
                        float t;
                        bool hit = IntersectRayPlane(origin, dir, (const Vec4&)p.plane, t);
                        if (hit && t < closestT)
                        {
                            closestT = t;
                            closestNormal = (const Vec3&)p.plane;
                            closestPrimitive = leftIndex;
                        }
                        break;
                    }
                    case eMesh:
                    {
                        // push a back-tracking marker in the stack
                        stack[count++] = -1;
                        // push root of the mesh bvh
                        stack[count++] = 0;
                        // transform ray to primitive local space
                        origin = InverseTransformPoint(transform, rayOrigin);
                        dir = InverseTransformVector(transform, rayDir);
                        rcpDir.x = 1.0f/dir.x;
                        rcpDir.y = 1.0f/dir.y;
                        rcpDir.z = 1.0f/dir.z;
                        // set bvh and mesh sources
                        root = p.mesh.nodes;
                        mesh = p.mesh;
                        primitiveIndex = leftIndex;
                        break;
                    }
                };
            }
            else
            {
                // mesh mode: leftIndex is the triangle index
                int i0 = fetchInt(mesh.indices, leftIndex*3+0);
                int i1 = fetchInt(mesh.indices, leftIndex*3+1);
                int i2 = fetchInt(mesh.indices, leftIndex*3+2);
                const Vec3 a = fetchVec3(mesh.positions, i0);
                const Vec3 b = fetchVec3(mesh.positions, i1);
                const Vec3 c = fetchVec3(mesh.positions, i2);
                float t, u, v, w;
                float sign;
                Vec3 n;
                //if (IntersectRayTri(rayOrigin, rayDir, a, b, c, t, u, v, w, &n))
                if (IntersectRayTriTwoSided(origin, dir, a, b, c, t, u, v, w, sign, &n))
                {
                    // NOTE: t is compared against closestT computed in scene space;
                    // this is only valid while primitive transforms preserve scale —
                    // TODO(review): confirm transforms are rigid here
                    if (t > 0.0f && t < closestT)
                    {
                        closestT = t;
                        //closestU = u;
                        closestV = v;
                        closestW = w;
                        closestTri = leftIndex;
                        closestNormal = n*sign;
                        closestPrimitive = primitiveIndex;
                    }
                }
            }
        }
        else
        {
            // check children
            BVHNode left = fetchNode(root, leftIndex);
            BVHNode right = fetchNode(root, rightIndex);
            float tLeft;
            bool hitLeft = IntersectRayAABBFast(origin, rcpDir, left.bounds.lower, left.bounds.upper, tLeft);// && tLeft < closestT;
            float tRight;
            bool hitRight = IntersectRayAABBFast(origin, rcpDir, right.bounds.lower, right.bounds.upper, tRight);// && tRight < closestT;
            // traverse closest first (currently disabled — children are visited
            // in a fixed order regardless of distance)
            if (hitLeft && hitRight && (tLeft < tRight))
            {
                //Swap(leftIndex, rightIndex);
            }
            if (hitLeft)
                stack[count++] = leftIndex;
            if (hitRight)
                stack[count++] = rightIndex;
        }
    }
    if (closestPrimitive >= 0)
    {
        const Primitive& p = scene.primitives[closestPrimitive];
        if (p.type == eMesh)
        {
            Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime);
            // interpolate vertex normals
            int i0 = fetchInt(p.mesh.indices, closestTri*3+0);
            int i1 = fetchInt(p.mesh.indices, closestTri*3+1);
            int i2 = fetchInt(p.mesh.indices, closestTri*3+2);
            const Vec3 n1 = fetchVec3(p.mesh.normals, i0);
            const Vec3 n2 = fetchVec3(p.mesh.normals, i1);
            const Vec3 n3 = fetchVec3(p.mesh.normals, i2);
            Vec3 smoothNormal = (1.0f-closestV-closestW)*n1 + closestV*n2 + closestW*n3;
            // ensure smooth normal lies on the same side of the geometric normal
            if (Dot(smoothNormal, closestNormal) < 0.0f)
                smoothNormal *= -1.0f;
            closestNormal = SafeNormalize(TransformVector(transform, smoothNormal), closestNormal);
        }
        outT = closestT;
        outNormal = FaceForward(closestNormal, -rayDir);
        if (outPrimitive)
            *outPrimitive = &p;
        return true;
    }
    else
    {
        // no hit
        return false;
    }
}
#else
// Reference (non-BVH) implementation of Trace, compiled only when the #if 1
// above is flipped off. The active #else path brute-forces every primitive;
// the inner #if 0 path is a further-disabled callback-based BVH query.
// Useful as a correctness baseline for the optimized traversal above.
// trace a ray against the scene returning the closest intersection
inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, const Primitive** outPrimitive)
{
#if 0
    struct Callback
    {
        float minT;
        Vec3 closestNormal;
        const Primitive* closestPrimitive;
        const Ray& ray;
        const GPUScene& scene;
        CUDA_CALLABLE inline Callback(const GPUScene& s, const Ray& r) : minT(REAL_MAX), closestPrimitive(NULL), ray(r), scene(s)
        {
        }
        CUDA_CALLABLE inline void operator()(int index)
        {
            float t;
            Vec3 n, ns;
            const Primitive& primitive = scene.primitives[index];
            if (PrimitiveIntersect(primitive, ray, t, &n))
            {
                if (t < minT && t > 0.0f)
                {
                    minT = t;
                    closestPrimitive = &primitive;
                    closestNormal = n;
                }
            }
        }
    };
    Callback callback(scene, ray);
    QueryBVH(callback, scene.bvh.nodes, ray.origin, ray.dir);
    outT = callback.minT;
    outNormal = FaceForward(callback.closestNormal, -ray.dir);
    if (outPrimitive)
        *outPrimitive = callback.closestPrimitive;
    return callback.closestPrimitive != NULL;
#else
    // brute force: test every primitive, keep the nearest positive hit
    float minT = REAL_MAX;
    const Primitive* closestPrimitive = NULL;
    Vec3 closestNormal(0.0f);
    for (int i=0; i < scene.numPrimitives; ++i)
    {
        const Primitive& primitive = scene.primitives[i];
        float t;
        Vec3 n;
        if (PrimitiveIntersect(primitive, Ray(rayOrigin, rayDir, rayTime), t, &n))
        {
            if (t < minT && t > 0.0f)
            {
                minT = t;
                closestPrimitive = &primitive;
                closestNormal = n;
            }
        }
    }
    outT = minT;
    outNormal = FaceForward(closestNormal, -rayDir);
    if (outPrimitive)
        *outPrimitive = closestPrimitive;
    return closestPrimitive != NULL;
#endif
}
#endif
// Fetch a single texel with wrap-around addressing on all three axes.
// Negative coordinates are folded by Abs before the modulo, i.e. they mirror
// rather than wrap in the usual periodic sense.
__device__ inline float SampleTexture(const Texture& map, int i, int j, int k)
{
    const int x = int(Abs(i)) % map.width;
    const int y = int(Abs(j)) % map.height;
    const int z = int(Abs(k)) % map.depth;
    const int slice = map.width * map.height;
    return map.data[z*slice + y*map.width + x];
}
// Trilinearly interpolate the texture at a normalized position (pos in [0,1]^3
// maps across the full texture extent). Samples the 8 surrounding texels via
// SampleTexture (which handles out-of-range indices) and blends along z, then
// x, then y.
__device__ inline float LinearInterp(const Texture& map, const Vec3& pos)
{
    // integer texel coordinates of the lower corner
    int i = floorf(pos.x*map.width);
    int j = floorf(pos.y*map.height);
    int k = floorf(pos.z*map.depth);
    // trilinear interpolation: fractional offsets within the cell
    float tx = pos.x*map.width-i;
    float ty = pos.y*map.height-j;
    float tz = pos.z*map.depth-k;
    // blend the four z-pairs, then combine across x and finally y
    float a = Lerp(SampleTexture(map, i, j, k), SampleTexture(map, i, j, k+1), tz);
    float b = Lerp(SampleTexture(map, i+1, j, k), SampleTexture(map, i+1, j, k+1), tz);
    float c = Lerp(SampleTexture(map, i, j+1, k), SampleTexture(map, i, j+1, k+1), tz);
    float d = Lerp(SampleTexture(map, i+1, j+1, k), SampleTexture(map, i+1, j+1, k+1), tz);
    float e = Lerp(a, b, tx);
    float f = Lerp(c, d, tx);
    float g = Lerp(e, f, ty);
    return g;
}
// Perturb a surface normal using a bump (height) map: builds a tangent basis,
// finite-differences the bump map along each tangent (eps step in world
// space), and returns the normal of the displaced tangent plane. Falls back
// to the unperturbed normal if the cross product degenerates.
// NOTE(review): the rand parameter is unused.
__device__ inline Vec3 EvaluateBumpNormal(const Vec3& surfaceNormal, const Vec3& surfacePos, const Texture& bumpMap, const Vec3& bumpTile, float bumpStrength, Random& rand)
{
    Vec3 u, v;
    BasisFromVector(surfaceNormal, &u, &v);
    float eps = 0.01f;
    // forward differences of the height field along each tangent direction
    Vec3 dpdu = u + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+u*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps;
    Vec3 dpdv = v + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+v*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps;
    return SafeNormalize(Cross(dpdu, dpdv), surfaceNormal);
}
// Estimate direct lighting at a shading point using multiple importance
// sampling (MIS, balance heuristic) between light/probe sampling and BSDF
// sampling. Probe contribution: kProbeSamples shadow rays importance-sampled
// from the environment probe. Area lights: lightSamples shadow rays per
// light, with the "portal" tolerance trick — a hit is accepted if it lies
// within kTolerance of the light's distance, so a large light sampled
// through a small opening still contributes.
__device__ inline Vec3 SampleLights(const GPUScene& scene, const Primitive& surfacePrimitive, float etaI, float etaO, const Vec3& surfacePos, const Vec3& surfaceNormal, const Vec3& shadingNormal, const Vec3& wo, float time, Random& rand)
{
    Vec3 sum(0.0f);
    if (scene.sky.probe.valid)
    {
        for (int i=0; i < kProbeSamples; ++i)
        {
            Vec3 skyColor;
            float skyPdf;
            Vec3 wi;
            ProbeSample(scene.sky.probe, wi, skyColor, skyPdf, rand);
            //wi = UniformSampleSphere(rand);
            //skyColor = ProbeEval(scene.sky.probe, ProbeDirToUV(wi));
            //skyPdf = 0.5f*kInv2Pi;
            //if (Dot(wi, surfaceNormal) <= 0.0f)
            // continue;
            // check if occluded
            float t;
            Vec3 n;
            if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, NULL) == false)
            {
                float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi);
                Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi);
                if (bsdfPdf > 0.0f)
                {
                    // MIS balance heuristic between probe and BSDF sampling
                    int N = kProbeSamples+kBsdfSamples;
                    float cbsdf = kBsdfSamples/N;
                    float csky = float(kProbeSamples)/N;
                    float weight = csky*skyPdf/(cbsdf*bsdfPdf + csky*skyPdf);
                    Validate(weight);
                    if (weight > 0.0f)
                        sum += weight*skyColor*f*Abs(Dot(wi, surfaceNormal))/skyPdf;
                }
            }
        }
        if (kProbeSamples > 0)
            sum /= float(kProbeSamples);
    }
    for (int i=0; i < scene.numLights; ++i)
    {
        // assume all lights are area lights for now
        const Primitive& lightPrimitive = scene.lights[i];
        Vec3 L(0.0f);
        int numSamples = lightPrimitive.lightSamples;
        if (numSamples == 0)
            continue;
        for (int s=0; s < numSamples; ++s)
        {
            // sample light source
            Vec3 lightPos;
            Vec3 lightNormal;
            PrimitiveSample(lightPrimitive, time, lightPos, lightNormal, rand);
            Vec3 wi = lightPos-surfacePos;
            float dSq = LengthSq(wi);
            wi /= sqrtf(dSq);
            // light is behind surface
            //if (Dot(wi, surfaceNormal) <= 0.0f)
            //continue;
            // surface is behind light
            if (Dot(wi, lightNormal) >= 0.0f)
                continue;
            // check visibility
            float t;
            Vec3 n;
            if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, NULL))
            {
                float tSq = t*t;
                // if our next hit was further than distance to light then accept
                // sample, this works for portal sampling where you have a large light
                // that you sample through a small window
                const float kTolerance = 1.e-2f;
                if (fabsf(t - sqrtf(dSq)) <= kTolerance)
                {
                    const float nl = Abs(Dot(lightNormal, wi));
                    // light pdf with respect to area and convert to pdf with respect to solid angle
                    float lightArea = PrimitiveArea(lightPrimitive);
                    float lightPdf = ((1.0f/lightArea)*tSq)/nl;
                    // bsdf pdf for light's direction
                    float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi);
                    Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi);
                    // this branch is only necessary to exclude specular paths from light sampling (always have zero brdf)
                    // todo: make BSDFEval always return zero for pure specular paths and roll specular eval into BSDFSample()
                    if (bsdfPdf > 0.0f)
                    {
                        // calculate relative weighting of the light and bsdf sampling
                        int N = lightPrimitive.lightSamples+kBsdfSamples;
                        float cbsdf = kBsdfSamples/N;
                        float clight = float(lightPrimitive.lightSamples)/N;
                        float weight = clight*lightPdf/(cbsdf*bsdfPdf + clight*lightPdf);
                        L += weight*f*lightPrimitive.material.emission*(Abs(Dot(wi, shadingNormal))/Max(1.e-3f, lightPdf));
                    }
                }
            }
        }
        sum += L * (1.0f/numSamples);
    }
    return sum;
}
// Rectangular sub-region of the output image, in pixels.
struct Tile
{
    int x;      // left edge in raster coordinates
    int y;      // top edge in raster coordinates
    int width;  // may be smaller than the renderer tile size at image edges
    int height;
};
// Per-path state machine driving the wavefront kernels: each kernel only
// processes paths whose mode matches its stage, then advances the mode.
enum PathMode
{
    ePathGenerate,      // needs a fresh camera ray
    ePathAdvance,       // ray ready to be traced (AdvancePaths)
    ePathProbeSample,   // probe sampling stage (currently unused stub)
    ePathLightSample,   // direct light sampling stage
    ePathBsdfSample,    // BSDF sampling / bounce stage
    ePathTerminate,     // finished; splat radiance then regenerate
    ePathDisabled,      // thread outside the active tile
};
// Structure-of-arrays storage for all in-flight paths (one entry per pixel of
// a tile). All pointers are device buffers allocated by AllocatePaths().
struct PathState
{
    Vec3* __restrict__ rayOrigin;
    Vec3* __restrict__ rayDir;
    float* __restrict__ rayTime;          // shutter time for motion blur
    Vec3* __restrict__ pos;               // last hit position
    Vec3* __restrict__ normal;            // last hit normal
    int* __restrict__ depth;              // bounce count
    Vec3* __restrict__ pathThroughput;
    Vec3* __restrict__ absorption;        // Beer-Lambert coefficient of current medium
    const Primitive** __restrict__ primitive;  // last hit primitive
    Vec3* __restrict__ totalRadiance;
    float* __restrict__ etaI;             // IOR on the incident side
    float* __restrict__ etaO;             // IOR on the transmitted side
    PathMode* __restrict__ mode;          // state-machine stage (see PathMode)
    // pdf from last brdf sampling
    float* __restrict__ bsdfPdf;
    BSDFType* __restrict__ bsdfType;
    // sample coordinate
    float* __restrict__ rasterX;
    float* __restrict__ rasterY;
    Random* __restrict__ rand;            // per-path RNG state
};
// Allocate a zero-initialized device array of `num` elements of T.
template <typename T>
void Alloc(T** ptr, int num)
{
    const size_t bytes = sizeof(T)*num;
    hipMalloc(ptr, bytes);
    hipMemset(*ptr, 0, bytes);
}
// Allocate zeroed device storage for `num` paths (one buffer per PathState
// field). Pair with FreePaths() to release.
PathState AllocatePaths(int num)
{
    PathState state;
    Alloc(&state.rayOrigin, num);
    Alloc(&state.rayDir, num);
    Alloc(&state.rayTime, num);
    Alloc(&state.pos, num);
    Alloc(&state.normal, num);
    Alloc(&state.depth, num);
    Alloc(&state.pathThroughput, num);
    Alloc(&state.absorption, num);
    Alloc(&state.primitive, num);
    Alloc(&state.totalRadiance, num);
    Alloc(&state.etaI, num);
    Alloc(&state.etaO, num);
    Alloc(&state.mode, num);
    Alloc(&state.bsdfPdf, num);
    Alloc(&state.bsdfType, num);
    Alloc(&state.rasterX, num);
    Alloc(&state.rasterY, num);
    Alloc(&state.rand, num);
    return state;
}
// Release every device buffer allocated by AllocatePaths().
// Fix: the original body was an empty "todo" stub, leaking all 18 per-path
// device arrays each time the renderer was destroyed.
void FreePaths(PathState state)
{
    hipFree(state.rayOrigin);
    hipFree(state.rayDir);
    hipFree(state.rayTime);
    hipFree(state.pos);
    hipFree(state.normal);
    hipFree(state.depth);
    hipFree(state.pathThroughput);
    hipFree(state.absorption);
    hipFree(state.primitive);
    hipFree(state.totalRadiance);
    hipFree(state.etaI);
    hipFree(state.etaO);
    hipFree(state.mode);
    hipFree(state.bsdfPdf);
    hipFree(state.bsdfType);
    hipFree(state.rasterX);
    hipFree(state.rasterY);
    hipFree(state.rand);
}
LAUNCH_BOUNDS
// Splat each finished path's radiance into the framebuffer using the
// configured reconstruction filter, then reset the path to ePathGenerate.
// Box filter writes one pixel directly; the Gaussian filter accumulates a
// weighted footprint with atomics (w channel stores total filter weight for
// later normalization).
// Fix: added the `i < numPaths` bounds guard (matching VisualizeNormals);
// previously the numPaths parameter was unused and out-of-range threads
// would read/write past the path buffers if the grid over-covered them.
__global__ void TerminatePaths(Color* output, Options options, PathState paths, int numPaths)
{
    const int i = getGlobalIndex();
    if (i < numPaths)
    {
        if (paths.mode[i] != ePathDisabled)
        {
            float rasterX = paths.rasterX[i];
            float rasterY = paths.rasterY[i];
            Vec3 sample = paths.totalRadiance[i];
            // sample = paths[i].normal*0.5f + 0.5f;
            int width = options.width;
            int height = options.height;
            Filter filter = options.filter;
            switch (filter.type)
            {
                case eFilterBox:
                {
                    int x = Clamp(int(rasterX), 0, width-1);
                    int y = Clamp(int(rasterY), 0, height-1);
                    output[y*width+x] += Color(sample.x, sample.y, sample.z, 1.0f);
                    break;
                }
                case eFilterGaussian:
                {
                    int startX = Max(0, int(rasterX - filter.width));
                    int startY = Max(0, int(rasterY - filter.width));
                    int endX = Min(int(rasterX + filter.width), width-1);
                    int endY = Min(int(rasterY + filter.width), height-1);
                    // clamp fireflies before splatting
                    Vec3 c = ClampLength(sample, options.clamp);
                    for (int x=startX; x <= endX; ++x)
                    {
                        for (int y=startY; y <= endY; ++y)
                        {
                            float w = filter.Eval(x-rasterX, y-rasterY);
                            //output[(height-1-y)*width+x] += Vec3(Min(sample.x, clamp), Min(sample.y, clamp), Min(sample.z, clamp), 1.0f)*w;
                            const int index = y*width+x;
                            // atomics: neighboring paths share filter footprints
                            atomicAdd(&output[index].x, c.x*w);
                            atomicAdd(&output[index].y, c.y*w);
                            atomicAdd(&output[index].z, c.z*w);
                            atomicAdd(&output[index].w, w);
                        }
                    }
                    break;
                }
            };
        }
        paths.mode[i] = ePathGenerate;
    }
}
LAUNCH_BOUNDS
// Direct-lighting stage of the wavefront loop: for each path waiting in
// ePathLightSample, accumulate next-event-estimation radiance at the current
// hit point and advance the path to the BSDF-sampling stage.
// Fix: added the `i < numPaths` bounds guard (the numPaths parameter was
// previously unused), matching VisualizeNormals and guarding against grids
// that over-cover the path buffers.
__global__ void SampleLights(GPUScene scene, PathState paths, int numPaths)
{
    const int i = getGlobalIndex();
    if (i < numPaths)
    {
        if (paths.mode[i] == ePathLightSample)
        {
            // calculate a basis for this hit point
            const Primitive* hit = paths.primitive[i];
            float etaI = paths.etaI[i];
            float etaO = paths.etaO[i];
            const Vec3 rayDir = paths.rayDir[i];
            float rayTime = paths.rayTime[i];
            const Vec3 p = paths.pos[i];
            const Vec3 n = paths.normal[i];
            // integrate direct light over hemisphere
            paths.totalRadiance[i] += paths.pathThroughput[i]*SampleLights(scene, *hit, etaI, etaO, p, n, n, -rayDir, rayTime, paths.rand[i]);
            paths.mode[i] = ePathBsdfSample;
        }
    }
}
LAUNCH_BOUNDS
// Bounce stage: sample an outgoing direction from the hit material's BSDF,
// update the path throughput/medium state, and hand the path back to the
// trace stage (ePathAdvance), or terminate it if the sampled pdf is zero.
// NOTE(review): there is no `i < numPaths` guard here (numPaths is unused),
// unlike VisualizeNormals — safe only because the launch grid never exceeds
// the path buffer size; confirm if launch config changes.
__global__ void SampleBsdfs(PathState paths, int numPaths)
{
    const int i = getGlobalIndex();
    {
        if (paths.mode[i] == ePathBsdfSample)
        {
            const Vec3 p = paths.pos[i];
            const Vec3 n = paths.normal[i];
            const Vec3 rayDir = paths.rayDir[i];
            const Primitive* hit = paths.primitive[i];
            Random& rand = paths.rand[i];
            float etaI = paths.etaI[i];
            float etaO = paths.etaO[i];
            // integrate indirect light by sampling BRDF
            Vec3 u, v;
            BasisFromVector(n, &u, &v);
            Vec3 bsdfDir;
            BSDFType bsdfType;
            float bsdfPdf;
            BSDFSample(hit->material, etaI, etaO, p, u, v, n, -rayDir, bsdfDir, bsdfPdf, bsdfType, rand);
            if (bsdfPdf <= 0.0f)
            {
                paths.mode[i] = ePathTerminate;
            }
            else
            {
                // reflectance
                Vec3 f = BSDFEval(hit->material, etaI, etaO, p, n, -rayDir, bsdfDir);
                // update ray medium if we are transmitting through the material
                if (Dot(bsdfDir, n) <= 0.0f)
                {
                    paths.etaI[i] = etaO;
                    // NOTE(review): this eTransmitted write is dead — it is
                    // overwritten by `paths.bsdfType[i] = bsdfType` below
                    paths.bsdfType[i] = eTransmitted;
                    // NOTE(review): comment says "entering a medium" but the
                    // condition fires when etaI != 1 (i.e. leaving); verify the
                    // intended absorption handling
                    if (etaI != 1.0f)
                    {
                        // entering a medium, update the aborption (assume zero in air)
                        paths.absorption[i] = hit->material.absorption;
                    }
                }
                else
                {
                    // also dead — overwritten below
                    paths.bsdfType[i] = eReflected;
                }
                // update throughput with primitive reflectance
                paths.pathThroughput[i] *= f * Abs(Dot(n, bsdfDir))/bsdfPdf;
                paths.bsdfPdf[i] = bsdfPdf;
                paths.bsdfType[i] = bsdfType;
                paths.rayDir[i] = bsdfDir;
                // offset the new origin off the surface to avoid self-intersection
                paths.rayOrigin[i] = p + FaceForward(n, bsdfDir)*kRayEpsilon;
                paths.mode[i] = ePathAdvance;
            }
        }
    }
}
LAUNCH_BOUNDS
// Stub: environment-probe sampling as a separate wavefront stage is not
// implemented (probe sampling currently happens inside SampleLights).
__global__ void SampleProbes(PathState paths, int numPaths)
{
}
LAUNCH_BOUNDS
// Trace stage: intersect each ePathAdvance path with the scene. On a hit it
// applies medium absorption, adds MIS-weighted emission (directly-visible
// emitters on the first bounce, balance heuristic against light sampling on
// later bounces), and either terminates (hit a light) or queues the path for
// direct-light sampling. On a miss the path terminates (sky handling is a
// todo).
// NOTE(review): no `i < numPaths` guard (numPaths unused), unlike
// VisualizeNormals — relies on the launch grid never exceeding the buffers.
__global__ void AdvancePaths(GPUScene scene, PathState paths, int numPaths)
{
    const int i = getGlobalIndex();
    {
        if (paths.mode[i] == ePathAdvance)
        {
            Vec3 rayOrigin = paths.rayOrigin[i];
            Vec3 rayDir = paths.rayDir[i];
            float rayTime = paths.rayTime[i];
            float etaI = paths.etaI[i];
            Vec3 pathThroughput = paths.pathThroughput[i];
            Vec3 n;
            float t;
            const Primitive* hit;
            // find closest hit
            if (Trace(scene, rayOrigin, rayDir, rayTime, t, n, &hit))
            {
                float etaO;
                // index of refraction for transmission, 1.0 corresponds to air
                if (etaI == 1.0f)
                {
                    etaO = hit->material.GetIndexOfRefraction();
                }
                else
                {
                    // returning to free space
                    etaO = 1.0f;
                }
                // Beer-Lambert absorption through the current medium
                pathThroughput *= Exp(-paths.absorption[i]*t);
                if (paths.depth[i] == 0)
                {
                    // first trace is our only chance to add contribution from directly visible light sources
                    paths.totalRadiance[i] += hit->material.emission;
                }
                else if (kBsdfSamples > 0)
                {
                    // area pdf that this dir was already included by the light sampling from previous step
                    float lightArea = PrimitiveArea(*hit);
                    if (lightArea > 0.0f)
                    {
                        // convert to pdf with respect to solid angle
                        float lightPdf = ((1.0f/lightArea)*t*t)/Clamp(Dot(-rayDir, n), 1.e-3f, 1.0f);
                        // calculate weight for bsdf sampling
                        int N = hit->lightSamples+kBsdfSamples;
                        float cbsdf = kBsdfSamples/N;
                        float clight = float(hit->lightSamples)/N;
                        float weight = cbsdf*paths.bsdfPdf[i]/(cbsdf*paths.bsdfPdf[i] + clight*lightPdf);
                        // specular paths have zero chance of being included by direct light sampling (zero pdf)
                        if (paths.bsdfType[i] == eSpecular)
                            weight = 1.0f;
                        // pathThroughput already includes the bsdf pdf
                        paths.totalRadiance[i] += weight*pathThroughput*hit->material.emission;
                    }
                }
                // terminate ray if we hit a light source
                if (hit->lightSamples)
                {
                    paths.mode[i] = ePathTerminate;
                }
                else
                {
                    // update throughput based on absorption through the medium
                    paths.pos[i] = rayOrigin + rayDir*t;
                    paths.normal[i] = n;
                    paths.primitive[i] = hit;
                    paths.etaO[i] = etaO;
                    paths.pathThroughput[i] = pathThroughput;
                    paths.depth[i] += 1;
                    paths.mode[i] = ePathLightSample;
                }
            }
            else
            {
                // todo: sky
                // no hit, terminate path
                paths.mode[i] = ePathTerminate;
            }
        }
    }
}
LAUNCH_BOUNDS
// Generate a fresh camera ray for every idle path (generate/disabled/
// terminated) whose pixel lies inside the current tile; paths outside the
// tile are marked ePathDisabled. Each ray gets a jittered raster position,
// a stratified shutter time for motion blur, and a reset of all per-path
// accumulators.
// Fixes: (1) the tile-clipping test compared only threadIdx against the tile
// extent, so for tiles spanning more than one block every thread passed and
// paths were generated outside partial tiles — it must compare the
// block-relative coordinate (tx + threadIdx.x). (2) added the `i < numPaths`
// bounds guard (numPaths was unused), matching VisualizeNormals.
__global__ void GeneratePaths(Camera camera, CameraSampler sampler, Tile tile, int seed, PathState paths, int numPaths)
{
    const int tx = blockIdx.x*blockDim.x;
    const int ty = blockIdx.y*blockDim.y;
    const int x = tx + threadIdx.x + tile.x;
    const int y = ty + threadIdx.y + tile.y;
    const int i = getGlobalIndex();
    if (i < numPaths)
    {
        if (paths.mode[i] == ePathGenerate || paths.mode[i] == ePathDisabled || paths.mode[i] == ePathTerminate)
        {
            // if we're inside the tile (block-relative pixel, not just threadIdx)
            if (tx + threadIdx.x < tile.width && ty + threadIdx.y < tile.height)
            {
                Random rand(i + tile.y*tile.width + tile.x + seed);
                // offset
                //float x, y, t;
                //StratifiedSample2D(i, tile.width, tile.height, rand, x, y);
                float t;
                StratifiedSample1D(i, 64, rand, t);
                // shutter time
                float time = Lerp(camera.shutterStart, camera.shutterEnd, t);
                //float px = tile.x + x*tile.width;
                //float py = tile.y + y*tile.height;
                // jitter within the pixel for anti-aliasing
                float px = x + rand.Randf(-0.5f, 0.5f);
                float py = y + rand.Randf(-0.5f, 0.5f);
                Vec3 origin, dir;
                sampler.GenerateRay(px, py, origin, dir);
                // advance paths
                paths.depth[i] = 0;
                paths.rayOrigin[i] = origin;
                paths.rayDir[i] = dir;
                paths.rayTime[i] = time;
                paths.mode[i] = ePathAdvance;
                paths.rand[i] = rand;
                paths.totalRadiance[i] = 0.0f;
                paths.pathThroughput[i] = 1.0f;
                paths.etaI[i] = 1.0f;
                paths.bsdfType[i] = eReflected;
                paths.bsdfPdf[i] = 1.0f;
                paths.rasterX[i] = px;
                paths.rasterY[i] = py;
            }
            else
            {
                paths.mode[i] = ePathDisabled;
            }
        }
    }
}
//LAUNCH_BOUNDS
// Debug mode: trace each path's primary ray once and store the geometric
// normal as the "radiance", then terminate the path immediately.
__global__ void VisualizeNormals(GPUScene scene, PathState paths, int numPaths)
{
    const int i = getGlobalIndex();
    if (i < numPaths)
    {
        Vec3 rayOrigin = paths.rayOrigin[i];
        Vec3 rayDir = paths.rayDir[i];
        Vec3 n;
        float t;
        // find closest hit
        if (Trace(scene, rayOrigin, rayDir, 0.0f, t, n, NULL))
        {
            paths.totalRadiance[i] = n;
        }
        paths.mode[i] = ePathTerminate;
    }
}
// Wavefront path tracer: the image is rendered in tiles; for each tile the
// per-stage kernels (generate / advance / light-sample / bsdf-sample /
// terminate) are launched in sequence, with per-path modes gating which
// threads do work in each stage.
struct GpuWaveFrontRenderer : public Renderer
{
    Color* output = NULL;          // device framebuffer (Color per pixel)
    GPUScene sceneGPU;             // device copies of primitives/lights/bvh/sky
    Random rand;                   // host RNG used to seed per-tile path RNGs
    int tileWidth;
    int tileHeight;
    PathState paths;               // SoA path buffers, one entry per tile pixel
    // map id to geometry struct, so shared meshes are uploaded only once
    std::map<int, MeshGeometry> gpuMeshes;
    GpuWaveFrontRenderer(const Scene* s)
    {
        // build GPU primitive and light lists
        std::vector<Primitive> primitives;
        std::vector<Primitive> lights;
        for (int i=0; i < s->primitives.size(); ++i)
        {
            Primitive primitive = s->primitives[i];
            // if mesh primitive then copy to the GPU
            if (primitive.type == eMesh)
            {
                // see if we have already uploaded the mesh to the GPU
                if (gpuMeshes.find(primitive.mesh.id) == gpuMeshes.end())
                {
                    MeshGeometry geo = CreateGPUMesh(primitive.mesh);
                    gpuMeshes[geo.id] = geo;
                    // replace CPU mesh with GPU copy
                    primitive.mesh = geo;
                }
            }
            if (primitive.material.bump > 0.0f)
            {
                primitive.material.bumpMap = CreateGPUTexture(primitive.material.bumpMap);
            }
            // create explicit list of light primitives
            if (primitive.lightSamples)
            {
                lights.push_back(primitive);
            }
            primitives.push_back(primitive);
        }
        // convert scene BVH (nodes reinterpreted as Vec4 texels)
        CreateVec4Texture((Vec4**)&(sceneGPU.bvh.nodes), (Vec4*)s->bvh.nodes, sizeof(BVHNode)*s->bvh.numNodes);
        sceneGPU.bvh.numNodes = s->bvh.numNodes;
        // upload to the GPU
        sceneGPU.numPrimitives = primitives.size();
        sceneGPU.numLights = lights.size();
        if (sceneGPU.numLights > 0)
        {
            hipMalloc(&sceneGPU.lights, sizeof(Primitive)*lights.size());
            hipMemcpy(sceneGPU.lights, &lights[0], sizeof(Primitive)*lights.size(), hipMemcpyHostToDevice);
        }
        if (sceneGPU.numPrimitives > 0)
        {
            hipMalloc(&sceneGPU.primitives, sizeof(Primitive)*primitives.size());
            hipMemcpy(sceneGPU.primitives, &primitives[0], sizeof(Primitive)*primitives.size(), hipMemcpyHostToDevice);
        }
        // copy sky and probe texture
        sceneGPU.sky = CreateGPUSky(s->sky);
        tileWidth = 1024;
        tileHeight = 1024;
        const int numPaths = tileWidth*tileHeight;
        // allocate paths
        //hipMalloc(&paths, sizeof(PathState)*numPaths);
        //hipMemset(paths, 0, sizeof(PathState)*numPaths);
        paths = AllocatePaths(numPaths);
    }
    virtual ~GpuWaveFrontRenderer()
    {
        // NOTE(review): GPU meshes, bump textures and the sky probe are not
        // released here (their Destroy* functions are stubs)
        hipFree(output);
        hipFree(sceneGPU.primitives);
        hipFree(sceneGPU.lights);
        FreePaths(paths);
    }
    // (re)allocate and clear the device framebuffer for a new image size
    void Init(int width, int height)
    {
        hipFree(output);
        hipMalloc(&output, sizeof(Color)*width*height);
        hipMemset(output, 0, sizeof(Color)*width*height);
    }
    // Render one progressive pass: accumulates into `output` and copies the
    // framebuffer back to outputHost (the blocking hipMemcpy also serves as
    // the device synchronization point).
    void Render(const Camera& camera, const Options& options, Color* outputHost)
    {
        std::vector<Tile> tiles;
        const int tilesx = (options.width + tileWidth - 1)/tileWidth;
        const int tilesy = (options.height + tileHeight - 1)/tileHeight;
        for (int y=0; y < tilesy; ++y)
        {
            for (int x=0; x < tilesx; ++x)
            {
                Tile tile;
                tile.x = x*tileWidth;
                tile.y = y*tileHeight;
                // edge tiles are clipped to the image
                tile.width = Min(tileWidth, options.width-tile.x);
                tile.height = Min(tileHeight, options.height-tile.y);
                tiles.push_back(tile);
            }
        }
        const int numPaths = tileWidth*tileHeight;
        // create a sampler for the camera
        CameraSampler sampler(
            Transform(camera.position, camera.rotation),
            camera.fov,
            0.001f,
            1.0f,
            options.width,
            options.height);
        for (int tileIndex=0; tileIndex < tiles.size(); ++tileIndex)
        {
            Tile tile = tiles[tileIndex];
            // a tile consists of many thread blocks
            const int blockWidth = 16;
            const int blockHeight = 16;
            const int gridWidth = (tile.width + blockWidth - 1)/blockWidth;
            const int gridHeight = (tile.height + blockHeight - 1)/blockHeight;
            dim3 blockDim(blockWidth, blockHeight);
            dim3 gridDim(gridWidth, gridHeight);
            /*
            const int kNumThreadsPerBlock = 256;
            const int kNumBlocks = (numPaths + kNumThreadsPerBlock - 1)/kNumThreadsPerBlock;
            dim3 gridDim(kNumBlocks);
            dim3 blockDim(kNumThreadsPerBlock);
            */
            hipLaunchKernelGGL(( GeneratePaths), dim3(gridDim), dim3(blockDim), 0, 0, camera, sampler, tile, rand.Rand(), paths, numPaths);
            if (options.mode == eNormals)
            {
                hipLaunchKernelGGL(( VisualizeNormals), dim3(gridDim), dim3(blockDim), 0, 0, sceneGPU, paths, numPaths);
            }
            else
            {
                // run the wavefront stages once per bounce
                for (int i=0; i < options.maxDepth; ++i)
                {
                    hipLaunchKernelGGL(( AdvancePaths), dim3(gridDim), dim3(blockDim), 0, 0, sceneGPU, paths, numPaths);
                    hipLaunchKernelGGL(( SampleLights), dim3(gridDim), dim3(blockDim), 0, 0, sceneGPU, paths, numPaths);
                    //SampleProbes();
                    hipLaunchKernelGGL(( SampleBsdfs), dim3(gridDim), dim3(blockDim), 0, 0, paths, numPaths);
                }
            }
            hipLaunchKernelGGL(( TerminatePaths), dim3(gridDim), dim3(blockDim), 0, 0, output, options, paths, numPaths);
        }
        // copy back to output
        hipMemcpy(outputHost, output, sizeof(Color)*options.width*options.height, hipMemcpyDeviceToHost);
    }
};
Renderer* CreateGpuWavefrontRenderer(const Scene* s)
{
return new GpuWaveFrontRenderer(s);
}
| 865ac5b681addcb527a87a3dcd34102afd386851.cu | #include "maths.h"
#include "render.h"
#include "util.h"
#include "disney.h"
#include "bvh.h"
#include <map>
struct GPUScene
{
Primitive* primitives;
int numPrimitives;
Primitive* lights;
int numLights;
Sky sky;
BVH bvh;
};
#define kBsdfSamples 1.0f
#define kProbeSamples 1.0f
#define kRayEpsilon 0.0001f
#define LAUNCH_BOUNDS __launch_bounds__(256, 4)
__device__ inline int getGlobalIndex()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
// create a texture object from memory and store it in a 64-bit pointer
void CreateIntTexture(int** deviceBuffer, const int* hostBuffer, int sizeInBytes)
{
int* buffer;
cudaMalloc(&buffer, sizeInBytes);
cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice);
#if USE_TEXTURES
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = (void*)buffer;
resDesc.res.linear.desc.f = cudaChannelFormatKindSigned;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = sizeInBytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t tex;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// cast to pointer
*deviceBuffer = (int*)tex;
#else
*deviceBuffer = buffer;
#endif
}
// create a texture object from memory and store it in a 64-bit pointer
void CreateFloatTexture(float** deviceBuffer, const float* hostBuffer, int sizeInBytes)
{
float* buffer;
cudaMalloc(&buffer, sizeInBytes);
cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice);
#if USE_TEXTURES
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = (void*)buffer;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = sizeInBytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t tex;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// cast to pointer
*deviceBuffer = (float*)tex;
#else
*deviceBuffer = buffer;
#endif
}
// create a texture object from memory and store it in a 64-bit pointer
void CreateVec4Texture(Vec4** deviceBuffer, const Vec4* hostBuffer, int sizeInBytes)
{
Vec4* buffer;
cudaMalloc(&buffer, sizeInBytes);
cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice);
#if USE_TEXTURES
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = (void*)buffer;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.desc.y = 32; // bits per channel
resDesc.res.linear.desc.z = 32; // bits per channel
resDesc.res.linear.desc.w = 32; // bits per channel
resDesc.res.linear.sizeInBytes = sizeInBytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaTextureObject_t tex;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// cast to pointer
*deviceBuffer = (Vec4*)tex;
#else
*deviceBuffer = buffer;
#endif
}
MeshGeometry CreateGPUMesh(const MeshGeometry& hostMesh)
{
const int numVertices = hostMesh.numVertices;
const int numIndices = hostMesh.numIndices;
const int numNodes = hostMesh.numNodes;
MeshGeometry gpuMesh;
#if USE_TEXTURES
// expand positions out to vec4
std::vector<Vec4> positions;
std::vector<Vec4> normals;
for (int i=0; i < numVertices; ++i)
{
positions.push_back(Vec4(hostMesh.positions[i], 1.0f));
normals.push_back(Vec4(hostMesh.normals[i], 0.0f));
}
CreateVec4Texture((Vec4**)&gpuMesh.positions, (Vec4*)&positions[0], sizeof(Vec4)*numVertices);
CreateVec4Texture((Vec4**)&gpuMesh.normals, (Vec4*)&normals[0], sizeof(Vec4)*numVertices);
#else
CreateFloatTexture((float**)&gpuMesh.positions, (float*)&hostMesh.positions[0], sizeof(Vec3)*numVertices);
CreateFloatTexture((float**)&gpuMesh.normals, (float*)&hostMesh.normals[0], sizeof(Vec3)*numVertices);
#endif
CreateIntTexture((int**)&gpuMesh.indices, (int*)&hostMesh.indices[0], sizeof(int)*numIndices);
/*
cudaMalloc((Vec3**)&gpuMesh.positions, sizeof(Vec3)*numVertices);
cudaMemcpy((Vec3*)gpuMesh.positions, &hostMesh.positions[0], sizeof(Vec3)*numVertices, cudaMemcpyHostToDevice);
cudaMalloc((Vec3**)&gpuMesh.normals, sizeof(Vec3)*numVertices);
cudaMemcpy((Vec3*)gpuMesh.normals, &hostMesh.normals[0], sizeof(Vec3)*numVertices, cudaMemcpyHostToDevice);
cudaMalloc((int**)&gpuMesh.indices, sizeof(int)*numIndices);
cudaMemcpy((int*)gpuMesh.indices, &hostMesh.indices[0], sizeof(int)*numIndices, cudaMemcpyHostToDevice);
*/
//cudaMalloc((BVHNode**)&gpuMesh.nodes, sizeof(BVHNode)*numNodes);
//cudaMemcpy((BVHNode*)gpuMesh.nodes, &hostMesh.nodes[0], sizeof(BVHNode)*numNodes, cudaMemcpyHostToDevice);
CreateVec4Texture((Vec4**)&gpuMesh.nodes, (Vec4*)&hostMesh.nodes[0], sizeof(BVHNode)*numNodes);
cudaMalloc((float**)&gpuMesh.cdf, sizeof(float)*numIndices/3);
cudaMemcpy((float*)gpuMesh.cdf, &hostMesh.cdf[0], sizeof(float)*numIndices/3, cudaMemcpyHostToDevice);
gpuMesh.numIndices = numIndices;
gpuMesh.numVertices = numVertices;
gpuMesh.numNodes = numNodes;
gpuMesh.area = hostMesh.area;
return gpuMesh;
}
void DestroyGPUMesh(const MeshGeometry& m)
{
}
Texture CreateGPUTexture(const Texture& tex)
{
const int numTexels = tex.width*tex.height*tex.depth;
Texture gpuTex = tex;
cudaMalloc((void**)&gpuTex.data, sizeof(float)*numTexels);
cudaMemcpy(gpuTex.data, tex.data, sizeof(float)*numTexels, cudaMemcpyHostToDevice);
return gpuTex;
}
Sky CreateGPUSky(const Sky& sky)
{
Sky gpuSky = sky;
// copy probe
if (sky.probe.valid)
{
const int numPixels = sky.probe.width*sky.probe.height;
// copy pixel data
CreateVec4Texture((Vec4**)&gpuSky.probe.data, sky.probe.data, numPixels*sizeof(float)*4);
// copy cdf tables
CreateFloatTexture((float**)&gpuSky.probe.cdfValuesX, sky.probe.cdfValuesX, numPixels*sizeof(float));
CreateFloatTexture((float**)&gpuSky.probe.pdfValuesX, sky.probe.pdfValuesX, numPixels*sizeof(float));
CreateFloatTexture((float**)&gpuSky.probe.cdfValuesY, sky.probe.cdfValuesY, sky.probe.height*sizeof(float));
CreateFloatTexture((float**)&gpuSky.probe.pdfValuesY, sky.probe.pdfValuesY, sky.probe.height*sizeof(float));
}
return gpuSky;
}
void DestroyGPUSky(const Sky& gpuSky)
{
if (gpuSky.probe.valid)
{
// todo
}
}
#if 1
inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, const Primitive** RESTRICT outPrimitive)
{
int stack[64];
stack[0] = 0;
unsigned int count = 1;
Vec3 dir, rcpDir;
Vec3 origin;
rcpDir.x = 1.0f/rayDir.x;
rcpDir.y = 1.0f/rayDir.y;
rcpDir.z = 1.0f/rayDir.z;
origin = rayOrigin;
dir = rayDir;
const BVHNode* RESTRICT root = scene.bvh.nodes;
MeshGeometry mesh;
int primitiveIndex = -1;
float closestT = FLT_MAX;
//float closestU;
float closestV;
float closestW;
Vec3 closestNormal;
int closestPrimitive = -1;
int closestTri;
while(count)
{
const int nodeIndex = stack[--count];
if (nodeIndex < 0)
{
// reset to scene bvh dir and address
rcpDir.x = 1.0f/rayDir.x;
rcpDir.y = 1.0f/rayDir.y;
rcpDir.z = 1.0f/rayDir.z;
origin = rayOrigin;
dir = rayDir;
root = scene.bvh.nodes;
primitiveIndex = -1;
continue;
}
BVHNode node = fetchNode(root, nodeIndex);
int leftIndex = node.leftIndex;
int rightIndex = node.rightIndex;
if (node.leaf)
{
if (primitiveIndex < 0)
{
const Primitive& p = scene.primitives[leftIndex];
Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime);
switch (p.type)
{
case eSphere:
{
float minT, maxT;
Vec3 n;
bool hit = IntersectRaySphere(transform.p, p.sphere.radius*transform.s, origin, dir, minT, maxT, &n);
if (hit && minT < closestT)
{
closestT = minT;
closestNormal = n;
closestPrimitive = leftIndex;
}
break;
}
case ePlane:
{
float t;
bool hit = IntersectRayPlane(origin, dir, (const Vec4&)p.plane, t);
if (hit && t < closestT)
{
closestT = t;
closestNormal = (const Vec3&)p.plane;
closestPrimitive = leftIndex;
}
break;
}
case eMesh:
{
// push a back-tracking marker in the stack
stack[count++] = -1;
// push root of the mesh bvh
stack[count++] = 0;
// transform ray to primitive local space
origin = InverseTransformPoint(transform, rayOrigin);
dir = InverseTransformVector(transform, rayDir);
rcpDir.x = 1.0f/dir.x;
rcpDir.y = 1.0f/dir.y;
rcpDir.z = 1.0f/dir.z;
// set bvh and mesh sources
root = p.mesh.nodes;
mesh = p.mesh;
primitiveIndex = leftIndex;
break;
}
};
}
else
{
// mesh mode
int i0 = fetchInt(mesh.indices, leftIndex*3+0);
int i1 = fetchInt(mesh.indices, leftIndex*3+1);
int i2 = fetchInt(mesh.indices, leftIndex*3+2);
const Vec3 a = fetchVec3(mesh.positions, i0);
const Vec3 b = fetchVec3(mesh.positions, i1);
const Vec3 c = fetchVec3(mesh.positions, i2);
float t, u, v, w;
float sign;
Vec3 n;
//if (IntersectRayTri(rayOrigin, rayDir, a, b, c, t, u, v, w, &n))
if (IntersectRayTriTwoSided(origin, dir, a, b, c, t, u, v, w, sign, &n))
{
if (t > 0.0f && t < closestT)
{
closestT = t;
//closestU = u;
closestV = v;
closestW = w;
closestTri = leftIndex;
closestNormal = n*sign;
closestPrimitive = primitiveIndex;
}
}
}
}
else
{
// check children
BVHNode left = fetchNode(root, leftIndex);
BVHNode right = fetchNode(root, rightIndex);
float tLeft;
bool hitLeft = IntersectRayAABBFast(origin, rcpDir, left.bounds.lower, left.bounds.upper, tLeft);// && tLeft < closestT;
float tRight;
bool hitRight = IntersectRayAABBFast(origin, rcpDir, right.bounds.lower, right.bounds.upper, tRight);// && tRight < closestT;
// traverse closest first
if (hitLeft && hitRight && (tLeft < tRight))
{
//Swap(leftIndex, rightIndex);
}
if (hitLeft)
stack[count++] = leftIndex;
if (hitRight)
stack[count++] = rightIndex;
}
}
if (closestPrimitive >= 0)
{
const Primitive& p = scene.primitives[closestPrimitive];
if (p.type == eMesh)
{
Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime);
// interpolate vertex normals
int i0 = fetchInt(p.mesh.indices, closestTri*3+0);
int i1 = fetchInt(p.mesh.indices, closestTri*3+1);
int i2 = fetchInt(p.mesh.indices, closestTri*3+2);
const Vec3 n1 = fetchVec3(p.mesh.normals, i0);
const Vec3 n2 = fetchVec3(p.mesh.normals, i1);
const Vec3 n3 = fetchVec3(p.mesh.normals, i2);
Vec3 smoothNormal = (1.0f-closestV-closestW)*n1 + closestV*n2 + closestW*n3;
// ensure smooth normal lies on the same side of the geometric normal
if (Dot(smoothNormal, closestNormal) < 0.0f)
smoothNormal *= -1.0f;
closestNormal = SafeNormalize(TransformVector(transform, smoothNormal), closestNormal);
}
outT = closestT;
outNormal = FaceForward(closestNormal, -rayDir);
if (outPrimitive)
*outPrimitive = &p;
return true;
}
else
{
// no hit
return false;
}
}
#else
// trace a ray against the scene returning the closest intersection
inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, const Primitive** outPrimitive)
{
#if 0
struct Callback
{
float minT;
Vec3 closestNormal;
const Primitive* closestPrimitive;
const Ray& ray;
const GPUScene& scene;
CUDA_CALLABLE inline Callback(const GPUScene& s, const Ray& r) : minT(REAL_MAX), closestPrimitive(NULL), ray(r), scene(s)
{
}
CUDA_CALLABLE inline void operator()(int index)
{
float t;
Vec3 n, ns;
const Primitive& primitive = scene.primitives[index];
if (PrimitiveIntersect(primitive, ray, t, &n))
{
if (t < minT && t > 0.0f)
{
minT = t;
closestPrimitive = &primitive;
closestNormal = n;
}
}
}
};
Callback callback(scene, ray);
QueryBVH(callback, scene.bvh.nodes, ray.origin, ray.dir);
outT = callback.minT;
outNormal = FaceForward(callback.closestNormal, -ray.dir);
if (outPrimitive)
*outPrimitive = callback.closestPrimitive;
return callback.closestPrimitive != NULL;
#else
float minT = REAL_MAX;
const Primitive* closestPrimitive = NULL;
Vec3 closestNormal(0.0f);
for (int i=0; i < scene.numPrimitives; ++i)
{
const Primitive& primitive = scene.primitives[i];
float t;
Vec3 n;
if (PrimitiveIntersect(primitive, Ray(rayOrigin, rayDir, rayTime), t, &n))
{
if (t < minT && t > 0.0f)
{
minT = t;
closestPrimitive = &primitive;
closestNormal = n;
}
}
}
outT = minT;
outNormal = FaceForward(closestNormal, -rayDir);
if (outPrimitive)
*outPrimitive = closestPrimitive;
return closestPrimitive != NULL;
#endif
}
#endif
__device__ inline float SampleTexture(const Texture& map, int i, int j, int k)
{
int x = int(Abs(i))%map.width;
int y = int(Abs(j))%map.height;
int z = int(Abs(k))%map.depth;
return map.data[z*map.width*map.height + y*map.width + x];
}
__device__ inline float LinearInterp(const Texture& map, const Vec3& pos)
{
int i = floorf(pos.x*map.width);
int j = floorf(pos.y*map.height);
int k = floorf(pos.z*map.depth);
// trilinear interpolation
float tx = pos.x*map.width-i;
float ty = pos.y*map.height-j;
float tz = pos.z*map.depth-k;
float a = Lerp(SampleTexture(map, i, j, k), SampleTexture(map, i, j, k+1), tz);
float b = Lerp(SampleTexture(map, i+1, j, k), SampleTexture(map, i+1, j, k+1), tz);
float c = Lerp(SampleTexture(map, i, j+1, k), SampleTexture(map, i, j+1, k+1), tz);
float d = Lerp(SampleTexture(map, i+1, j+1, k), SampleTexture(map, i+1, j+1, k+1), tz);
float e = Lerp(a, b, tx);
float f = Lerp(c, d, tx);
float g = Lerp(e, f, ty);
return g;
}
__device__ inline Vec3 EvaluateBumpNormal(const Vec3& surfaceNormal, const Vec3& surfacePos, const Texture& bumpMap, const Vec3& bumpTile, float bumpStrength, Random& rand)
{
Vec3 u, v;
BasisFromVector(surfaceNormal, &u, &v);
float eps = 0.01f;
Vec3 dpdu = u + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+u*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps;
Vec3 dpdv = v + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+v*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps;
return SafeNormalize(Cross(dpdu, dpdv), surfaceNormal);
}
__device__ inline Vec3 SampleLights(const GPUScene& scene, const Primitive& surfacePrimitive, float etaI, float etaO, const Vec3& surfacePos, const Vec3& surfaceNormal, const Vec3& shadingNormal, const Vec3& wo, float time, Random& rand)
{
Vec3 sum(0.0f);
if (scene.sky.probe.valid)
{
for (int i=0; i < kProbeSamples; ++i)
{
Vec3 skyColor;
float skyPdf;
Vec3 wi;
ProbeSample(scene.sky.probe, wi, skyColor, skyPdf, rand);
//wi = UniformSampleSphere(rand);
//skyColor = ProbeEval(scene.sky.probe, ProbeDirToUV(wi));
//skyPdf = 0.5f*kInv2Pi;
//if (Dot(wi, surfaceNormal) <= 0.0f)
// continue;
// check if occluded
float t;
Vec3 n;
if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, NULL) == false)
{
float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi);
Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi);
if (bsdfPdf > 0.0f)
{
int N = kProbeSamples+kBsdfSamples;
float cbsdf = kBsdfSamples/N;
float csky = float(kProbeSamples)/N;
float weight = csky*skyPdf/(cbsdf*bsdfPdf + csky*skyPdf);
Validate(weight);
if (weight > 0.0f)
sum += weight*skyColor*f*Abs(Dot(wi, surfaceNormal))/skyPdf;
}
}
}
if (kProbeSamples > 0)
sum /= float(kProbeSamples);
}
for (int i=0; i < scene.numLights; ++i)
{
// assume all lights are area lights for now
const Primitive& lightPrimitive = scene.lights[i];
Vec3 L(0.0f);
int numSamples = lightPrimitive.lightSamples;
if (numSamples == 0)
continue;
for (int s=0; s < numSamples; ++s)
{
// sample light source
Vec3 lightPos;
Vec3 lightNormal;
PrimitiveSample(lightPrimitive, time, lightPos, lightNormal, rand);
Vec3 wi = lightPos-surfacePos;
float dSq = LengthSq(wi);
wi /= sqrtf(dSq);
// light is behind surface
//if (Dot(wi, surfaceNormal) <= 0.0f)
//continue;
// surface is behind light
if (Dot(wi, lightNormal) >= 0.0f)
continue;
// check visibility
float t;
Vec3 n;
if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, NULL))
{
float tSq = t*t;
// if our next hit was further than distance to light then accept
// sample, this works for portal sampling where you have a large light
// that you sample through a small window
const float kTolerance = 1.e-2f;
if (fabsf(t - sqrtf(dSq)) <= kTolerance)
{
const float nl = Abs(Dot(lightNormal, wi));
// light pdf with respect to area and convert to pdf with respect to solid angle
float lightArea = PrimitiveArea(lightPrimitive);
float lightPdf = ((1.0f/lightArea)*tSq)/nl;
// bsdf pdf for light's direction
float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi);
Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi);
// this branch is only necessary to exclude specular paths from light sampling (always have zero brdf)
// todo: make BSDFEval alwasy return zero for pure specular paths and roll specular eval into BSDFSample()
if (bsdfPdf > 0.0f)
{
// calculate relative weighting of the light and bsdf sampling
int N = lightPrimitive.lightSamples+kBsdfSamples;
float cbsdf = kBsdfSamples/N;
float clight = float(lightPrimitive.lightSamples)/N;
float weight = clight*lightPdf/(cbsdf*bsdfPdf + clight*lightPdf);
L += weight*f*lightPrimitive.material.emission*(Abs(Dot(wi, shadingNormal))/Max(1.e-3f, lightPdf));
}
}
}
}
sum += L * (1.0f/numSamples);
}
return sum;
}
struct Tile
{
int x;
int y;
int width;
int height;
};
enum PathMode
{
ePathGenerate,
ePathAdvance,
ePathProbeSample,
ePathLightSample,
ePathBsdfSample,
ePathTerminate,
ePathDisabled,
};
struct PathState
{
Vec3* __restrict__ rayOrigin;
Vec3* __restrict__ rayDir;
float* __restrict__ rayTime;
Vec3* __restrict__ pos;
Vec3* __restrict__ normal;
int* __restrict__ depth;
Vec3* __restrict__ pathThroughput;
Vec3* __restrict__ absorption;
const Primitive** __restrict__ primitive;
Vec3* __restrict__ totalRadiance;
float* __restrict__ etaI;
float* __restrict__ etaO;
PathMode* __restrict__ mode;
// pdf from last brdf sampling
float* __restrict__ bsdfPdf;
BSDFType* __restrict__ bsdfType;
// sample coordinate
float* __restrict__ rasterX;
float* __restrict__ rasterY;
Random* __restrict__ rand;
};
template <typename T>
void Alloc(T** ptr, int num)
{
cudaMalloc(ptr, sizeof(T)*num);
cudaMemset(*ptr, 0, sizeof(T)*num);
}
PathState AllocatePaths(int num)
{
PathState state;
Alloc(&state.rayOrigin, num);
Alloc(&state.rayDir, num);
Alloc(&state.rayTime, num);
Alloc(&state.pos, num);
Alloc(&state.normal, num);
Alloc(&state.depth, num);
Alloc(&state.pathThroughput, num);
Alloc(&state.absorption, num);
Alloc(&state.primitive, num);
Alloc(&state.totalRadiance, num);
Alloc(&state.etaI, num);
Alloc(&state.etaO, num);
Alloc(&state.mode, num);
Alloc(&state.bsdfPdf, num);
Alloc(&state.bsdfType, num);
Alloc(&state.rasterX, num);
Alloc(&state.rasterY, num);
Alloc(&state.rand, num);
return state;
}
void FreePaths(PathState state)
{
// todo:
}
LAUNCH_BOUNDS
__global__ void TerminatePaths(Color* output, Options options, PathState paths, int numPaths)
{
const int i = getGlobalIndex();
{
if (paths.mode[i] != ePathDisabled)
{
float rasterX = paths.rasterX[i];
float rasterY = paths.rasterY[i];
Vec3 sample = paths.totalRadiance[i];
// sample = paths[i].normal*0.5f + 0.5f;
int width = options.width;
int height = options.height;
Filter filter = options.filter;
switch (filter.type)
{
case eFilterBox:
{
int x = Clamp(int(rasterX), 0, width-1);
int y = Clamp(int(rasterY), 0, height-1);
output[y*width+x] += Color(sample.x, sample.y, sample.z, 1.0f);
break;
}
case eFilterGaussian:
{
int startX = Max(0, int(rasterX - filter.width));
int startY = Max(0, int(rasterY - filter.width));
int endX = Min(int(rasterX + filter.width), width-1);
int endY = Min(int(rasterY + filter.width), height-1);
Vec3 c = ClampLength(sample, options.clamp);
for (int x=startX; x <= endX; ++x)
{
for (int y=startY; y <= endY; ++y)
{
float w = filter.Eval(x-rasterX, y-rasterY);
//output[(height-1-y)*width+x] += Vec3(Min(sample.x, clamp), Min(sample.y, clamp), Min(sample.z, clamp), 1.0f)*w;
const int index = y*width+x;
atomicAdd(&output[index].x, c.x*w);
atomicAdd(&output[index].y, c.y*w);
atomicAdd(&output[index].z, c.z*w);
atomicAdd(&output[index].w, w);
}
}
break;
}
};
}
paths.mode[i] = ePathGenerate;
}
}
LAUNCH_BOUNDS
__global__ void SampleLights(GPUScene scene, PathState paths, int numPaths)
{
const int i = getGlobalIndex();
{
if (paths.mode[i] == ePathLightSample)
{
// calculate a basis for this hit point
const Primitive* hit = paths.primitive[i];
float etaI = paths.etaI[i];
float etaO = paths.etaO[i];
const Vec3 rayDir = paths.rayDir[i];
float rayTime = paths.rayTime[i];
const Vec3 p = paths.pos[i];
const Vec3 n = paths.normal[i];
// integrate direct light over hemisphere
paths.totalRadiance[i] += paths.pathThroughput[i]*SampleLights(scene, *hit, etaI, etaO, p, n, n, -rayDir, rayTime, paths.rand[i]);
paths.mode[i] = ePathBsdfSample;
}
}
}
LAUNCH_BOUNDS
__global__ void SampleBsdfs(PathState paths, int numPaths)
{
const int i = getGlobalIndex();
{
if (paths.mode[i] == ePathBsdfSample)
{
const Vec3 p = paths.pos[i];
const Vec3 n = paths.normal[i];
const Vec3 rayDir = paths.rayDir[i];
const Primitive* hit = paths.primitive[i];
Random& rand = paths.rand[i];
float etaI = paths.etaI[i];
float etaO = paths.etaO[i];
// integrate indirect light by sampling BRDF
Vec3 u, v;
BasisFromVector(n, &u, &v);
Vec3 bsdfDir;
BSDFType bsdfType;
float bsdfPdf;
BSDFSample(hit->material, etaI, etaO, p, u, v, n, -rayDir, bsdfDir, bsdfPdf, bsdfType, rand);
if (bsdfPdf <= 0.0f)
{
paths.mode[i] = ePathTerminate;
}
else
{
// reflectance
Vec3 f = BSDFEval(hit->material, etaI, etaO, p, n, -rayDir, bsdfDir);
// update ray medium if we are transmitting through the material
if (Dot(bsdfDir, n) <= 0.0f)
{
paths.etaI[i] = etaO;
paths.bsdfType[i] = eTransmitted;
if (etaI != 1.0f)
{
// entering a medium, update the aborption (assume zero in air)
paths.absorption[i] = hit->material.absorption;
}
}
else
{
paths.bsdfType[i] = eReflected;
}
// update throughput with primitive reflectance
paths.pathThroughput[i] *= f * Abs(Dot(n, bsdfDir))/bsdfPdf;
paths.bsdfPdf[i] = bsdfPdf;
paths.bsdfType[i] = bsdfType;
paths.rayDir[i] = bsdfDir;
paths.rayOrigin[i] = p + FaceForward(n, bsdfDir)*kRayEpsilon;
paths.mode[i] = ePathAdvance;
}
}
}
}
LAUNCH_BOUNDS
__global__ void SampleProbes(PathState paths, int numPaths)
{
}
LAUNCH_BOUNDS
__global__ void AdvancePaths(GPUScene scene, PathState paths, int numPaths)
{
const int i = getGlobalIndex();
{
if (paths.mode[i] == ePathAdvance)
{
Vec3 rayOrigin = paths.rayOrigin[i];
Vec3 rayDir = paths.rayDir[i];
float rayTime = paths.rayTime[i];
float etaI = paths.etaI[i];
Vec3 pathThroughput = paths.pathThroughput[i];
Vec3 n;
float t;
const Primitive* hit;
// find closest hit
if (Trace(scene, rayOrigin, rayDir, rayTime, t, n, &hit))
{
float etaO;
// index of refraction for transmission, 1.0 corresponds to air
if (etaI == 1.0f)
{
etaO = hit->material.GetIndexOfRefraction();
}
else
{
// returning to free space
etaO = 1.0f;
}
pathThroughput *= Exp(-paths.absorption[i]*t);
if (paths.depth[i] == 0)
{
// first trace is our only chance to add contribution from directly visible light sources
paths.totalRadiance[i] += hit->material.emission;
}
else if (kBsdfSamples > 0)
{
// area pdf that this dir was already included by the light sampling from previous step
float lightArea = PrimitiveArea(*hit);
if (lightArea > 0.0f)
{
// convert to pdf with respect to solid angle
float lightPdf = ((1.0f/lightArea)*t*t)/Clamp(Dot(-rayDir, n), 1.e-3f, 1.0f);
// calculate weight for bsdf sampling
int N = hit->lightSamples+kBsdfSamples;
float cbsdf = kBsdfSamples/N;
float clight = float(hit->lightSamples)/N;
float weight = cbsdf*paths.bsdfPdf[i]/(cbsdf*paths.bsdfPdf[i] + clight*lightPdf);
// specular paths have zero chance of being included by direct light sampling (zero pdf)
if (paths.bsdfType[i] == eSpecular)
weight = 1.0f;
// pathThroughput already includes the bsdf pdf
paths.totalRadiance[i] += weight*pathThroughput*hit->material.emission;
}
}
// terminate ray if we hit a light source
if (hit->lightSamples)
{
paths.mode[i] = ePathTerminate;
}
else
{
// update throughput based on absorption through the medium
paths.pos[i] = rayOrigin + rayDir*t;
paths.normal[i] = n;
paths.primitive[i] = hit;
paths.etaO[i] = etaO;
paths.pathThroughput[i] = pathThroughput;
paths.depth[i] += 1;
paths.mode[i] = ePathLightSample;
}
}
else
{
// todo: sky
// no hit, terminate path
paths.mode[i] = ePathTerminate;
}
}
}
}
LAUNCH_BOUNDS
__global__ void GeneratePaths(Camera camera, CameraSampler sampler, Tile tile, int seed, PathState paths, int numPaths)
{
const int tx = blockIdx.x*blockDim.x;
const int ty = blockIdx.y*blockDim.y;
const int x = tx + threadIdx.x + tile.x;
const int y = ty + threadIdx.y + tile.y;
const int i = getGlobalIndex();
{
if (paths.mode[i] == ePathGenerate || paths.mode[i] == ePathDisabled || paths.mode[i] == ePathTerminate)
{
// if we're inside the tile
if (threadIdx.x < tile.width && threadIdx.y < tile.height)
{
Random rand(i + tile.y*tile.width + tile.x + seed);
// offset
//float x, y, t;
//StratifiedSample2D(i, tile.width, tile.height, rand, x, y);
float t;
StratifiedSample1D(i, 64, rand, t);
// shutter time
float time = Lerp(camera.shutterStart, camera.shutterEnd, t);
//float px = tile.x + x*tile.width;
//float py = tile.y + y*tile.height;
float px = x + rand.Randf(-0.5f, 0.5f);
float py = y + rand.Randf(-0.5f, 0.5f);
Vec3 origin, dir;
sampler.GenerateRay(px, py, origin, dir);
// advance paths
paths.depth[i] = 0;
paths.rayOrigin[i] = origin;
paths.rayDir[i] = dir;
paths.rayTime[i] = time;
paths.mode[i] = ePathAdvance;
paths.rand[i] = rand;
paths.totalRadiance[i] = 0.0f;
paths.pathThroughput[i] = 1.0f;
paths.etaI[i] = 1.0f;
paths.bsdfType[i] = eReflected;
paths.bsdfPdf[i] = 1.0f;
paths.rasterX[i] = px;
paths.rasterY[i] = py;
}
else
{
paths.mode[i] = ePathDisabled;
}
}
}
}
//LAUNCH_BOUNDS
__global__ void VisualizeNormals(GPUScene scene, PathState paths, int numPaths)
{
const int i = getGlobalIndex();
if (i < numPaths)
{
Vec3 rayOrigin = paths.rayOrigin[i];
Vec3 rayDir = paths.rayDir[i];
Vec3 n;
float t;
// find closest hit
if (Trace(scene, rayOrigin, rayDir, 0.0f, t, n, NULL))
{
paths.totalRadiance[i] = n;
}
paths.mode[i] = ePathTerminate;
}
}
struct GpuWaveFrontRenderer : public Renderer
{
Color* output = NULL;
GPUScene sceneGPU;
Random rand;
int tileWidth;
int tileHeight;
PathState paths;
// map id to geometry struct
std::map<int, MeshGeometry> gpuMeshes;
GpuWaveFrontRenderer(const Scene* s)
{
// build GPU primitive and light lists
std::vector<Primitive> primitives;
std::vector<Primitive> lights;
for (int i=0; i < s->primitives.size(); ++i)
{
Primitive primitive = s->primitives[i];
// if mesh primitive then copy to the GPU
if (primitive.type == eMesh)
{
// see if we have already uploaded the mesh to the GPU
if (gpuMeshes.find(primitive.mesh.id) == gpuMeshes.end())
{
MeshGeometry geo = CreateGPUMesh(primitive.mesh);
gpuMeshes[geo.id] = geo;
// replace CPU mesh with GPU copy
primitive.mesh = geo;
}
}
if (primitive.material.bump > 0.0f)
{
primitive.material.bumpMap = CreateGPUTexture(primitive.material.bumpMap);
}
// create explicit list of light primitives
if (primitive.lightSamples)
{
lights.push_back(primitive);
}
primitives.push_back(primitive);
}
// convert scene BVH
CreateVec4Texture((Vec4**)&(sceneGPU.bvh.nodes), (Vec4*)s->bvh.nodes, sizeof(BVHNode)*s->bvh.numNodes);
sceneGPU.bvh.numNodes = s->bvh.numNodes;
// upload to the GPU
sceneGPU.numPrimitives = primitives.size();
sceneGPU.numLights = lights.size();
if (sceneGPU.numLights > 0)
{
cudaMalloc(&sceneGPU.lights, sizeof(Primitive)*lights.size());
cudaMemcpy(sceneGPU.lights, &lights[0], sizeof(Primitive)*lights.size(), cudaMemcpyHostToDevice);
}
if (sceneGPU.numPrimitives > 0)
{
cudaMalloc(&sceneGPU.primitives, sizeof(Primitive)*primitives.size());
cudaMemcpy(sceneGPU.primitives, &primitives[0], sizeof(Primitive)*primitives.size(), cudaMemcpyHostToDevice);
}
// copy sky and probe texture
sceneGPU.sky = CreateGPUSky(s->sky);
tileWidth = 1024;
tileHeight = 1024;
const int numPaths = tileWidth*tileHeight;
// allocate paths
//cudaMalloc(&paths, sizeof(PathState)*numPaths);
//cudaMemset(paths, 0, sizeof(PathState)*numPaths);
paths = AllocatePaths(numPaths);
}
virtual ~GpuWaveFrontRenderer()
{
cudaFree(output);
cudaFree(sceneGPU.primitives);
cudaFree(sceneGPU.lights);
FreePaths(paths);
}
void Init(int width, int height)
{
cudaFree(output);
cudaMalloc(&output, sizeof(Color)*width*height);
cudaMemset(output, 0, sizeof(Color)*width*height);
}
void Render(const Camera& camera, const Options& options, Color* outputHost)
{
std::vector<Tile> tiles;
const int tilesx = (options.width + tileWidth - 1)/tileWidth;
const int tilesy = (options.height + tileHeight - 1)/tileHeight;
for (int y=0; y < tilesy; ++y)
{
for (int x=0; x < tilesx; ++x)
{
Tile tile;
tile.x = x*tileWidth;
tile.y = y*tileHeight;
tile.width = Min(tileWidth, options.width-tile.x);
tile.height = Min(tileHeight, options.height-tile.y);
tiles.push_back(tile);
}
}
const int numPaths = tileWidth*tileHeight;
// create a sampler for the camera
CameraSampler sampler(
Transform(camera.position, camera.rotation),
camera.fov,
0.001f,
1.0f,
options.width,
options.height);
for (int tileIndex=0; tileIndex < tiles.size(); ++tileIndex)
{
Tile tile = tiles[tileIndex];
// a tile consists of many thread blocks
const int blockWidth = 16;
const int blockHeight = 16;
const int gridWidth = (tile.width + blockWidth - 1)/blockWidth;
const int gridHeight = (tile.height + blockHeight - 1)/blockHeight;
dim3 blockDim(blockWidth, blockHeight);
dim3 gridDim(gridWidth, gridHeight);
/*
const int kNumThreadsPerBlock = 256;
const int kNumBlocks = (numPaths + kNumThreadsPerBlock - 1)/kNumThreadsPerBlock;
dim3 gridDim(kNumBlocks);
dim3 blockDim(kNumThreadsPerBlock);
*/
GeneratePaths<<<gridDim, blockDim>>>(camera, sampler, tile, rand.Rand(), paths, numPaths);
if (options.mode == eNormals)
{
VisualizeNormals<<<gridDim, blockDim>>>(sceneGPU, paths, numPaths);
}
else
{
for (int i=0; i < options.maxDepth; ++i)
{
AdvancePaths<<<gridDim, blockDim>>>(sceneGPU, paths, numPaths);
SampleLights<<<gridDim, blockDim>>>(sceneGPU, paths, numPaths);
//SampleProbes();
SampleBsdfs<<<gridDim, blockDim>>>(paths, numPaths);
}
}
TerminatePaths<<<gridDim, blockDim>>>(output, options, paths, numPaths);
}
// copy back to output
cudaMemcpy(outputHost, output, sizeof(Color)*options.width*options.height, cudaMemcpyDeviceToHost);
}
};
Renderer* CreateGpuWavefrontRenderer(const Scene* s)
{
return new GpuWaveFrontRenderer(s);
}
|
9a93c4b103179a0531b01b37712060f40bbaca95.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void getValue(float *outdata, float *indata) {
outdata[0] = indata[0] + 3.0f;
} | 9a93c4b103179a0531b01b37712060f40bbaca95.cu | #include "includes.h"
__global__ void getValue(float *outdata, float *indata) {
outdata[0] = indata[0] + 3.0f;
} |
aa22bd859053b7a34a3905dececab1b1b47fb8bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************
*
* This experiment optimizes packet classification
* in the following aspects:
* 1. Thread assignment
* 2. Memory coalescing
*
* Experiment Assumptions:
* 1. 510 Non-overlapping intervals
* 2. 1024 Rules (510 * 1024 element BVs)
* 3. Number of packets varies, 1 kernel
* 4. All packets are already on CPU memory
* 5. All fields needs prefix/range match
*
********************************************************/
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <rocblas.h>
#define FIELD 5
#define RULE 15
#define ALLRULE 128
#define WSIZE 32
#define int_count ALLRULE / (sizeof(int) * 8)
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
using namespace std;
void header_gen(int**, int**, int, int);
void tree_gen(int**, int, int);
void bv_gen( int**, int*, int);
void data_test(int**, int**, int**, int*, int, int);
// Fused GPU packet-classification kernel: a per-(packet, field) tree search
// followed by a bit-vector merge, both in a single launch.
//
// Expected launch (enforced by the host driver): gridDim.x * blockDim.x ==
// packet_num * int_count, with block_dim == blockDim.x.
//
// NOTE(review): the merge phase below has no `index < packet_num * int_count`
// guard, so any surplus threads read gpu_match_result and write
// gpu_merge_result out of bounds — confirm the launch configuration always
// matches exactly.
// NOTE(review): the search phase stores results at the field-major header
// index (field * packet_num + packet), while the merge phase reads
// gpu_match_result[packetIdx*15 + f] (15 == RULE, not FIELD). These two
// layouts look inconsistent — verify against the host-side expectations.
// NOTE(review): __syncthreads() is block-scoped only; the merge phase may
// consume match results produced by threads of *other* blocks, which this
// barrier does not order. The original separate packet_merge kernel (now
// removed dead code) avoided this.
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, int* gpu_match_result, int packet_num, int block_dim, int* gpu_bv, int* gpu_merge_result, int*gpu_bv_final){
// Cooperatively stage the full decision tree (FIELD fields x RULE nodes each)
// into shared memory, block_dim elements per pass.
__shared__ int gpu_tree_shared[FIELD*RULE];
int level = 0;
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = 0;
// --- Phase 1: tree search. One thread per (packet, field) header value.
// index / packet_num selects the field's sub-tree under the field-major
// header layout.
if (index < packet_num * FIELD){
i = 0;
// Branchless descent of the implicit binary tree (heap layout): step to
// child 2i+1 when header <= node, else to child 2i+2; stops once i >= RULE.
while (i < RULE){
i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2;
}
// Record the terminal index, offset by RULE.
gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE;
}
__syncthreads();
// --- Phase 2: merge. Thread `index` ANDs together the bit-vector word
// (index % int_count) of each matched rule range for its packet. Only the
// first 5 (FIELD) terms are active; the remaining commented-out terms and
// the final block-level reduction into gpu_bv_final are currently disabled
// (dead code removed for clarity).
int packetIdx = index/int_count;
gpu_merge_result[index] = gpu_bv[gpu_match_result[packetIdx*15]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+1]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+2]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+3]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+4]*int_count + index%int_count];
};
/********************************************************
 * Host driver for the fused packet-classification kernel.
 *
 * usage: ./openflow <Packet_num> <Grid_dim_merge> <Block_dim_merge>
 *   Packet_num       number of packets to classify
 *   Grid_dim_merge   grid size for the fused kernel launch
 *   Block_dim_merge  block size for the fused kernel launch
 * The launch must satisfy grid * block == packet_num * int_count.
 *
 * Generates random rule trees / headers / bit-vectors, uploads them,
 * runs packet_classify once, downloads the per-packet result and
 * prints timing statistics for each phase.
 *
 * Fixes vs. original: new[]-allocated buffers are released with
 * delete[] (plain delete was undefined behavior), and the cleanup
 * timing synchronizes on the stop event instead of the start event.
 ********************************************************/
int main(int argc, char** argv){
    if(argc!=4){
        cout<<"usage ./openflow *Packet_num *Grid_dim_merge *Block_dim_merge"<<endl;
        return 0;
    }
    int packet_num = atoi(argv[1]);
    int grid_dim_merge = atoi(argv[2]);
    int block_dim_merge = atoi (argv[3]);
    // The fused kernel assumes exactly one thread per (packet, bv-word) pair.
    if (grid_dim_merge * block_dim_merge != packet_num * int_count){
        cout<<"ERROR: Total number of threads in stage 2 must equal packet_num * int_count"<<endl;
        return 1;
    }
    cout<<"============================ Experiment Starts ============================"<<endl;
    /********************************************************
     * Prepare host data: random rule trees, packet headers
     * and rule bit-vectors (see tree_gen/header_gen/bv_gen).
     ********************************************************/
    srand(time(NULL));
    int** tree = new int*[FIELD];
    for(int i = 0; i < FIELD; i++){
        tree[i] = new int[RULE];
    }
    int** headers = new int*[FIELD];
    for (int i = 0; i < FIELD; i++){
        headers[i] = new int[packet_num];
    }
    int** bv = new int*[FIELD*(RULE+1)];
    for(int i = 0; i < FIELD*(RULE+1); i++){
        bv[i] = new int[int_count];
    }
    int* bv_final = new int[packet_num];
    int* match_result = new int[packet_num * FIELD];
    int* merge_result = new int[int_count*packet_num];
    tree_gen(tree, FIELD, RULE);
    header_gen(headers, tree, FIELD, packet_num);
    bv_gen(bv, bv_final, packet_num);
    /********************************************************
     * Flatten the 2D arrays into contiguous buffers so each
     * structure can be uploaded with a single memcpy.
     ********************************************************/
    int* tree_flatten = new int[RULE*FIELD];
    int* headers_flatten = new int[packet_num*FIELD];
    int* bv_flatten = new int[FIELD*(RULE+1) * int_count];
    for (int i = 0; i < FIELD; i++){
        for (int j = 0; j < RULE; j++){
            tree_flatten[i*RULE+j] = tree[i][j];
        }
    }
    for (int i = 0; i < FIELD; i++){
        for (int j = 0; j < packet_num; j++){
            headers_flatten[i*packet_num + j] = headers[i][j];   // field-major layout
        }
    }
    for (int i = 0; i < FIELD*(RULE+1); i++){
        for (int j = 0; j < int_count; j++){
            bv_flatten[ i * int_count + j] = bv[i][j];
        }
    }
    /********************************************************
     * Timing events: H2D copy, kernel, D2H copy, whole run.
     ********************************************************/
    float time0, time1, time2, time3, time4;
    hipEvent_t time_search_memcpyH2D_start, time_search_memcpyH2D_stop, time_merge_memcpyD2H_start, time_merge_memcpyD2H_stop, time_comp_start, time_comp_stop, time_exp_start,time_exp_stop;
    hipEventCreate(&time_search_memcpyH2D_start);
    hipEventCreate(&time_search_memcpyH2D_stop);
    hipEventCreate(&time_merge_memcpyD2H_start);
    hipEventCreate(&time_merge_memcpyD2H_stop);
    hipEventCreate(&time_comp_start);
    hipEventCreate(&time_comp_stop);
    hipEventCreate(&time_exp_start);
    hipEventCreate(&time_exp_stop);
    /********************************************************
     * Device allocations.
     ********************************************************/
    dim3 dimGrid_merge(grid_dim_merge,1);
    dim3 dimBlock_merge(block_dim_merge,1);
    int* gpu_tree;
    int* gpu_headers;
    int* gpu_match_result;
    int* gpu_bv_final;
    int* gpu_merge_result;
    int* gpu_bv;
    hipMalloc((void**)&gpu_tree, sizeof(int)*FIELD*RULE);
    cudaCheckErrors("hipMalloc gpu_tree");
    hipMalloc((void**)&gpu_headers, sizeof(int)*FIELD*packet_num);
    cudaCheckErrors("hipMalloc gpu_headers");
    hipMalloc((void**)&gpu_bv, sizeof( int)*(RULE+1)*FIELD*int_count);
    cudaCheckErrors("hipMalloc gpu_bv");
    hipMalloc((void**)&gpu_match_result, sizeof(int)*packet_num*FIELD);
    cudaCheckErrors("hipMalloc gpu_match_result");
    hipMalloc((void**)&gpu_merge_result, sizeof( int)*packet_num*int_count);
    cudaCheckErrors("hipMalloc gpu_merge_result");
    hipMalloc((void**)&gpu_bv_final, sizeof( int)*packet_num);
    cudaCheckErrors("hipMalloc gpu_bv_final");
    /********************************************************
     * Phase 1: upload inputs (timed).
     ********************************************************/
    hipEventRecord(time_exp_start, 0);
    hipEventRecord(time_search_memcpyH2D_start, 0);
    hipMemcpy(gpu_tree, tree_flatten, sizeof(int)*RULE*FIELD, hipMemcpyHostToDevice);
    cudaCheckErrors("hipMemcpy gpu_tree");
    hipMemcpy(gpu_headers, headers_flatten, sizeof(int)*FIELD*packet_num, hipMemcpyHostToDevice);
    cudaCheckErrors("hipMemcpy gpu_headers");
    hipMemcpy(gpu_bv, bv_flatten, sizeof( int)*(RULE+1)*FIELD*int_count, hipMemcpyHostToDevice);
    cudaCheckErrors("hipMemcpy merge gpu_bv");
    hipEventRecord(time_search_memcpyH2D_stop, 0);
    hipEventSynchronize(time_search_memcpyH2D_stop);
    hipEventElapsedTime(&time1, time_search_memcpyH2D_start, time_search_memcpyH2D_stop);
    hipEventDestroy(time_search_memcpyH2D_stop);
    hipEventDestroy(time_search_memcpyH2D_start);
    cout<<endl<<"Number of Packets: "<<packet_num<<endl;
    cout<<endl<<">>>>>> "<<"grid_dim: "<<grid_dim_merge<<", block_dim: "<<block_dim_merge<<endl;
    cout<<endl<<"* 1. Time for memcpy H2D: "<<time1<<"ms, Total bytes copied: "<<endl;
    cout<<" -> Tree: "<< sizeof(int)*RULE*FIELD<<" Bytes"<<endl;
    cout<<" -> Headers: "<< sizeof(int)*FIELD*packet_num<<" Bytes"<<endl;
    cout<<" -> BV: "<< sizeof( int)*(RULE+1)*FIELD*int_count <<" Bytes"<<endl;
    cout<<" -> Total Memory Copy: "<< sizeof(int)*RULE*FIELD + sizeof(int)*FIELD*packet_num + sizeof( int)*(RULE+1)*FIELD*int_count <<" Bytes"<<endl;
    /********************************************************
     * Phase 2: fused search + merge kernel (timed).
     ********************************************************/
    hipEventRecord(time_comp_start, 0);
    hipLaunchKernelGGL(( packet_classify), dim3(dimGrid_merge), dim3(dimBlock_merge), 0, 0, gpu_tree, gpu_headers, gpu_match_result, packet_num, block_dim_merge, gpu_bv, gpu_merge_result, gpu_bv_final);
    cudaCheckErrors("Computation fail");
    hipEventRecord(time_comp_stop, 0);
    hipEventSynchronize(time_comp_stop);
    hipEventElapsedTime(&time2, time_comp_start, time_comp_stop);
    hipEventDestroy(time_comp_stop);
    hipEventDestroy(time_comp_start);
    cout<<endl<<"* 2. Time for GPU computation: "<<time2<<"ms, GPU throughput: "<<packet_num/time2/1000<<" MPPS"<<endl;
    /********************************************************
     * Phase 3: download the per-packet result (timed).
     ********************************************************/
    hipEventRecord(time_merge_memcpyD2H_start, 0);
    hipMemcpy(bv_final, gpu_bv_final, sizeof( int)*packet_num, hipMemcpyDeviceToHost);
    cudaCheckErrors("Cuda Memcpy D2H merge fail");
    hipEventRecord(time_merge_memcpyD2H_stop, 0);
    hipEventSynchronize(time_merge_memcpyD2H_stop);
    hipEventElapsedTime(&time3, time_merge_memcpyD2H_start, time_merge_memcpyD2H_stop);
    hipEventDestroy(time_merge_memcpyD2H_stop);
    hipEventDestroy(time_merge_memcpyD2H_start);
    cout<<endl<<"* 3. Time for memcpy D2H: "<<time3<<"ms, Total bytes copied: "<<endl;
    cout<<" -> bv_final: "<< sizeof( int)*packet_num<<" Bytes"<<endl<<endl;
    hipEventRecord(time_exp_stop, 0);
    hipEventSynchronize(time_exp_stop);
    hipEventElapsedTime(&time0, time_exp_start, time_exp_stop);
    hipEventDestroy(time_exp_start);
    hipEventDestroy(time_exp_stop);
    cout<<endl<<"* Total Time for the Experiment: "<<time0<<"ms, throughput: "<<packet_num/time0/1000<<" MPPS"<<endl;
    /********************************************************
     * Cleanup: release device buffers and host allocations.
     ********************************************************/
    hipEvent_t time_clean_start, time_clean_stop;
    hipEventCreate(&time_clean_start);
    hipEventCreate(&time_clean_stop);
    hipEventRecord(time_clean_start, 0);
    hipFree(gpu_tree);
    cudaCheckErrors("Free gpu_tree fail");
    hipFree(gpu_headers);
    cudaCheckErrors("Free gpu_headers fail");
    hipFree(gpu_bv);
    cudaCheckErrors("Free bv fail");
    hipFree(gpu_bv_final);
    cudaCheckErrors("Free gpu_bv_final fail");
    hipFree(gpu_match_result);
    cudaCheckErrors("Free gpu_match_result fail");
    hipFree(gpu_merge_result);
    cudaCheckErrors("Free gpu_merge_result fail");
    // FIX: every one of these buffers was allocated with new[]; releasing
    // them with plain delete (as the original did) is undefined behavior.
    for (int i = 0; i < FIELD; i++){
        delete[] tree[i];
    }
    for(int i = 0; i < FIELD; i++){
        delete[] headers[i];
    }
    for(int i = 0; i < FIELD*(RULE+1); i++){
        delete[] bv[i];
    }
    delete[] tree;
    delete[] bv;
    delete[] headers;
    delete[] bv_final;
    delete[] match_result;
    delete[] tree_flatten;
    delete[] headers_flatten;
    delete[] bv_flatten;
    delete[] merge_result;
    hipEventRecord(time_clean_stop, 0);
    // FIX: synchronize on the *stop* event; the original waited on
    // time_clean_start, so the elapsed time could be read before the
    // stop event had completed.
    hipEventSynchronize(time_clean_stop);
    hipEventElapsedTime(&time4, time_clean_start, time_clean_stop);
    hipEventDestroy(time_clean_stop);
    hipEventDestroy(time_clean_start);
    (void)time4;   // cleanup-timing printout is currently disabled
    cout<<"============================ Experiment Ends ============================"<<endl;
    return 0;
}
// Builds, for each of `field` fields, a complete binary search tree of
// `rule` strictly increasing random cut points, stored in level-order
// (heap layout: children of node i at 2i+1 / 2i+2) in tree[i][0..rule-1].
// Precondition: rule == 2^k - 1 for some k (a complete tree).
//
// Fixes vs. original: the non-standard C++ VLA `int temp[rule]` is replaced
// with std::vector, and the floating-point log()/pow() calls used for exact
// integer powers of two are replaced with integer shifts (no FP rounding).
void tree_gen(int** tree, int field, int rule){
    for(int i = 0; i < field; i++){
        tree[i][0] = rand() % 100;
        // Sorted cut points: strictly increasing random values.
        std::vector<int> temp(rule);
        temp[0] = tree[i][0];
        for (int j = 1; j < rule; j++){
            temp[j] = temp[j-1] + rand() % 20 + 1;
        }
        // Number of tree levels: level = log2(rule + 1) for rule == 2^k - 1.
        int level = 0;
        while ((1 << level) <= rule) level++;
        // Fill the heap layout bottom-up: deepest level first, taking every
        // `step`-th element of the sorted array (right to left).
        int temp_index = rule - 1, tree_index = rule - 1;
        int step_index = level;
        while (step_index >= 1){
            int step = 1 << (level - step_index + 1);
            while (temp_index >= 0){
                tree[i][tree_index] = temp[temp_index];
                temp_index -= step;
                tree_index--;
            }
            step_index--;
            temp_index = rule - 1 - ((1 << (level - step_index)) - 1);
        }
    }
}
// Fills headers[f][p] with a pseudo-random value in [0, 5999] for every
// field f in [0, field) and packet p in [0, packet_num), in field-major
// order. The `tree` argument is accepted for interface parity with the
// other generators but is not consulted here.
void header_gen(int** headers, int** tree, int field, int packet_num){
    for (int f = 0; f < field; ++f){
        int* row = headers[f];
        for (int p = 0; p < packet_num; ++p){
            row[p] = rand() % 6000;
        }
    }
}
// Populates every bit-vector word bv[r][w] (r in [0, FIELD*(RULE+1)),
// w in [0, int_count)) with a pseudo-random 7-bit value, iterating
// word-major (w outer, r inner) to match the original fill order.
// bv_final and packet_num are currently unused here (the final-result
// initialization was disabled).
void bv_gen( int** bv, int* bv_final, int packet_num){
    const int rows = FIELD * (RULE + 1);
    for (int w = 0; w < int_count; ++w){
        for (int r = 0; r < rows; ++r){
            bv[r][w] = rand() % 128;
        }
    }
}
void data_test(int** tree, int** headers, int** bv, int* bv_final, int packet_num, int type){
if (type > 15 | type == 0){
return;
}
if (type % 2 == 1){
cout<<"Tree: "<<endl;
for(int i = 0; i < RULE; i++){
cout<<"Line: "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<tree[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 4 == 2 | type % 4 == 3){
cout<<endl<<"Headers: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<"Header "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<headers[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 8 == 4 | type % 8 == 5 | type % 8 == 6 | type % 8 == 7){
cout<<endl<<"bv: "<<endl;
for(int i = 0; i < ALLRULE; i++){
cout<<"Line "<<i<<": ";
for (int j = 0; j < FIELD*(RULE+1); j++){
cout<<bv[j][i]<<" ";
}
cout<<endl;
}
}
if (type > 7){
cout<<endl<<"bv_final: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<bv_final[i]<<" ";
}
cout<<endl;
}
cout<<"============== End of Print =============="<<endl;
}
| aa22bd859053b7a34a3905dececab1b1b47fb8bd.cu | /********************************************************
*
* This experiment optimizes packet classification
* in the following aspects:
* 1. Thread assignment
* 2. Memory coalescing
*
* Experiment Assumptions:
* 1. 510 Non-overlapping intervals
* 2. 1024 Rules (510 * 1024 element BVs)
* 3. Number of packets varies, 1 kernel
* 4. All packets are already on CPU memory
* 5. All fields needs prefix/range match
*
********************************************************/
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <cublas.h>
#define FIELD 5
#define RULE 15
#define ALLRULE 128
#define WSIZE 32
#define int_count ALLRULE / (sizeof(int) * 8)
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
using namespace std;
void header_gen(int**, int**, int, int);
void tree_gen(int**, int, int);
void bv_gen( int**, int*, int);
void data_test(int**, int**, int**, int*, int, int);
// Fused GPU packet-classification kernel: a per-(packet, field) tree search
// followed by a bit-vector merge, both in a single launch.
//
// Expected launch (enforced by the host driver): gridDim.x * blockDim.x ==
// packet_num * int_count, with block_dim == blockDim.x.
//
// NOTE(review): the merge phase below has no `index < packet_num * int_count`
// guard, so any surplus threads read gpu_match_result and write
// gpu_merge_result out of bounds — confirm the launch configuration always
// matches exactly.
// NOTE(review): the search phase stores results at the field-major header
// index (field * packet_num + packet), while the merge phase reads
// gpu_match_result[packetIdx*15 + f] (15 == RULE, not FIELD). These two
// layouts look inconsistent — verify against the host-side expectations.
// NOTE(review): __syncthreads() is block-scoped only; the merge phase may
// consume match results produced by threads of *other* blocks, which this
// barrier does not order. The original separate packet_merge kernel (now
// removed dead code) avoided this.
__global__ void packet_classify(int* gpu_tree, int* gpu_headers, int* gpu_match_result, int packet_num, int block_dim, int* gpu_bv, int* gpu_merge_result, int*gpu_bv_final){
// Cooperatively stage the full decision tree (FIELD fields x RULE nodes each)
// into shared memory, block_dim elements per pass.
__shared__ int gpu_tree_shared[FIELD*RULE];
int level = 0;
while(level * block_dim + threadIdx.x < FIELD * RULE){
gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x];
level++;
}
__syncthreads();
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = 0;
// --- Phase 1: tree search. One thread per (packet, field) header value.
// index / packet_num selects the field's sub-tree under the field-major
// header layout.
if (index < packet_num * FIELD){
i = 0;
// Branchless descent of the implicit binary tree (heap layout): step to
// child 2i+1 when header <= node, else to child 2i+2; stops once i >= RULE.
while (i < RULE){
i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2;
}
// Record the terminal index, offset by RULE.
gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE;
}
__syncthreads();
// --- Phase 2: merge. Thread `index` ANDs together the bit-vector word
// (index % int_count) of each matched rule range for its packet. Only the
// first 5 (FIELD) terms are active; the remaining commented-out terms and
// the final block-level reduction into gpu_bv_final are currently disabled
// (dead code removed for clarity).
int packetIdx = index/int_count;
gpu_merge_result[index] = gpu_bv[gpu_match_result[packetIdx*15]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+1]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+2]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+3]*int_count + index%int_count] &
gpu_bv[gpu_match_result[packetIdx*15+4]*int_count + index%int_count];
};
int main(int argc, char** argv){
if(argc!=4){
// cout<<"usage ./openflow *Packet_num *Grid_dim *Block_dim *Grid_dim_merge *Block_dim_merge"<<endl;
cout<<"usage ./openflow *Packet_num *Grid_dim_merge *Block_dim_merge"<<endl;
return 0;
}
int packet_num = atoi(argv[1]);
// int grid_dim = atoi(argv[2]);
// int block_dim = atoi(argv[3]);
int grid_dim_merge = atoi(argv[2]);
int block_dim_merge = atoi (argv[3]);
// if (grid_dim*block_dim != packet_num*FIELD){
// cout<<"ERROR: Total number of threads in stage 1 must equal packet_num * FIELD"<<endl;
// return 1;
// }
if (grid_dim_merge * block_dim_merge != packet_num * int_count){
cout<<"ERROR: Total number of threads in stage 2 must equal packet_num * int_count"<<endl;
return 1;
}
cout<<"============================ Experiment Starts ============================"<<endl;
// cout<<"grid_dim: "<<grid_dim<<", block_dim: "<<block_dim<<", packet_num: "<<packet_num;
// cout<<", grid_dim_merge: "<<grid_dim_merge<<", block_dim_merge: "<<block_dim_merge<<endl;
/********************************************************
* Preparing Data:
* 1. Generate random headers
* 2. Generate BVs
* 3. Generate random packets
* 4. Deliberately make some rule-matching packets
********************************************************/
srand(time(NULL));
int** tree = new int*[FIELD];
for(int i = 0; i < FIELD; i++){
tree[i] = new int[RULE];
}
int** headers = new int*[FIELD];
for (int i = 0; i < FIELD; i++){
headers[i] = new int[packet_num];
}
int** bv = new int*[FIELD*(RULE+1)];
for(int i = 0; i < FIELD*(RULE+1); i++){
bv[i] = new int[int_count];
}
int* bv_final = new int[packet_num];
int* match_result = new int[packet_num * FIELD];
int* merge_result = new int[int_count*packet_num];
tree_gen(tree, FIELD, RULE);
header_gen(headers, tree, FIELD, packet_num);
bv_gen(bv, bv_final, packet_num);
//data_test(tree, headers, bv, bv_final, packet_num, 3);
/********************************************************
* Flatten All the 2D Arrays
********************************************************/
int* tree_flatten = new int[RULE*FIELD];
int* headers_flatten = new int[packet_num*FIELD];
int* bv_flatten = new int[FIELD*(RULE+1) * int_count];
for (int i = 0; i < FIELD; i++){
for (int j = 0; j < RULE; j++){
tree_flatten[i*RULE+j] = tree[i][j];
}
}
for (int i = 0; i < FIELD; i++){
for (int j = 0; j < packet_num; j++){
headers_flatten[i*packet_num + j] = headers[i][j];
}
}
for (int i = 0; i < FIELD*(RULE+1); i++){
for (int j = 0; j < int_count; j++){
bv_flatten[ i * int_count + j] = bv[i][j];
}
}
/********************************************************
* Declare cuda events for statistical purposes [Search]:
* 1. time_memcpyH2D
* 2. time_memcpyD2H
* 3. time_pc
********************************************************/
float time0, time1, time2, time3, time4;
cudaEvent_t time_search_memcpyH2D_start, time_search_memcpyH2D_stop, time_merge_memcpyD2H_start, time_merge_memcpyD2H_stop, time_comp_start, time_comp_stop, time_exp_start,time_exp_stop;
cudaEventCreate(&time_search_memcpyH2D_start);
cudaEventCreate(&time_search_memcpyH2D_stop);
cudaEventCreate(&time_merge_memcpyD2H_start);
cudaEventCreate(&time_merge_memcpyD2H_stop);
cudaEventCreate(&time_comp_start);
cudaEventCreate(&time_comp_stop);
cudaEventCreate(&time_exp_start);
cudaEventCreate(&time_exp_stop);
/********************************************************
* Allocate Space in Device:
* 1. gpu_tree
* 2. gpu_bv
* 3. gpu_bv_final
* 4. gpu_headers
********************************************************/
dim3 dimGrid_merge(grid_dim_merge,1);
dim3 dimBlock_merge(block_dim_merge,1);
int* gpu_tree;
int* gpu_headers;
int* gpu_match_result;
int* gpu_bv_final;
int* gpu_merge_result;
int* gpu_bv;
// gpu_tree, gpu_headers, gpu_match_result, packet_num, block_dim_merge, gpu_bv, gpu_merge_result, gpu_bv_final
cudaMalloc((void**)&gpu_tree, sizeof(int)*FIELD*RULE);
cudaCheckErrors("cudaMalloc gpu_tree");
cudaMalloc((void**)&gpu_headers, sizeof(int)*FIELD*packet_num);
cudaCheckErrors("cudaMalloc gpu_headers");
cudaMalloc((void**)&gpu_bv, sizeof( int)*(RULE+1)*FIELD*int_count);
cudaCheckErrors("cudaMalloc gpu_bv");
cudaMalloc((void**)&gpu_match_result, sizeof(int)*packet_num*FIELD);
cudaCheckErrors("cudaMalloc gpu_match_result");
cudaMalloc((void**)&gpu_merge_result, sizeof( int)*packet_num*int_count);
cudaCheckErrors("cudaMalloc gpu_merge_result");
cudaMalloc((void**)&gpu_bv_final, sizeof( int)*packet_num);
cudaCheckErrors("cudaMalloc gpu_bv_final");
cudaEventRecord(time_exp_start, 0);
cudaEventRecord(time_search_memcpyH2D_start, 0);
cudaMemcpy(gpu_tree, tree_flatten, sizeof(int)*RULE*FIELD, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_tree");
cudaMemcpy(gpu_headers, headers_flatten, sizeof(int)*FIELD*packet_num, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy gpu_headers");
cudaMemcpy(gpu_bv, bv_flatten, sizeof( int)*(RULE+1)*FIELD*int_count, cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy merge gpu_bv");
cudaEventRecord(time_search_memcpyH2D_stop, 0);
cudaEventSynchronize(time_search_memcpyH2D_stop);
cudaEventElapsedTime(&time1, time_search_memcpyH2D_start, time_search_memcpyH2D_stop);
cudaEventDestroy(time_search_memcpyH2D_stop);
cudaEventDestroy(time_search_memcpyH2D_start);
cout<<endl<<"Number of Packets: "<<packet_num<<endl;
cout<<endl<<">>>>>> "<<"grid_dim: "<<grid_dim_merge<<", block_dim: "<<block_dim_merge<<endl;
cout<<endl<<"* 1. Time for memcpy H2D: "<<time1<<"ms, Total bytes copied: "<<endl;
cout<<" -> Tree: "<< sizeof(int)*RULE*FIELD<<" Bytes"<<endl;
cout<<" -> Headers: "<< sizeof(int)*FIELD*packet_num<<" Bytes"<<endl;
cout<<" -> BV: "<< sizeof( int)*(RULE+1)*FIELD*int_count <<" Bytes"<<endl;
cout<<" -> Total Memory Copy: "<< sizeof(int)*RULE*FIELD + sizeof(int)*FIELD*packet_num + sizeof( int)*(RULE+1)*FIELD*int_count <<" Bytes"<<endl;
/********************************************************
* Main Packet Classification Process [Search]
* 1. Function Call
* 2. Timing
* 3. Memory copy back (gpu_bv_final)
********************************************************/
/********************************************************
* Main Packet Classification Process [Merge]
* 1. Function Call
* 2. Timing
* 3. Memory copy back (gpu_bv_final)
// ********************************************************/
cudaEventRecord(time_comp_start, 0);
packet_classify<<<dimGrid_merge, dimBlock_merge>>>(gpu_tree, gpu_headers, gpu_match_result, packet_num, block_dim_merge, gpu_bv, gpu_merge_result, gpu_bv_final);
cudaCheckErrors("Computation fail");
cudaEventRecord(time_comp_stop, 0);
cudaEventSynchronize(time_comp_stop);
cudaEventElapsedTime(&time2, time_comp_start, time_comp_stop);
cudaEventDestroy(time_comp_stop);
cudaEventDestroy(time_comp_start);
cout<<endl<<"* 2. Time for GPU computation: "<<time2<<"ms, GPU throughput: "<<packet_num/time2/1000<<" MPPS"<<endl;
// cudaEventRecord(time_search_memcpyD2H_start, 0);
// cudaMemcpy(match_result, gpu_match_result, sizeof(int)*packet_num*FIELD, cudaMemcpyDeviceToHost);
// cudaEventRecord(time_search_memcpyD2H_stop, 0);
// cudaEventSynchronize(time_search_memcpyD2H_stop);
// cudaEventElapsedTime(&time3, time_search_memcpyD2H_start, time_search_memcpyD2H_stop);
// cudaEventDestroy(time_search_memcpyD2H_stop);
// cudaEventDestroy(time_search_memcpyD2H_start);
// cout<<endl<<"* 3. Time for memcpy D2H: "<<time3<<"ms, Total bytes copied: "<<endl;
// cout<<" -> Match_result: "<< sizeof(int)*packet_num*FIELD<<" Bytes"<<endl<<endl;
//data_test(tree, headers, bv, bv_final, packet_num, 8);
/********************************************************
* Declare cuda events for statistical purposes [Search]:
* 1. time_merge_memcpyH2D
* 2. time_merge_memcpyD2H
* 3. time_mg
********************************************************/
// dim3 dimGrid_merge(grid_dim_merge,1);
// dim3 dimBlock_merge(block_dim_merge,1);
// float time4, time5, time6, time7;
// cudaEvent_t time_merge_memcpyH2D_start, time_merge_memcpyH2D_stop, time_merge_memcpyD2H_start, time_merge_memcpyD2H_stop, time_merge_start, time_merge_stop;
// cudaEvent_t time_clean_start, time_clean_stop;
// cudaEventCreate(&time_merge_memcpyH2D_start);
// cudaEventCreate(&time_merge_memcpyH2D_stop);
// cudaEventCreate(&time_merge_memcpyD2H_start);
// cudaEventCreate(&time_merge_memcpyD2H_stop);
// cudaEventCreate(&time_merge_start);
// cudaEventCreate(&time_merge_stop);
// cudaEventCreate(&time_clean_start);
// cudaEventCreate(&time_clean_stop);
// cout<<"---------------------------------------------------------"<<endl;
// cout<<endl<<">>>>>>[Stage 2: Merge]"<<" grid_dim_merge: "<<grid_dim_merge<<", block_dim_merge: "<<block_dim_merge<<endl;
// cudaEventRecord(time_merge_memcpyH2D_start, 0);
// cudaMemcpy(gpu_bv, bv_flatten, sizeof( int)*(RULE+1)*FIELD*int_count, cudaMemcpyHostToDevice);
// // cudaCheckErrors("cudaMemcpy merge gpu_bv");
// cudaMemcpy(gpu_match_result, match_result, sizeof(int)*packet_num*FIELD, cudaMemcpyHostToDevice);
// cudaCheckErrors("cudaMemcpy merge gpu_match_result");
// cudaEventRecord(time_merge_memcpyH2D_stop, 0);
// cudaEventSynchronize(time_merge_memcpyH2D_stop);
// cudaEventElapsedTime(&time4, time_merge_memcpyH2D_start, time_merge_memcpyH2D_stop);
// cudaEventDestroy(time_merge_memcpyH2D_stop);
// cudaEventDestroy(time_merge_memcpyH2D_start);
// cout<<endl<<"* 1. Time for memcpy H2D: "<<time4<<"ms, Total bytes copied: "<<endl;
// cout<<" -> BV: "<< sizeof( int)*(RULE+1)*FIELD*int_count <<" Bytes"<<endl;
// cout<<" -> match_result: "<< sizeof(int)*FIELD*packet_num<<" Bytes"<<endl;
// cout<<" -> Total Memory Copy: "<< sizeof(int)*FIELD*packet_num + sizeof( int)*(RULE+1)*FIELD*int_count<<" Bytes"<<endl;
/********************************************************
* Main Packet Classification Process [Merge]
* 1. Function Call
* 2. Timing
* 3. Memory copy back (gpu_bv_final)
// ********************************************************/
// cudaEventRecord(time_merge_start, 0);
// packet_merge<<<dimGrid_merge, dimBlock_merge>>>(gpu_bv, gpu_match_result, gpu_merge_result, gpu_bv_final, packet_num, block_dim, gpu_merge_result);
// cudaCheckErrors("Merge fail");
// cudaEventRecord(time_merge_stop, 0);
// cudaEventSynchronize(time_merge_stop);
// cudaEventElapsedTime(&time5, time_merge_start, time_merge_stop);
// cudaEventDestroy(time_merge_stop);
// cudaEventDestroy(time_merge_start);
// cout<<endl<<"* 2. Time for GPU computation: "<<time5<<"ms, GPU throughput: "<<packet_num/time5/1000<<" MPPS"<<endl;
cudaEventRecord(time_merge_memcpyD2H_start, 0);
cudaMemcpy(bv_final, gpu_bv_final, sizeof( int)*packet_num, cudaMemcpyDeviceToHost);
cudaCheckErrors("Cuda Memcpy D2H merge fail");
cudaEventRecord(time_merge_memcpyD2H_stop, 0);
cudaEventSynchronize(time_merge_memcpyD2H_stop);
cudaEventElapsedTime(&time3, time_merge_memcpyD2H_start, time_merge_memcpyD2H_stop);
cudaEventDestroy(time_merge_memcpyD2H_stop);
cudaEventDestroy(time_merge_memcpyD2H_start);
cout<<endl<<"* 3. Time for memcpy D2H: "<<time3<<"ms, Total bytes copied: "<<endl;
cout<<" -> bv_final: "<< sizeof( int)*packet_num<<" Bytes"<<endl<<endl;
cudaEventRecord(time_exp_stop, 0);
cudaEventSynchronize(time_exp_stop);
cudaEventElapsedTime(&time0, time_exp_start, time_exp_stop);
cudaEventDestroy(time_exp_start);
cudaEventDestroy(time_exp_stop);
cout<<endl<<"* Total Time for the Experiment: "<<time0<<"ms, throughput: "<<packet_num/time0/1000<<" MPPS"<<endl;
/********************************************************
* Clear Memory:
* 1. Dynamic allocations on host
* 2. cudaFrees
********************************************************/
cudaEvent_t time_clean_start, time_clean_stop;
cudaEventCreate(&time_clean_start);
cudaEventCreate(&time_clean_stop);
cudaEventRecord(time_clean_start, 0);
cudaFree(gpu_tree);
cudaCheckErrors("Free gpu_tree fail");
cudaFree(gpu_headers);
cudaCheckErrors("Free gpu_headers fail");
cudaFree(gpu_bv);
cudaCheckErrors("Free bv fail");
cudaFree(gpu_bv_final);
cudaCheckErrors("Free gpu_bv_final fail");
cudaFree(gpu_match_result);
cudaCheckErrors("Free gpu_match_result fail");
cudaFree(gpu_merge_result);
cudaCheckErrors("Free gpu_merge_result fail");
for (int i = 0; i < FIELD; i++){
delete tree[i];
}
for(int i = 0; i < FIELD; i++){
delete headers[i];
}
for(int i = 0; i < FIELD*(RULE+1); i++){
delete bv[i];
}
delete tree;
delete bv;
delete headers;
delete bv_final;
delete match_result;
delete tree_flatten;
delete headers_flatten;
delete bv_flatten;
delete merge_result;
cudaEventRecord(time_clean_stop, 0);
cudaEventSynchronize(time_clean_start);
cudaEventElapsedTime(&time4, time_clean_start, time_clean_stop);
cudaEventDestroy(time_clean_stop);
cudaEventDestroy(time_clean_start);
//cout<<endl<<"* 4. Time for cleaning memory: "<<time4<<"ms."<<endl<<endl;
cout<<"============================ Experiment Ends ============================"<<endl;
return 0;
}
// Builds, for every field, a complete binary search tree stored in level
// (breadth-first) order in tree[i][0..rule-1].
// Per field:
//   1. Generate `rule` strictly increasing random keys in temp[] (gaps 1..20).
//   2. Scatter temp[] into tree[i][] back-to-front, one tree level at a time:
//      the deepest level takes every 2nd sorted element, the next level every
//      4th, and so on -- the standard sorted-array -> level-order BST layout.
// NOTE(review): assumes rule == 2^level - 1 (a full tree); other values would
// misalign the per-level strides -- confirm at the call site.
void tree_gen(int** tree, int field, int rule){
for(int i = 0; i < field; i++){
// Seed key; this slot is overwritten by the layout pass below.
tree[i][0] = rand() % 100;
int temp[rule];  // VLA (GCC/NVCC extension, not standard C++): sorted keys
temp[0] = tree[i][0];
for (int j = 1; j < rule; j++){
temp[j] = temp[j-1] + rand() % 20 + 1;  // strictly increasing
}
// level = tree height; both indices walk their arrays from the back.
int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2);
int step_index = level;
while (step_index >= 1){
// Elements belonging to the current level are `step` apart in temp[].
int step = pow(2, (level - step_index + 1));
while (temp_index >= 0){
tree[i][tree_index] = temp[temp_index];
temp_index -= step;
tree_index--;
}
step_index--;
// Right-most sorted element that belongs to the next (shallower) level.
temp_index = rule - 1 - (pow(2, level - step_index) - 1);
}
}
}
// Generates random packet headers: headers[f][p] gets a value in [0, 5999]
// for every field f and packet p. rand() is consumed field-by-field,
// packet-by-packet. The `tree` argument is part of the signature but is
// never read.
void header_gen(int** headers, int** tree, int field, int packet_num){
(void)tree;  // unused; kept for interface compatibility
for (int f = 0; f < field; ++f){
int* row = headers[f];
for (int p = 0; p < packet_num; ++p){
row[p] = rand() % 6000;
}
}
}
// Fills the bit-vector table with pseudo-random 7-bit words:
// bv[vec][word] for word in [0, int_count) and vec in [0, FIELD*(RULE+1)).
// The nesting order (word outer, vector inner) fixes the rand() consumption
// sequence and must not be swapped.
// bv_final / packet_num are currently unused -- the original -1
// initialization of bv_final is disabled.
void bv_gen( int** bv, int* bv_final, int packet_num){
const int vectors = FIELD*(RULE+1);
int word = 0;
while (word < int_count){
for (int vec = 0; vec < vectors; ++vec){
bv[vec][word] = rand() % 128;
}
++word;
}
}
// Debug dump helper. `type` is a 4-bit selector:
//   bit 0 (1): print tree[][]     (RULE rows, FIELD fields per row)
//   bit 1 (2): print headers[][]  (packet_num packets, FIELD fields each)
//   bit 2 (4): print bv[][]
//   bit 3 (8): print bv_final[]   (one entry per packet)
// Values outside [1, 15] are ignored.
void data_test(int** tree, int** headers, int** bv, int* bv_final, int packet_num, int type){
// Fix: the original used bitwise '|' where logical '||' was intended. The
// numeric result happened to match (operands are 0/1), but '||' states the
// intent and short-circuits.
if (type > 15 || type == 0){
return;
}
if (type % 2 == 1){  // bit 0 set
cout<<"Tree: "<<endl;
for(int i = 0; i < RULE; i++){
cout<<"Line: "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<tree[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 4 == 2 || type % 4 == 3){  // bit 1 set
cout<<endl<<"Headers: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<"Header "<<i<<": ";
for(int j = 0; j < FIELD; j++){
cout<<headers[j][i]<<" ";
}
cout<<endl;
}
}
if (type % 8 == 4 || type % 8 == 5 || type % 8 == 6 || type % 8 == 7){  // bit 2 set
cout<<endl<<"bv: "<<endl;
// NOTE(review): rows iterate up to ALLRULE while bv words are generated per
// int_count in bv_gen -- confirm ALLRULE <= int_count, otherwise this reads
// past the end of each bv[j] array.
for(int i = 0; i < ALLRULE; i++){
cout<<"Line "<<i<<": ";
for (int j = 0; j < FIELD*(RULE+1); j++){
cout<<bv[j][i]<<" ";
}
cout<<endl;
}
}
if (type > 7){  // bit 3 set
cout<<endl<<"bv_final: "<<endl;
for(int i = 0; i < packet_num; i++){
cout<<bv_final[i]<<" ";
}
cout<<endl;
}
cout<<"============== End of Print =============="<<endl;
}
|
baccd8fe4cbe0f6196a248cbd275fcca486c47a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/extract_image_patches_impl.cuh"
#include "include/hip/hip_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
template <typename T>
using Complex = mindspore::utils::Complex<T>;
// Grid-stride gather kernel: writes one element of the extract-image-patches
// (im2col-style) result per flat output position `pos`. Positions whose
// source pixel falls inside the padding region are written as 0.
// The caller pre-computes all stride/size/padding scalars; this kernel only
// decomposes `pos` into (batch, patch, in-patch offset, depth) and recombines
// them into an input offset using the provided strides.
// Launch: 1-D grid/block; any configuration is correct thanks to the
// grid-stride loop.
template <typename T>
__global__ void ExtractImagePatches(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row,
int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride,
int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left,
int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride,
int64_t output_depth, const T *input, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < output_size; pos += blockDim.x * gridDim.x) {
// Split off the batch component only when there is more than one batch.
const int64_t batch_index = need_batch ? (static_cast<int64_t>(pos) / other_stride) : 0;
const int64_t inner_index =
need_batch ? (static_cast<int64_t>(pos) - batch_index * other_stride) : static_cast<int64_t>(pos);
// inner index -> which patch, and the pixel offset inside that patch.
const int64_t patch_index = inner_index / patch_stride;
const int64_t patch_offset = (inner_index - patch_index * patch_stride) / output_depth;
// row: patch row * stride + in-patch row * dilation rate, minus top padding.
const int64_t row_index = patch_index / output_cols;
const int64_t row_offset = patch_offset / row_stride;
const int64_t input_row = row_index * stride_row + row_offset * rate_row - row_padding_top;
if (input_row < 0 || input_row >= input_row_size) {
// Row lies in the padding area: emit zero.
output[pos] = static_cast<T>(0);
continue;
}
// col: same mapping for the column coordinate.
const int64_t col_index = patch_index - row_index * output_cols;
const int64_t col_offset = patch_offset - row_offset * row_stride;
const int64_t input_col = col_index * stride_col + col_offset * rate_col - col_padding_left;
if (input_col < 0 || input_col >= input_col_size) {
// Column lies in the padding area: emit zero.
output[pos] = static_cast<T>(0);
continue;
}
// depth (channel) component of the flat index.
const int64_t depth = inner_index - (inner_index / output_depth) * output_depth;
// input index: recombine (batch, row, col, depth) with caller-provided strides.
const int64_t input_index =
depth + input_col * col_input_stride + input_row * row_input_stride + batch_index * patch_input_stride;
output[pos] = input[static_cast<size_t>(input_index)];
}
return;
}
// Host-side launcher for ExtractImagePatches (HIP build). Derives a 1-D launch
// configuration from output_size via GET_BLOCKS/GET_THREADS and enqueues the
// kernel on `stream`; the call is asynchronous with respect to the host.
// All scalar arguments are forwarded unchanged to the kernel.
template <typename T>
void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row,
int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride,
int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left,
int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride,
int64_t output_depth, const T *input, T *output, hipStream_t stream) {
hipLaunchKernelGGL(( ExtractImagePatches), dim3(GET_BLOCKS(output_size)), dim3(GET_THREADS), 0, stream,
output_size, stride_row, stride_col, rate_row, rate_col, output_cols, need_batch, row_stride, patch_stride,
other_stride, input_row_size, input_col_size, row_padding_top, col_padding_left, col_input_stride, row_input_stride,
patch_input_stride, output_depth, input, output);
}
// Explicit instantiation: compiles and exports (CUDA_LIB_EXPORT) the launcher
// for one supported element type so other objects can link against it. The
// identical pattern repeats below for every other supported type.
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<half>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const half *input, half *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<float>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const float *input, float *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<double>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const double *input, double *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int8_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int8_t *input, int8_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int16_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int16_t *input, int16_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int32_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int32_t *input, int32_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int64_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int64_t *input, int64_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint8_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint8_t *input, uint8_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint16_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint16_t *input, uint16_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint32_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint32_t *input, uint32_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint64_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint64_t *input, uint64_t *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<Complex<float>>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const Complex<float> *input,
Complex<float> *output, hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<Complex<double>>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const Complex<double> *input,
Complex<double> *output, hipStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<bool>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const bool *input, bool *output,
hipStream_t stream);
| baccd8fe4cbe0f6196a248cbd275fcca486c47a6.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/extract_image_patches_impl.cuh"
#include "include/cuda_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
template <typename T>
using Complex = mindspore::utils::Complex<T>;
// Grid-stride gather kernel: writes one element of the extract-image-patches
// (im2col-style) result per flat output position `pos`; padding positions are
// zero-filled. `pos` is decomposed into (batch, patch, in-patch offset, depth)
// and recombined into an input offset via the caller-provided strides.
// Launch: 1-D grid/block; any configuration is correct thanks to the
// grid-stride loop.
template <typename T>
__global__ void ExtractImagePatches(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row,
int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride,
int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left,
int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride,
int64_t output_depth, const T *input, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < output_size; pos += blockDim.x * gridDim.x) {
const int64_t batch_index = need_batch ? (static_cast<int64_t>(pos) / other_stride) : 0;
const int64_t inner_index =
need_batch ? (static_cast<int64_t>(pos) - batch_index * other_stride) : static_cast<int64_t>(pos);
// inner index
const int64_t patch_index = inner_index / patch_stride;
const int64_t patch_offset = (inner_index - patch_index * patch_stride) / output_depth;
// row
const int64_t row_index = patch_index / output_cols;
const int64_t row_offset = patch_offset / row_stride;
const int64_t input_row = row_index * stride_row + row_offset * rate_row - row_padding_top;
if (input_row < 0 || input_row >= input_row_size) {
output[pos] = static_cast<T>(0);
continue;
}
// col
const int64_t col_index = patch_index - row_index * output_cols;
const int64_t col_offset = patch_offset - row_offset * row_stride;
const int64_t input_col = col_index * stride_col + col_offset * rate_col - col_padding_left;
if (input_col < 0 || input_col >= input_col_size) {
output[pos] = static_cast<T>(0);
continue;
}
// depth
const int64_t depth = inner_index - (inner_index / output_depth) * output_depth;
// input index
const int64_t input_index =
depth + input_col * col_input_stride + input_row * row_input_stride + batch_index * patch_input_stride;
output[pos] = input[static_cast<size_t>(input_index)];
}
return;
}
void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row,
int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride,
int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left,
int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride,
int64_t output_depth, const T *input, T *output, cudaStream_t stream) {
ExtractImagePatches<<<GET_BLOCKS(output_size), GET_THREADS, 0, stream>>>(
output_size, stride_row, stride_col, rate_row, rate_col, output_cols, need_batch, row_stride, patch_stride,
other_stride, input_row_size, input_col_size, row_padding_top, col_padding_left, col_input_stride, row_input_stride,
patch_input_stride, output_depth, input, output);
}
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<half>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const half *input, half *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<float>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const float *input, float *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<double>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const double *input, double *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int8_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int8_t *input, int8_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int16_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int16_t *input, int16_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int32_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int32_t *input, int32_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<int64_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const int64_t *input, int64_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint8_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint8_t *input, uint8_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint16_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint16_t *input, uint16_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint32_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint32_t *input, uint32_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<uint64_t>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const uint64_t *input, uint64_t *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<Complex<float>>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const Complex<float> *input,
Complex<float> *output, cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<Complex<double>>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const Complex<double> *input,
Complex<double> *output, cudaStream_t stream);
template CUDA_LIB_EXPORT void CalExtractImagePatchesNHWC<bool>(
size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, int64_t rate_col, int64_t output_cols,
bool need_batch, int64_t row_stride, int64_t patch_stride, int64_t other_stride, int64_t input_row_size,
int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, int64_t col_input_stride,
int64_t row_input_stride, int64_t patch_input_stride, int64_t output_depth, const bool *input, bool *output,
cudaStream_t stream);
|
ec7c16171edbe3ea019fb6b137292e74fe482921.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorMCMMonoGPU.cuh"
#include "IntegratorMCMMonoImplicitGPU.cuh"
#include "IntegratorMCMMonoImplicitNewGPU.cuh"
#include "ShapeSphere.h"
namespace mcm
{
namespace detail
{
//! MCM kernels for ShapeSphere
// Explicit template instantiations: force the GPU driver functions declared in
// the included *.cuh headers to be compiled for ShapeSphere in this
// translation unit, so other objects can link against them without seeing the
// kernel bodies.
template hipError_t gpu_mcm_free_volume<ShapeSphere>(const mcm_free_volume_args_t &args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_mcm_update<ShapeSphere>(const mcm_args_t& args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_mcm_implicit_count_overlaps<ShapeSphere>(const mcm_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_mcm_implicit_accept_reject<ShapeSphere>(const mcm_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_mcm_insert_depletants_queue<ShapeSphere>(const mcm_implicit_args_new_t& args,
const typename ShapeSphere::param_type *d_params);
template hipError_t gpu_mcm_implicit_accept_reject_new<ShapeSphere>(const mcm_implicit_args_new_t& args,
const typename ShapeSphere::param_type *d_params);
}; // end namespace detail
} // end namespace mcm
| ec7c16171edbe3ea019fb6b137292e74fe482921.cu | // Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorMCMMonoGPU.cuh"
#include "IntegratorMCMMonoImplicitGPU.cuh"
#include "IntegratorMCMMonoImplicitNewGPU.cuh"
#include "ShapeSphere.h"
namespace mcm
{
namespace detail
{
//! MCM kernels for ShapeSphere
template cudaError_t gpu_mcm_free_volume<ShapeSphere>(const mcm_free_volume_args_t &args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_mcm_update<ShapeSphere>(const mcm_args_t& args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_mcm_implicit_count_overlaps<ShapeSphere>(const mcm_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_mcm_implicit_accept_reject<ShapeSphere>(const mcm_implicit_args_t& args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_mcm_insert_depletants_queue<ShapeSphere>(const mcm_implicit_args_new_t& args,
const typename ShapeSphere::param_type *d_params);
template cudaError_t gpu_mcm_implicit_accept_reject_new<ShapeSphere>(const mcm_implicit_args_new_t& args,
const typename ShapeSphere::param_type *d_params);
}; // end namespace detail
} // end namespace mcm
|
76826adc8d09102b380c42eda9d56a1bf9165a6e.hip | // !!! This is a file automatically generated by hipify!!!
//bulkBuildSequentialShiftsTest.cu
//Builds a quotient filter all at once, only looping as long as it takes to shift all elements to their final locations.
#include <stdio.h>
#include <assert.h>
#include <string>
#include "../../mt19937ar.h"
#include "quotientFilter.cuh"
#ifndef LOW_BIT_MASK
#define LOW_BIT_MASK(n) ((1U << n) -1U)
#endif
#ifndef NOT_FOUND
#define NOT_FOUND UINT_MAX
#endif
// Report both launch-time ("sync") and execution-time ("async") HIP errors to
// stdout.  Intended to be called immediately after a kernel launch.
void CUDAErrorCheck()
{
hipError_t errSync = hipGetLastError();
hipError_t errAsync = hipDeviceSynchronize();
// NOTE(review): the two calls above are repeated here and their first results
// are discarded; hipGetLastError() also clears the sticky error state, so the
// first pair can swallow a launch error -- confirm the duplication is intended.
errSync = hipGetLastError();
errAsync = hipDeviceSynchronize();
if (errSync != hipSuccess)
printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
if (errAsync != hipSuccess)
printf("Async kernel error: %s\n", hipGetErrorString(errAsync));
}
// Fill numberArray with n pseudo-random 32-bit values drawn from the
// Mersenne Twister generator (mt19937ar); the generator must already have
// been seeded via init_genrand().
void generateRandomNumbers(unsigned int *numberArray, int n)
{
    for (int idx = 0; idx < n; ++idx)
        numberArray[idx] = genrand_int32();
}
// Fill newNumberArray with n random values, none of which appears in
// comparisonNumberArray (used to measure the filter's false-positive rate).
// Fixes: after regenerating a colliding value the original restarted the
// comparison scan with `j = 0` followed by the loop's `j++`, so the scan
// resumed at index 1 and a regenerated value equal to
// comparisonNumberArray[0] could slip through.  The outer while(numNew < n)
// loop was redundant (numNew reached n after a single pass) and is removed.
void generateNewRandomNumbers(unsigned int *newNumberArray, unsigned int *comparisonNumberArray, int n)
{
    generateRandomNumbers(newNumberArray, n);
    for (int i = 0; i < n; i++){
        bool collision = true;
        while (collision){
            collision = false;
            for (int j = 0; j < n; j++){
                if (newNumberArray[i] == comparisonNumberArray[j]){
                    newNumberArray[i] = genrand_int32();
                    collision = true;   // re-scan the whole array against the new value
                    break;
                }
            }
        }
    }
}
// Quotient-filter bulk-build test driver.
// Usage: <exe> q r alpha Dup|NoDup
//   q: log2 of the number of filter slots, r: remainder bits per slot,
//   alpha: target fill fraction, last arg: keep or drop duplicate inserts.
// Builds the filter from random keys, verifies there are no false negatives,
// then measures the false-positive rate with keys known not to be inserted.
// Fix: h_failedLookupValues and d_failedLookupValues were never freed.
int main(int argc, char* argv[])
{
    assert(argc == 5);
    unsigned int q = atoi(argv[1]);
    unsigned int r = atoi(argv[2]);
    float alpha = atof(argv[3]);
    bool NoDuplicates;
    std::string dup(argv[4]);
    if(dup == "Dup") NoDuplicates = false;
    else if(dup == "NoDup") NoDuplicates = true;
    else{
        printf("ERROR: Last argument should be Dup or NoDup. \nPlease indicate whether you want to leave duplicate items or remove them.");
        return 0;
    }
    //Initialize filter
    printf("Building filter with 2^%u slots and %u-bit remainders....\n", q, r);
    struct quotient_filter d_qfilter;
    initFilterGPU(&d_qfilter, q, r);
    hipMemset(d_qfilter.table, 0, calcNumSlotsGPU(q, r) * sizeof(unsigned char));
    //Generate set of random numbers
    printf("Generating random numbers to fill the filter to %3.1f percent full...\n", (alpha * 100));
    int numValues = alpha * (1 << q);
    unsigned int* h_randomValues = new unsigned int[numValues];
    init_genrand(time(NULL)); //initialize random number generator
    generateRandomNumbers(h_randomValues, numValues);
    printf("%d random numbers generated.\n", numValues);
    unsigned int* d_randomValues;
    hipMalloc((void**) &d_randomValues, numValues * sizeof(unsigned int));
    hipMemcpy(d_randomValues, h_randomValues, numValues * sizeof(unsigned int), hipMemcpyHostToDevice);
    //Build filter
    CUDAErrorCheck();
    float filterBuildTime = bulkBuildSequentialShifts(d_qfilter, numValues, d_randomValues, NoDuplicates);
    CUDAErrorCheck();
    //Print filter
    // printQuotientFilterGPU(&d_qfilter);
    //Check filter accuracy: every inserted value must be found (no false negatives)
    unsigned int* d_lookupReturnValues;
    hipMalloc((void**) &d_lookupReturnValues, numValues * sizeof(unsigned int));
    hipMemset(d_lookupReturnValues, 0, numValues * sizeof(unsigned int));
    CUDAErrorCheck();
    float lookupTime = launchUnsortedLookups(d_qfilter, numValues, d_randomValues, d_lookupReturnValues);
    CUDAErrorCheck();
    //Transfer back results of lookups and check that they succeeded
    unsigned int* h_lookupReturnValues = new unsigned int[numValues];
    for(int i = 0; i < numValues; i++){
        h_lookupReturnValues[i] = 0;
    }
    hipMemcpy(h_lookupReturnValues, d_lookupReturnValues, numValues * sizeof(unsigned int), hipMemcpyDeviceToHost);
    int insertFailures = 0;
    for(int i = 0; i < numValues; i++){
        if (h_lookupReturnValues[i] == NOT_FOUND){
            printf("ERROR: %dth value not found.\n", i);
            printf("Value: %u \n", h_randomValues[i]);
            //unsigned int hashValue = FNVhashGPU(h_randomValues[i], (1 << (q + r)));
            unsigned int hashValue = Normal_APHash(h_randomValues[i], (1 << (q + r)));
            unsigned int fq = (hashValue >> r) & LOW_BIT_MASK(q);
            unsigned int fr = hashValue & LOW_BIT_MASK(r);
            printf("Quotient = %u \t Remainder = %u\n", fq, fr);
            insertFailures++;
        }
    }
    printf("%d inserted values were not found.\n", insertFailures);
    //Calculate false positive rate
    //Generate array of values not in filter
    unsigned int* h_failedLookupValues = new unsigned int[numValues];
    generateNewRandomNumbers(h_failedLookupValues, h_randomValues, numValues);
    unsigned int* d_failedLookupValues;
    hipMalloc((void**) &d_failedLookupValues, numValues * sizeof(unsigned int));
    hipMemcpy(d_failedLookupValues, h_failedLookupValues, numValues * sizeof(unsigned int), hipMemcpyHostToDevice);
    //Allocate output array
    unsigned int* d_failureReturnValues;
    hipMalloc((void**) &d_failureReturnValues, numValues * sizeof(unsigned int));
    hipMemset(d_failureReturnValues, 0, numValues * sizeof(unsigned int));
    //Perform lookups to find false positive rate
    CUDAErrorCheck();
    lookupTime = launchUnsortedLookups(d_qfilter, numValues, d_failedLookupValues, d_failureReturnValues);
    CUDAErrorCheck();
    //Transfer data back and find false positive rate
    unsigned int* h_failureReturnValues = new unsigned int[numValues];
    for(int i = 0; i < numValues; i++){
        h_failureReturnValues[i] = 0;
    }
    hipMemcpy(h_failureReturnValues, d_failureReturnValues, numValues * sizeof(unsigned int), hipMemcpyDeviceToHost);
    int falsePositives = 0;
    for(int i = 0; i < numValues; i++){
        if(h_failureReturnValues[i] != NOT_FOUND){
            falsePositives++;
        }
    }
    printf("False positive rate: %f \n", ((float)falsePositives)/numValues);
    //Free memory
    delete[] h_randomValues;
    hipFree(d_randomValues);
    hipFree(d_qfilter.table);
    delete[] h_lookupReturnValues;
    hipFree(d_lookupReturnValues);
    delete[] h_failedLookupValues;   // fix: was leaked
    hipFree(d_failedLookupValues);   // fix: was leaked
    delete[] h_failureReturnValues;
    hipFree(d_failureReturnValues);
    hipDeviceReset();
    return 0;
}
| 76826adc8d09102b380c42eda9d56a1bf9165a6e.cu | //bulkBuildSequentialShiftsTest.cu
//Builds a quotient filter all at once, only looping as long as it takes to shift all elements to their final locations.
#include <stdio.h>
#include <assert.h>
#include <string>
#include "../../mt19937ar.h"
#include "quotientFilter.cuh"
#ifndef LOW_BIT_MASK
#define LOW_BIT_MASK(n) ((1U << n) -1U)
#endif
#ifndef NOT_FOUND
#define NOT_FOUND UINT_MAX
#endif
// Report both launch-time ("sync") and execution-time ("async") CUDA errors to
// stdout.  Intended to be called immediately after a kernel launch.
void CUDAErrorCheck()
{
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
// NOTE(review): the two calls above are repeated here and their first results
// are discarded; cudaGetLastError() also clears the sticky error state, so the
// first pair can swallow a launch error -- confirm the duplication is intended.
errSync = cudaGetLastError();
errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
}
// Fill numberArray with n pseudo-random 32-bit values drawn from the
// Mersenne Twister generator (mt19937ar); the generator must already have
// been seeded via init_genrand().
void generateRandomNumbers(unsigned int *numberArray, int n)
{
    for (int idx = 0; idx < n; ++idx)
        numberArray[idx] = genrand_int32();
}
// Fill newNumberArray with n random values, none of which appears in
// comparisonNumberArray (used to measure the filter's false-positive rate).
// Fixes: after regenerating a colliding value the original restarted the
// comparison scan with `j = 0` followed by the loop's `j++`, so the scan
// resumed at index 1 and a regenerated value equal to
// comparisonNumberArray[0] could slip through.  The outer while(numNew < n)
// loop was redundant (numNew reached n after a single pass) and is removed.
void generateNewRandomNumbers(unsigned int *newNumberArray, unsigned int *comparisonNumberArray, int n)
{
    generateRandomNumbers(newNumberArray, n);
    for (int i = 0; i < n; i++){
        bool collision = true;
        while (collision){
            collision = false;
            for (int j = 0; j < n; j++){
                if (newNumberArray[i] == comparisonNumberArray[j]){
                    newNumberArray[i] = genrand_int32();
                    collision = true;   // re-scan the whole array against the new value
                    break;
                }
            }
        }
    }
}
// Quotient-filter bulk-build test driver.
// Usage: <exe> q r alpha Dup|NoDup
//   q: log2 of the number of filter slots, r: remainder bits per slot,
//   alpha: target fill fraction, last arg: keep or drop duplicate inserts.
// Builds the filter from random keys, verifies there are no false negatives,
// then measures the false-positive rate with keys known not to be inserted.
// Fix: h_failedLookupValues and d_failedLookupValues were never freed.
int main(int argc, char* argv[])
{
    assert(argc == 5);
    unsigned int q = atoi(argv[1]);
    unsigned int r = atoi(argv[2]);
    float alpha = atof(argv[3]);
    bool NoDuplicates;
    std::string dup(argv[4]);
    if(dup == "Dup") NoDuplicates = false;
    else if(dup == "NoDup") NoDuplicates = true;
    else{
        printf("ERROR: Last argument should be Dup or NoDup. \nPlease indicate whether you want to leave duplicate items or remove them.");
        return 0;
    }
    //Initialize filter
    printf("Building filter with 2^%u slots and %u-bit remainders....\n", q, r);
    struct quotient_filter d_qfilter;
    initFilterGPU(&d_qfilter, q, r);
    cudaMemset(d_qfilter.table, 0, calcNumSlotsGPU(q, r) * sizeof(unsigned char));
    //Generate set of random numbers
    printf("Generating random numbers to fill the filter to %3.1f percent full...\n", (alpha * 100));
    int numValues = alpha * (1 << q);
    unsigned int* h_randomValues = new unsigned int[numValues];
    init_genrand(time(NULL)); //initialize random number generator
    generateRandomNumbers(h_randomValues, numValues);
    printf("%d random numbers generated.\n", numValues);
    unsigned int* d_randomValues;
    cudaMalloc((void**) &d_randomValues, numValues * sizeof(unsigned int));
    cudaMemcpy(d_randomValues, h_randomValues, numValues * sizeof(unsigned int), cudaMemcpyHostToDevice);
    //Build filter
    CUDAErrorCheck();
    float filterBuildTime = bulkBuildSequentialShifts(d_qfilter, numValues, d_randomValues, NoDuplicates);
    CUDAErrorCheck();
    //Print filter
    // printQuotientFilterGPU(&d_qfilter);
    //Check filter accuracy: every inserted value must be found (no false negatives)
    unsigned int* d_lookupReturnValues;
    cudaMalloc((void**) &d_lookupReturnValues, numValues * sizeof(unsigned int));
    cudaMemset(d_lookupReturnValues, 0, numValues * sizeof(unsigned int));
    CUDAErrorCheck();
    float lookupTime = launchUnsortedLookups(d_qfilter, numValues, d_randomValues, d_lookupReturnValues);
    CUDAErrorCheck();
    //Transfer back results of lookups and check that they succeeded
    unsigned int* h_lookupReturnValues = new unsigned int[numValues];
    for(int i = 0; i < numValues; i++){
        h_lookupReturnValues[i] = 0;
    }
    cudaMemcpy(h_lookupReturnValues, d_lookupReturnValues, numValues * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    int insertFailures = 0;
    for(int i = 0; i < numValues; i++){
        if (h_lookupReturnValues[i] == NOT_FOUND){
            printf("ERROR: %dth value not found.\n", i);
            printf("Value: %u \n", h_randomValues[i]);
            //unsigned int hashValue = FNVhashGPU(h_randomValues[i], (1 << (q + r)));
            unsigned int hashValue = Normal_APHash(h_randomValues[i], (1 << (q + r)));
            unsigned int fq = (hashValue >> r) & LOW_BIT_MASK(q);
            unsigned int fr = hashValue & LOW_BIT_MASK(r);
            printf("Quotient = %u \t Remainder = %u\n", fq, fr);
            insertFailures++;
        }
    }
    printf("%d inserted values were not found.\n", insertFailures);
    //Calculate false positive rate
    //Generate array of values not in filter
    unsigned int* h_failedLookupValues = new unsigned int[numValues];
    generateNewRandomNumbers(h_failedLookupValues, h_randomValues, numValues);
    unsigned int* d_failedLookupValues;
    cudaMalloc((void**) &d_failedLookupValues, numValues * sizeof(unsigned int));
    cudaMemcpy(d_failedLookupValues, h_failedLookupValues, numValues * sizeof(unsigned int), cudaMemcpyHostToDevice);
    //Allocate output array
    unsigned int* d_failureReturnValues;
    cudaMalloc((void**) &d_failureReturnValues, numValues * sizeof(unsigned int));
    cudaMemset(d_failureReturnValues, 0, numValues * sizeof(unsigned int));
    //Perform lookups to find false positive rate
    CUDAErrorCheck();
    lookupTime = launchUnsortedLookups(d_qfilter, numValues, d_failedLookupValues, d_failureReturnValues);
    CUDAErrorCheck();
    //Transfer data back and find false positive rate
    unsigned int* h_failureReturnValues = new unsigned int[numValues];
    for(int i = 0; i < numValues; i++){
        h_failureReturnValues[i] = 0;
    }
    cudaMemcpy(h_failureReturnValues, d_failureReturnValues, numValues * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    int falsePositives = 0;
    for(int i = 0; i < numValues; i++){
        if(h_failureReturnValues[i] != NOT_FOUND){
            falsePositives++;
        }
    }
    printf("False positive rate: %f \n", ((float)falsePositives)/numValues);
    //Free memory
    delete[] h_randomValues;
    cudaFree(d_randomValues);
    cudaFree(d_qfilter.table);
    delete[] h_lookupReturnValues;
    cudaFree(d_lookupReturnValues);
    delete[] h_failedLookupValues;   // fix: was leaked
    cudaFree(d_failedLookupValues);  // fix: was leaked
    delete[] h_failureReturnValues;
    cudaFree(d_failureReturnValues);
    cudaDeviceReset();
    return 0;
}
|
18550e8c65775d9f9c000651de4a759ffb441c8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Constant values on device
// /!\ undefined in host code, just in kernels /!\ __device__
#define MAX_WEIGHT_VALUES 50
#define MIN_DET FLT_EPSILON
__constant__ __device__ int LK_iteration;
__constant__ __device__ int LK_patch;
__constant__ __device__ int LK_points;
__constant__ __device__ int LK_height;
__constant__ __device__ int LK_width;
__constant__ __device__ int LK_pyr_w;
__constant__ __device__ int LK_pyr_h;
__constant__ __device__ int LK_pyr_level;
__constant__ __device__ int LK_width_offset;
__constant__ __device__ char LK_init_guess;
__constant__ __device__ float LK_scaling;
__constant__ __device__ float LK_threshold;
__constant__ __device__ float LK_Weight[MAX_WEIGHT_VALUES];
__constant__ __device__ int LK_win_size;
// Texture buffer is used for each image for on-the-fly interpolation
texture <float, 2, hipReadModeElementType> texRef_pyramid_prev;
texture <float, 2, hipReadModeElementType> texRef_pyramid_cur;
// Image pyramids -> texture buffers
texture <float, 2, hipReadModeElementType> gpu_textr_pict_0; // pictures > texture space
texture <float, 2, hipReadModeElementType> gpu_textr_pict_1;
texture <float, 2, hipReadModeElementType> gpu_textr_deriv_x; // gradients > texture space
texture <float, 2, hipReadModeElementType> gpu_textr_deriv_y;
// Convert RGB Picture to grey/float
// Convert Grey uchar picture to float
// Downsample picture to build pyramid lower level (naive implementation..)
// Kernel to compute the tracking
// Kernel to compute the tracking
// Convert an interleaved 3-channel 8-bit image to a float greyscale image,
// one thread per pixel; threads with idx >= N return immediately.
// The luma weights are applied to channels 0/1/2 as 0.1144/0.5867/0.2989,
// i.e. B/G/R order -- presumably the input uses OpenCV-style BGR layout
// (TODO confirm channel order with the caller).
__global__ void convertRGBToGrey(unsigned char *d_in, float *d_out, int N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
    return;
const unsigned char *px = d_in + 3 * idx;
d_out[idx] = px[0] * 0.1144f + px[1] * 0.5867f + px[2] * 0.2989f;
} | 18550e8c65775d9f9c000651de4a759ffb441c8d.cu | #include "includes.h"
// Constant values on device
// /!\ undefined in host code, just in kernels /!\ __device__
#define MAX_WEIGHT_VALUES 50
#define MIN_DET FLT_EPSILON
__constant__ __device__ int LK_iteration;
__constant__ __device__ int LK_patch;
__constant__ __device__ int LK_points;
__constant__ __device__ int LK_height;
__constant__ __device__ int LK_width;
__constant__ __device__ int LK_pyr_w;
__constant__ __device__ int LK_pyr_h;
__constant__ __device__ int LK_pyr_level;
__constant__ __device__ int LK_width_offset;
__constant__ __device__ char LK_init_guess;
__constant__ __device__ float LK_scaling;
__constant__ __device__ float LK_threshold;
__constant__ __device__ float LK_Weight[MAX_WEIGHT_VALUES];
__constant__ __device__ int LK_win_size;
// Texture buffer is used for each image for on-the-fly interpolation
texture <float, 2, cudaReadModeElementType> texRef_pyramid_prev;
texture <float, 2, cudaReadModeElementType> texRef_pyramid_cur;
// Image pyramids -> texture buffers
texture <float, 2, cudaReadModeElementType> gpu_textr_pict_0; // pictures > texture space
texture <float, 2, cudaReadModeElementType> gpu_textr_pict_1;
texture <float, 2, cudaReadModeElementType> gpu_textr_deriv_x; // gradients > texture space
texture <float, 2, cudaReadModeElementType> gpu_textr_deriv_y;
// Convert RGB Picture to grey/float
// Convert Grey uchar picture to float
// Downsample picture to build pyramid lower level (naive implementation..)
// Kernel to compute the tracking
// Kernel to compute the tracking
// Convert an interleaved 3-channel 8-bit image to a float greyscale image,
// one thread per pixel; threads with idx >= N return immediately.
// The luma weights are applied to channels 0/1/2 as 0.1144/0.5867/0.2989,
// i.e. B/G/R order -- presumably the input uses OpenCV-style BGR layout
// (TODO confirm channel order with the caller).
__global__ void convertRGBToGrey(unsigned char *d_in, float *d_out, int N)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
    return;
const unsigned char *px = d_in + 3 * idx;
d_out[idx] = px[0] * 0.1144f + px[1] * 0.5867f + px[2] * 0.2989f;
} |
3222f52559c152bf1cc744898212b2fcbec7e262.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// this is the deleayed learning, which gives an effect similar to batching
#define BATCHING 512
// ---- Kernel stubs: every body below is an unimplemented placeholder for a
// ---- data-movement or network-update kernel referenced from main().
// this pushes a prepared sense data into a subspace on device
// often this will be prepared on the host
__global__ void pushSenseData() {
}
// this pulls a calculated sense data from a subspace on device
// often this will be displayed on the host
__global__ void pullSenseData() {
}
// this create blank sense data for a subspace on device
// not sure if this makes sense
__global__ void blankSenseData() {
}
// this pushes prepared states into short time (state) memory on device
__global__ void pushStates() {
}
// this pulls calculated states from short time (state) memory on device
__global__ void pullStates() {
}
// this create blank states for short time (state) memory on device
__global__ void blankStates() {
}
// this pushes a weight tensor for the complete manifold on device
// should perhaps be segmented
__global__ void pushWeights() {
}
// this pulls a weight tensor for the complete manifold on device
// should perhaps be segmented
__global__ void pullWeights() {
}
// this create a blank weight tensor for the complete manifold on device
__global__ void blankWeights() {
}
// Evaluate apical dendrites and set firing of synapses
// current activation of synapses are kept for later
__global__ void activateApicalDendrite() {
// activateSynapses();
// tentativeLearnSynapses();
}
// Evaluate basal dendrites and set firing of synapses
// current activation of synapses are kept for later
// NOTE(review): `layer` is accepted but unused while the body is a stub.
__global__ void activateBasalDendrite(int layer) {
// activateSynapses();
// tentativeLearnSynapses();
}
// Evaluate dendrites and set tentative learning states of synapses
// NOTE(review): `layer` is accepted but unused while the body is a stub.
__global__ void spikeNode(int layer) {
// previousActivation = currentActivation;
// activateAxon();
}
// Top-level simulation loop skeleton: pushes sense data, updates the network
// layer by layer, applies delayed ("batched") learning every BATCHING ticks,
// and pulls results back.  All kernel launches are still commented out.
// Fixes: the tick counter declaration was missing its identifier
// ("unsigned int = 0;", a compile error), and the batching condition fired on
// every tick that was NOT a multiple of BATCHING instead of every BATCHING-th
// tick.
int main() {
    // declare all variables
    unsigned int tick = 0;  // fix: identifier `tick` was missing
    bool die = false;       // NOTE(review): nothing ever sets `die`, so the loop never exits -- confirm intended
    // allocate host memory
    // malloc();
    // allocate device memory
    // hipMalloc();
    // load weights from store on host
    //
    // load state from store on host
    while (!die) {
        tick++;
        // push sense data from host to device
        // pushSenseData();
        //-- neo --
        // updates after sense data
        // evaluate apical state on device
        // activateApicalDendrite<<<numblocks, numthreads>>>();
        // evaluate each of the layers
        // activateBasalDendrite<<<numblocks, numthreads>>>(1);
        // spikeNode<<<numblocks, numthreads>>>(1);
        // activateBasalDendrite<<<numblocks, numthreads>>>(2);
        // spikeNode<<<numblocks, numthreads>>>(2);
        // activateBasalDendrite<<<numblocks, numthreads>>>(3);
        // spikeNode<<<numblocks, numthreads>>>(3);
        // activateBasalDendrite<<<numblocks, numthreads>>>(4);
        // spikeNode<<<numblocks, numthreads>>>(4);
        // activateBasalDendrite<<<numblocks, numthreads>>>(5);
        // spikeNode<<<numblocks, numthreads>>>(5);
        //-- basket --
        // should this go first?
        // activateBasket<<<numblocks, numthreads>>>();
        //-- candle --
        // updates after neo
        // activateCandle<<<numblocks, numthreads>>>();
        //-- learn --
        // updates after candle
        // fix: was `if (tick % BATCHING)`, which is true on almost every tick
        // and reset the counter immediately; learning is meant to run once
        // every BATCHING ticks (delayed learning, similar to batching).
        if (tick % BATCHING == 0) {
            tick = 0;
            //learnSynapses<<numblocks, numthreads>>();
        }
        //-- state --
        // updates after candle
        //-- assoc --
        // updates after candle
        // pull sense data from device to host
        // pullSenseData();
    }
    // save state to store on host
    // pullState();
    // free device memory
    // hipFree();
    // free host memory
    // free();
    return 0;
} | 3222f52559c152bf1cc744898212b2fcbec7e262.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// this is the deleayed learning, which gives an effect similar to batching
#define BATCHING 512
// ---- Kernel stubs: every body below is an unimplemented placeholder for a
// ---- data-movement or network-update kernel referenced from main().
// this pushes a prepared sense data into a subspace on device
// often this will be prepared on the host
__global__ void pushSenseData() {
}
// this pulls a calculated sense data from a subspace on device
// often this will be displayed on the host
__global__ void pullSenseData() {
}
// this create blank sense data for a subspace on device
// not sure if this makes sense
__global__ void blankSenseData() {
}
// this pushes prepared states into short time (state) memory on device
__global__ void pushStates() {
}
// this pulls calculated states from short time (state) memory on device
__global__ void pullStates() {
}
// this create blank states for short time (state) memory on device
__global__ void blankStates() {
}
// this pushes a weight tensor for the complete manifold on device
// should perhaps be segmented
__global__ void pushWeights() {
}
// this pulls a weight tensor for the complete manifold on device
// should perhaps be segmented
__global__ void pullWeights() {
}
// this create a blank weight tensor for the complete manifold on device
__global__ void blankWeights() {
}
// Evaluate apical dendrites and set firing of synapses
// current activation of synapses are kept for later
__global__ void activateApicalDendrite() {
// activateSynapses();
// tentativeLearnSynapses();
}
// Evaluate basal dendrites and set firing of synapses
// current activation of synapses are kept for later
// NOTE(review): `layer` is accepted but unused while the body is a stub.
__global__ void activateBasalDendrite(int layer) {
// activateSynapses();
// tentativeLearnSynapses();
}
// Evaluate dendrites and set tentative learning states of synapses
// NOTE(review): `layer` is accepted but unused while the body is a stub.
__global__ void spikeNode(int layer) {
// previousActivation = currentActivation;
// activateAxon();
}
// Top-level simulation loop skeleton: pushes sense data, updates the network
// layer by layer, applies delayed ("batched") learning every BATCHING ticks,
// and pulls results back.  All kernel launches are still commented out.
// Fixes: the tick counter declaration was missing its identifier
// ("unsigned int = 0;", a compile error), and the batching condition fired on
// every tick that was NOT a multiple of BATCHING instead of every BATCHING-th
// tick.
int main() {
    // declare all variables
    unsigned int tick = 0;  // fix: identifier `tick` was missing
    bool die = false;       // NOTE(review): nothing ever sets `die`, so the loop never exits -- confirm intended
    // allocate host memory
    // malloc();
    // allocate device memory
    // cudaMalloc();
    // load weights from store on host
    //
    // load state from store on host
    while (!die) {
        tick++;
        // push sense data from host to device
        // pushSenseData();
        //-- neo --
        // updates after sense data
        // evaluate apical state on device
        // activateApicalDendrite<<<numblocks, numthreads>>>();
        // evaluate each of the layers
        // activateBasalDendrite<<<numblocks, numthreads>>>(1);
        // spikeNode<<<numblocks, numthreads>>>(1);
        // activateBasalDendrite<<<numblocks, numthreads>>>(2);
        // spikeNode<<<numblocks, numthreads>>>(2);
        // activateBasalDendrite<<<numblocks, numthreads>>>(3);
        // spikeNode<<<numblocks, numthreads>>>(3);
        // activateBasalDendrite<<<numblocks, numthreads>>>(4);
        // spikeNode<<<numblocks, numthreads>>>(4);
        // activateBasalDendrite<<<numblocks, numthreads>>>(5);
        // spikeNode<<<numblocks, numthreads>>>(5);
        //-- basket --
        // should this go first?
        // activateBasket<<<numblocks, numthreads>>>();
        //-- candle --
        // updates after neo
        // activateCandle<<<numblocks, numthreads>>>();
        //-- learn --
        // updates after candle
        // fix: was `if (tick % BATCHING)`, which is true on almost every tick
        // and reset the counter immediately; learning is meant to run once
        // every BATCHING ticks (delayed learning, similar to batching).
        if (tick % BATCHING == 0) {
            tick = 0;
            //learnSynapses<<numblocks, numthreads>>();
        }
        //-- state --
        // updates after candle
        //-- assoc --
        // updates after candle
        // pull sense data from device to host
        // pullSenseData();
    }
    // save state to store on host
    // pullState();
    // free device memory
    // cudaFree();
    // free host memory
    // free();
    return 0;
} |
73dc34713926f2589ac9b00a00675a447a411c70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_math.h>
#include <helper_string.h>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/replace.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <hip/hip_vector_types.h>
#include "node.h"
#define MAX_DEPTH 16
#define INSERTION_SORT 32
#define gpuErrchk(ans) { \
gpuAssert((ans), __FILE__, __LINE__); \
}
// Print a HIP error with its source location to stderr and, unless `abort`
// is false, terminate the process with the error code.  Invoked through the
// gpuErrchk(...) macro defined above.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
    if (code == hipSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// In-place selection sort of data[left..right] (inclusive) using the
// three-way comparator `compar` (-1 / 0 / 1).  Used as the base case of
// the dynamic-parallelism quicksort for small partitions.
__device__ void selection_sort(float3* data, int left, int right, int (*compar)(const float3*, const float3*)) {
    for (int i = left; i <= right; ++i) {
        int best = i;
        float3 best_val = data[i];
        // Find the smallest remaining element in data[i+1..right].
        for (int j = i + 1; j <= right; ++j) {
            float3 candidate = data[j];
            if (compar(&candidate, &best_val) == -1) {
                best = j;
                best_val = candidate;
            }
        }
        // Swap it into position i.
        if (best != i) {
            data[best] = data[i];
            data[i] = best_val;
        }
    }
}
// Recursive quicksort over data[left..right] using dynamic parallelism;
// falls back to selection_sort for small partitions or when the recursion
// depth limit MAX_DEPTH is reached.  Child partitions are launched in
// non-blocking streams so the two halves may run concurrently.
// Fix: the right-hand partition scan compared `lval` against the pivot --
// `lval` does not change while rptr moves, so the loop could spin forever
// (or mis-partition); it must compare `rval` (cf. NVIDIA's
// cdpSimpleQuicksort sample).
__global__ void cdp_simple_quicksort(float3* data, int left, int right, int (*compar)(const float3*, const float3*), int depth=0) {
    if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT) {
        selection_sort(data, left, right, compar);
        return;
    }
    float3* lptr = data + left;
    float3* rptr = data + right;
    float3 pivot = data[(left + right) / 2];
    // Hoare-style partition around the pivot value.
    while (lptr <= rptr) {
        float3 lval = *lptr;
        float3 rval = *rptr;
        // Advance the left pointer past elements ordered before the pivot.
        while (compar(&lval, &pivot) == -1) {
            lptr++;
            lval = *lptr;
        }
        // Retreat the right pointer past elements ordered after the pivot
        // (bug fix: test rval, not lval).
        while (compar(&rval, &pivot) == 1) {
            rptr--;
            rval = *rptr;
        }
        if (lptr <= rptr) {
            *lptr++ = rval;
            *rptr-- = lval;
        }
    }
    int nright = rptr - data;
    int nleft = lptr - data;
    if (left < (rptr - data)) {
        hipStream_t s;
        hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
        hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s, data, left, nright, compar, depth + 1);
        hipStreamDestroy(s);
    }
    if ((lptr - data) < right) {
        hipStream_t s1;
        hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
        hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, s1, data, nleft, right, compar, depth + 1);
        hipStreamDestroy(s1);
    }
}
// Placeholder bilateral-filter kernel: currently copies src to dst unchanged.
// Launched with a ceil-divided grid (see bilateralFilter's use of divUp), so
// edge blocks contain threads that fall outside the image.
// Fix: guard against out-of-bounds rows/columns; without it those threads
// read and write past the image.
__global__ void bilateral_kernel(const cv::gpu::PtrStepSz<float3> src,
cv::gpu::PtrStep<float3> dst) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= src.cols || y >= src.rows)
        return;
    //TODO: bilateral filter
    dst.ptr(y)[x] = src.ptr(y)[x];
}
// Ceiling division: smallest number of `grain`-sized chunks needed to cover
// `total` (assumes grain > 0).  Used to round up grid dimensions.
int divUp(int total, int grain) {
    const int rounded = total + grain - 1;
    return rounded / grain;
}
// Upload src_host to the GPU, run bilateral_kernel over it with a 32x8 block
// layout (grid rounded up to cover the whole image), and download the result
// into dst_host.
// NOTE(review): hostKdTree is currently unused -- the device upload for it is
// commented out below; confirm whether the kd-tree is still meant to be
// passed to the kernel.
void bilateralFilter(const cv::Mat& src_host, cv::Mat& dst_host, Node* hostKdTree) {
dim3 block(32, 8);
dim3 grid(divUp(src_host.cols, block.x), divUp(src_host.rows, block.y));
// Prefer L1 cache over shared memory for this kernel.
hipFuncSetCacheConfig(bilateral_kernel, hipFuncCachePreferL1);
cv::gpu::GpuMat src_device(src_host), dst_device(src_host.rows, src_host.cols, src_host.type());
//hipMalloc((void **) deviceKdTree, sizeof(hostKdTree));
hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0,
src_device,
dst_device
);
// Surface launch errors, then wait for completion before downloading.
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
dst_device.download(dst_host);
}
// Three-way comparators for float3 points, one per coordinate axis (qsort
// convention: -1 / 0 / 1).  Used to order points while building the kd-tree.
// Fix: compareY and compareZ compared the x coordinate (copy-paste error);
// they must compare y and z respectively.
__device__
int compareX(const float3* a, const float3* b) {
    float arg1 = a->x;
    float arg2 = b->x;
    if (arg1 < arg2) return -1;
    if (arg1 > arg2) return 1;
    return 0;
}
__device__
int compareY(const float3* a, const float3* b) {
    float arg1 = a->y;
    float arg2 = b->y;
    if (arg1 < arg2) return -1;
    if (arg1 > arg2) return 1;
    return 0;
}
__device__
int compareZ(const float3* a, const float3* b) {
    float arg1 = a->z;
    float arg2 = b->z;
    if (arg1 < arg2) return -1;
    if (arg1 > arg2) return 1;
    return 0;
}
// Recursively build a kd-tree over data[0..numPoints) using dynamic
// parallelism: sort the slice along the axis selected by depth % 3, put the
// median at this node, then recurse on each half in its own stream.
// NOTE(review): `parent` is passed BY VALUE, so the `new Node` assigned below
// never reaches the caller (and parent->left / parent->right passed to the
// child launches are uninitialized at that point); the tree built here is
// unreachable and the device-side `new` allocations are leaked.  Fixing this
// requires changing the interface (e.g. Node** or returning via a buffer).
// NOTE(review): nothing waits for the child sort kernel before the median is
// read -- confirm the device-side launch ordering guarantees this.
__global__
void createKdTree(Node* parent, float3* data, int numPoints, int depth=0) {
printf("Depth: %i, numPoints: %i", depth, numPoints);
if (numPoints == 0) {
parent = NULL;
return;
} else {
parent = new Node;
}
// Cycle through x, y, z as the splitting axis by depth.
int axis = depth % 3;
int (*compar)(const float3*, const float3*);
switch (axis) {
case 0:
compar = &compareX;
break;
case 1:
compar = &compareY;
break;
case 2:
compar = &compareZ;
break;
}
// Sort this slice along the chosen axis, then split at the median.
hipLaunchKernelGGL(( cdp_simple_quicksort), dim3(1), dim3(1), 0, 0, data, 0, numPoints - 1, compar);
int median = numPoints / 2;
parent->location = &data[median];
hipStream_t s;
hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
printf("Launching left createKdTree kernel from createKdTreeKernel");
hipLaunchKernelGGL(( createKdTree), dim3(1), dim3(1), 0, s, parent->left, data, median, depth + 1);
hipStreamDestroy(s);
hipStream_t s1;
hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
printf("Launching right createKdTree kernel from createKdTreeKernel");
hipLaunchKernelGGL(( createKdTree), dim3(1), dim3(1), 0, s1, parent->right, data + (median + 1), numPoints - median - 1, depth + 1);
hipStreamDestroy(s1);
}
// Entry kernel that kicks off kd-tree construction on the device.
// NOTE(review): `tree` is a local pointer and createKdTree receives it by
// value, so the built tree is never stored or used afterwards -- confirm how
// the result is meant to be retrieved.
__global__
void bilateral_kernel2(float3* pointList, int n) {
Node* tree = 0;
printf("Launching createKdTree kernel from bilateral_kernel2");
hipLaunchKernelGGL(( createKdTree), dim3(1), dim3(1), 0, 0, tree, pointList, n);
}
// Copy the host point list to the device and launch bilateral_kernel2, which
// builds the kd-tree via dynamic parallelism.  The device-runtime sync-depth
// limit is raised to MAX_DEPTH so nested launches can synchronize.
// Fix: device_pointList was never freed (leak on every call); allocation and
// copy results are now checked as well.
void bilateralFilter2(const float3* pointList, int n) {
    checkCudaErrors(hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, MAX_DEPTH));
    float3* device_pointList;
    gpuErrchk(hipMalloc(&device_pointList, n * sizeof(float3)));
    gpuErrchk(hipMemcpy(device_pointList, pointList, n * sizeof(float3), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( bilateral_kernel2), dim3(1), dim3(1), 0, 0, device_pointList, n);
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipDeviceSynchronize());
    hipFree(device_pointList);   // fix: was leaked
}
| 73dc34713926f2589ac9b00a00675a447a411c70.cu | #include <cstdio>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_math.h>
#include <helper_string.h>
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/replace.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <vector_types.h>
#include "node.h"
#define MAX_DEPTH 16
#define INSERTION_SORT 32
#define gpuErrchk(ans) { \
gpuAssert((ans), __FILE__, __LINE__); \
}
// Print a CUDA error with its source location to stderr and, unless `abort`
// is false, terminate the process with the error code.  Invoked through the
// gpuErrchk(...) macro defined above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// In-place selection sort of data[left..right] (inclusive) using the
// three-way comparator `compar` (-1 / 0 / 1).  Used as the base case of
// the dynamic-parallelism quicksort for small partitions.
__device__ void selection_sort(float3* data, int left, int right, int (*compar)(const float3*, const float3*)) {
    for (int i = left; i <= right; ++i) {
        int best = i;
        float3 best_val = data[i];
        // Find the smallest remaining element in data[i+1..right].
        for (int j = i + 1; j <= right; ++j) {
            float3 candidate = data[j];
            if (compar(&candidate, &best_val) == -1) {
                best = j;
                best_val = candidate;
            }
        }
        // Swap it into position i.
        if (best != i) {
            data[best] = data[i];
            data[i] = best_val;
        }
    }
}
// Recursive quicksort over data[left..right] using CUDA dynamic parallelism;
// falls back to selection_sort for small partitions or when the recursion
// depth limit MAX_DEPTH is reached.  Child partitions are launched in
// non-blocking streams so the two halves may run concurrently.
// Fix: the right-hand partition scan compared `lval` against the pivot --
// `lval` does not change while rptr moves, so the loop could spin forever
// (or mis-partition); it must compare `rval` (cf. NVIDIA's
// cdpSimpleQuicksort sample).
__global__ void cdp_simple_quicksort(float3* data, int left, int right, int (*compar)(const float3*, const float3*), int depth=0) {
    if (depth >= MAX_DEPTH || right-left <= INSERTION_SORT) {
        selection_sort(data, left, right, compar);
        return;
    }
    float3* lptr = data + left;
    float3* rptr = data + right;
    float3 pivot = data[(left + right) / 2];
    // Hoare-style partition around the pivot value.
    while (lptr <= rptr) {
        float3 lval = *lptr;
        float3 rval = *rptr;
        // Advance the left pointer past elements ordered before the pivot.
        while (compar(&lval, &pivot) == -1) {
            lptr++;
            lval = *lptr;
        }
        // Retreat the right pointer past elements ordered after the pivot
        // (bug fix: test rval, not lval).
        while (compar(&rval, &pivot) == 1) {
            rptr--;
            rval = *rptr;
        }
        if (lptr <= rptr) {
            *lptr++ = rval;
            *rptr-- = lval;
        }
    }
    int nright = rptr - data;
    int nleft = lptr - data;
    if (left < (rptr - data)) {
        cudaStream_t s;
        cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
        cdp_simple_quicksort<<<1, 1, 0, s>>>(data, left, nright, compar, depth + 1);
        cudaStreamDestroy(s);
    }
    if ((lptr - data) < right) {
        cudaStream_t s1;
        cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
        cdp_simple_quicksort<<<1, 1, 0, s1>>>(data, nleft, right, compar, depth + 1);
        cudaStreamDestroy(s1);
    }
}
// Placeholder bilateral-filter kernel: currently copies src to dst unchanged.
// Launched with a ceil-divided grid (see bilateralFilter's use of divUp), so
// edge blocks contain threads that fall outside the image.
// Fix: guard against out-of-bounds rows/columns; without it those threads
// read and write past the image.
__global__ void bilateral_kernel(const cv::gpu::PtrStepSz<float3> src,
cv::gpu::PtrStep<float3> dst) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= src.cols || y >= src.rows)
        return;
    //TODO: bilateral filter
    dst.ptr(y)[x] = src.ptr(y)[x];
}
// Ceiling division: smallest number of `grain`-sized chunks needed to cover
// `total` (assumes grain > 0).  Used to round up grid dimensions.
int divUp(int total, int grain) {
    const int rounded = total + grain - 1;
    return rounded / grain;
}
// Upload src_host to the GPU, run bilateral_kernel over it with a 32x8 block
// layout (grid rounded up to cover the whole image), and download the result
// into dst_host.
// NOTE(review): hostKdTree is currently unused -- the device upload for it is
// commented out below; confirm whether the kd-tree is still meant to be
// passed to the kernel.
void bilateralFilter(const cv::Mat& src_host, cv::Mat& dst_host, Node* hostKdTree) {
dim3 block(32, 8);
dim3 grid(divUp(src_host.cols, block.x), divUp(src_host.rows, block.y));
// Prefer L1 cache over shared memory for this kernel.
cudaFuncSetCacheConfig(bilateral_kernel, cudaFuncCachePreferL1);
cv::gpu::GpuMat src_device(src_host), dst_device(src_host.rows, src_host.cols, src_host.type());
//cudaMalloc((void **) deviceKdTree, sizeof(hostKdTree));
bilateral_kernel<<<grid, block>>>(
src_device,
dst_device
);
// Surface launch errors, then wait for completion before downloading.
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
dst_device.download(dst_host);
}
// Three-way comparator on the x coordinate: -1 if a->x < b->x,
// 1 if a->x > b->x, 0 otherwise.
__device__
int compareX(const float3* a, const float3* b) {
    if (a->x < b->x) return -1;
    if (b->x < a->x) return 1;
    return 0;
}
// Three-way comparator on the y coordinate: -1 if a->y < b->y,
// 1 if a->y > b->y, 0 otherwise.
// BUG FIX: this comparator read the x fields, so kd-tree levels that were
// supposed to split on y actually split on x again.
__device__
int compareY(const float3* a, const float3* b) {
    float arg1 = a->y;
    float arg2 = b->y;
    if (arg1 < arg2) return -1;
    if (arg1 > arg2) return 1;
    return 0;
}
// Three-way comparator on the z coordinate: -1 if a->z < b->z,
// 1 if a->z > b->z, 0 otherwise.
// BUG FIX: this comparator read the x fields, so kd-tree levels that were
// supposed to split on z actually split on x again.
__device__
int compareZ(const float3* a, const float3* b) {
    float arg1 = a->z;
    float arg2 = b->z;
    if (arg1 < arg2) return -1;
    if (arg1 > arg2) return 1;
    return 0;
}
// Builds a kd-tree over data[0..numPoints-1] by sorting on x/y/z cyclically
// and recursing on the halves around the median (one child kernel per side,
// CUDA dynamic parallelism).
// NOTE(review): `parent` is received BY VALUE, so the `parent = new Node` /
// `parent = NULL` assignments below are lost when the kernel returns — the
// launcher never observes the allocated node, and the recursive launches on
// parent->left / parent->right pass uninitialized pointers that are likewise
// never linked back. The parameter would need to be Node** (or the node
// allocated by the caller) for the tree to actually be built.
__global__
void createKdTree(Node* parent, float3* data, int numPoints, int depth=0) {
// NOTE(review): no trailing '\n' — device printf output runs together.
printf("Depth: %i, numPoints: %i", depth, numPoints);
if (numPoints == 0) {
parent = NULL;
return;
} else {
parent = new Node;
}
// Cycle the split axis with depth: 0 -> x, 1 -> y, 2 -> z.
int axis = depth % 3;
int (*compar)(const float3*, const float3*);
switch (axis) {
case 0:
compar = &compareX;
break;
case 1:
compar = &compareY;
break;
case 2:
compar = &compareZ;
break;
}
// Sort the slice so the median element splits it on the chosen axis.
cdp_simple_quicksort<<<1, 1>>>(data, 0, numPoints - 1, compar);
int median = numPoints / 2;
parent->location = &data[median];
// Recurse on each half in its own non-blocking stream.
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
printf("Launching left createKdTree kernel from createKdTreeKernel");
createKdTree<<<1, 1, 0, s>>>(parent->left, data, median, depth + 1);
cudaStreamDestroy(s);
cudaStream_t s1;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
printf("Launching right createKdTree kernel from createKdTreeKernel");
createKdTree<<<1, 1, 0, s1>>>(parent->right, data + (median + 1), numPoints - median - 1, depth + 1);
cudaStreamDestroy(s1);
}
// Entry kernel that kicks off device-side kd-tree construction over
// pointList[0..n-1].
// NOTE(review): `tree` is a local pointer passed by value, so whatever
// createKdTree builds is unreachable once this kernel finishes — nothing
// reads the result (see the matching note on createKdTree).
__global__
void bilateral_kernel2(float3* pointList, int n) {
Node* tree = 0;
printf("Launching createKdTree kernel from bilateral_kernel2");
createKdTree<<<1, 1>>>(tree, pointList, n);
}
// Host wrapper: copies pointList (n points) to the device and launches
// bilateral_kernel2, which builds the kd-tree via dynamic parallelism.
void bilateralFilter2(const float3* pointList, int n) {
    // Allow the CDP recursion in createKdTree to nest up to MAX_DEPTH levels.
    checkCudaErrors(cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, MAX_DEPTH));
    float3* device_pointList;
    // Allocation and copy were previously unchecked; wrap them like the calls
    // below.
    gpuErrchk(cudaMalloc(&device_pointList, n * sizeof(float3)));
    gpuErrchk(cudaMemcpy(device_pointList, pointList, n * sizeof(float3), cudaMemcpyHostToDevice));
    bilateral_kernel2<<<1, 1>>>(device_pointList, n);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    // BUG FIX: the device buffer was never released, leaking
    // n * sizeof(float3) bytes per call.
    gpuErrchk(cudaFree(device_pointList));
}
|
2a90d2356c204462c806d266251b533a11272812.hip | // !!! This is a file automatically generated by hipify!!!
#include <device/gen_random.h>
#include <device/cuda_utils.h>
#include <device/handles.h>
#include <core/errors.h>
#include <core/datadefs.h>
#include <time.h>
// Fills a rows x cols matrix with a sparse random pattern: roughly
// rows*cols*density uniform random entries scattered at random positions,
// zeros elsewhere. The result is written to both hostPtr and devPtr.
void sparseRandomMatrix( int rows, int cols, real density,
real *hostPtr, real *devPtr)
{
// Number of non-zero entries to generate (fraction truncated, not rounded).
int numElements = int (rows * cols * density );
memset( hostPtr, 0, rows * cols * sizeof(real) );
// Generate the non-zeros into the leading slots of hostPtr/devPtr ...
getRandomVector( numElements, hostPtr, devPtr, RAND_UNIFORM );
// ... then shuffle the whole host buffer so they land at random positions.
randomShuffle<real>( hostPtr, rows * cols );
// NOTE(review): the direction is host->device but the error tag says
// DEVICE_HOST — confirm ERROR_MEMCPY_DEVICE_HOST is the intended code.
copy_host_device( hostPtr, devPtr, sizeof(real) * rows * cols,
hipMemcpyHostToDevice, ERROR_MEMCPY_DEVICE_HOST );
}
// Generates n random doubles on the device with the library-wide generator
// handle (curandGeneratorHandle) and optionally mirrors them back to hostPtr.
// r selects the distribution: RAND_NORMAL -> standard normal, else uniform.
void getRandomVector (int n, real *hostPtr, real *devPtr, RAND_GENERATOR r) {
//hiprandGenerator_t gen ;
// Round the count up to even: the normal generator requires an even count.
// NOTE(review): when n is odd this writes m = n + 1 values into devPtr —
// the buffer must be allocated with room for the extra element; confirm
// callers pad their allocations.
int m = n + n % 2;
/* Create pseudo - random number generator */
//curandCheckError ( hiprandCreateGenerator (&gen , HIPRAND_RNG_PSEUDO_DEFAULT ) );
/* Set seed */
//curandCheckError ( hiprandSetPseudoRandomGeneratorSeed ( gen , 1234ULL )) ;
//curandCheckError ( hiprandSetPseudoRandomGeneratorSeed ( curandGeneratorHandle , time(NULL) )) ;
/* Generate n floats on device */
//standard normal distribution.
if (r == RAND_NORMAL)
curandCheckError ( hiprandGenerateNormalDouble ( curandGeneratorHandle, devPtr , m, 0, 1.)) ;
else
curandCheckError ( hiprandGenerateUniformDouble ( curandGeneratorHandle, devPtr , m)) ;
/* Copy device memory to host; only the n requested values are mirrored. */
if (hostPtr != NULL) {
copy_host_device( hostPtr, devPtr, sizeof(real) * n, hipMemcpyDeviceToHost,
ERROR_MEMCPY_DEVICE_HOST );
}
/* Cleanup */
//curandCheckError ( hiprandDestroyGenerator ( gen ) );
}
/*
Random Shuffle Here.
https://stackoverflow.com/questions/15961119/how-to-create-a-random-permutation-of-an-array
*/
// In-place Fisher-Yates shuffle of idx[0..n-1], driven by rand().
// Walks from the back; each element trades places with a uniformly chosen
// element at or before it. Exactly one rand() call per element, in the same
// order as the textbook back-to-front formulation.
template <class T>
void randomShuffle( T *idx, int n)
{
    for (int i = n - 1; i >= 0; --i) {
        const int pick = rand () % (i + 1);
        const T held = idx[i];
        idx[i] = idx[pick];
        idx[pick] = held;
    }
}
/*
Floyd's algorithm Here.
https://stackoverflow.com/questions/1608181/unique-random-numbers-in-an-integer-array-in-the-c-programming-language
*/
// Draws m distinct integers from [0, n) into idx, in random order:
// a selection-sampling pass picks a sorted m-subset (one rand() call per
// candidate), then randomShuffle permutes it. Aborts the process if the
// subset cannot be completed.
void genRandomVector( int *idx, int m, int n ) {
    int taken = 0;
    for (int pos = 0; pos < n && taken < m; ++pos) {
        const int poolLeft = n - pos;
        const int needLeft = m - taken;
        // Accept pos with probability needLeft / poolLeft.
        if (rand () % poolLeft < needLeft) {
            idx[taken++] = pos;
        }
    }
    if (taken != m) {
        fprintf( stderr, "Failed to generate required number of random numbers ... (%d, %d) ", taken, m);
        exit (-1);
    }
    randomShuffle<int>( idx, m );
}
| 2a90d2356c204462c806d266251b533a11272812.cu | #include <device/gen_random.h>
#include <device/cuda_utils.h>
#include <device/handles.h>
#include <core/errors.h>
#include <core/datadefs.h>
#include <time.h>
// Fills a rows x cols matrix with a sparse random pattern: roughly
// rows*cols*density uniform random entries scattered at random positions,
// zeros elsewhere. The result is written to both hostPtr and devPtr.
void sparseRandomMatrix( int rows, int cols, real density,
real *hostPtr, real *devPtr)
{
// Number of non-zero entries to generate (fraction truncated, not rounded).
int numElements = int (rows * cols * density );
memset( hostPtr, 0, rows * cols * sizeof(real) );
getRandomVector( numElements, hostPtr, devPtr, RAND_UNIFORM );
// Shuffle the whole host buffer so the non-zeros land at random positions.
randomShuffle<real>( hostPtr, rows * cols );
// NOTE(review): direction is host->device but the error tag says
// DEVICE_HOST — confirm ERROR_MEMCPY_DEVICE_HOST is the intended code.
copy_host_device( hostPtr, devPtr, sizeof(real) * rows * cols,
cudaMemcpyHostToDevice, ERROR_MEMCPY_DEVICE_HOST );
}
// Generates n random doubles on the device with the library-wide cuRAND
// handle (curandGeneratorHandle) and optionally mirrors them back to hostPtr.
// r selects the distribution: RAND_NORMAL -> standard normal, else uniform.
void getRandomVector (int n, real *hostPtr, real *devPtr, RAND_GENERATOR r) {
//curandGenerator_t gen ;
// Round the count up to even: curandGenerateNormalDouble requires an even
// count. NOTE(review): when n is odd this writes m = n + 1 values into
// devPtr — callers must allocate with room for the extra element; verify.
int m = n + n % 2;
/* Create pseudo - random number generator */
//curandCheckError ( curandCreateGenerator (&gen , CURAND_RNG_PSEUDO_DEFAULT ) );
/* Set seed */
//curandCheckError ( curandSetPseudoRandomGeneratorSeed ( gen , 1234ULL )) ;
//curandCheckError ( curandSetPseudoRandomGeneratorSeed ( curandGeneratorHandle , time(NULL) )) ;
/* Generate n floats on device */
//standard normal distribution.
if (r == RAND_NORMAL)
curandCheckError ( curandGenerateNormalDouble ( curandGeneratorHandle, devPtr , m, 0, 1.)) ;
else
curandCheckError ( curandGenerateUniformDouble ( curandGeneratorHandle, devPtr , m)) ;
/* Copy device memory to host; only the n requested values are mirrored. */
if (hostPtr != NULL) {
copy_host_device( hostPtr, devPtr, sizeof(real) * n, cudaMemcpyDeviceToHost,
ERROR_MEMCPY_DEVICE_HOST );
}
/* Cleanup */
//curandCheckError ( curandDestroyGenerator ( gen ) );
}
/*
Random Shuffle Here.
https://stackoverflow.com/questions/15961119/how-to-create-a-random-permutation-of-an-array
*/
// In-place Fisher-Yates shuffle of idx[0..n-1], driven by rand().
template <class T>
void randomShuffle( T *idx, int n)
{
// Swap index and temporary slot.
int j;
T temp;
// Back-to-front: swap element i with a uniformly chosen element in [0, i];
// exactly one rand() call per element.
for (int i = n - 1; i >= 0; i --){
j = rand () % (i+1);
temp = idx[i];
idx[i] = idx[j];
idx[j] = temp;
}
}
/*
Floyd's algorithm Here.
https://stackoverflow.com/questions/1608181/unique-random-numbers-in-an-integer-array-in-the-c-programming-language
*/
// Draws m distinct integers from [0, n) into idx, in random order:
// a selection-sampling pass picks a sorted m-subset, then randomShuffle
// permutes it. Aborts the process if the subset cannot be completed.
void genRandomVector( int *idx, int m, int n ) {
// in: candidate index; im: number of values accepted so far.
int in, im;
// rn: candidates remaining; rm: values still needed.
int rn, rm;
im = 0;
for (in = 0; in < n && im < m; ++in ){
rn = n - in;
rm = m - im;
// Accept `in` with probability rm / rn (keeps the sample uniform).
if (rand () % rn < rm ){
//idx[ im ++] = in + 1;
idx[ im ++] = in ;
}
}
if ( im != m ){
fprintf( stderr, "Failed to generate required number of random numbers ... (%d, %d) ", im, m);
exit (-1);
}
randomShuffle<int>( idx, m );
}
|
8c1a42bd6694af50ac143fb5b2a5dd835883bec4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2021 Innovation Academy for Microsatellites of CAS
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Wang Junyong (wangjunyong@microsate.com)
*/
#include "adi-util.h"
#include "adi-satellite.h"
#include "adi-satellite-kernel.h"
#include "adi-satellite-list.h"
extern const double K_RE;
namespace adi {
// Human-readable dump of the six classical orbital elements; angular
// quantities are converted from radians to degrees, fixed-point, 4 decimals.
std::string
Satellite::Element::ToString () const
{
std::stringstream ss;
ss.setf (std::ios::fixed);
ss.precision (4);
ss.width (10);
ss << " semi-major axis: " << std::setw (10) << sma << std::endl
<< " eccentricity: " << std::setw (10) << ecc << std::endl
<< " inclination: " << std::setw (10) << RadiansToDegrees (inc) << std::endl
<< "right ascension of ascending node: " << std::setw (10) << RadiansToDegrees (raan)<< std::endl
<< " argument of perigee: " << std::setw (10) << RadiansToDegrees (aop) << std::endl
<< " mean anomaly: " << std::setw (10) << RadiansToDegrees (ma) << std::endl;
return ss.str ();
}
// Field-wise equality of two element sets (exact floating-point compare —
// only meaningful for elements copied from the same source).
bool
Satellite::Ele::operator== (Satellite::Ele& ele) const
{
return sma == ele.sma && ecc == ele.ecc && inc == ele.inc
&& raan == ele.raan && aop == ele.aop && ma == ele.ma;
}
// Dump of the derived parameters: the elements plus angular rate,
// semi-latus rectum, and secular rates of RAAN/AoP/mean anomaly.
std::string
Satellite::Parameter::ToString () const
{
std::stringstream ss;
ss.setf (std::ios::fixed);
ss.precision (4);
ss.width (10);
ss << elem.ToString ();
ss << " angular rate: " << std::setw (10) << ar << std::endl
<< " semi-latus rectum: " << std::setw (10) << slr << std::endl
<< "rate of " << std::endl
<< "right ascension of ascending node: " << std::setw (10) << RadiansToDegrees (draan) << std::endl
<< " argument of perigee: " << std::setw (10) << RadiansToDegrees (daop) << std::endl
<< " mean anomaly: " << std::setw (10) << RadiansToDegrees (dma) << std::endl;
return ss.str ();
}
// Dump of the instantaneous state.
// NOTE(review): `radius` is passed through RadiansToDegrees like the angles;
// if it is a length this scales it by 180/pi — confirm intended units.
std::string
Satellite::State::ToString () const
{
std::stringstream ss;
ss.setf (std::ios::fixed);
ss.precision (4);
ss.width (10);
ss << " current radius: " << std::setw (10) << RadiansToDegrees (radius)<< std::endl
<< "current right ascension of ascending node: " << std::setw (10) << RadiansToDegrees (raan) << std::endl
<< " current argument of perigee: " << std::setw (10) << RadiansToDegrees (aop) << std::endl
<< " current mean anomaly: " << std::setw (10) << RadiansToDegrees (ma) << std::endl
<< " current true anomaly: " << std::setw (10) << RadiansToDegrees (ta) << std::endl;
return ss.str ();
}
// Default construction: no elements set yet, all device buffers empty.
Satellite::Satellite ()
: Object ()
, m_par (Par ())
, d_par (NULL)
, d_sta (NULL)
{
// BUG FIX: d_pos/d_vel/d_mat were left uninitialized, yet Initialzie()
// begins by Release()-ing them — handing garbage pointers to hipFree on
// the first trajectory computation. hipFree(NULL) is a harmless no-op.
d_pos = NULL;
d_vel = NULL;
d_mat = NULL;
}
// Copy construction shares the device buffers with the source (shallow copy
// of d_par/d_sta, as before).
// NOTE(review): d_pos/d_vel/d_mat are NOT copied here and the two objects
// alias d_par/d_sta — Release() on either invalidates the other; confirm
// copies are never used after the source is released.
Satellite::Satellite (const Satellite& sat)
: Object (sat)
, m_par (sat.m_par)
, d_par (sat.d_par)
, d_sta (sat.d_sta)
{
}
// Device memory is deliberately not freed here (buffers may be shared by
// copies and by the Trajectory returned from CalcTrajectory).
Satellite::~Satellite ()
{
}
// Replaces the orbital elements wholesale and rebuilds the device-side
// parameter block.
// NOTE(review): unlike the scalar overload below, this variant does not wrap
// the angles into [0, 2*pi) — confirm callers pass normalized values.
void
Satellite::SetElement (Ele ele)
{
m_par.elem = ele;
Construct ();
}
// Sets the six classical elements individually. Angles may be given in
// degrees (isDegree = true); they are converted and wrapped into [0, 2*pi)
// before the device parameter block is rebuilt via Construct().
void
Satellite::SetElement (
double sma,
double ecc,
double inc,
double raan,
double aop,
double ma,
bool isDegree)
{
if (isDegree)
{
inc = DegreesToRadians (inc);
raan = DegreesToRadians (raan);
aop = DegreesToRadians (aop);
ma = DegreesToRadians (ma);
}
inc = WrapTwoPI (inc);
raan = WrapTwoPI (raan);
aop = WrapTwoPI (aop);
ma = WrapTwoPI (ma);
m_par.elem = Ele {sma, ecc, inc, raan, aop, ma};
Construct ();
}
// Display name ("Sat-<uid>", assigned by Construct()).
std::string
Satellite::GetName () const
{
return m_name;
}
// Unique id from the global SatelliteList (0 until first Construct()).
size_t
Satellite::GetId () const
{
return m_uid;
}
// Object-kind discriminator.
Object::Type
Satellite::GetType () const
{
return SATELLITE;
}
// Computes the satellite trajectory over the configured time interval
// entirely on the GPU: per-tick state, true anomaly, orbital-frame
// position/velocity, and the orbital->ECI and ECI->body transforms.
// If dmat != NULL, also computes the derivative of the ECI->body matrix and
// hands the freshly allocated device buffer to the caller via *dmat
// (caller owns/frees it).
// The returned Trajectory aliases this object's d_pos/d_vel/d_mat device
// buffers — it is invalidated by the next CalcTrajectory()/Release() call.
// NOTE(review): kernel launches are checked with cudaCheck but never
// explicitly synchronized here; presumably a later sync/copy surfaces
// asynchronous errors — confirm.
Trajectory
Satellite::CalcTrajectory (Matrix** dmat)
{
Initialzie ();
size_t N_TIME = GetIntervalTicks ();
std::vector<double> h_times = Interval::CreateSeconds (m_intervals, m_epoch, m_step);
double* d_times = NULL;
hipMalloc ((void**)&d_times, sizeof (double) * N_TIME);
hipMemcpy (d_times, &h_times[0], sizeof (double) * N_TIME, hipMemcpyHostToDevice);
int minGridSize, BlockSize;
dim3 gridSize, blockSize;
// calculate states of satellite
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcState, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
// std::cout << blockSize << std::endl;
// std::cout << gridSize << std::endl;
hipLaunchKernelGGL(( cuda::satellite::CalcState) , dim3(gridSize), dim3(blockSize) , 0, 0, d_sta, d_par, d_times, 1, N_TIME);
cudaCheck;
hipFree (d_times);
cudaCheck;
// calculate true anomaly
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcTureAnomaly, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::satellite::CalcTureAnomaly) , dim3(gridSize), dim3(blockSize) , 0, 0, d_sta, d_par, 1, N_TIME);
cudaCheck;
// {
// size_t bytes = sizeof (Satellite::Sta) * N_TIME;
// Satellite::Sta* h_sta = new Satellite::Sta[N_TIME];
// hipMemcpy (h_sta, d_sta, bytes, hipMemcpyDeviceToHost);
// for (size_t i = 0;i < N_TIME;i++)
// std::cout << h_sta[i] << std::endl;
// }
// calculate position in orbital coordinate system
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcOrbitPosition, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::satellite::CalcOrbitPosition) , dim3(gridSize), dim3(blockSize) , 0, 0, d_pos, d_par, d_sta, 1, N_TIME);
cudaCheck;
// calculate velocity in orbital coordinate system
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcOrbitVelocity, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::satellite::CalcOrbitVelocity) , dim3(gridSize), dim3(blockSize) , 0, 0, d_vel, d_par, d_sta, 1, N_TIME);
cudaCheck;
// calculate transform matrix from orbital to Earth-centered-inertial coordinate system
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcMatrixFromOrbToEci, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::satellite::CalcMatrixFromOrbToEci) , dim3(gridSize), dim3(blockSize) , 0, 0, d_mat, d_par, d_sta, 1, N_TIME);
cudaCheck;
// calculate position in Earth-centered-inertial coordinate system
// (in-place: d_pos is rotated by the per-tick matrices)
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::CalcMatsMulVecs, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::CalcMatsMulVecs) , dim3(gridSize), dim3(blockSize) , 0, 0, d_mat, d_pos, 1, N_TIME);
cudaCheck;
// {
// size_t bytes = sizeof (Vector) * N_TIME;
// Vector* h_pos = new Vector[N_TIME];
// hipMemcpy (h_pos, d_pos, bytes, hipMemcpyDeviceToHost);
// for (size_t i = 0;i < N_TIME;i++)
// std::cout << h_pos[i] << std::endl;
// }
// calculate velocity in Earth-centered-inertial coordinate system
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::CalcMatsMulVecs, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::CalcMatsMulVecs) , dim3(gridSize), dim3(blockSize) , 0, 0, d_mat, d_vel, 1, N_TIME);
cudaCheck;
// {
// size_t bytes = sizeof (Vector) * N_TIME;
// Vector* h_vel = new Vector[N_TIME];
// hipMemcpy (h_vel, d_vel, bytes, hipMemcpyDeviceToHost);
// for (size_t i = 0;i < N_TIME;i++)
// std::cout << h_vel[i] << std::endl;
// }
// calculate the transform matrix from Earth-centered-inertial to body coordinate system
// (overwrites d_mat)
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcMatrixFromEciToBody, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::satellite::CalcMatrixFromEciToBody) , dim3(gridSize), dim3(blockSize) , 0, 0, d_mat, d_par, d_sta, 1, N_TIME);
cudaCheck;
if (dmat != NULL)
{
Matrix* d_dmat;
hipMalloc ((void**)&d_dmat, sizeof (Matrix) * N_TIME);
cudaCheck;
// calculate the derivative transform matrix from Earth-centered-inertial to body coordinate system if need
hipOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcDerivMatrixFromEciToBody, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
hipLaunchKernelGGL(( cuda::satellite::CalcDerivMatrixFromEciToBody) , dim3(gridSize), dim3(blockSize) , 0, 0, d_dmat, d_mat, d_par, d_sta, 1, N_TIME);
cudaCheck;
*dmat = d_dmat;
}
Trajectory tra;
tra.pos = d_pos;
tra.vel = d_vel;
tra.mat = d_mat;
tra.num = N_TIME;
return tra;
}
// Returns the turntable mounted on `face`, or NULL if none is attached.
Turntable*
Satellite::GetTurntable (Face face)
{
// Single lookup instead of find() followed by operator[] — same result,
// one traversal, and no accidental default-insertion path.
auto it = m_turntables.find (face);
if (it == m_turntables.end ())
{
return NULL;
}
return it->second;
}
// (Re)builds the device-side parameter block from m_par: registers the
// satellite on first use, uploads the elements, and runs the one-thread
// CalcParam kernel to fill in the derived parameters in place.
void
Satellite::Construct ()
{
// First call: obtain a unique id from the global list.
if (m_uid == 0)
m_uid = SatelliteList::Add (this);
// Drop any previous parameter block before reallocating
// (hipFree(NULL) is a no-op on the very first call).
hipFree (d_par);
cudaCheck;
hipMalloc ((void**)&d_par, sizeof (Satellite::Par));
cudaCheck;
// memory copy
hipMemcpy ((void*)d_par, (void*)&m_par, sizeof (Satellite::Par), hipMemcpyHostToDevice);
cudaCheck;
// calculate the parameters of satellite (single-thread kernel)
dim3 blockSize = dim3 (1);
dim3 gridSize = dim3 (1);
hipLaunchKernelGGL(( cuda::satellite::CalcParam) , dim3(gridSize), dim3(blockSize) , 0, 0, d_par, 1);
cudaCheck;
m_name = "Sat-" + std::to_string (m_uid);
}
// (Re)allocates the per-tick device buffers for one trajectory computation.
// (The name's spelling is kept as-is — callers depend on "Initialzie".)
void
Satellite::Initialzie ()
{
// Free any buffers left over from a previous run.
Release ();
size_t N_TIME = GetIntervalTicks ();
// allocating memory — check each allocation, matching Release()'s
// per-call cudaCheck convention (previously these were unchecked).
hipMalloc ((void**)&d_sta, sizeof (Sta) * N_TIME);
cudaCheck;
hipMalloc ((void**)&d_pos, sizeof (Vector) * N_TIME);
cudaCheck;
hipMalloc ((void**)&d_vel, sizeof (Vector) * N_TIME);
cudaCheck;
hipMalloc ((void**)&d_mat, sizeof (Matrix) * N_TIME);
cudaCheck;
}
// Frees the per-tick device buffers. BUG FIX: pointers are now reset to NULL
// after freeing so a repeated Release()/Initialzie() cycle cannot hand a
// stale address to hipFree again (hipFree(NULL) is a harmless no-op).
void
Satellite::Release ()
{
hipFree (d_sta);
cudaCheck;
d_sta = NULL;
hipFree (d_pos);
cudaCheck;
d_pos = NULL;
hipFree (d_vel);
cudaCheck;
d_vel = NULL;
hipFree (d_mat);
cudaCheck;
d_mat = NULL;
}
// Stream insertion for the element / parameter / state aggregates:
// each simply delegates to the corresponding ToString().
std::ostream& operator<< (std::ostream& os, const Satellite::Ele& ele)
{
os << ele.ToString ();
return os;
}
std::ostream& operator<< (std::ostream& os, const Satellite::Par& par)
{
os << par.ToString ();
return os;
}
std::ostream& operator<< (std::ostream& os, const Satellite::Sta& sta)
{
os << sta.ToString ();
return os;
}
} | 8c1a42bd6694af50ac143fb5b2a5dd835883bec4.cu | /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2021 Innovation Academy for Microsatellites of CAS
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Wang Junyong (wangjunyong@microsate.com)
*/
#include "adi-util.h"
#include "adi-satellite.h"
#include "adi-satellite-kernel.h"
#include "adi-satellite-list.h"
extern const double K_RE;
namespace adi {
std::string
Satellite::Element::ToString () const
{
std::stringstream ss;
ss.setf (std::ios::fixed);
ss.precision (4);
ss.width (10);
ss << " semi-major axis: " << std::setw (10) << sma << std::endl
<< " eccentricity: " << std::setw (10) << ecc << std::endl
<< " inclination: " << std::setw (10) << RadiansToDegrees (inc) << std::endl
<< "right ascension of ascending node: " << std::setw (10) << RadiansToDegrees (raan)<< std::endl
<< " argument of perigee: " << std::setw (10) << RadiansToDegrees (aop) << std::endl
<< " mean anomaly: " << std::setw (10) << RadiansToDegrees (ma) << std::endl;
return ss.str ();
}
bool
Satellite::Ele::operator== (Satellite::Ele& ele) const
{
return sma == ele.sma && ecc == ele.ecc && inc == ele.inc
&& raan == ele.raan && aop == ele.aop && ma == ele.ma;
}
std::string
Satellite::Parameter::ToString () const
{
std::stringstream ss;
ss.setf (std::ios::fixed);
ss.precision (4);
ss.width (10);
ss << elem.ToString ();
ss << " angular rate: " << std::setw (10) << ar << std::endl
<< " semi-latus rectum: " << std::setw (10) << slr << std::endl
<< "rate of " << std::endl
<< "right ascension of ascending node: " << std::setw (10) << RadiansToDegrees (draan) << std::endl
<< " argument of perigee: " << std::setw (10) << RadiansToDegrees (daop) << std::endl
<< " mean anomaly: " << std::setw (10) << RadiansToDegrees (dma) << std::endl;
return ss.str ();
}
std::string
Satellite::State::ToString () const
{
std::stringstream ss;
ss.setf (std::ios::fixed);
ss.precision (4);
ss.width (10);
ss << " current radius: " << std::setw (10) << RadiansToDegrees (radius)<< std::endl
<< "current right ascension of ascending node: " << std::setw (10) << RadiansToDegrees (raan) << std::endl
<< " current argument of perigee: " << std::setw (10) << RadiansToDegrees (aop) << std::endl
<< " current mean anomaly: " << std::setw (10) << RadiansToDegrees (ma) << std::endl
<< " current true anomaly: " << std::setw (10) << RadiansToDegrees (ta) << std::endl;
return ss.str ();
}
// Default construction: no elements set yet, all device buffers empty.
Satellite::Satellite ()
: Object ()
, m_par (Par ())
, d_par (NULL)
, d_sta (NULL)
{
// BUG FIX: d_pos/d_vel/d_mat were left uninitialized, yet Initialzie()
// begins by Release()-ing them — handing garbage pointers to cudaFree on
// the first trajectory computation. cudaFree(NULL) is a harmless no-op.
d_pos = NULL;
d_vel = NULL;
d_mat = NULL;
}
// Copy construction shares the device buffers with the source (shallow copy
// of d_par/d_sta, as before).
// NOTE(review): d_pos/d_vel/d_mat are NOT copied here and the two objects
// alias d_par/d_sta — Release() on either invalidates the other; confirm
// copies are never used after the source is released.
Satellite::Satellite (const Satellite& sat)
: Object (sat)
, m_par (sat.m_par)
, d_par (sat.d_par)
, d_sta (sat.d_sta)
{
}
// Device memory is deliberately not freed here (buffers may be shared by
// copies and by the Trajectory returned from CalcTrajectory).
Satellite::~Satellite ()
{
}
void
Satellite::SetElement (Ele ele)
{
m_par.elem = ele;
Construct ();
}
void
Satellite::SetElement (
double sma,
double ecc,
double inc,
double raan,
double aop,
double ma,
bool isDegree)
{
if (isDegree)
{
inc = DegreesToRadians (inc);
raan = DegreesToRadians (raan);
aop = DegreesToRadians (aop);
ma = DegreesToRadians (ma);
}
inc = WrapTwoPI (inc);
raan = WrapTwoPI (raan);
aop = WrapTwoPI (aop);
ma = WrapTwoPI (ma);
m_par.elem = Ele {sma, ecc, inc, raan, aop, ma};
Construct ();
}
std::string
Satellite::GetName () const
{
return m_name;
}
size_t
Satellite::GetId () const
{
return m_uid;
}
Object::Type
Satellite::GetType () const
{
return SATELLITE;
}
Trajectory
Satellite::CalcTrajectory (Matrix** dmat)
{
Initialzie ();
size_t N_TIME = GetIntervalTicks ();
std::vector<double> h_times = Interval::CreateSeconds (m_intervals, m_epoch, m_step);
double* d_times = NULL;
cudaMalloc ((void**)&d_times, sizeof (double) * N_TIME);
cudaMemcpy (d_times, &h_times[0], sizeof (double) * N_TIME, cudaMemcpyHostToDevice);
int minGridSize, BlockSize;
dim3 gridSize, blockSize;
// calculate states of satellite
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcState, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
// std::cout << blockSize << std::endl;
// std::cout << gridSize << std::endl;
cuda::satellite::CalcState <<< gridSize, blockSize >>> (d_sta, d_par, d_times, 1, N_TIME);
cudaCheck;
cudaFree (d_times);
cudaCheck;
// calculate true anomaly
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcTureAnomaly, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::satellite::CalcTureAnomaly <<< gridSize, blockSize >>> (d_sta, d_par, 1, N_TIME);
cudaCheck;
// {
// size_t bytes = sizeof (Satellite::Sta) * N_TIME;
// Satellite::Sta* h_sta = new Satellite::Sta[N_TIME];
// cudaMemcpy (h_sta, d_sta, bytes, cudaMemcpyDeviceToHost);
// for (size_t i = 0;i < N_TIME;i++)
// std::cout << h_sta[i] << std::endl;
// }
// calculate position in orbital coordinate system
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcOrbitPosition, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::satellite::CalcOrbitPosition <<< gridSize, blockSize >>> (d_pos, d_par, d_sta, 1, N_TIME);
cudaCheck;
// calculate velocity in orbital coordinate system
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcOrbitVelocity, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::satellite::CalcOrbitVelocity <<< gridSize, blockSize >>> (d_vel, d_par, d_sta, 1, N_TIME);
cudaCheck;
// calculate transform matrix from orbital to Earth-centered-inertial coordinate system
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcMatrixFromOrbToEci, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::satellite::CalcMatrixFromOrbToEci <<< gridSize, blockSize >>> (d_mat, d_par, d_sta, 1, N_TIME);
cudaCheck;
// calculate position in Earth-centered-inertial coordinate system
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::CalcMatsMulVecs, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::CalcMatsMulVecs <<< gridSize, blockSize >>> (d_mat, d_pos, 1, N_TIME);
cudaCheck;
// {
// size_t bytes = sizeof (Vector) * N_TIME;
// Vector* h_pos = new Vector[N_TIME];
// cudaMemcpy (h_pos, d_pos, bytes, cudaMemcpyDeviceToHost);
// for (size_t i = 0;i < N_TIME;i++)
// std::cout << h_pos[i] << std::endl;
// }
// calculate velocity in Earth-centered-inertial coordinate system
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::CalcMatsMulVecs, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::CalcMatsMulVecs <<< gridSize, blockSize >>> (d_mat, d_vel, 1, N_TIME);
cudaCheck;
// {
// size_t bytes = sizeof (Vector) * N_TIME;
// Vector* h_vel = new Vector[N_TIME];
// cudaMemcpy (h_vel, d_vel, bytes, cudaMemcpyDeviceToHost);
// for (size_t i = 0;i < N_TIME;i++)
// std::cout << h_vel[i] << std::endl;
// }
// calculate the transform matrix from Earth-centered-inertial to body coordinate system
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcMatrixFromEciToBody, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::satellite::CalcMatrixFromEciToBody <<< gridSize, blockSize >>> (d_mat, d_par, d_sta, 1, N_TIME);
cudaCheck;
if (dmat != NULL)
{
Matrix* d_dmat;
cudaMalloc ((void**)&d_dmat, sizeof (Matrix) * N_TIME);
cudaCheck;
// calculate the derivative transform matrix from Earth-centered-inertial to body coordinate system if need
cudaOccupancyMaxPotentialBlockSize (&minGridSize, &BlockSize, cuda::satellite::CalcDerivMatrixFromEciToBody, 0, N_TIME);
blockSize = dim3 (std::min (N_TIME, (size_t)BlockSize));
gridSize = dim3 ((N_TIME + blockSize.x - 1) / blockSize.x);
cuda::satellite::CalcDerivMatrixFromEciToBody <<< gridSize, blockSize >>> (d_dmat, d_mat, d_par, d_sta, 1, N_TIME);
cudaCheck;
*dmat = d_dmat;
}
Trajectory tra;
tra.pos = d_pos;
tra.vel = d_vel;
tra.mat = d_mat;
tra.num = N_TIME;
return tra;
}
// Returns the turntable mounted on `face`, or NULL if none is attached.
Turntable*
Satellite::GetTurntable (Face face)
{
// Single lookup instead of find() followed by operator[] — same result,
// one traversal, and no accidental default-insertion path.
auto it = m_turntables.find (face);
if (it == m_turntables.end ())
{
return NULL;
}
return it->second;
}
void
Satellite::Construct ()
{
if (m_uid == 0)
m_uid = SatelliteList::Add (this);
cudaFree (d_par);
cudaCheck;
cudaMalloc ((void**)&d_par, sizeof (Satellite::Par));
cudaCheck;
// memory copy
cudaMemcpy ((void*)d_par, (void*)&m_par, sizeof (Satellite::Par), cudaMemcpyHostToDevice);
cudaCheck;
// calculate the parameters of satellite
dim3 blockSize = dim3 (1);
dim3 gridSize = dim3 (1);
cuda::satellite::CalcParam <<< gridSize, blockSize >>> (d_par, 1);
cudaCheck;
m_name = "Sat-" + std::to_string (m_uid);
}
// (Re)allocates the per-tick device buffers for one trajectory computation.
// (The name's spelling is kept as-is — callers depend on "Initialzie".)
void
Satellite::Initialzie ()
{
// Free any buffers left over from a previous run.
Release ();
size_t N_TIME = GetIntervalTicks ();
// allocating memory — check each allocation, matching Release()'s
// per-call cudaCheck convention (previously these were unchecked).
cudaMalloc ((void**)&d_sta, sizeof (Sta) * N_TIME);
cudaCheck;
cudaMalloc ((void**)&d_pos, sizeof (Vector) * N_TIME);
cudaCheck;
cudaMalloc ((void**)&d_vel, sizeof (Vector) * N_TIME);
cudaCheck;
cudaMalloc ((void**)&d_mat, sizeof (Matrix) * N_TIME);
cudaCheck;
}
// Frees the per-tick device buffers. BUG FIX: pointers are now reset to NULL
// after freeing so a repeated Release()/Initialzie() cycle cannot hand a
// stale address to cudaFree again (cudaFree(NULL) is a harmless no-op).
void
Satellite::Release ()
{
cudaFree (d_sta);
cudaCheck;
d_sta = NULL;
cudaFree (d_pos);
cudaCheck;
d_pos = NULL;
cudaFree (d_vel);
cudaCheck;
d_vel = NULL;
cudaFree (d_mat);
cudaCheck;
d_mat = NULL;
}
std::ostream& operator<< (std::ostream& os, const Satellite::Ele& ele)
{
os << ele.ToString ();
return os;
}
std::ostream& operator<< (std::ostream& os, const Satellite::Par& par)
{
os << par.ToString ();
return os;
}
std::ostream& operator<< (std::ostream& os, const Satellite::Sta& sta)
{
os << sta.ToString ();
return os;
}
} |
86f1c0cfb02b256fa93c75f11e3ec93d463832a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
module load cudatoolkit
qsub -q gpu -l nodes=1:ppn=1,walltime=00:20:00 -I
nvcc matrixTranspose.cu
*/
#include <stdio.h>
#define DIM 32
// Naive matrix transpose: odata[x][y] = idata[y][x] with no staging.
// Each DIM x BLOCK_ROWS thread block covers a DIM x DIM tile (BLOCK_ROWS
// must divide DIM). The read idata[(y+j)*width + x] is coalesced (x varies
// across the warp); the write odata[x*width + ...] strides by `width`, so it
// is uncoalesced — that is the point of comparison with transposeFast.
// Assumes a square matrix whose side is gridDim.x * DIM.
__global__ void transposeNaive(double *odata, const double *idata,int BLOCK_ROWS)
{
int x = blockIdx.x * DIM + threadIdx.x;
int y = blockIdx.y * DIM + threadIdx.y;
int width = gridDim.x * DIM;
for (int j = 0; j < DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
// Tiled transpose through shared memory: each block stages a DIM x DIM tile
// so both the global read and the global write are coalesced.
// Expects blockDim == (DIM, BLOCK_ROWS) with BLOCK_ROWS dividing DIM, and
// size_x/size_y multiples of DIM (no bounds checks).
__global__ void transposeFast(double *odata, double *idata, int size_x, int size_y, int BLOCK_ROWS)
{
// IMPROVEMENT: pad the inner dimension by one element so the transposed
// (column-wise) reads below hit distinct shared-memory banks instead of
// serializing on 32-way bank conflicts.
__shared__ double tile[DIM][DIM + 1];
int xIndex = blockIdx.x * DIM + threadIdx.x;
int yIndex = blockIdx.y * DIM + threadIdx.y;
int index_in = xIndex + (yIndex) * size_x;
// Swap block coordinates for the output so tile (bx, by) lands at (by, bx).
xIndex = blockIdx.y * DIM + threadIdx.x;
yIndex = blockIdx.x * DIM + threadIdx.y;
int index_out = xIndex + (yIndex)* size_y;
for (int i = 0; i < DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*size_x];
}
// Every tile write must land before any thread reads it transposed.
__syncthreads();
for (int i = 0; i < DIM; i+=BLOCK_ROWS) {
odata[index_out+i*size_y] = tile[threadIdx.x][threadIdx.y+i];
}
}
// Benchmarks the naive vs. shared-memory transpose of an 8192x8192 matrix of
// doubles and prints the effective bandwidth of each. argv[1] (optional) sets
// BLOCK_ROWS, the y-extent of each DIM x BLOCK_ROWS thread block.
int main(int argc, char const *argv[]) {
const int size_x = 8192;
const int size_y = 8192;
int BLOCK_ROWS = argc>=2 ? atoi(argv[1]) : 2; // default case: 2 --> 64 threads
//execution configuration parameters
dim3 grid(size_x/DIM, size_y/DIM);
dim3 block (DIM, BLOCK_ROWS);
//size of memory required to store the matrix (size_t: 512 MiB, keep headroom)
const size_t mem_size = sizeof(double) * size_x*size_y;
//allocate host memory
double *h_idata = (double*) malloc(mem_size);
double *h_odata = (double*) malloc(mem_size);
//allocate device memory
double *d_idata;
double *d_odata;
hipMalloc((void**) &d_idata, mem_size);
hipMalloc((void**) &d_odata, mem_size);
// objects to timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//initialize host data
for (int i = 0; i < (size_x*size_y); i++)
h_idata[i] = (double) i;
//copy host data to device
hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice);
printf("\nMatrix size: %dx%d, block: %dx%d, nthreads: %d\n",size_x,size_y, DIM, BLOCK_ROWS, BLOCK_ROWS*DIM );
/****** Naive transpose ******/
hipEventRecord(start, 0);
// BUG FIX: the kernels take (odata, idata) but were launched with
// (d_idata, d_odata) — i.e. they read the uninitialized output buffer and
// overwrote the input, so the downloaded result was garbage.
hipLaunchKernelGGL(( transposeNaive), dim3(grid),dim3(block), 0, 0, d_odata, d_idata, BLOCK_ROWS);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsTimeNaive;
hipEventElapsedTime(&elapsTimeNaive, start, stop);
hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost);
//the bandwidth is twice the size of the matrix divided by the time execution
float bandNaive = (2 * mem_size) / elapsTimeNaive/1e6;
printf("Naive bandwidth = %f, time = %f\n",bandNaive,elapsTimeNaive );
/****** Fast transpose ******/
hipEventRecord(start, 0);
// BUG FIX: same argument-order correction as above.
hipLaunchKernelGGL(( transposeFast), dim3(grid),dim3(block), 0, 0, d_odata, d_idata, size_x,size_y,BLOCK_ROWS);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsTimeFast;
hipEventElapsedTime(&elapsTimeFast, start, stop);
hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost);
//the bandwidth is twice the size of the matrix divided by the time execution
float bandFast = (2 * mem_size) / elapsTimeFast/1e6;
printf("Fast bandwidth = %f, time = %f\n",bandFast,elapsTimeFast );
//free memory (events were previously leaked)
hipEventDestroy(start);
hipEventDestroy(stop);
free(h_idata);
free(h_odata);
hipFree(d_idata);
hipFree(d_odata);
return 0;
}
| 86f1c0cfb02b256fa93c75f11e3ec93d463832a4.cu | /*
module load cudatoolkit
qsub -q gpu -l nodes=1:ppn=1,walltime=00:20:00 -I
nvcc matrixTranspose.cu
*/
#include <stdio.h>
#define DIM 32
// Naive matrix transpose: odata[x][y] = idata[y][x] with no staging.
// Each DIM x BLOCK_ROWS thread block covers a DIM x DIM tile (BLOCK_ROWS
// must divide DIM). The read idata[(y+j)*width + x] is coalesced (x varies
// across the warp); the write odata[x*width + ...] strides by `width`, so it
// is uncoalesced — that is the point of comparison with transposeFast.
// Assumes a square matrix whose side is gridDim.x * DIM.
__global__ void transposeNaive(double *odata, const double *idata,int BLOCK_ROWS)
{
int x = blockIdx.x * DIM + threadIdx.x;
int y = blockIdx.y * DIM + threadIdx.y;
int width = gridDim.x * DIM;
for (int j = 0; j < DIM; j+= BLOCK_ROWS)
odata[x*width + (y+j)] = idata[(y+j)*width + x];
}
__global__ void transposeFast(double *odata, double *idata, int size_x, int size_y, int BLOCK_ROWS)
{
__shared__ double tile[DIM][DIM];
int xIndex = blockIdx.x * DIM + threadIdx.x;
int yIndex = blockIdx.y * DIM + threadIdx.y;
int index_in = xIndex + (yIndex) * size_x;
xIndex = blockIdx.y * DIM + threadIdx.x;
yIndex = blockIdx.x * DIM + threadIdx.y;
int index_out = xIndex + (yIndex)* size_y;
for (int i = 0; i < DIM; i+=BLOCK_ROWS) {
tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*size_x];
}
__syncthreads();
for (int i = 0; i < DIM; i+=BLOCK_ROWS) {
odata[index_out+i*size_y] = tile[threadIdx.x][threadIdx.y+i];
}
}
int main(int argc, char const *argv[]) {
const int size_x = 8192;
const int size_y = 8192;
int BLOCK_ROWS = argc>=2 ? atoi(argv[1]) : 2; // default case: 2 --> 64 threads
//execution configuration parameters
dim3 grid(size_x/DIM, size_y/DIM);
dim3 block (DIM, BLOCK_ROWS);
//size of memory required to store the matrix
const int mem_size = sizeof(double) * size_x*size_y;
//allocate host memory
double *h_idata = (double*) malloc(mem_size);
double *h_odata = (double*) malloc(mem_size);
//allocate device memory
double *d_idata;
double *d_odata;
cudaMalloc((void**) &d_idata, mem_size);
cudaMalloc((void**) &d_odata, mem_size);
// objects to timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//initialize host data
for (int i = 0; i < (size_x*size_y); i++)
h_idata[i] = (double) i;
//copy host data to device
cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice);
printf("\nMatrix size: %dx%d, block: %dx%d, nthreads: %d\n",size_x,size_y, DIM, BLOCK_ROWS, BLOCK_ROWS*DIM );
/****** Naive transpose ******/
cudaEventRecord(start, 0);
transposeNaive<<<grid,block>>>(d_idata, d_odata, BLOCK_ROWS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsTimeNaive;
cudaEventElapsedTime(&elapsTimeNaive, start, stop);
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost);
//the bandwidth is twice the size of the matrix divided by the time execution
float bandNaive = (2 * mem_size) / elapsTimeNaive/1e6;
printf("Naive bandwidth = %f, time = %f\n",bandNaive,elapsTimeNaive );
/****** Fast transpose ******/
cudaEventRecord(start, 0);
transposeFast<<<grid,block>>>(d_idata, d_odata, size_x,size_y,BLOCK_ROWS);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsTimeFast;
cudaEventElapsedTime(&elapsTimeFast, start, stop);
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost);
//the bandwidth is twice the size of the matrix divided by the time execution
float bandFast = (2 * mem_size) / elapsTimeFast/1e6;
printf("Fast bandwidth = %f, time = %f\n",bandFast,elapsTimeFast );
//free memory
free(h_idata);
free(h_odata);
cudaFree(d_idata);
cudaFree(d_odata);
return 0;
}
|
ff3c530a8f3bf8c8eb9cf381e044312b82fd419a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Short job 1 MI-PRC, 2019/2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <math.h>
struct atom{
float x,y,z,charge;
//x,y,z, a naboj pro kazdy atom
//x,y,z, and charge info for each atom
};
struct grid_s{
int size_x,size_y,size_z;
// rozmery gridu (mrizky))
// sizes of grid (in x-,y-,and z- dimension)
float spacing_x,spacing_y,spacing_z;
// mezibodova vzdalenost v gridu
// distances in grid
float offset_x,offset_y,offset_z;
// posun gridu
// offsets of grid
float * pot;
float * d_pot;
// vypocitany potencial v CPU a GPU pameti
// computed potential in grid points
} grid;
struct atom * atoms;
struct atom * d_atoms;
int no_atoms;
// pocet atomu a pole s jejich parametry v CPU a GPU pameti
static void HandleError( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void init()
{
// setup the grid and atoms
grid.spacing_x=0.15;
grid.offset_x=0.5;
grid.spacing_y=0.08;
grid.offset_y=-0.4;
grid.spacing_z=0.22;
grid.offset_z=0.3;
for (int na=0; na<no_atoms; na++) {
atoms[na].x=(na%47)+0.229;
atoms[na].y=(na%31)-10.29;
atoms[na].z=(na%19)+50.311;
atoms[na].charge=(na%8)+0.5;
}}
float body(float t,int n)
{
float b;
if (n<5) return 0.0;
if (t>6.0) return 0.0;
b=12.0*1.6/t;
if (b>18.0) b=18.0;
return b;
}
// zacatek casti k modifikaci
// beginning of part for modification
// muzete pridat vlastni funkce nebo datove struktury, you can also add new functions or data structures
__global__
void compute(int gsizex, int gsizey, int gsizez, float gsx, float gsy, float gsz, float gox, float goy, float goz, struct atom* atoms, int no_atoms, float* gpot) {
__shared__ struct atom atomcache[1024];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z/2 + threadIdx.z/2;
int numThreads = blockDim.x * blockDim.y * blockDim.z/2;
int threadId = threadIdx.z/2 * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// umisteni bodu, location of grid point
float x = gsx * (float) i + gox;
float y = gsy * (float) j + goy;
float z = gsz * (float) k + goz;
float pot = 0.0f;
gpot[k * gsizex * gsizey + j * gsizex + i] = 0.0f;
for (int offset = 0; offset < no_atoms; offset += numThreads) {
__syncthreads();
if (offset + threadId < no_atoms) {
atomcache[threadId] = atoms[offset + threadId];
}
__syncthreads();
if (threadIdx.z % 2 == 0) {
for (int na = 0; na < min(numThreads, no_atoms - offset) / 2; na++) {
float dx = x - atomcache[na].x;
float dy = y - atomcache[na].y;
float dz = z - atomcache[na].z;
float charge = atomcache[na].charge;
pot += charge / sqrt(dx * dx + dy * dy + dz * dz);
}
}
else {
for (int na = min(numThreads, no_atoms - offset) / 2; na < min(numThreads, no_atoms - offset); na++) {
float dx = x - atomcache[na].x;
float dy = y - atomcache[na].y;
float dz = z - atomcache[na].z;
float charge = atomcache[na].charge;
pot += charge / sqrt(dx * dx + dy * dy + dz * dz);
}
}
}
if (i < gsizex && j < gsizey && k < gsizez) {
atomicAdd(&gpot[k * gsizex * gsizey + j * gsizex + i], pot);
}
}
void c_energy(int gsizex, int gsizey, int gsizez, float gsx, float gsy, float gsz, float gox, float goy, float goz, struct atom* atoms, int no_atoms, float* gpot) {
int tot = gsizex * gsizey * gsizez;
dim3 grid((gsizex + 7) / 8, (gsizey + 7) / 8, (gsizez + 7) / 8);
dim3 block_size(8, 8, 16);
hipLaunchKernelGGL(( compute), dim3(grid), dim3(block_size), 0, 0, gsizex, gsizey, gsizez, gsx, gsy, gsz, gox, goy, goz, atoms, no_atoms, gpot);
// compute<<<1, 1, 512 * sizeof(struct atom)>>>(gsizex, gsizey, gsizez, gsx, gsy, gsz, gox, goy, goz, atoms, no_atoms, gpot);
}
// end of part for modification
// konec casti k modifikaci
int check(int N,float *correct,int gsizex,int gsizey,int gsizez,float *gpot){
// overeni spravnosti, check the correctness
float crc[8];
int si,si2;
for (int i=0; i<8; i++) crc[i]=0.0;
for (int i=0; i<grid.size_x; i++) {
for (int j=0; j<grid.size_y; j++) {
for (int k=0; k<grid.size_z; k++) {
float x=gpot[(k)*gsizex*gsizey+(j)*gsizex + (i)];//DATA(i,j,k);
si=(i&1)+(j&1)*2+(k&1)*4;
/*si2=(i&2)^(j&2)^(k&2);
if (si2) crc[si]+=x;
else crc[si]-=x;
*/
crc[si]+=x;
}}}
/*
for (int i=0; i<8; i++) printf("%g,",crc[i]);
printf("\n");
for (int i=0; i<8; i++) printf("%g,%g ",crc[i],correct[N*10+i]);
printf("\n");
*/
for(int i=0;i<8;i++)
if (fabs(1.0-crc[i]/correct[N*10+i])>0.06)
{
printf("ERROR in CRC!!!!\n");
return 1;
}
return 0;
}
int main( void ) {
clock_t start_time,end_time;
int soucet=0,N,i,j,k,n,m,*pomo,v;
int ri,rj,rk;
double delta,s_delta=0.0,timea[16];
float *mA, *mB,*mX,*mX2,s;
//int tn[4]={1000,1500,2000,2500};
float correct[50]={128619,128714,128630,128725,129043,129139,129054,129150, 0, 0,
1.2849e+06,1.28585e+06,1.28501e+06,1.28596e+06,1.28913e+06,1.29009e+06,1.28924e+06,1.2902e+06, 0, 0,
1.285e+08,1.28594e+08,1.28511e+08,1.28605e+08,1.28917e+08,1.29012e+08,1.28929e+08,1.29024e+08, 0, 0,
2.09323e+08,2.09448e+08,2.09287e+08,2.09413e+08,2.10481e+08,2.10609e+08,2.10445e+08,2.10573e+08, 0, 0,
2.31853e+08,2.32026e+08,2.31867e+08,2.3204e+08,2.327e+08,2.32875e+08,2.32715e+08,2.3289e+08, 0, 0};
int tgx[5]={20,20,20,200,64};
int tgy[5]={20,20,20,200,64};
int tgz[5]={20,20,20,200,64};
int ta[5]={2000,20000,2000000,2000,100000};
// 16*10^8,81*10^8,64*10^8,52*10^8
srand (time(NULL));
pomo=(int *)malloc(32*1024*1024);
v=0;
for(N=0;N<16;N++) timea[N]=0.0;
float s_t=0.0;
for(N=0;N<5;N++)
{
grid.size_x=tgx[N];
grid.size_y=tgy[N];
grid.size_z=tgz[N];
no_atoms=ta[N];
atoms=(struct atom *)malloc(no_atoms * sizeof(struct atom));
HANDLE_ERROR(hipMalloc(&d_atoms, no_atoms * sizeof(struct atom)));
if ((atoms==NULL)||(d_atoms==NULL))
{
printf("Alloc error\n");
return 0;
}
grid.pot=(float *)malloc(grid.size_x * grid.size_y * grid.size_z * sizeof(float));
HANDLE_ERROR(hipMalloc(&grid.d_pot, grid.size_x * grid.size_y * grid.size_z * sizeof(float)));
if ((grid.pot==NULL)||(grid.d_pot==NULL))
{
printf("Alloc error\n");
return 0;
}
init();
HANDLE_ERROR(hipMemcpy(d_atoms, atoms, no_atoms * sizeof(struct atom), hipMemcpyHostToDevice));
//soucet+=vyprazdni(pomo,v);
start_time=clock();
// improve performance of this call
// vylepsit vykonnost tohoto volani
c_energy(grid.size_x,grid.size_y,grid.size_z,grid.spacing_x,grid.spacing_y,grid.spacing_z,grid.offset_x,grid.offset_y,grid.offset_z,d_atoms,no_atoms,grid.d_pot);
hipDeviceSynchronize();
end_time=clock();
HANDLE_ERROR(hipMemcpy(grid.pot, grid.d_pot, grid.size_x * grid.size_y * grid.size_z * sizeof(float), hipMemcpyDeviceToHost));
delta=((double)(end_time-start_time))/CLOCKS_PER_SEC;
timea[N]=delta;
s_t+=delta;
rj=check(N,correct,grid.size_x,grid.size_y,grid.size_z,grid.pot);
if (rj==1)
{
printf("BAD result!\n");
return 0;
}
free(atoms);
free(grid.pot);
hipFree(d_atoms);
hipFree(grid.d_pot);
if (s_t>6.0)
{
printf("Time limit (6 seconds) is reached (time=%g s). SJ1 points: 0\n",s_t);
return 0;
}
} // end of N
printf("%i\n",soucet);
for(N=0;N<5;N++)
{
printf("Time %i=%g",N,timea[N]);
if (N>=2)
{
delta=11;
delta*=tgx[N];
delta*=tgy[N];
delta*=tgz[N];
delta*=ta[N];
delta/=timea[N];
printf(" Perf=%g",delta);
}
printf("\n");
}
printf("Sum of time=%g\n",s_t);
printf("SJ1 points:%.2f\n",body(s_t,5));
return 0;
}
| ff3c530a8f3bf8c8eb9cf381e044312b82fd419a.cu | /*
Short job 1 MI-PRC, 2019/2020
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <math.h>
struct atom{
float x,y,z,charge;
//x,y,z, a naboj pro kazdy atom
//x,y,z, and charge info for each atom
};
struct grid_s{
int size_x,size_y,size_z;
// rozmery gridu (mrizky))
// sizes of grid (in x-,y-,and z- dimension)
float spacing_x,spacing_y,spacing_z;
// mezibodova vzdalenost v gridu
// distances in grid
float offset_x,offset_y,offset_z;
// posun gridu
// offsets of grid
float * pot;
float * d_pot;
// vypocitany potencial v CPU a GPU pameti
// computed potential in grid points
} grid;
struct atom * atoms;
struct atom * d_atoms;
int no_atoms;
// pocet atomu a pole s jejich parametry v CPU a GPU pameti
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void init()
{
// setup the grid and atoms
grid.spacing_x=0.15;
grid.offset_x=0.5;
grid.spacing_y=0.08;
grid.offset_y=-0.4;
grid.spacing_z=0.22;
grid.offset_z=0.3;
for (int na=0; na<no_atoms; na++) {
atoms[na].x=(na%47)+0.229;
atoms[na].y=(na%31)-10.29;
atoms[na].z=(na%19)+50.311;
atoms[na].charge=(na%8)+0.5;
}}
float body(float t,int n)
{
float b;
if (n<5) return 0.0;
if (t>6.0) return 0.0;
b=12.0*1.6/t;
if (b>18.0) b=18.0;
return b;
}
// zacatek casti k modifikaci
// beginning of part for modification
// muzete pridat vlastni funkce nebo datove struktury, you can also add new functions or data structures
__global__
void compute(int gsizex, int gsizey, int gsizez, float gsx, float gsy, float gsz, float gox, float goy, float goz, struct atom* atoms, int no_atoms, float* gpot) {
__shared__ struct atom atomcache[1024];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z/2 + threadIdx.z/2;
int numThreads = blockDim.x * blockDim.y * blockDim.z/2;
int threadId = threadIdx.z/2 * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
// umisteni bodu, location of grid point
float x = gsx * (float) i + gox;
float y = gsy * (float) j + goy;
float z = gsz * (float) k + goz;
float pot = 0.0f;
gpot[k * gsizex * gsizey + j * gsizex + i] = 0.0f;
for (int offset = 0; offset < no_atoms; offset += numThreads) {
__syncthreads();
if (offset + threadId < no_atoms) {
atomcache[threadId] = atoms[offset + threadId];
}
__syncthreads();
if (threadIdx.z % 2 == 0) {
for (int na = 0; na < min(numThreads, no_atoms - offset) / 2; na++) {
float dx = x - atomcache[na].x;
float dy = y - atomcache[na].y;
float dz = z - atomcache[na].z;
float charge = atomcache[na].charge;
pot += charge / sqrt(dx * dx + dy * dy + dz * dz);
}
}
else {
for (int na = min(numThreads, no_atoms - offset) / 2; na < min(numThreads, no_atoms - offset); na++) {
float dx = x - atomcache[na].x;
float dy = y - atomcache[na].y;
float dz = z - atomcache[na].z;
float charge = atomcache[na].charge;
pot += charge / sqrt(dx * dx + dy * dy + dz * dz);
}
}
}
if (i < gsizex && j < gsizey && k < gsizez) {
atomicAdd(&gpot[k * gsizex * gsizey + j * gsizex + i], pot);
}
}
void c_energy(int gsizex, int gsizey, int gsizez, float gsx, float gsy, float gsz, float gox, float goy, float goz, struct atom* atoms, int no_atoms, float* gpot) {
int tot = gsizex * gsizey * gsizez;
dim3 grid((gsizex + 7) / 8, (gsizey + 7) / 8, (gsizez + 7) / 8);
dim3 block_size(8, 8, 16);
compute<<<grid, block_size>>>(gsizex, gsizey, gsizez, gsx, gsy, gsz, gox, goy, goz, atoms, no_atoms, gpot);
// compute<<<1, 1, 512 * sizeof(struct atom)>>>(gsizex, gsizey, gsizez, gsx, gsy, gsz, gox, goy, goz, atoms, no_atoms, gpot);
}
// end of part for modification
// konec casti k modifikaci
int check(int N,float *correct,int gsizex,int gsizey,int gsizez,float *gpot){
// overeni spravnosti, check the correctness
float crc[8];
int si,si2;
for (int i=0; i<8; i++) crc[i]=0.0;
for (int i=0; i<grid.size_x; i++) {
for (int j=0; j<grid.size_y; j++) {
for (int k=0; k<grid.size_z; k++) {
float x=gpot[(k)*gsizex*gsizey+(j)*gsizex + (i)];//DATA(i,j,k);
si=(i&1)+(j&1)*2+(k&1)*4;
/*si2=(i&2)^(j&2)^(k&2);
if (si2) crc[si]+=x;
else crc[si]-=x;
*/
crc[si]+=x;
}}}
/*
for (int i=0; i<8; i++) printf("%g,",crc[i]);
printf("\n");
for (int i=0; i<8; i++) printf("%g,%g ",crc[i],correct[N*10+i]);
printf("\n");
*/
for(int i=0;i<8;i++)
if (fabs(1.0-crc[i]/correct[N*10+i])>0.06)
{
printf("ERROR in CRC!!!!\n");
return 1;
}
return 0;
}
int main( void ) {
clock_t start_time,end_time;
int soucet=0,N,i,j,k,n,m,*pomo,v;
int ri,rj,rk;
double delta,s_delta=0.0,timea[16];
float *mA, *mB,*mX,*mX2,s;
//int tn[4]={1000,1500,2000,2500};
float correct[50]={128619,128714,128630,128725,129043,129139,129054,129150, 0, 0,
1.2849e+06,1.28585e+06,1.28501e+06,1.28596e+06,1.28913e+06,1.29009e+06,1.28924e+06,1.2902e+06, 0, 0,
1.285e+08,1.28594e+08,1.28511e+08,1.28605e+08,1.28917e+08,1.29012e+08,1.28929e+08,1.29024e+08, 0, 0,
2.09323e+08,2.09448e+08,2.09287e+08,2.09413e+08,2.10481e+08,2.10609e+08,2.10445e+08,2.10573e+08, 0, 0,
2.31853e+08,2.32026e+08,2.31867e+08,2.3204e+08,2.327e+08,2.32875e+08,2.32715e+08,2.3289e+08, 0, 0};
int tgx[5]={20,20,20,200,64};
int tgy[5]={20,20,20,200,64};
int tgz[5]={20,20,20,200,64};
int ta[5]={2000,20000,2000000,2000,100000};
// 16*10^8,81*10^8,64*10^8,52*10^8
srand (time(NULL));
pomo=(int *)malloc(32*1024*1024);
v=0;
for(N=0;N<16;N++) timea[N]=0.0;
float s_t=0.0;
for(N=0;N<5;N++)
{
grid.size_x=tgx[N];
grid.size_y=tgy[N];
grid.size_z=tgz[N];
no_atoms=ta[N];
atoms=(struct atom *)malloc(no_atoms * sizeof(struct atom));
HANDLE_ERROR(cudaMalloc(&d_atoms, no_atoms * sizeof(struct atom)));
if ((atoms==NULL)||(d_atoms==NULL))
{
printf("Alloc error\n");
return 0;
}
grid.pot=(float *)malloc(grid.size_x * grid.size_y * grid.size_z * sizeof(float));
HANDLE_ERROR(cudaMalloc(&grid.d_pot, grid.size_x * grid.size_y * grid.size_z * sizeof(float)));
if ((grid.pot==NULL)||(grid.d_pot==NULL))
{
printf("Alloc error\n");
return 0;
}
init();
HANDLE_ERROR(cudaMemcpy(d_atoms, atoms, no_atoms * sizeof(struct atom), cudaMemcpyHostToDevice));
//soucet+=vyprazdni(pomo,v);
start_time=clock();
// improve performance of this call
// vylepsit vykonnost tohoto volani
c_energy(grid.size_x,grid.size_y,grid.size_z,grid.spacing_x,grid.spacing_y,grid.spacing_z,grid.offset_x,grid.offset_y,grid.offset_z,d_atoms,no_atoms,grid.d_pot);
cudaDeviceSynchronize();
end_time=clock();
HANDLE_ERROR(cudaMemcpy(grid.pot, grid.d_pot, grid.size_x * grid.size_y * grid.size_z * sizeof(float), cudaMemcpyDeviceToHost));
delta=((double)(end_time-start_time))/CLOCKS_PER_SEC;
timea[N]=delta;
s_t+=delta;
rj=check(N,correct,grid.size_x,grid.size_y,grid.size_z,grid.pot);
if (rj==1)
{
printf("BAD result!\n");
return 0;
}
free(atoms);
free(grid.pot);
cudaFree(d_atoms);
cudaFree(grid.d_pot);
if (s_t>6.0)
{
printf("Time limit (6 seconds) is reached (time=%g s). SJ1 points: 0\n",s_t);
return 0;
}
} // end of N
printf("%i\n",soucet);
for(N=0;N<5;N++)
{
printf("Time %i=%g",N,timea[N]);
if (N>=2)
{
delta=11;
delta*=tgx[N];
delta*=tgy[N];
delta*=tgz[N];
delta*=ta[N];
delta/=timea[N];
printf(" Perf=%g",delta);
}
printf("\n");
}
printf("Sum of time=%g\n",s_t);
printf("SJ1 points:%.2f\n",body(s_t,5));
return 0;
}
|
955389d0fbc94ac7d68c3fccb30b8dabb7f1dff9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define PRECISION 0.00001
#define TAM_BLOCO 16
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
//Variveis CPU
double h_h1, h_h2;
double h_denominador1, h_denominador2;
double *h_m, *d_m;
int h_dimensaoX, h_dimensaoY, laps = 0, i;
//Variveis GPU
__device__ double omega = 1.5;
__device__ double d_h1, d_h2;
__device__ double d_denominador1, d_denominador2;
__device__ int d_dimensaoX, d_dimensaoY;
__device__ double d_PI = 3.14159265358979323846;
FILE *arquivo;
clock_t start, end;
double tempo;
//Funes da CPU
//Funcao que imprime a matriz no arquivo de saida
void printMat(){
int i, j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
fprintf(arquivo, "%lf", h_m[i * h_dimensaoY + j]);
if(j != h_dimensaoY - 1) fprintf(arquivo, " ");
}
if(i != h_dimensaoX - 1)
fprintf(arquivo, "\n");
}
}
//Funcao que inicializa a matriz com os valores de contorno especificados pelo problema
void setupM(){
int i,j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
if(i == 0){
h_m[i * h_dimensaoY + j] = uN;
}else if(i == (h_dimensaoX - 1)){
h_m[i * h_dimensaoY + j] = uS;
}else if(j == 0){
h_m[i * h_dimensaoY + j] = uW;
}else if(j == h_dimensaoY - 1){
h_m[i * h_dimensaoY + j] = uE;
}
}
}
}
//Funes da GPU
//Funcoes "a" e "b" especificada pelo problema
__device__ double a(int i, int j){
double x = i * d_h1;
double y = i * d_h2;
return 500 * x * (1 - x) * (0.5 - y);
}
__device__ double b(int i, int j){
double x = i * d_h1;
double y = i * d_h2;
return 500 * y * (y - 1) * (x - 0.5);
}
//Funcoes "n", "s", "w", "e" especificadas pelo problema
__device__ double n(int i, int j){
return (2.0 - d_h2 * b(i,j))/d_denominador2;
}
__device__ double s(int i, int j){
return (2.0 + d_h2 * b(i,j))/d_denominador2;
}
__device__ double e(int i, int j){
return (2.0 - d_h1 * a(i,j))/d_denominador1;
}
__device__ double w(int i, int j){
return (2.0 + d_h1 * a(i,j))/d_denominador1;
}
//Funcao que faz a media ponderada dos valores vizinhos ao ponto que est sendo atualizado
__device__ double somaDosPontosVizinhos(int i, int j, double *m){
double temp = 0;
temp += w(i,j) * m[(i - 1) * d_dimensaoY + j];
temp += e(i,j) * m[(i + 1) * d_dimensaoY + j];
temp += s(i,j) * m[i * d_dimensaoY + (j - 1)];
temp += n(i,j) * m[i * d_dimensaoY + (j + 1)];
return temp;
}
//As funcoes "funcaoOmega()" e " letraGrega()" abaixo fazem um calculo do omega especifico
//para cada ponto que et sendo analisado. A ideia a mesma do outro programa,a de fazer uma
//media ponderada, porem aqui as condicoes sao mais especificas
__device__ double letraGrega(int i, int j){
double raiz1, raiz2, total;
raiz1 = e(i,j) * w(i,j);
raiz1 = pow(raiz1, 0.5);
raiz1 = raiz1 * cos(d_h1*M_PI);
raiz2 = s(i,j) * n(i,j);
raiz2 = pow(raiz2, 0.5);
raiz2 = raiz2 * cos(d_h2*M_PI);
total = 2*(raiz1 + raiz2);
return total;
}
__device__ double funcaoOmega(int i, int j){
double raiz, total;
raiz = 1 - pow(letraGrega(i, j), 2);
raiz = pow(raiz, 0.5);
total = 2/(1 + raiz);
return total;
}
//Kernels principais do programa. Cada um trabalho em um conjunto de pontos da matriz
//fazendo uma media ponderada entre o valor atual do ponto que est sendo analisado e
//seus quatro pontos adjacentes. O quanto cada valor vai pesar determinado pelo mega
//da funcao que, nesse caso, calculado mais a fundo
__global__ void vermelhos(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < d_dimensaoY - 1){
if((tidx + tidy) % 2 == 0){
double omega = funcaoOmega(tidx, tidy);
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m);
}
}
}
__global__ void azuis(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < d_dimensaoY - 1){
if((tidx + tidy) % 2 == 1){
double omega = funcaoOmega(tidx, tidy);
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m);
}
}
}
int main(int argc, char** argv){
//Especificacoes iniciais para garantir que o programa ser rodado com as
//condicoes iniciais corretas
if(argc != 4){
printf("Nmero incorreto de parmetros:\n");
printf("Insira as dimensoes e a quantidade de iteraes\n");
printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iteraes>\n", argv[0]);
exit(-1);
}
//Inicializando todos os valores necessrios para transferir para a GPU e para realizar
//os calculos do programa
h_dimensaoX = atoi(argv[1]);
h_dimensaoY = atoi(argv[2]);
laps = atoi(argv[3]);
h_h1 = 1.0/(h_dimensaoX + 1);
h_h2 = 1.0/(h_dimensaoY + 1);
h_dimensaoX += 2;
h_dimensaoY += 2;
h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2)));
h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2)));
//Alocando a matriz na CPU e inicializando
h_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double));
setupM();
//Alocando a matriz na GPU
hipMalloc(&d_m, h_dimensaoX * h_dimensaoY * sizeof(double));
//Transferindo as informaes necessrias para a GPU
hipMemcpy(d_m, h_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, hipMemcpyHostToDevice);
//Iniciando a contagem do tempo
start = clock();
//Calculando a quantidade de blocos e threads que serao lancados
dim3 nthreads(TAM_BLOCO,TAM_BLOCO);
dim3 nblocos((h_dimensaoX + nthreads.x - 1)/nthreads.x, (h_dimensaoY + nthreads.y - 1)/nthreads.y);
//Fazendo os clculos
for(i = 0; i < laps; i++){
hipLaunchKernelGGL(( vermelhos), dim3(nblocos), dim3(nthreads), 0, 0, d_m);
hipLaunchKernelGGL(( azuis), dim3(nblocos), dim3(nthreads), 0, 0, d_m);
}
//Trazendo a matriz de volta para a CPU
hipMemcpy(h_m, d_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyDeviceToHost);
//Reseta a GPU para liberar todos os recursos
hipDeviceReset();
//Imprimindo a matriz no arquivo e fechando-o
arquivo = fopen("sample.txt", "w");
printMat();
fclose(arquivo);
//Termina de calcular o tempo que demorou o programa
end = clock();
tempo = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo total: %lfs...\n", tempo);
return 0;
} | 955389d0fbc94ac7d68c3fccb30b8dabb7f1dff9.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define PRECISION 0.00001
#define TAM_BLOCO 16
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
//Variáveis CPU
double h_h1, h_h2;
double h_denominador1, h_denominador2;
double *h_m, *d_m;
int h_dimensaoX, h_dimensaoY, laps = 0, i;
//Variáveis GPU
__device__ double omega = 1.5;
__device__ double d_h1, d_h2;
__device__ double d_denominador1, d_denominador2;
__device__ int d_dimensaoX, d_dimensaoY;
__device__ double d_PI = 3.14159265358979323846;
FILE *arquivo;
clock_t start, end;
double tempo;
//Funções da CPU
//Funcao que imprime a matriz no arquivo de saida
void printMat(){
int i, j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
fprintf(arquivo, "%lf", h_m[i * h_dimensaoY + j]);
if(j != h_dimensaoY - 1) fprintf(arquivo, " ");
}
if(i != h_dimensaoX - 1)
fprintf(arquivo, "\n");
}
}
//Funcao que inicializa a matriz com os valores de contorno especificados pelo problema
void setupM(){
int i,j;
for(i = 0; i < h_dimensaoX; i++){
for(j = 0; j < h_dimensaoY; j++){
if(i == 0){
h_m[i * h_dimensaoY + j] = uN;
}else if(i == (h_dimensaoX - 1)){
h_m[i * h_dimensaoY + j] = uS;
}else if(j == 0){
h_m[i * h_dimensaoY + j] = uW;
}else if(j == h_dimensaoY - 1){
h_m[i * h_dimensaoY + j] = uE;
}
}
}
}
//Funções da GPU
//Funcoes "a" e "b" especificada pelo problema
__device__ double a(int i, int j){
double x = i * d_h1;
double y = i * d_h2;
return 500 * x * (1 - x) * (0.5 - y);
}
__device__ double b(int i, int j){
double x = i * d_h1;
double y = i * d_h2;
return 500 * y * (y - 1) * (x - 0.5);
}
//Funcoes "n", "s", "w", "e" especificadas pelo problema
__device__ double n(int i, int j){
return (2.0 - d_h2 * b(i,j))/d_denominador2;
}
__device__ double s(int i, int j){
return (2.0 + d_h2 * b(i,j))/d_denominador2;
}
__device__ double e(int i, int j){
return (2.0 - d_h1 * a(i,j))/d_denominador1;
}
__device__ double w(int i, int j){
return (2.0 + d_h1 * a(i,j))/d_denominador1;
}
//Funcao que faz a media ponderada dos valores vizinhos ao ponto que está sendo atualizado
__device__ double somaDosPontosVizinhos(int i, int j, double *m){
double temp = 0;
temp += w(i,j) * m[(i - 1) * d_dimensaoY + j];
temp += e(i,j) * m[(i + 1) * d_dimensaoY + j];
temp += s(i,j) * m[i * d_dimensaoY + (j - 1)];
temp += n(i,j) * m[i * d_dimensaoY + (j + 1)];
return temp;
}
//As funcoes "funcaoOmega()" e " letraGrega()" abaixo fazem um calculo do omega especifico
//para cada ponto que etá sendo analisado. A ideia é a mesma do outro programa,a de fazer uma
//media ponderada, porem aqui as condicoes sao mais especificas
__device__ double letraGrega(int i, int j){
double raiz1, raiz2, total;
raiz1 = e(i,j) * w(i,j);
raiz1 = pow(raiz1, 0.5);
raiz1 = raiz1 * cos(d_h1*M_PI);
raiz2 = s(i,j) * n(i,j);
raiz2 = pow(raiz2, 0.5);
raiz2 = raiz2 * cos(d_h2*M_PI);
total = 2*(raiz1 + raiz2);
return total;
}
__device__ double funcaoOmega(int i, int j){
double raiz, total;
raiz = 1 - pow(letraGrega(i, j), 2);
raiz = pow(raiz, 0.5);
total = 2/(1 + raiz);
return total;
}
//Kernels principais do programa. Cada um trabalho em um conjunto de pontos da matriz
//fazendo uma media ponderada entre o valor atual do ponto que está sendo analisado e
//seus quatro pontos adjacentes. O quanto cada valor vai pesar é determinado pelo ômega
//da funcao que, nesse caso, é calculado mais a fundo
__global__ void vermelhos(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < d_dimensaoY - 1){
if((tidx + tidy) % 2 == 0){
double omega = funcaoOmega(tidx, tidy);
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m);
}
}
}
__global__ void azuis(double *m){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < d_dimensaoY - 1){
if((tidx + tidy) % 2 == 1){
double omega = funcaoOmega(tidx, tidy);
m[tidx * d_dimensaoY + tidy] *= (1 - omega);
m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m);
}
}
}
int main(int argc, char** argv){
//Especificacoes iniciais para garantir que o programa será rodado com as
//condicoes iniciais corretas
if(argc != 4){
printf("Número incorreto de parâmetros:\n");
printf("Insira as dimensoes e a quantidade de iterações\n");
printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iterações>\n", argv[0]);
exit(-1);
}
//Inicializando todos os valores necessários para transferir para a GPU e para realizar
//os calculos do programa
h_dimensaoX = atoi(argv[1]);
h_dimensaoY = atoi(argv[2]);
laps = atoi(argv[3]);
h_h1 = 1.0/(h_dimensaoX + 1);
h_h2 = 1.0/(h_dimensaoY + 1);
h_dimensaoX += 2;
h_dimensaoY += 2;
h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2)));
h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2)));
//Alocando a matriz na CPU e inicializando
h_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double));
setupM();
//Alocando a matriz na GPU
cudaMalloc(&d_m, h_dimensaoX * h_dimensaoY * sizeof(double));
//Transferindo as informações necessárias para a GPU
cudaMemcpy(d_m, h_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, cudaMemcpyHostToDevice);
//Iniciando a contagem do tempo
start = clock();
//Calculando a quantidade de blocos e threads que serao lancados
dim3 nthreads(TAM_BLOCO,TAM_BLOCO);
dim3 nblocos((h_dimensaoX + nthreads.x - 1)/nthreads.x, (h_dimensaoY + nthreads.y - 1)/nthreads.y);
//Fazendo os cálculos
for(i = 0; i < laps; i++){
vermelhos<<<nblocos, nthreads>>>(d_m);
azuis<<<nblocos, nthreads>>>(d_m);
}
//Trazendo a matriz de volta para a CPU
cudaMemcpy(h_m, d_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyDeviceToHost);
//Reseta a GPU para liberar todos os recursos
cudaDeviceReset();
//Imprimindo a matriz no arquivo e fechando-o
arquivo = fopen("sample.txt", "w");
printMat();
fclose(arquivo);
//Termina de calcular o tempo que demorou o programa
end = clock();
tempo = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Tempo total: %lfs...\n", tempo);
return 0;
} |
fa551aabba71bf5ef3d24212dcfdba9b6492db0e.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "parse_oo.h"
#include "../graph_parser/util.h"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
/*
 * Connected-components (CC) driver, HIP build.
 *
 * Usage: prog <graph-file> <format>   (format: 1 = Metis, 0 = Dimacs9/COO,
 * 2 = Matrix Market). Parses the graph into CSR (outgoing + incoming edge
 * arrays), uploads it, builds per-vertex objects on the device, runs ITER
 * rounds of the ConnectedComponent kernel, copies the per-node component
 * ids back, and writes them to "result.out".
 *
 * Fixes vs. the previous revision (diagnostics only, no control-flow change):
 *  - stray "#endif" text removed from the row_d/inrow_d memcpy error messages;
 *  - col_d/incol_d error messages now report num_edges (the size actually used);
 *  - the vertex-allocation error message now reports num_nodes, and the
 *    context message reports sizeof(GraphChiContext);
 *  - "reserve for future" gained a trailing newline.
 */
int main(int argc, char **argv)
{
    char *tmpchar;          // path of the graph input file (argv[1])
    int num_nodes;
    int num_edges;
    int file_format = 1;    // 1 = Metis, 0 = Dimacs9, 2 = Matrix Market
    bool directed = 0;

    hipError_t err = hipSuccess;

    if (argc == 3) {
        tmpchar = argv[1];           // Graph inputfile
        file_format = atoi(argv[2]); // File format
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }

    // Parse the graph file into a CSR structure (also fills the
    // incoming-edge arrays used by initObject below).
    csr_array *csr;
    if (file_format == 1) {
        // Metis
        csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        // Dimacs9
        csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
    } else if (file_format == 2) {
        // Matrix market
        csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
    } else {
        printf("reserve for future\n");
        exit(1);
    }

    // Host-side output buffer: one component id per node.
    int *rank_array = (int *)malloc(num_nodes * sizeof(int));
    if (!rank_array) {
        fprintf(stderr, "rank array not allocated successfully\n");
        return -1;
    }

    int *row_d;
    int *col_d;
    int *inrow_d;
    int *incol_d;
    int *cc_d;

    // Create device-side buffers for the graph
    err = hipMalloc(&row_d, num_nodes * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&col_d, num_edges * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&incol_d, num_edges * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
        return -1;
    }
    // Create the buffer for the per-node component ids.
    err = hipMalloc(&cc_d, num_nodes * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc cc_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
        return -1;
    }

    double timer1 = gettime();

    // Copy the graph data to the device-side buffers.
    err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy row_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
        return -1;
    }
    err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy inrow_d (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n", num_edges, hipGetErrorString(err));
        return -1;
    }

    // Set up work dimensions: 1D launch, one thread per node.
    int block_size = 256;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);

    // initObject/initOutEdge allocate per-vertex objects with device-side
    // `new`, so enlarge the device malloc heap (4 GiB) up front.
    hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);

    double timer3 = gettime();

    ChiVertex<int, int> **vertex;
    GraphChiContext* context;
    err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int>*));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_nodes, hipGetErrorString(err));
        return -1;
    }
    err = hipMalloc(&context, sizeof(GraphChiContext));
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n", (int)sizeof(GraphChiContext), hipGetErrorString(err));
        return -1;
    }

    // Device-side graph construction; each kernel launch is checked via
    // hipGetLastError after a full synchronize.
    printf("Start initCtx\n");
    hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: initCtx failed (%s)\n", hipGetErrorString(err));
        return -1;
    }
    printf("Start initObj\n");
    hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d, incol_d);
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: initObject failed (%s)\n", hipGetErrorString(err));
        return -1;
    }
    printf("Start initOutEdge\n");
    hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: initOutEdge failed (%s)\n", hipGetErrorString(err));
        return -1;
    }

    // Run CC for a fixed iteration count. TO: convergence determination.
    for (int i = 0; i < ITER; i++) {
        printf("Start ConnectedComponent\n");
        hipLaunchKernelGGL(( ConnectedComponent), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
        printf("Finish ConnectedComponent\n");
        hipDeviceSynchronize();
        err = hipGetLastError();
        if (err != hipSuccess) {
            fprintf(stderr, "ERROR: hipLaunch failed (%s)\n", hipGetErrorString(err));
            return -1;
        }
    }
    hipDeviceSynchronize();

    double timer4 = gettime();

    // Gather the per-vertex results into the flat cc_d array.
    printf("Start Copyback\n");
    hipLaunchKernelGGL(( copyBack) , dim3(grid), dim3(threads), 0, 0, vertex, context, cc_d);
    printf("End Copyback\n");
    hipDeviceSynchronize();
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipLaunch failed (%s)\n", hipGetErrorString(err));
        return -1;
    }

    // Copy the component-id buffer back to the host.
    err = hipMemcpy(rank_array, cc_d, num_nodes * sizeof(int), hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n", hipGetErrorString(err));
        return -1;
    }

    double timer2 = gettime();

    // Report timing characteristics
    printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
    // Print rank array
    print_vectorf(rank_array, num_nodes);
#endif

    // Free the host-side arrays
    free(rank_array);
    csr->freeArrays();
    free(csr);

    // Free the device buffers
    hipFree(row_d);
    hipFree(col_d);
    hipFree(inrow_d);
    hipFree(incol_d);
    hipFree(cc_d);

    return 0;
}
// Writes `num` integers from `vector` to "result.out", one per line.
// Fixes vs. the previous revision:
//  - on fopen failure the function now returns instead of dereferencing the
//    NULL FILE* in fprintf/fclose (which crashed);
//  - the error message names the file actually opened ("result.out", not
//    "result.txt").
void print_vectorf(int *vector, int num)
{
    FILE *fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }

    for (int i = 0; i < num; i++) {
        fprintf(fp, "%d\n", vector[i]);
    }

    fclose(fp);
}
| fa551aabba71bf5ef3d24212dcfdba9b6492db0e.cu | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
 * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774),   *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../graph_parser/parse.h"
#include "parse_oo.h"
#include "../graph_parser/util.h"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
/*
 * Connected-components (CC) driver, CUDA build.
 *
 * Usage: prog <graph-file> <format>   (format: 1 = Metis, 0 = Dimacs9/COO,
 * 2 = Matrix Market). Parses the graph into CSR (outgoing + incoming edge
 * arrays), uploads it, builds per-vertex objects on the device, runs ITER
 * rounds of the ConnectedComponent kernel, copies the per-node component
 * ids back, and writes them to "result.out".
 *
 * Fixes vs. the previous revision (diagnostics only, no control-flow change):
 *  - stray "#endif" text removed from the row_d/inrow_d memcpy error messages;
 *  - col_d/incol_d error messages now report num_edges (the size actually used);
 *  - the vertex-allocation error message now reports num_nodes, and the
 *    context message reports sizeof(GraphChiContext);
 *  - "reserve for future" gained a trailing newline.
 */
int main(int argc, char **argv)
{
    char *tmpchar;          // path of the graph input file (argv[1])
    int num_nodes;
    int num_edges;
    int file_format = 1;    // 1 = Metis, 0 = Dimacs9, 2 = Matrix Market
    bool directed = 0;

    cudaError_t err = cudaSuccess;

    if (argc == 3) {
        tmpchar = argv[1];           // Graph inputfile
        file_format = atoi(argv[2]); // File format
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }

    // Parse the graph file into a CSR structure (also fills the
    // incoming-edge arrays used by initObject below).
    csr_array *csr;
    if (file_format == 1) {
        // Metis
        csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        // Dimacs9
        csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
    } else if (file_format == 2) {
        // Matrix market
        csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
    } else {
        printf("reserve for future\n");
        exit(1);
    }

    // Host-side output buffer: one component id per node.
    int *rank_array = (int *)malloc(num_nodes * sizeof(int));
    if (!rank_array) {
        fprintf(stderr, "rank array not allocated successfully\n");
        return -1;
    }

    int *row_d;
    int *col_d;
    int *inrow_d;
    int *incol_d;
    int *cc_d;

    // Create device-side buffers for the graph
    err = cudaMalloc(&row_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&col_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&incol_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    // Create the buffer for the per-node component ids.
    err = cudaMalloc(&cc_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc cc_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }

    double timer1 = gettime();

    // Copy the graph data to the device-side buffers.
    err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy inrow_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n", num_edges, cudaGetErrorString(err));
        return -1;
    }

    // Set up work dimensions: 1D launch, one thread per node.
    int block_size = 256;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);

    // initObject/initOutEdge allocate per-vertex objects with device-side
    // `new`, so enlarge the device malloc heap (4 GiB) up front.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);

    double timer3 = gettime();

    ChiVertex<int, int> **vertex;
    GraphChiContext* context;
    err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int>*));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&context, sizeof(GraphChiContext));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n", (int)sizeof(GraphChiContext), cudaGetErrorString(err));
        return -1;
    }

    // Device-side graph construction; each kernel launch is checked via
    // cudaGetLastError after a full synchronize.
    printf("Start initCtx\n");
    initContext<<<1, 1>>>(context, num_nodes, num_edges);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: initCtx failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }
    printf("Start initObj\n");
    initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d, incol_d);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: initObject failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }
    printf("Start initOutEdge\n");
    initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: initOutEdge failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }

    // Run CC for a fixed iteration count. TO: convergence determination.
    for (int i = 0; i < ITER; i++) {
        printf("Start ConnectedComponent\n");
        ConnectedComponent<<<grid, threads>>>(vertex, context, i);
        printf("Finish ConnectedComponent\n");
        cudaDeviceSynchronize();
        err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err));
            return -1;
        }
    }
    cudaDeviceSynchronize();

    double timer4 = gettime();

    // Gather the per-vertex results into the flat cc_d array.
    printf("Start Copyback\n");
    copyBack <<<grid, threads>>>(vertex, context, cc_d);
    printf("End Copyback\n");
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }

    // Copy the component-id buffer back to the host.
    err = cudaMemcpy(rank_array, cc_d, num_nodes * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }

    double timer2 = gettime();

    // Report timing characteristics
    printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
    // Print rank array
    print_vectorf(rank_array, num_nodes);
#endif

    // Free the host-side arrays
    free(rank_array);
    csr->freeArrays();
    free(csr);

    // Free the device buffers
    cudaFree(row_d);
    cudaFree(col_d);
    cudaFree(inrow_d);
    cudaFree(incol_d);
    cudaFree(cc_d);

    return 0;
}
// Writes `num` integers from `vector` to "result.out", one per line.
// Fixes vs. the previous revision:
//  - on fopen failure the function now returns instead of dereferencing the
//    NULL FILE* in fprintf/fclose (which crashed);
//  - the error message names the file actually opened ("result.out", not
//    "result.txt").
void print_vectorf(int *vector, int num)
{
    FILE *fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.out\n");
        return;
    }

    for (int i = 0; i < num; i++) {
        fprintf(fp, "%d\n", vector[i]);
    }

    fclose(fp);
}
|
c797c08d18164b7f83168c934b39cb0e78eb5b75.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef WCUDA
#include <hip/hip_runtime.h>
#include "ray.cuh"
#include "ray.h"
#include <hiprand/hiprand_kernel.h>
#include "CUDAhelpers.h"
#include "mcgrid.h"
#include "shaderec.h"
// Returns true iff `point` lies strictly inside the axis-aligned box [p0, p1]
// (p0 = min corner, p1 = max corner; boundary points count as outside).
// Fix: the y upper bound previously compared against p1.x (copy-paste bug),
// which misclassified points whenever the box is not a cube in x/y.
static __device__ bool inside_bb(const CUDAreal3 &p0, const CUDAreal3 &p1, const CUDAreal3 &point) {
    return
        point.x > p0.x && point.x < p1.x &&
        point.y > p0.y && point.y < p1.y &&
        point.z > p0.z && point.z < p1.z;
}
// Clamp `value` into the closed interval [a, b]; assumes a <= b.
static __device__ CUDAreal clamp(CUDAreal value, CUDAreal a, CUDAreal b) {
    return (value < a) ? a : ((value > b) ? b : value);
}
// Shadow-ray query against the grid. Stub: always reports "no occluder" and
// never writes tmin. NOTE(review): any lighting code that relies on this will
// render everything unshadowed — confirm the stub is intentional before use.
__device__ bool MCGridCUDA::shadow_hit(const rayCU& ray, CUDAreal& tmin) const
{
    return false;
}
// Ray/uniform-grid intersection: slab test against the grid's bounding box
// [p0, p1], then a 3D-DDA cell walk (the slab setup "includes modifications
// from Shirley and Morley (2003)", per the inline comment below).
//
// On a hit: returns true, writes the ray parameter of the entered cell face
// to `tmin`, and sets sr.normal to the axis-aligned face normal (opposite to
// the traversal step on that axis). On a miss: returns false.
//
// NOTE(review) — items to confirm against the full project:
//  * block_p0 is computed from nx/ny/nz only (never ix/iy/iz), so it is the
//    same value for every cell; it is also unused while the per-cell
//    block_hit() calls remain commented out. Looks like a leftover — verify.
//  * The first section declares locals typed `real` while the rest uses
//    `CUDAreal` — presumably the same typedef; confirm.
//  * The x-exit path clears sr.material to 0 but the y/z exit paths do not —
//    confirm this asymmetry is intentional.
//  * With block_hit() commented out, any non-zero cell id counts as a hit
//    (cells are treated as solid voxels).
__device__ bool MCGridCUDA::hit(const rayCU& ray, CUDAreal& tmin, ShadeRecCUDA& sr) const
{
    //Material* mat_ptr = sr.material_ptr;
    // if it's a kernel:
    /*int column = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    rayCU ray = rays[column + stride * row];*/
    //else:
    //MCGridCUDA grid = *gr;
    //int nx = grid.nx;
    //int ny = grid.ny;
    //int nz = grid.nz;

    // Unpack ray origin/direction and the grid's bounding-box corners.
    CUDAreal ox = ray.o.x;
    CUDAreal oy = ray.o.y;
    CUDAreal oz = ray.o.z;
    CUDAreal dx = ray.d.x;
    CUDAreal dy = ray.d.y;
    CUDAreal dz = ray.d.z;

    CUDAreal x0 = p0.x;
    CUDAreal y0 = p0.y;
    CUDAreal z0 = p0.z;
    CUDAreal x1 = p1.x;
    CUDAreal y1 = p1.y;
    CUDAreal z1 = p1.z;

    CUDAreal tx_min, ty_min, tz_min;
    CUDAreal tx_max, ty_max, tz_max;

    // Slab test: per-axis entry/exit parameters, sign-aware so that
    // t*_min <= t*_max on each axis.
    // the following code includes modifications from Shirley and Morley (2003)
    CUDAreal a = 1.0 / dx;
    if (a >= 0) {
        tx_min = (x0 - ox) * a;
        tx_max = (x1 - ox) * a;
    }
    else {
        tx_min = (x1 - ox) * a;
        tx_max = (x0 - ox) * a;
    }

    CUDAreal b = 1.0 / dy;
    if (b >= 0) {
        ty_min = (y0 - oy) * b;
        ty_max = (y1 - oy) * b;
    }
    else {
        ty_min = (y1 - oy) * b;
        ty_max = (y0 - oy) * b;
    }

    CUDAreal c = 1.0 / dz;
    if (c >= 0) {
        tz_min = (z0 - oz) * c;
        tz_max = (z1 - oz) * c;
    }
    else {
        tz_min = (z1 - oz) * c;
        tz_max = (z0 - oz) * c;
    }

    // Box entry t0 = max of per-axis entries; exit t1 = min of exits.
    CUDAreal t0, t1;
    if (tx_min > ty_min)
        t0 = tx_min;
    else
        t0 = ty_min;
    if (tz_min > t0)
        t0 = tz_min;

    if (tx_max < ty_max)
        t1 = tx_max;
    else
        t1 = ty_max;
    if (tz_max < t1)
        t1 = tz_max;

    // Empty interval => the ray misses the bounding box entirely.
    if (t0 > t1)
        return (false);

    // initial cell coordinates
    int ix, iy, iz;
    if (inside_bb(p0, p1, ray.o)) {  // does the ray start inside the grid?
        ix = clamp((ox - x0) * nx / (x1 - x0), 0, nx - 1);
        iy = clamp((oy - y0) * ny / (y1 - y0), 0, ny - 1);
        iz = clamp((oz - z0) * nz / (z1 - z0), 0, nz - 1);
    }
    else {
        CUDAreal3 p = ray.o + t0 * ray.d;  // initial hit point with grid's bounding box
        ix = clamp((p.x - x0) * nx / (x1 - x0), 0, nx - 1);
        iy = clamp((p.y - y0) * ny / (y1 - y0), 0, ny - 1);
        iz = clamp((p.z - z0) * nz / (z1 - z0), 0, nz - 1);
    }

    // ray parameter increments per cell in the x, y, and z directions
    CUDAreal dtx = (tx_max - tx_min) / nx;
    CUDAreal dty = (ty_max - ty_min) / ny;
    CUDAreal dtz = (tz_max - tz_min) / nz;

    // DDA state: t of the next crossing per axis, cell step direction, and
    // the (exclusive) stop index that marks leaving the grid on that axis.
    CUDAreal tx_next, ty_next, tz_next;
    int ix_step, iy_step, iz_step;
    int ix_stop, iy_stop, iz_stop;

    if (dx > 0) {
        tx_next = tx_min + (ix + 1) * dtx;
        ix_step = +1;
        ix_stop = nx;
    }
    else {
        tx_next = tx_min + (nx - ix) * dtx;
        ix_step = -1;
        ix_stop = -1;
    }
    if (dx == 0.0) {
        // Ray parallel to the x slabs: never cross an x boundary.
        tx_next = kHugeValueCUDA;
        ix_step = -1;
        ix_stop = -1;
    }

    if (dy > 0) {
        ty_next = ty_min + (iy + 1) * dty;
        iy_step = +1;
        iy_stop = ny;
    }
    else {
        ty_next = ty_min + (ny - iy) * dty;
        iy_step = -1;
        iy_stop = -1;
    }
    if (dy == 0.0) {
        ty_next = kHugeValueCUDA;
        iy_step = -1;
        iy_stop = -1;
    }

    if (dz > 0) {
        tz_next = tz_min + (iz + 1) * dtz;
        iz_step = +1;
        iz_stop = nz;
    }
    else {
        tz_next = tz_min + (nz - iz) * dtz;
        iz_step = -1;
        iz_stop = -1;
    }
    if (dz == 0.0) {
        tz_next = kHugeValueCUDA;
        iz_step = -1;
        iz_stop = -1;
    }

    // if (tx_next < 0) tx_next = kHugeValueCUDA;
    // if (ty_next < 0) ty_next = kHugeValueCUDA;
    // if (tz_next < 0) tz_next = kHugeValueCUDA;

    // Special case: the entry cell itself is occupied — report the face the
    // ray came through, chosen per axis only when the entry cell sits on the
    // grid boundary along that axis.
    // Test if there is a block face glued to the bounding box:
    int block_id = cells[ix + nx * iy + nx * ny * iz];
    CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
    if (block_id != 0) {
        real t_before = kHugeValueCUDA;
        real tx_min_pp = tx_next - dtx;
        real ty_min_pp = ty_next - dty;
        real tz_min_pp = tz_next - dtz;
        if (ix != 0 && ix != (nx - 1)) tx_min_pp = -kHugeValueCUDA;
        if (iy != 0 && iy != (ny - 1)) ty_min_pp = -kHugeValueCUDA;
        if (iz != 0 && iz != (nz - 1)) tz_min_pp = -kHugeValueCUDA;
        if (tx_min_pp > ty_min_pp && tx_min_pp > tz_min_pp) {
            (sr).normal = __make_CUDAreal3(-(CUDAreal)ix_step, 0, 0);
            //sr.hdir = ix_step > 0 ? ShadeRec::South : ShadeRec::North;
            t_before = tx_min_pp;
        }
        else if (ty_min_pp > tz_min_pp) {
            (sr).normal = __make_CUDAreal3(0, -(CUDAreal)iy_step, 0);
            //sr.hdir = iy_step > 0 ? ShadeRec::Bottom : ShadeRec::Top;
            t_before = ty_min_pp;
        }
        else {
            (sr).normal = __make_CUDAreal3(0, 0, -(CUDAreal)iz_step);
            //sr.hdir = iz_step > 0 ? ShadeRec::West : ShadeRec::East;
            t_before = tz_min_pp;
        }
        // block_hit() is disabled, so occupancy alone counts as a hit.
        if (true /*block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
            tmin = t_before;
            return (true);
        }
    }

    // traverse the grid: step one cell at a time along the axis whose next
    // boundary crossing comes first; stop on an occupied cell or on exit.
    tmin = kHugeValueCUDA;
    CUDAreal t_before = kHugeValueCUDA;
    while (true) {
        // MCBlock* block_ptr = cells[ix + nx * iy + nx * ny * iz];
        if (tx_next < ty_next && tx_next < tz_next) {
            //real tmin = tx_next - kEpsilonCUDACUDA;
            //Material* mptr = sr.material_ptr;
            (sr).normal = __make_CUDAreal3(-(CUDAreal)ix_step, 0, 0);
            //sr.hdir = ix_step > 0 ? ShadeRec::South : ShadeRec::North;
            //sr.t_Before = t_before;
            t_before = tx_next;
            tx_next += dtx;
            ix += ix_step;
            if (ix == ix_stop) {
                (sr).material = 0;
                return (false);
            }
            int block_ptr = cells[ix + nx * iy + nx * ny * iz];
            CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
            if (block_ptr /* && block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
                tmin = t_before;
                return (true);
            }
            //sr.material_ptr = mptr;
        }
        else {
            if (ty_next < tz_next) {
                //Material* mptr = sr.material_ptr;
                (sr).normal = __make_CUDAreal3(0.0, -(CUDAreal)iy_step, 0);
                //sr.hdir = iy_step > 0 ? ShadeRec::Bottom : ShadeRec::Top;
                //sr.t_Before = t_before;
                t_before = ty_next;
                ty_next += dty;
                iy += iy_step;
                if (iy == iy_stop) {
                    //(*sr).material = mat_ptr;
                    return (false);
                }
                int block_ptr = cells[ix + nx * iy + nx * ny * iz];
                CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
                if (block_ptr /*&& block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
                    //material_ptr = object_ptr->get_material();
                    tmin = t_before;
                    //t = ty_next;
                    return (true);
                }
                //sr.material_ptr = mptr;
                //mat_ptr
            }
            else {
                //Material* mptr = sr.material_ptr;
                (sr).normal = __make_CUDAreal3(0.0, 0.0, -(CUDAreal)iz_step);
                //sr.hdir = iz_step > 0 ? ShadeRec::West : ShadeRec::East;
                //sr.t_Before = t_before;
                t_before = tz_next;
                tz_next += dtz;
                iz += iz_step;
                if (iz == iz_stop) {
                    //sr.material_ptr = mat_ptr;
                    return (false);
                }
                int block_ptr = cells[ix + nx * iy + nx * ny * iz];
                CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
                //material_ptr = sr.material_ptr;
                if (block_ptr/* && block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
                    //material_ptr = object_ptr->get_material();
                    //sr.material_ptr = material_ptr;
                    tmin = t_before;
                    // t = tz_next;
                    return (true);
                }
                //sr.material_ptr = mptr;
            }
        }
    }
}
#endif // WCUDA
| c797c08d18164b7f83168c934b39cb0e78eb5b75.cu | #ifdef WCUDA
#include <cuda_runtime.h>
#include "ray.cuh"
#include "ray.h"
#include <curand_kernel.h>
#include "CUDAhelpers.h"
#include "mcgrid.h"
#include "shaderec.h"
// Returns true iff `point` lies strictly inside the axis-aligned box [p0, p1]
// (p0 = min corner, p1 = max corner; boundary points count as outside).
// Fix: the y upper bound previously compared against p1.x (copy-paste bug),
// which misclassified points whenever the box is not a cube in x/y.
static __device__ bool inside_bb(const CUDAreal3 &p0, const CUDAreal3 &p1, const CUDAreal3 &point) {
    return
        point.x > p0.x && point.x < p1.x &&
        point.y > p0.y && point.y < p1.y &&
        point.z > p0.z && point.z < p1.z;
}
// Clamp `value` into the closed interval [a, b]; assumes a <= b.
static __device__ CUDAreal clamp(CUDAreal value, CUDAreal a, CUDAreal b) {
    return (value < a) ? a : ((value > b) ? b : value);
}
// Shadow-ray query against the grid. Stub: always reports "no occluder" and
// never writes tmin. NOTE(review): any lighting code that relies on this will
// render everything unshadowed — confirm the stub is intentional before use.
__device__ bool MCGridCUDA::shadow_hit(const rayCU& ray, CUDAreal& tmin) const
{
    return false;
}
__device__ bool MCGridCUDA::hit(const rayCU& ray, CUDAreal& tmin, ShadeRecCUDA& sr) const
{
//Material* mat_ptr = sr.material_ptr;
// if it's a kernel:
/*int column = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
rayCU ray = rays[column + stride * row];*/
//else:
//MCGridCUDA grid = *gr;
//int nx = grid.nx;
//int ny = grid.ny;
//int nz = grid.nz;
CUDAreal ox = ray.o.x;
CUDAreal oy = ray.o.y;
CUDAreal oz = ray.o.z;
CUDAreal dx = ray.d.x;
CUDAreal dy = ray.d.y;
CUDAreal dz = ray.d.z;
CUDAreal x0 = p0.x;
CUDAreal y0 = p0.y;
CUDAreal z0 = p0.z;
CUDAreal x1 = p1.x;
CUDAreal y1 = p1.y;
CUDAreal z1 = p1.z;
CUDAreal tx_min, ty_min, tz_min;
CUDAreal tx_max, ty_max, tz_max;
// the following code includes modifications from Shirley and Morley (2003)
CUDAreal a = 1.0 / dx;
if (a >= 0) {
tx_min = (x0 - ox) * a;
tx_max = (x1 - ox) * a;
}
else {
tx_min = (x1 - ox) * a;
tx_max = (x0 - ox) * a;
}
CUDAreal b = 1.0 / dy;
if (b >= 0) {
ty_min = (y0 - oy) * b;
ty_max = (y1 - oy) * b;
}
else {
ty_min = (y1 - oy) * b;
ty_max = (y0 - oy) * b;
}
CUDAreal c = 1.0 / dz;
if (c >= 0) {
tz_min = (z0 - oz) * c;
tz_max = (z1 - oz) * c;
}
else {
tz_min = (z1 - oz) * c;
tz_max = (z0 - oz) * c;
}
CUDAreal t0, t1;
if (tx_min > ty_min)
t0 = tx_min;
else
t0 = ty_min;
if (tz_min > t0)
t0 = tz_min;
if (tx_max < ty_max)
t1 = tx_max;
else
t1 = ty_max;
if (tz_max < t1)
t1 = tz_max;
if (t0 > t1)
return (false);
// initial cell coordinates
int ix, iy, iz;
if (inside_bb(p0, p1, ray.o)) { // does the ray start inside the grid?
ix = clamp((ox - x0) * nx / (x1 - x0), 0, nx - 1);
iy = clamp((oy - y0) * ny / (y1 - y0), 0, ny - 1);
iz = clamp((oz - z0) * nz / (z1 - z0), 0, nz - 1);
}
else {
CUDAreal3 p = ray.o + t0 * ray.d; // initial hit point with grid's bounding box
ix = clamp((p.x - x0) * nx / (x1 - x0), 0, nx - 1);
iy = clamp((p.y - y0) * ny / (y1 - y0), 0, ny - 1);
iz = clamp((p.z - z0) * nz / (z1 - z0), 0, nz - 1);
}
// ray parameter increments per cell in the x, y, and z directions
CUDAreal dtx = (tx_max - tx_min) / nx;
CUDAreal dty = (ty_max - ty_min) / ny;
CUDAreal dtz = (tz_max - tz_min) / nz;
CUDAreal tx_next, ty_next, tz_next;
int ix_step, iy_step, iz_step;
int ix_stop, iy_stop, iz_stop;
if (dx > 0) {
tx_next = tx_min + (ix + 1) * dtx;
ix_step = +1;
ix_stop = nx;
}
else {
tx_next = tx_min + (nx - ix) * dtx;
ix_step = -1;
ix_stop = -1;
}
if (dx == 0.0) {
tx_next = kHugeValueCUDA;
ix_step = -1;
ix_stop = -1;
}
if (dy > 0) {
ty_next = ty_min + (iy + 1) * dty;
iy_step = +1;
iy_stop = ny;
}
else {
ty_next = ty_min + (ny - iy) * dty;
iy_step = -1;
iy_stop = -1;
}
if (dy == 0.0) {
ty_next = kHugeValueCUDA;
iy_step = -1;
iy_stop = -1;
}
if (dz > 0) {
tz_next = tz_min + (iz + 1) * dtz;
iz_step = +1;
iz_stop = nz;
}
else {
tz_next = tz_min + (nz - iz) * dtz;
iz_step = -1;
iz_stop = -1;
}
if (dz == 0.0) {
tz_next = kHugeValueCUDA;
iz_step = -1;
iz_stop = -1;
}
// if (tx_next < 0) tx_next = kHugeValueCUDA;
// if (ty_next < 0) ty_next = kHugeValueCUDA;
// if (tz_next < 0) tz_next = kHugeValueCUDA;
// Test if there is a block face glued to the bounding box:
int block_id = cells[ix + nx * iy + nx * ny * iz];
CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
if (block_id != 0) {
real t_before = kHugeValueCUDA;
real tx_min_pp = tx_next - dtx;
real ty_min_pp = ty_next - dty;
real tz_min_pp = tz_next - dtz;
if (ix != 0 && ix != (nx - 1)) tx_min_pp = -kHugeValueCUDA;
if (iy != 0 && iy != (ny - 1)) ty_min_pp = -kHugeValueCUDA;
if (iz != 0 && iz != (nz - 1)) tz_min_pp = -kHugeValueCUDA;
if (tx_min_pp > ty_min_pp && tx_min_pp > tz_min_pp) {
(sr).normal = __make_CUDAreal3(-(CUDAreal)ix_step, 0, 0);
//sr.hdir = ix_step > 0 ? ShadeRec::South : ShadeRec::North;
t_before = tx_min_pp;
}
else if (ty_min_pp > tz_min_pp) {
(sr).normal = __make_CUDAreal3(0, -(CUDAreal)iy_step, 0);
//sr.hdir = iy_step > 0 ? ShadeRec::Bottom : ShadeRec::Top;
t_before = ty_min_pp;
}
else {
(sr).normal = __make_CUDAreal3(0, 0, -(CUDAreal)iz_step);
//sr.hdir = iz_step > 0 ? ShadeRec::West : ShadeRec::East;
t_before = tz_min_pp;
}
if (true /*block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
tmin = t_before;
return (true);
}
}
// traverse the grid
tmin = kHugeValueCUDA;
CUDAreal t_before = kHugeValueCUDA;
while (true) {
// MCBlock* block_ptr = cells[ix + nx * iy + nx * ny * iz];
if (tx_next < ty_next && tx_next < tz_next) {
//real tmin = tx_next - kEpsilonCUDACUDA;
//Material* mptr = sr.material_ptr;
(sr).normal = __make_CUDAreal3(-(CUDAreal)ix_step, 0, 0);
//sr.hdir = ix_step > 0 ? ShadeRec::South : ShadeRec::North;
//sr.t_Before = t_before;
t_before = tx_next;
tx_next += dtx;
ix += ix_step;
if (ix == ix_stop) {
(sr).material = 0;
return (false);
}
int block_ptr = cells[ix + nx * iy + nx * ny * iz];
CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
if (block_ptr /* && block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
tmin = t_before;
return (true);
}
//sr.material_ptr = mptr;
}
else {
if (ty_next < tz_next) {
//Material* mptr = sr.material_ptr;
(sr).normal = __make_CUDAreal3(0.0, -(CUDAreal)iy_step, 0);
//sr.hdir = iy_step > 0 ? ShadeRec::Bottom : ShadeRec::Top;
//sr.t_Before = t_before;
t_before = ty_next;
ty_next += dty;
iy += iy_step;
if (iy == iy_stop) {
//(*sr).material = mat_ptr;
return (false);
}
int block_ptr = cells[ix + nx * iy + nx * ny * iz];
CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
if (block_ptr /*&& block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
//material_ptr = object_ptr->get_material();
tmin = t_before;
//t = ty_next;
return (true);
}
//sr.material_ptr = mptr;
//mat_ptr
}
else {
//Material* mptr = sr.material_ptr;
(sr).normal = __make_CUDAreal3(0.0, 0.0, -(CUDAreal)iz_step);
//sr.hdir = iz_step > 0 ? ShadeRec::West : ShadeRec::East;
//sr.t_Before = t_before;
t_before = tz_next;
tz_next += dtz;
iz += iz_step;
if (iz == iz_stop) {
//sr.material_ptr = mat_ptr;
return (false);
}
int block_ptr = cells[ix + nx * iy + nx * ny * iz];
CUDAreal3 block_p0 = __make_CUDAreal3(x0 + nx * BLOCKLENGTH_CUDA, y0 + ny * BLOCKLENGTH_CUDA, z0 + nz * BLOCKLENGTH_CUDA);
//material_ptr = sr.material_ptr;
if (block_ptr/* && block_ptr->block_hit(ray, block_p0, t_before, sr)*/) {
//material_ptr = object_ptr->get_material();
//sr.material_ptr = material_ptr;
tmin = t_before;
// t = tz_next;
return (true);
}
//sr.material_ptr = mptr;
}
}
}
}
#endif // WCUDA
|
d608943cfbe1aac86b12c4e5a717331f3a91af40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/**
* Element-wise Vector Multiplication: C[i] = A[i] * B[i].
* This sample is a very basic sample that implements element by element vector multiplication.
*/
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
/**
 * CUDA Kernel Device code
 *
 * Element-wise vector multiplication: C[i] = A[i] * B[i].
 * Each thread handles two elements, one in each half of its block's
 * 2*blockDim.x-wide tile (note the 2*blockDim.x*blockIdx.x stride), so a
 * grid covers up to 2 * gridDim.x * blockDim.x elements.
 *
 * Fixes over the original:
 *  - second store multiplied (was mistakenly added: A[.] + B[.]);
 *  - guard bounds the index actually written (i + blockDim.x); the original
 *    condition (i < numElements + blockDim.x) wrote past the end of C;
 *  - removed unused locals (size, d_A, d_B, d_C).
 */
__global__ void vectorMultiply(float *A, float *B, float *C, int numElements)
{
    // First element of this thread's tile.
    int i = threadIdx.x + 2 * blockDim.x * blockIdx.x;
    if (i < numElements) C[i] = A[i] * B[i];

    // Second element, one blockDim.x further on; guard the written index.
    int j = i + blockDim.x;
    if (j < numElements) C[j] = A[j] * B[j];
}
// Host main routine: allocates host/device vectors, runs vectorMultiply on
// the device, and verifies the result against a host-side reference.
int main(void)
{
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;

    // Small epsilon keeps B[0] = 1/EPS finite (avoids division by zero).
    float EPS = 0.00001;
    int numElements = 50000;
    size_t size = numElements * sizeof(float);
    printf("[Vector multiplication of %d elements]\n", numElements);

    // Allocate the host input vectors A, B and the host output vector C
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors: A[i] = i, B[i] = 1/(EPS + i)
    for (int i = 0; i < numElements; i++)
        h_A[i] = (float)i;
    for (int i = 0; i < numElements; i++)
        h_B[i] = 1 / (EPS + i);

    // Allocate the device input vector A
    float *d_A = NULL;
    err = hipMalloc((void **)&d_A, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = hipMalloc((void **)&d_B, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = hipMalloc((void **)&d_C, size);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B to device memory.
    // Fix: the original discarded these return codes.
    printf("Copy input data from the host memory to the CUDA device\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector A to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector B to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the vectorMultiply CUDA Kernel.  The grid is sized for one
    // element per thread; vectorMultiply covers two per thread, so this
    // over-provisions ~2x — harmless thanks to the kernel's bounds guards.
    int threadsPerBlock = 256;
    int blocksPerGrid = ceil(numElements / (float) threadsPerBlock);
    hipLaunchKernelGGL(( vectorMultiply) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, d_C, numElements);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    err = hipGetLastError();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to launch vectorMultiply kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result vector back to the host.  hipMemcpy is blocking,
    // so it also synchronizes with the kernel launch above.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Verify that the result vector is correct
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs((h_A[i] * h_B[i]) - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    // Free device global memory
    hipFree(d_A); hipFree(d_B); hipFree(d_C);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    printf("Done\n");
    return 0;
}
#include <stdlib.h>
#include <math.h>
/**
* Element-wise Vector Multiplication: C[i] = A[i] * B[i].
* This sample is a very basic sample that implements element by element vector multiplication.
*/
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
/**
 * CUDA Kernel Device code
 *
 * Element-wise vector multiplication: C[i] = A[i] * B[i].
 * Each thread handles two elements, one in each half of its block's
 * 2*blockDim.x-wide tile (note the 2*blockDim.x*blockIdx.x stride), so a
 * grid covers up to 2 * gridDim.x * blockDim.x elements.
 *
 * Fixes over the original:
 *  - second store multiplied (was mistakenly added: A[.] + B[.]);
 *  - guard bounds the index actually written (i + blockDim.x); the original
 *    condition (i < numElements + blockDim.x) wrote past the end of C;
 *  - removed unused locals (size, d_A, d_B, d_C).
 */
__global__ void vectorMultiply(float *A, float *B, float *C, int numElements)
{
    // First element of this thread's tile.
    int i = threadIdx.x + 2 * blockDim.x * blockIdx.x;
    if (i < numElements) C[i] = A[i] * B[i];

    // Second element, one blockDim.x further on; guard the written index.
    int j = i + blockDim.x;
    if (j < numElements) C[j] = A[j] * B[j];
}
// Host main routine: allocates host/device vectors, runs vectorMultiply on
// the device, and verifies the result against a host-side reference.
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Small epsilon keeps B[0] = 1/EPS finite (avoids division by zero).
    float EPS = 0.00001;
    int numElements = 50000;
    size_t size = numElements * sizeof(float);
    printf("[Vector multiplication of %d elements]\n", numElements);

    // Allocate the host input vectors A, B and the host output vector C
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);

    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors: A[i] = i, B[i] = 1/(EPS + i)
    for (int i = 0; i < numElements; i++)
        h_A[i] = (float)i;
    for (int i = 0; i < numElements; i++)
        h_B[i] = 1 / (EPS + i);

    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B to device memory.
    // Fix: the original discarded these return codes.
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the vectorMultiply CUDA Kernel.  The grid is sized for one
    // element per thread; vectorMultiply covers two per thread, so this
    // over-provisions ~2x — harmless thanks to the kernel's bounds guards.
    int threadsPerBlock = 256;
    int blocksPerGrid = ceil(numElements / (float) threadsPerBlock);
    vectorMultiply <<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, d_C, numElements);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorMultiply kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result vector back to the host.  cudaMemcpy is blocking,
    // so it also synchronizes with the kernel launch above.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Verify that the result vector is correct
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs((h_A[i] * h_B[i]) - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    // Free device global memory
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    printf("Done\n");
    return 0;
}
af30222420b17d9e449507bcd957e16e2863066d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/concatenate_masks.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/structs/detail/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/advance.h>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <numeric>
#include <utility>
namespace cudf {
namespace detail {
namespace {
// From benchmark data, the fused kernel optimization appears to perform better
// when there are more than a trivial number of columns, or when the null mask
// can also be computed at the same time
constexpr bool use_fused_kernel_heuristic(bool const has_nulls, size_t const num_columns)
{
  // The fused path pays off whenever a null mask must be built, or once the
  // number of input columns is beyond trivial (empirically, more than four).
  if (has_nulls) { return true; }
  return num_columns > 4;
}
// Packs the inputs for device-side consumption: creates a column_device_view
// for each input, uploads a contiguous device array of those views plus a
// prefix sum of the input sizes, and returns
// (owners, d_views, d_offsets, total output rows).
// The returned owners must outlive any use of d_views.
auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_view stream)
{
  // Create device views for each input view
  using CDViewPtr = decltype(column_device_view::create(std::declval<column_view>(),
                                                        std::declval<rmm::cuda_stream_view>()));
  auto device_view_owners = std::vector<CDViewPtr>(views.size());
  std::transform(views.begin(), views.end(), device_view_owners.begin(), [stream](auto const& col) {
    return column_device_view::create(col, stream);
  });

  // Assemble contiguous array of device views
  auto device_views = thrust::host_vector<column_device_view>();
  device_views.reserve(views.size());
  std::transform(device_view_owners.cbegin(),
                 device_view_owners.cend(),
                 std::back_inserter(device_views),
                 [](auto const& col) { return *col; });
  auto d_views =
    make_device_uvector_async(device_views, stream, rmm::mr::get_current_device_resource());

  // Compute the partition offsets.  The scan writes starting at offsets[1],
  // so offsets[i] is the row at which input i begins in the concatenated
  // output and offsets.back() is the total output size.
  auto offsets = thrust::host_vector<size_t>(views.size() + 1);
  thrust::transform_inclusive_scan(
    thrust::host,
    device_views.cbegin(),
    device_views.cend(),
    std::next(offsets.begin()),
    [](auto const& col) { return col.size(); },
    thrust::plus{});
  auto d_offsets =
    make_device_uvector_async(offsets, stream, rmm::mr::get_current_device_resource());
  auto const output_size = offsets.back();

  return std::make_tuple(
    std::move(device_view_owners), std::move(d_views), std::move(d_offsets), output_size);
}
/**
* @brief Concatenates the null mask bits of all the column device views in the
* `views` array to the destination bitmask.
*
* @tparam block_size Block size for using with single_lane_block_sum_reduce
*
* @param views Array of column_device_view
* @param output_offsets Prefix sum of sizes of elements of `views`
* @param number_of_views Size of `views` array
* @param dest_mask The output buffer to copy null masks into
* @param number_of_mask_bits The total number of null masks bits that are being copied
* @param out_valid_count To hold the total number of valid bits set
*/
template <size_type block_size>
__global__ void concatenate_masks_kernel(column_device_view const* views,
                                         size_t const* output_offsets,
                                         size_type number_of_views,
                                         bitmask_type* dest_mask,
                                         size_type number_of_mask_bits,
                                         size_type* out_valid_count)
{
  // One thread per output bit, grid-striding over all bits.
  size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x;

  auto active_mask = __ballot_sync(0xFFFF'FFFFu, mask_index < number_of_mask_bits);

  size_type warp_valid_count = 0;

  while (mask_index < number_of_mask_bits) {
    // Locate the input view owning this bit: last offset <= mask_index.
    size_type const source_view_index =
      thrust::upper_bound(
        thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) -
      output_offsets - 1;
    bool bit_is_set = true;
    if (source_view_index < number_of_views) {
      size_type const column_element_index = mask_index - output_offsets[source_view_index];
      bit_is_set = views[source_view_index].is_valid(column_element_index);
    }
    // Gather the warp's 32 validity bits into one bitmask word.
    bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);

    // Lane 0 of each warp writes the word and tallies its set bits.
    if (threadIdx.x % detail::warp_size == 0) {
      dest_mask[word_index(mask_index)] = new_word;
      warp_valid_count += __popc(new_word);
    }

    mask_index += blockDim.x * gridDim.x;
    // Re-ballot so lanes that ran past the end drop out of later ballots.
    active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits);
  }

  using detail::single_lane_block_sum_reduce;
  auto const block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
  // One atomic per block accumulates the grid-wide valid count.
  if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
} // namespace
// Concatenates the null masks of `d_views` (partitioned by `d_offsets`) into
// `dest_mask`, returning the resulting null (invalid) count.
size_type concatenate_masks(device_span<column_device_view const> d_views,
                            device_span<size_t const> d_offsets,
                            bitmask_type* dest_mask,
                            size_type output_size,
                            rmm::cuda_stream_view stream)
{
  rmm::device_scalar<size_type> d_valid_count(0, stream);
  constexpr size_type block_size{256};
  cudf::detail::grid_1d config(output_size, block_size);
  hipLaunchKernelGGL(( concatenate_masks_kernel<block_size>)
    , dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
      d_views.data(),
      d_offsets.data(),
      static_cast<size_type>(d_views.size()),
      dest_mask,
      output_size,
      d_valid_count.data());
  // Null count = total bits minus valid bits (reads the count back to host).
  return output_size - d_valid_count.value(stream);
}
size_type concatenate_masks(host_span<column_view const> views,
                            bitmask_type* dest_mask,
                            rmm::cuda_stream_view stream)
{
  // Stage the views and partition offsets in device memory, then defer to the
  // device-span overload.  The first tuple member owns the device views and
  // must stay alive for the duration of the call.
  auto&& [view_owners, d_views, d_offsets, output_size] = create_device_views(views, stream);
  return concatenate_masks(d_views, d_offsets, dest_mask, output_size, stream);
}
namespace {
/**
 * @brief Copies elements (and, when Nullable, null-mask bits) from many input
 * columns into one output column in a single kernel.
 *
 * Each thread grid-strides over output rows, binary-searches the partition
 * offsets to find the owning input column, and copies one element per step.
 */
template <typename T, size_type block_size, bool Nullable>
__global__ void fused_concatenate_kernel(column_device_view const* input_views,
                                         size_t const* input_offsets,
                                         size_type num_input_views,
                                         mutable_column_device_view output_view,
                                         size_type* out_valid_count)
{
  auto const output_size = output_view.size();
  auto* output_data      = output_view.data<T>();

  // 64-bit index: the grid-stride increment could overflow 32 bits.
  int64_t output_index       = threadIdx.x + blockIdx.x * blockDim.x;
  size_type warp_valid_count = 0;

  unsigned active_mask;
  if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFFu, output_index < output_size); }
  while (output_index < output_size) {
    // Lookup input index by searching for output index in offsets
    auto const offset_it            = thrust::prev(thrust::upper_bound(
      thrust::seq, input_offsets, input_offsets + num_input_views, output_index));
    size_type const partition_index = offset_it - input_offsets;

    // Copy input data to output
    auto const offset_index   = output_index - *offset_it;
    auto const& input_view    = input_views[partition_index];
    auto const* input_data    = input_view.data<T>();
    output_data[output_index] = input_data[offset_index];

    if (Nullable) {
      bool const bit_is_set       = input_view.is_valid(offset_index);
      bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);

      // First thread writes bitmask word
      if (threadIdx.x % detail::warp_size == 0) {
        output_view.null_mask()[word_index(output_index)] = new_word;
      }

      warp_valid_count += __popc(new_word);
    }

    output_index += blockDim.x * gridDim.x;
    // Re-ballot so lanes past the end drop out of subsequent ballots.
    if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
  }

  if (Nullable) {
    using detail::single_lane_block_sum_reduce;
    auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
    // One atomic per block accumulates the grid-wide valid count.
    if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
  }
}
/**
 * @brief Single-launch concatenation for fixed-width columns: allocates the
 * output, then fills its data and (when has_nulls) its null mask in one
 * fused kernel.
 */
template <typename T>
std::unique_ptr<column> fused_concatenate(host_span<column_view const> views,
                                          bool const has_nulls,
                                          rmm::cuda_stream_view stream,
                                          rmm::mr::device_memory_resource* mr)
{
  using mask_policy = cudf::mask_allocation_policy;

  // Preprocess and upload inputs to device memory
  auto const device_views = create_device_views(views, stream);
  auto const& d_views     = std::get<1>(device_views);
  auto const& d_offsets   = std::get<2>(device_views);
  auto const output_size  = std::get<3>(device_views);
  CUDF_EXPECTS(output_size <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
               "Total number of concatenated rows exceeds the column size limit",
               std::overflow_error);

  // Allocate output
  auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
  auto out_col      = detail::allocate_like(views.front(), output_size, policy, stream, mr);
  auto out_view     = out_col->mutable_view();
  auto d_out_view   = mutable_column_device_view::create(out_view, stream);

  rmm::device_scalar<size_type> d_valid_count(0, stream);

  // Launch kernel.  The Nullable instantiation is chosen at runtime so the
  // no-nulls case skips all mask work.
  constexpr size_type block_size{256};
  cudf::detail::grid_1d config(output_size, block_size);
  auto const kernel = has_nulls ? fused_concatenate_kernel<T, block_size, true>
                                : fused_concatenate_kernel<T, block_size, false>;
  hipLaunchKernelGGL(( kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(),
    d_views.data(),
    d_offsets.data(),
    static_cast<size_type>(d_views.size()),
    *d_out_view,
    d_valid_count.data());

  if (has_nulls) {
    // Null count = total rows minus the kernel's valid-bit tally.
    out_col->set_null_count(output_size - d_valid_count.value(stream));
  } else {
    out_col->set_null_count(0);  // prevent null count from being materialized
  }

  return out_col;
}
/**
 * @brief Concatenates by issuing one device copy per input column; the null
 * masks (if any) are concatenated afterwards in a separate pass.
 */
template <typename T>
std::unique_ptr<column> for_each_concatenate(host_span<column_view const> views,
                                             bool const has_nulls,
                                             rmm::cuda_stream_view stream,
                                             rmm::mr::device_memory_resource* mr)
{
  // Accumulating into size_type is safe here: callers have already run
  // bounds_and_type_check, which rejects totals exceeding size_type.
  size_type const total_element_count =
    std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
      return accumulator + v.size();
    });

  using mask_policy = cudf::mask_allocation_policy;
  auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
  auto col = cudf::detail::allocate_like(views.front(), total_element_count, policy, stream, mr);

  auto m_view = col->mutable_view();

  // Copy each input's elements into its slot of the output, in order.
  auto count = 0;
  for (auto& v : views) {
    thrust::copy(rmm::exec_policy(stream), v.begin<T>(), v.end<T>(), m_view.begin<T>() + count);
    count += v.size();
  }

  // If concatenated column is nullable, proceed to calculate it
  if (has_nulls) {
    col->set_null_count(
      cudf::detail::concatenate_masks(views, (col->mutable_view()).null_mask(), stream));
  } else {
    col->set_null_count(0);  // prevent null count from being materialized
  }

  return col;
}
/**
 * @brief Type-dispatched concatenation functor.  The generic operator()
 * handles fixed-width types; dictionary, string, list, and struct types are
 * handled by the explicit specializations below.
 */
struct concatenate_dispatch {
  host_span<column_view const> views;
  rmm::cuda_stream_view stream;
  rmm::mr::device_memory_resource* mr;

  // fixed width
  template <typename T>
  std::unique_ptr<column> operator()()
  {
    bool const has_nulls =
      std::any_of(views.begin(), views.end(), [](auto const& col) { return col.has_nulls(); });

    // Use a heuristic to guess when the fused kernel will be faster
    if (use_fused_kernel_heuristic(has_nulls, views.size())) {
      return fused_concatenate<T>(views, has_nulls, stream, mr);
    } else {
      return for_each_concatenate<T>(views, has_nulls, stream, mr);
    }
  }
};
// Dictionary columns: delegate to the dictionary-specific implementation.
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::dictionary32>()
{
  return cudf::dictionary::detail::concatenate(views, stream, mr);
}

// String columns: delegate to the strings-specific implementation.
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::string_view>()
{
  return cudf::strings::detail::concatenate(views, stream, mr);
}

// List columns: delegate to the lists-specific implementation.
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::list_view>()
{
  return cudf::lists::detail::concatenate(views, stream, mr);
}

// Struct columns: delegate to the structs-specific implementation.
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::struct_view>()
{
  return cudf::structs::detail::concatenate(views, stream, mr);
}
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream);
/**
* @brief Functor for traversing child columns and recursively verifying concatenation
* bounds and types.
*/
class traverse_children {
 public:
  // nothing to do for simple types; the string/struct/list specializations
  // defined below this class perform the real recursive checks.
  template <typename T>
  void operator()(host_span<column_view const>, rmm::cuda_stream_view)
  {
  }

 private:
  // verify length of concatenated offsets.
  void check_offsets_size(host_span<column_view const> cols)
  {
    // offsets. we can't just add up the total sizes of all offset child columns because each one
    // has an extra value, regardless of the # of parent rows. So we have to add up the total # of
    // rows in the base column and add 1 at the end
    size_t const total_offset_count =
      std::accumulate(cols.begin(),
                      cols.end(),
                      std::size_t{},
                      [](size_t a, auto const& b) -> size_t { return a + b.size(); }) +
      1;
    // Throws std::overflow_error if the merged offsets column would not fit.
    CUDF_EXPECTS(total_offset_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
                 "Total number of concatenated offsets exceeds the column size limit",
                 std::overflow_error);
  }
};
// Strings: both the concatenated offsets and the concatenated chars must fit
// in size_type.
template <>
void traverse_children::operator()<cudf::string_view>(host_span<column_view const> cols,
                                                      rmm::cuda_stream_view stream)
{
  // verify offsets
  check_offsets_size(cols);

  // chars: total character count, computed per column as the difference of
  // its bounding offsets (falling back to chars_size() when unsliced).
  size_t const total_char_count = std::accumulate(
    cols.begin(), cols.end(), std::size_t{}, [stream](size_t a, auto const& b) -> size_t {
      strings_column_view scv(b);
      return a + (scv.is_empty() ? 0
                  // if the column is unsliced, skip the offset retrieval.
                  : scv.offset() > 0
                    ? cudf::detail::get_value<offset_type>(
                        scv.offsets(), scv.offset() + scv.size(), stream) -
                        cudf::detail::get_value<offset_type>(scv.offsets(), scv.offset(), stream)
                    // if the offset() is 0, it can still be sliced to a shorter length. in this case
                    // we only need to read a single offset. otherwise just return the full length
                    // (chars_size())
                    : scv.size() + 1 == scv.offsets().size()
                        ? scv.chars_size()
                        : cudf::detail::get_value<offset_type>(scv.offsets(), scv.size(), stream));
    });
  CUDF_EXPECTS(total_char_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
               "Total number of concatenated chars exceeds the column size limit",
               std::overflow_error);
}
// Structs: validate each child position across all inputs as if it were its
// own concatenation.
template <>
void traverse_children::operator()<cudf::struct_view>(host_span<column_view const> cols,
                                                      rmm::cuda_stream_view stream)
{
  // march each child
  auto child_iter         = thrust::make_counting_iterator(0);
  auto const num_children = cols.front().num_children();
  std::vector<column_view> nth_children;
  nth_children.reserve(cols.size());
  // For child index k, collect the k-th (sliced) child of every input and
  // recursively bounds/type-check that set.
  std::for_each(child_iter, child_iter + num_children, [&](auto child_index) {
    std::transform(cols.begin(),
                   cols.end(),
                   std::back_inserter(nth_children),
                   [child_index, stream](column_view const& col) {
                     structs_column_view scv(col);
                     return scv.get_sliced_child(child_index, stream);
                   });
    bounds_and_type_check(nth_children, stream);
    nth_children.clear();  // reuse the buffer for the next child index
  });
}
// Lists: the merged offsets column must fit in size_type, and the child
// columns must themselves satisfy the concatenation bounds/type rules.
template <>
void traverse_children::operator()<cudf::list_view>(host_span<column_view const> cols,
                                                    rmm::cuda_stream_view stream)
{
  // verify offsets
  check_offsets_size(cols);

  // Gather each list column's sliced child, then recursively validate the set.
  std::vector<column_view> child_cols;
  child_cols.reserve(cols.size());
  for (auto const& col : cols) {
    child_cols.push_back(lists_column_view{col}.get_sliced_child(stream));
  }
  bounds_and_type_check(child_cols, stream);
}
/**
* @brief Verifies that the sum of the sizes of all the columns to be concatenated
* will not exceed the max value of size_type, and verifies all column types match
*
* @param columns_to_concat Span of columns to check
*
* @throws cudf::logic_error if the total length of the concatenated columns would
* exceed the max value of size_type
*
* @throws cudf::logic_error if all of the input column types don't match
*/
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream)
{
  // All inputs must share the first column's type.
  CUDF_EXPECTS(std::all_of(cols.begin(),
                           cols.end(),
                           [expected_type = cols.front().type()](auto const& c) {
                             return c.type() == expected_type;
                           }),
               "Type mismatch in columns to concatenate.");

  // total size of all concatenated rows (summed in size_t to avoid overflow
  // before the limit check below)
  size_t const total_row_count =
    std::accumulate(cols.begin(), cols.end(), std::size_t{}, [](size_t a, auto const& b) {
      return a + static_cast<size_t>(b.size());
    });
  CUDF_EXPECTS(total_row_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
               "Total number of concatenated rows exceeds the column size limit",
               std::overflow_error);

  // traverse children: nested types impose additional per-child limits,
  // checked recursively by the traverse_children specializations.
  cudf::type_dispatcher(cols.front().type(), traverse_children{}, cols, stream);
}
} // anonymous namespace
// Concatenates the elements from a vector of column_views into a new column.
// Throws on empty input, type mismatch, or size_type overflow.
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
                                    rmm::cuda_stream_view stream,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_EXPECTS(not columns_to_concat.empty(), "Unexpected empty list of columns to concatenate.");

  // verify all types match and that we won't overflow size_type in output size
  bounds_and_type_check(columns_to_concat, stream);

  // Short-circuit: all inputs empty -> an empty column of the right type.
  if (std::all_of(columns_to_concat.begin(), columns_to_concat.end(), [](column_view const& c) {
        return c.is_empty();
      })) {
    return empty_like(columns_to_concat.front());
  }

  return type_dispatcher<dispatch_storage_type>(
    columns_to_concat.front().type(), concatenate_dispatch{columns_to_concat, stream, mr});
}
// Concatenates tables row-wise by concatenating them column-by-column.
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
                                   rmm::cuda_stream_view stream,
                                   rmm::mr::device_memory_resource* mr)
{
  if (tables_to_concat.empty()) { return std::make_unique<table>(); }

  // All tables must have the same number of columns.
  table_view const first_table = tables_to_concat.front();
  CUDF_EXPECTS(std::all_of(tables_to_concat.begin(),
                           tables_to_concat.end(),
                           [&first_table](auto const& t) {
                             return t.num_columns() == first_table.num_columns();
                           }),
               "Mismatch in table columns to concatenate.");

  // The i-th output column is the concatenation of every input's i-th column.
  std::vector<std::unique_ptr<column>> concat_columns;
  for (size_type i = 0; i < first_table.num_columns(); ++i) {
    std::vector<column_view> cols;
    std::transform(tables_to_concat.begin(),
                   tables_to_concat.end(),
                   std::back_inserter(cols),
                   [i](auto const& t) { return t.column(i); });

    // verify all types match and that we won't overflow size_type in output size
    bounds_and_type_check(cols, stream);
    concat_columns.emplace_back(detail::concatenate(cols, stream, mr));
  }
  return std::make_unique<table>(std::move(concat_columns));
}
// Builds the concatenated null mask for `views` as a device buffer.
// Returns an empty buffer when no input has nulls.
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
                                     rmm::cuda_stream_view stream,
                                     rmm::mr::device_memory_resource* mr)
{
  // Fix: take the view by const reference, consistent with every other
  // lambda in this file (the original copied the column_view per element).
  bool const has_nulls = std::any_of(
    views.begin(), views.end(), [](column_view const& col) { return col.has_nulls(); });
  if (has_nulls) {
    size_type const total_element_count =
      std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
        return accumulator + v.size();
      });

    rmm::device_buffer null_mask =
      create_null_mask(total_element_count, mask_state::UNINITIALIZED, mr);

    detail::concatenate_masks(views, static_cast<bitmask_type*>(null_mask.data()), stream);

    return null_mask;
  }
  // no nulls, so return an empty device buffer
  return rmm::device_buffer{0, stream, mr};
}
} // namespace detail
// Public API: forwards to the detail implementation on the default stream.
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
                                     rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::concatenate_masks(views, cudf::get_default_stream(), mr);
}

// Concatenates the elements from a vector of column_views.
// Public API: forwards to the detail implementation on the default stream.
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::concatenate(columns_to_concat, cudf::get_default_stream(), mr);
}

// Concatenates tables row-wise.
// Public API: forwards to the detail implementation on the default stream.
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
                                   rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::concatenate(tables_to_concat, cudf::get_default_stream(), mr);
}
} // namespace cudf
| af30222420b17d9e449507bcd957e16e2863066d.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/detail/concatenate_masks.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <cudf/dictionary/detail/concatenate.hpp>
#include <cudf/lists/detail/concatenate.hpp>
#include <cudf/strings/detail/concatenate.hpp>
#include <cudf/structs/detail/concatenate.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/utilities/default_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/advance.h>
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
#include <algorithm>
#include <numeric>
#include <utility>
namespace cudf {
namespace detail {
namespace {
// From benchmark data, the fused kernel optimization appears to perform better
// when there are more than a trivial number of columns, or when the null mask
// can also be computed at the same time
// Benchmark-driven choice: the fused concatenate kernel pays off whenever a
// null mask must be built alongside the copy, or once more than a handful of
// columns (> 4) are involved.
constexpr bool use_fused_kernel_heuristic(bool const has_nulls, size_t const num_columns)
{
  if (has_nulls) { return true; }
  return num_columns > 4;
}
auto create_device_views(host_span<column_view const> views, rmm::cuda_stream_view stream)
{
// Create device views for each input view
using CDViewPtr = decltype(column_device_view::create(std::declval<column_view>(),
std::declval<rmm::cuda_stream_view>()));
auto device_view_owners = std::vector<CDViewPtr>(views.size());
std::transform(views.begin(), views.end(), device_view_owners.begin(), [stream](auto const& col) {
return column_device_view::create(col, stream);
});
// Assemble contiguous array of device views
auto device_views = thrust::host_vector<column_device_view>();
device_views.reserve(views.size());
std::transform(device_view_owners.cbegin(),
device_view_owners.cend(),
std::back_inserter(device_views),
[](auto const& col) { return *col; });
auto d_views =
make_device_uvector_async(device_views, stream, rmm::mr::get_current_device_resource());
// Compute the partition offsets
auto offsets = thrust::host_vector<size_t>(views.size() + 1);
thrust::transform_inclusive_scan(
thrust::host,
device_views.cbegin(),
device_views.cend(),
std::next(offsets.begin()),
[](auto const& col) { return col.size(); },
thrust::plus{});
auto d_offsets =
make_device_uvector_async(offsets, stream, rmm::mr::get_current_device_resource());
auto const output_size = offsets.back();
return std::make_tuple(
std::move(device_view_owners), std::move(d_views), std::move(d_offsets), output_size);
}
/**
* @brief Concatenates the null mask bits of all the column device views in the
* `views` array to the destination bitmask.
*
* @tparam block_size Block size for using with single_lane_block_sum_reduce
*
* @param views Array of column_device_view
* @param output_offsets Prefix sum of sizes of elements of `views`
* @param number_of_views Size of `views` array
* @param dest_mask The output buffer to copy null masks into
* @param number_of_mask_bits The total number of null masks bits that are being copied
* @param out_valid_count To hold the total number of valid bits set
*/
// Kernel body: one thread per output mask bit, advanced with a grid-stride
// loop; each warp assembles one 32-bit mask word via ballot and its lane 0
// writes it, so `dest_mask` words are produced exactly once.
template <size_type block_size>
__global__ void concatenate_masks_kernel(column_device_view const* views,
                                         size_t const* output_offsets,
                                         size_type number_of_views,
                                         bitmask_type* dest_mask,
                                         size_type number_of_mask_bits,
                                         size_type* out_valid_count)
{
  size_type mask_index = threadIdx.x + blockIdx.x * blockDim.x;
  // Lanes still holding an in-range bit; keeps the in-loop ballots well formed.
  auto active_mask = __ballot_sync(0xFFFF'FFFFu, mask_index < number_of_mask_bits);
  size_type warp_valid_count = 0;
  while (mask_index < number_of_mask_bits) {
    // Binary-search the prefix sum of view sizes to find which input view
    // owns this output bit position.
    size_type const source_view_index =
      thrust::upper_bound(
        thrust::seq, output_offsets, output_offsets + number_of_views, mask_index) -
      output_offsets - 1;
    bool bit_is_set = true;  // default when no owning view is found
    if (source_view_index < number_of_views) {
      // Translate the global bit index into the owning view's local row index.
      size_type const column_element_index = mask_index - output_offsets[source_view_index];
      bit_is_set = views[source_view_index].is_valid(column_element_index);
    }
    // Gather the warp's 32 per-lane validity bits into one bitmask word.
    bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
    // Warp leader writes the assembled word and tallies its set bits.
    if (threadIdx.x % detail::warp_size == 0) {
      dest_mask[word_index(mask_index)] = new_word;
      warp_valid_count += __popc(new_word);
    }
    mask_index += blockDim.x * gridDim.x;  // grid-stride advance
    active_mask = __ballot_sync(active_mask, mask_index < number_of_mask_bits);
  }
  // Sum the warp-leader counts across the block, then one atomic per block.
  using detail::single_lane_block_sum_reduce;
  auto const block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
  if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
} // namespace
size_type concatenate_masks(device_span<column_device_view const> d_views,
device_span<size_t const> d_offsets,
bitmask_type* dest_mask,
size_type output_size,
rmm::cuda_stream_view stream)
{
rmm::device_scalar<size_type> d_valid_count(0, stream);
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
concatenate_masks_kernel<block_size>
<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
d_views.data(),
d_offsets.data(),
static_cast<size_type>(d_views.size()),
dest_mask,
output_size,
d_valid_count.data());
return output_size - d_valid_count.value(stream);
}
size_type concatenate_masks(host_span<column_view const> views,
bitmask_type* dest_mask,
rmm::cuda_stream_view stream)
{
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
return concatenate_masks(d_views, d_offsets, dest_mask, output_size, stream);
}
namespace {
template <typename T, size_type block_size, bool Nullable>
__global__ void fused_concatenate_kernel(column_device_view const* input_views,
size_t const* input_offsets,
size_type num_input_views,
mutable_column_device_view output_view,
size_type* out_valid_count)
{
auto const output_size = output_view.size();
auto* output_data = output_view.data<T>();
int64_t output_index = threadIdx.x + blockIdx.x * blockDim.x;
size_type warp_valid_count = 0;
unsigned active_mask;
if (Nullable) { active_mask = __ballot_sync(0xFFFF'FFFFu, output_index < output_size); }
while (output_index < output_size) {
// Lookup input index by searching for output index in offsets
auto const offset_it = thrust::prev(thrust::upper_bound(
thrust::seq, input_offsets, input_offsets + num_input_views, output_index));
size_type const partition_index = offset_it - input_offsets;
// Copy input data to output
auto const offset_index = output_index - *offset_it;
auto const& input_view = input_views[partition_index];
auto const* input_data = input_view.data<T>();
output_data[output_index] = input_data[offset_index];
if (Nullable) {
bool const bit_is_set = input_view.is_valid(offset_index);
bitmask_type const new_word = __ballot_sync(active_mask, bit_is_set);
// First thread writes bitmask word
if (threadIdx.x % detail::warp_size == 0) {
output_view.null_mask()[word_index(output_index)] = new_word;
}
warp_valid_count += __popc(new_word);
}
output_index += blockDim.x * gridDim.x;
if (Nullable) { active_mask = __ballot_sync(active_mask, output_index < output_size); }
}
if (Nullable) {
using detail::single_lane_block_sum_reduce;
auto block_valid_count = single_lane_block_sum_reduce<block_size, 0>(warp_valid_count);
if (threadIdx.x == 0) { atomicAdd(out_valid_count, block_valid_count); }
}
}
template <typename T>
std::unique_ptr<column> fused_concatenate(host_span<column_view const> views,
bool const has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
using mask_policy = cudf::mask_allocation_policy;
// Preprocess and upload inputs to device memory
auto const device_views = create_device_views(views, stream);
auto const& d_views = std::get<1>(device_views);
auto const& d_offsets = std::get<2>(device_views);
auto const output_size = std::get<3>(device_views);
CUDF_EXPECTS(output_size <= static_cast<std::size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated rows exceeds the column size limit",
std::overflow_error);
// Allocate output
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto out_col = detail::allocate_like(views.front(), output_size, policy, stream, mr);
auto out_view = out_col->mutable_view();
auto d_out_view = mutable_column_device_view::create(out_view, stream);
rmm::device_scalar<size_type> d_valid_count(0, stream);
// Launch kernel
constexpr size_type block_size{256};
cudf::detail::grid_1d config(output_size, block_size);
auto const kernel = has_nulls ? fused_concatenate_kernel<T, block_size, true>
: fused_concatenate_kernel<T, block_size, false>;
kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>(
d_views.data(),
d_offsets.data(),
static_cast<size_type>(d_views.size()),
*d_out_view,
d_valid_count.data());
if (has_nulls) {
out_col->set_null_count(output_size - d_valid_count.value(stream));
} else {
out_col->set_null_count(0); // prevent null count from being materialized
}
return out_col;
}
template <typename T>
std::unique_ptr<column> for_each_concatenate(host_span<column_view const> views,
bool const has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
using mask_policy = cudf::mask_allocation_policy;
auto const policy = has_nulls ? mask_policy::ALWAYS : mask_policy::NEVER;
auto col = cudf::detail::allocate_like(views.front(), total_element_count, policy, stream, mr);
auto m_view = col->mutable_view();
auto count = 0;
for (auto& v : views) {
thrust::copy(rmm::exec_policy(stream), v.begin<T>(), v.end<T>(), m_view.begin<T>() + count);
count += v.size();
}
// If concatenated column is nullable, proceed to calculate it
if (has_nulls) {
col->set_null_count(
cudf::detail::concatenate_masks(views, (col->mutable_view()).null_mask(), stream));
} else {
col->set_null_count(0); // prevent null count from being materialized
}
return col;
}
struct concatenate_dispatch {
host_span<column_view const> views;
rmm::cuda_stream_view stream;
rmm::mr::device_memory_resource* mr;
// fixed width
template <typename T>
std::unique_ptr<column> operator()()
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](auto const& col) { return col.has_nulls(); });
// Use a heuristic to guess when the fused kernel will be faster
if (use_fused_kernel_heuristic(has_nulls, views.size())) {
return fused_concatenate<T>(views, has_nulls, stream, mr);
} else {
return for_each_concatenate<T>(views, has_nulls, stream, mr);
}
}
};
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::dictionary32>()
{
return cudf::dictionary::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::string_view>()
{
return cudf::strings::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::list_view>()
{
return cudf::lists::detail::concatenate(views, stream, mr);
}
template <>
std::unique_ptr<column> concatenate_dispatch::operator()<cudf::struct_view>()
{
return cudf::structs::detail::concatenate(views, stream, mr);
}
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream);
/**
* @brief Functor for traversing child columns and recursively verifying concatenation
* bounds and types.
*/
class traverse_children {
public:
// nothing to do for simple types.
template <typename T>
void operator()(host_span<column_view const>, rmm::cuda_stream_view)
{
}
private:
// verify length of concatenated offsets.
void check_offsets_size(host_span<column_view const> cols)
{
// offsets. we can't just add up the total sizes of all offset child columns because each one
// has an extra value, regardless of the # of parent rows. So we have to add up the total # of
// rows in the base column and add 1 at the end
size_t const total_offset_count =
std::accumulate(cols.begin(),
cols.end(),
std::size_t{},
[](size_t a, auto const& b) -> size_t { return a + b.size(); }) +
1;
CUDF_EXPECTS(total_offset_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated offsets exceeds the column size limit",
std::overflow_error);
}
};
template <>
void traverse_children::operator()<cudf::string_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// verify offsets
check_offsets_size(cols);
// chars
size_t const total_char_count = std::accumulate(
cols.begin(), cols.end(), std::size_t{}, [stream](size_t a, auto const& b) -> size_t {
strings_column_view scv(b);
return a + (scv.is_empty() ? 0
// if the column is unsliced, skip the offset retrieval.
: scv.offset() > 0
? cudf::detail::get_value<offset_type>(
scv.offsets(), scv.offset() + scv.size(), stream) -
cudf::detail::get_value<offset_type>(scv.offsets(), scv.offset(), stream)
// if the offset() is 0, it can still be sliced to a shorter length. in this case
// we only need to read a single offset. otherwise just return the full length
// (chars_size())
: scv.size() + 1 == scv.offsets().size()
? scv.chars_size()
: cudf::detail::get_value<offset_type>(scv.offsets(), scv.size(), stream));
});
CUDF_EXPECTS(total_char_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
"Total number of concatenated chars exceeds the column size limit",
std::overflow_error);
}
template <>
void traverse_children::operator()<cudf::struct_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// march each child
auto child_iter = thrust::make_counting_iterator(0);
auto const num_children = cols.front().num_children();
std::vector<column_view> nth_children;
nth_children.reserve(cols.size());
std::for_each(child_iter, child_iter + num_children, [&](auto child_index) {
std::transform(cols.begin(),
cols.end(),
std::back_inserter(nth_children),
[child_index, stream](column_view const& col) {
structs_column_view scv(col);
return scv.get_sliced_child(child_index, stream);
});
bounds_and_type_check(nth_children, stream);
nth_children.clear();
});
}
template <>
void traverse_children::operator()<cudf::list_view>(host_span<column_view const> cols,
rmm::cuda_stream_view stream)
{
// verify offsets
check_offsets_size(cols);
// recurse into the child columns
std::vector<column_view> nth_children;
nth_children.reserve(cols.size());
std::transform(
cols.begin(), cols.end(), std::back_inserter(nth_children), [stream](column_view const& col) {
lists_column_view lcv(col);
return lcv.get_sliced_child(stream);
});
bounds_and_type_check(nth_children, stream);
}
/**
* @brief Verifies that the sum of the sizes of all the columns to be concatenated
* will not exceed the max value of size_type, and verifies all column types match
*
* @param columns_to_concat Span of columns to check
*
* @throws cudf::logic_error if the total length of the concatenated columns would
* exceed the max value of size_type
*
* @throws cudf::logic_error if all of the input column types don't match
*/
void bounds_and_type_check(host_span<column_view const> cols, rmm::cuda_stream_view stream)
{
  // Every input must have exactly the type of the first column.
  CUDF_EXPECTS(std::all_of(cols.begin(),
                           cols.end(),
                           [expected_type = cols.front().type()](auto const& c) {
                             return c.type() == expected_type;
                           }),
               "Type mismatch in columns to concatenate.");
  // total size of all concatenated rows, accumulated in size_t so the sum
  // itself cannot overflow before the limit check below
  size_t const total_row_count =
    std::accumulate(cols.begin(), cols.end(), std::size_t{}, [](size_t a, auto const& b) {
      return a + static_cast<size_t>(b.size());
    });
  CUDF_EXPECTS(total_row_count <= static_cast<size_t>(std::numeric_limits<size_type>::max()),
               "Total number of concatenated rows exceeds the column size limit",
               std::overflow_error);
  // traverse children: run the type-specific recursive checks (offsets, chars,
  // child columns) via traverse_children
  cudf::type_dispatcher(cols.front().type(), traverse_children{}, cols, stream);
}
} // anonymous namespace
// Concatenates the elements from a vector of column_views
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(not columns_to_concat.empty(), "Unexpected empty list of columns to concatenate.");
// verify all types match and that we won't overflow size_type in output size
bounds_and_type_check(columns_to_concat, stream);
if (std::all_of(columns_to_concat.begin(), columns_to_concat.end(), [](column_view const& c) {
return c.is_empty();
})) {
return empty_like(columns_to_concat.front());
}
return type_dispatcher<dispatch_storage_type>(
columns_to_concat.front().type(), concatenate_dispatch{columns_to_concat, stream, mr});
}
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (tables_to_concat.empty()) { return std::make_unique<table>(); }
table_view const first_table = tables_to_concat.front();
CUDF_EXPECTS(std::all_of(tables_to_concat.begin(),
tables_to_concat.end(),
[&first_table](auto const& t) {
return t.num_columns() == first_table.num_columns();
}),
"Mismatch in table columns to concatenate.");
std::vector<std::unique_ptr<column>> concat_columns;
for (size_type i = 0; i < first_table.num_columns(); ++i) {
std::vector<column_view> cols;
std::transform(tables_to_concat.begin(),
tables_to_concat.end(),
std::back_inserter(cols),
[i](auto const& t) { return t.column(i); });
// verify all types match and that we won't overflow size_type in output size
bounds_and_type_check(cols, stream);
concat_columns.emplace_back(detail::concatenate(cols, stream, mr));
}
return std::make_unique<table>(std::move(concat_columns));
}
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
bool const has_nulls =
std::any_of(views.begin(), views.end(), [](const column_view col) { return col.has_nulls(); });
if (has_nulls) {
size_type const total_element_count =
std::accumulate(views.begin(), views.end(), 0, [](auto accumulator, auto const& v) {
return accumulator + v.size();
});
rmm::device_buffer null_mask =
create_null_mask(total_element_count, mask_state::UNINITIALIZED, mr);
detail::concatenate_masks(views, static_cast<bitmask_type*>(null_mask.data()), stream);
return null_mask;
}
// no nulls, so return an empty device buffer
return rmm::device_buffer{0, stream, mr};
}
} // namespace detail
rmm::device_buffer concatenate_masks(host_span<column_view const> views,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate_masks(views, cudf::get_default_stream(), mr);
}
// Concatenates the elements from a vector of column_views
std::unique_ptr<column> concatenate(host_span<column_view const> columns_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(columns_to_concat, cudf::get_default_stream(), mr);
}
std::unique_ptr<table> concatenate(host_span<table_view const> tables_to_concat,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::concatenate(tables_to_concat, cudf::get_default_stream(), mr);
}
} // namespace cudf
|
249bddeb562ff99daa7c0336e937f99c4aaa9f12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N 8
#define THREAD_PER_BLOCK 2
// Transposes a size x size row-major matrix: out = in^T.
// Launch: 1-D grid/block with at least size*size threads in total.
__global__ void transpose(int * in, int * out, int size) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  // Guard so a launch whose thread count is not exactly size*size cannot
  // read/write out of bounds (the original relied on an exact N*N launch).
  if (index < size * size) {
    // Output element i (row index/size, col index%size) comes from the
    // input element at the swapped coordinates.
    out[index] = in[(index / size) + size * (index % size)];
  }
}
// Demo driver: fills an N x N matrix with its own linear indices, transposes
// it on the GPU, and prints the input and the transposed result side by side.
int main()
{
  int * in, * out;      // host input / output matrices
  int * d_in, * d_out;  // device copies
  int size = N * N * sizeof(int);  // byte size of one full matrix
  int i;
  // NOTE(review): none of the hip* calls below are error-checked; a failed
  // allocation or copy would go unnoticed.
  hipMalloc((void**)&d_in, size);
  hipMalloc((void**)&d_out, size);
  in = (int *)malloc(size);
  out = (int *)malloc(size);
  // Seed the input with its linear index so the transpose is easy to verify.
  for(i = 0; i<N*N; ++i)
  {
    in[i] = i;
  }
  hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
  // One thread per matrix element: N*N/THREAD_PER_BLOCK blocks of THREAD_PER_BLOCK.
  hipLaunchKernelGGL(( transpose), dim3(N*N/THREAD_PER_BLOCK), dim3(THREAD_PER_BLOCK) , 0, 0, d_in, d_out, N);
  // Blocking device-to-host copy; also synchronizes with the kernel above.
  hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost);
  // Print the original matrix, N values per line.
  for(i=0; i<N*N; ++i)
  {
    printf("%2d ", in[i]);
    if((i+1)%N == 0) {
      printf("\n");
    }
  }
  printf("--------\n");
  // Print the transposed matrix, N values per line.
  for(i=0; i<N*N; ++i)
  {
    printf("%2d ", out[i]);
    if((i+1)%N == 0) {
      printf("\n");
    }
  }
  free(in); free(out);
  hipFree(d_in);
  hipFree(d_out);
  return 0;
}
| 249bddeb562ff99daa7c0336e937f99c4aaa9f12.cu | #include <stdio.h>
#include <math.h>
#define N 8
#define THREAD_PER_BLOCK 2
// Transposes a size x size row-major matrix: out = in^T.
// Launch: 1-D grid/block with at least size*size threads in total.
__global__ void transpose(int * in, int * out, int size) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  // Guard so a launch whose thread count is not exactly size*size cannot
  // read/write out of bounds (the original relied on an exact N*N launch).
  if (index < size * size) {
    // Output element i (row index/size, col index%size) comes from the
    // input element at the swapped coordinates.
    out[index] = in[(index / size) + size * (index % size)];
  }
}
// Demo driver: fills an N x N matrix with its own linear indices, transposes
// it on the GPU, and prints the input and the transposed result side by side.
int main()
{
  int * in, * out;      // host input / output matrices
  int * d_in, * d_out;  // device copies
  int size = N * N * sizeof(int);  // byte size of one full matrix
  int i;
  // NOTE(review): none of the cuda* calls below are error-checked; a failed
  // allocation or copy would go unnoticed.
  cudaMalloc((void**)&d_in, size);
  cudaMalloc((void**)&d_out, size);
  in = (int *)malloc(size);
  out = (int *)malloc(size);
  // Seed the input with its linear index so the transpose is easy to verify.
  for(i = 0; i<N*N; ++i)
  {
    in[i] = i;
  }
  cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
  // One thread per matrix element: N*N/THREAD_PER_BLOCK blocks of THREAD_PER_BLOCK.
  transpose<<< N*N/THREAD_PER_BLOCK, THREAD_PER_BLOCK >>>(d_in, d_out, N);
  // Blocking device-to-host copy; also synchronizes with the kernel above.
  cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
  // Print the original matrix, N values per line.
  for(i=0; i<N*N; ++i)
  {
    printf("%2d ", in[i]);
    if((i+1)%N == 0) {
      printf("\n");
    }
  }
  printf("--------\n");
  // Print the transposed matrix, N values per line.
  for(i=0; i<N*N; ++i)
  {
    printf("%2d ", out[i]);
    if((i+1)%N == 0) {
      printf("\n");
    }
  }
  free(in); free(out);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
|
7302ebe3c1e9e4dedba0ecc609f21e0f1f601b08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "instrumentation.h"
// Stencil Code Kernel for the heat calculation
// Heat-diffusion stencil over the interior of a (size x size) grid: each of
// the (size-2)^2 interior cells reads its 4 neighbours from grid_old and
// writes the clamped update into grid_new. Launch: any 1-D grid/block; the
// grid-stride loop covers all cells.
__global__ void simpleStencil_Kernel(int size, float *grid_old,
                                     float *grid_new) {
  const float gamma = 0.24f;  // diffusion coefficient
  float tmp_val;
  // Grid-stride loop. FIX: the stride must be the total thread count
  // (blockDim.x * gridDim.x); the previous "blockDim.x + gridDim.x" made
  // many threads redundantly recompute the same cells.
  for (int Id = blockIdx.x * blockDim.x + threadIdx.x;
       Id < (size - 2) * (size - 2); Id += blockDim.x * gridDim.x) {
    // Map the interior index onto the full grid: +1+size skips the first row
    // and column, and +2 per interior row skips each row's two border cells.
    // (original comment: "um vom rand wegzukommen" = "to move away from the border")
    int index = Id + 1 + size + ((Id) / (size - 2)) * 2;
    tmp_val = grid_old[index] +
              gamma * (-4 * grid_old[index] + grid_old[index + 1] +
                       grid_old[index - 1] + grid_old[index + size] +
                       grid_old[index - size]);
    // Clamp to the displayable range [0, 127].
    if (tmp_val > 127)
      tmp_val = 127.0f;
    if (tmp_val < 0)
      tmp_val = 0.0f;
    grid_new[index] = tmp_val;
  }
}
// Host-side launcher for simpleStencil_Kernel: wraps the scalar launch
// parameters in dim3 and fires the kernel on the default stream.
// NOTE(review): the launch result is not checked -- consider hipGetLastError().
void simpleStencil_Kernel_Wrapper(int gridSize, int blockSize, int size,
                                  float *grid_old, float *grid_new) {
  dim3 grid_dim = dim3(gridSize);
  dim3 block_dim = dim3(blockSize);
  hipLaunchKernelGGL(( simpleStencil_Kernel), dim3(grid_dim), dim3(block_dim), 0, 0, size, grid_old, grid_new);
}
// optimized Stencil Code Kernel for the heat calculation
// Optimized heat stencil: marches the column index x across the grid keeping
// a rolling 4-deep window of grid_old columns both in registers (curr_old)
// and in dynamic shared memory (shm, laid out as 4 rows of blockDim.x
// floats). rot_index_{e,c,w} rotate through that window: e = newest (east)
// column, c = centre column being updated, w = oldest (west) column.
// Requires dynamic shared memory of at least 4 * blockDim.x * sizeof(float).
// NOTE(review): both __syncthreads() calls sit inside `if (Idy < size - 1)`,
// which is divergent for any block in which some threads fail the guard --
// presumably the launch configuration guarantees all threads of a block pass;
// verify against the wrapper's grid/block sizing.
__global__ void optStencil_Kernel(int size, float *grid_old, float *grid_new) {
  const float gamma = 0.24;  // diffusion coefficient
  extern __shared__ float shm[];  // 4 rotating columns, blockDim.x floats each
  float tmp_val;
  float curr_old[4];  // this thread's private copy of the 4-column window
  int rot_index_e;
  int rot_index_c;
  int rot_index_w;
  // Grid row handled by this thread; consecutive blocks overlap by 2 rows
  // (the "- blockIdx.x * 2") so interior threads always have both
  // shared-memory neighbours loaded by their block.
  int Idy = blockIdx.x * blockDim.x + threadIdx.x - blockIdx.x * 2;
  if (Idy < size - 1) {
    for (int x = 0; x < size; x++) {
      rot_index_e = x % 4;
      rot_index_c = (x + 3) % 4;
      rot_index_w = (x + 2) % 4;
      // Load the newest (east) column element and publish it to shared memory.
      curr_old[rot_index_e] = grid_old[x + Idy * size];
      shm[(rot_index_e)*blockDim.x + threadIdx.x] = curr_old[rot_index_e];
      __syncthreads();
      // The first two iterations only fill the window; the block's edge
      // threads (0 and blockDim.x-1) are halo-only and never write results.
      if (x > 1 && threadIdx.x != 0 && threadIdx.x != (blockDim.x - 1)) {
        tmp_val =
            curr_old[rot_index_c] +
            gamma * (-4 * curr_old[rot_index_c] +
                     shm[rot_index_e * blockDim.x + threadIdx.x] + // east
                     shm[rot_index_w * blockDim.x + threadIdx.x] + // west
                     shm[rot_index_c * blockDim.x + threadIdx.x - 1] + // north
                     shm[rot_index_c * blockDim.x + threadIdx.x + 1] // south
            );
        // Clamp to the displayable range [0, 127].
        if (tmp_val > 127)
          tmp_val = 127.0;
        if (tmp_val < 0)
          tmp_val = 0.0;
        // The centre column is x-1 (one behind the newest loaded column).
        grid_new[(x - 1) + Idy * size] = tmp_val;
      }
      __syncthreads();
    }
  }
}
// Host-side launcher for optStencil_Kernel. shm_size is the dynamic
// shared-memory allocation in bytes; per the kernel's indexing it must cover
// at least 4 * blockSize floats.
void optStencil_Kernel_Wrapper(int gridSize, int blockSize, int shm_size,
                               int size, float *grid_old, float *grid_new) {
  dim3 grid_dim = dim3(gridSize);
  dim3 block_dim = dim3(blockSize);
  hipLaunchKernelGGL(( optStencil_Kernel), dim3(grid_dim), dim3(block_dim), shm_size, 0, size, grid_old,
                     grid_new);
}
| 7302ebe3c1e9e4dedba0ecc609f21e0f1f601b08.cu | #include "instrumentation.h"
// Stencil Code Kernel for the heat calculation
// Heat-diffusion stencil over the interior of a (size x size) grid: each of
// the (size-2)^2 interior cells reads its 4 neighbours from grid_old and
// writes the clamped update into grid_new. Launch: any 1-D grid/block; the
// grid-stride loop covers all cells.
__global__ void simpleStencil_Kernel(int size, float *grid_old,
                                     float *grid_new) {
  const float gamma = 0.24f;  // diffusion coefficient
  float tmp_val;
  // Grid-stride loop. FIX: the stride must be the total thread count
  // (blockDim.x * gridDim.x); the previous "blockDim.x + gridDim.x" made
  // many threads redundantly recompute the same cells.
  for (int Id = blockIdx.x * blockDim.x + threadIdx.x;
       Id < (size - 2) * (size - 2); Id += blockDim.x * gridDim.x) {
    // Map the interior index onto the full grid: +1+size skips the first row
    // and column, and +2 per interior row skips each row's two border cells.
    // (original comment: "um vom rand wegzukommen" = "to move away from the border")
    int index = Id + 1 + size + ((Id) / (size - 2)) * 2;
    tmp_val = grid_old[index] +
              gamma * (-4 * grid_old[index] + grid_old[index + 1] +
                       grid_old[index - 1] + grid_old[index + size] +
                       grid_old[index - size]);
    // Clamp to the displayable range [0, 127].
    if (tmp_val > 127)
      tmp_val = 127.0f;
    if (tmp_val < 0)
      tmp_val = 0.0f;
    grid_new[index] = tmp_val;
  }
}
// Host-side launcher for simpleStencil_Kernel: wraps the scalar launch
// parameters in dim3 and fires the kernel on the default stream.
// NOTE(review): the launch result is not checked -- consider cudaGetLastError().
void simpleStencil_Kernel_Wrapper(int gridSize, int blockSize, int size,
                                  float *grid_old, float *grid_new) {
  dim3 grid_dim = dim3(gridSize);
  dim3 block_dim = dim3(blockSize);
  simpleStencil_Kernel<<<grid_dim, block_dim>>>(size, grid_old, grid_new);
}
// optimized Stencil Code Kernel for the heat calculation
// Optimized heat stencil: marches the column index x across the grid keeping
// a rolling 4-deep window of grid_old columns both in registers (curr_old)
// and in dynamic shared memory (shm, laid out as 4 rows of blockDim.x
// floats). rot_index_{e,c,w} rotate through that window: e = newest (east)
// column, c = centre column being updated, w = oldest (west) column.
// Requires dynamic shared memory of at least 4 * blockDim.x * sizeof(float).
// NOTE(review): both __syncthreads() calls sit inside `if (Idy < size - 1)`,
// which is divergent for any block in which some threads fail the guard --
// presumably the launch configuration guarantees all threads of a block pass;
// verify against the wrapper's grid/block sizing.
__global__ void optStencil_Kernel(int size, float *grid_old, float *grid_new) {
  const float gamma = 0.24;  // diffusion coefficient
  extern __shared__ float shm[];  // 4 rotating columns, blockDim.x floats each
  float tmp_val;
  float curr_old[4];  // this thread's private copy of the 4-column window
  int rot_index_e;
  int rot_index_c;
  int rot_index_w;
  // Grid row handled by this thread; consecutive blocks overlap by 2 rows
  // (the "- blockIdx.x * 2") so interior threads always have both
  // shared-memory neighbours loaded by their block.
  int Idy = blockIdx.x * blockDim.x + threadIdx.x - blockIdx.x * 2;
  if (Idy < size - 1) {
    for (int x = 0; x < size; x++) {
      rot_index_e = x % 4;
      rot_index_c = (x + 3) % 4;
      rot_index_w = (x + 2) % 4;
      // Load the newest (east) column element and publish it to shared memory.
      curr_old[rot_index_e] = grid_old[x + Idy * size];
      shm[(rot_index_e)*blockDim.x + threadIdx.x] = curr_old[rot_index_e];
      __syncthreads();
      // The first two iterations only fill the window; the block's edge
      // threads (0 and blockDim.x-1) are halo-only and never write results.
      if (x > 1 && threadIdx.x != 0 && threadIdx.x != (blockDim.x - 1)) {
        tmp_val =
            curr_old[rot_index_c] +
            gamma * (-4 * curr_old[rot_index_c] +
                     shm[rot_index_e * blockDim.x + threadIdx.x] + // east
                     shm[rot_index_w * blockDim.x + threadIdx.x] + // west
                     shm[rot_index_c * blockDim.x + threadIdx.x - 1] + // north
                     shm[rot_index_c * blockDim.x + threadIdx.x + 1] // south
            );
        // Clamp to the displayable range [0, 127].
        if (tmp_val > 127)
          tmp_val = 127.0;
        if (tmp_val < 0)
          tmp_val = 0.0;
        // The centre column is x-1 (one behind the newest loaded column).
        grid_new[(x - 1) + Idy * size] = tmp_val;
      }
      __syncthreads();
    }
  }
}
// Host-side launcher for optStencil_Kernel. shm_size is the dynamic
// shared-memory allocation in bytes; per the kernel's indexing it must cover
// at least 4 * blockSize floats.
void optStencil_Kernel_Wrapper(int gridSize, int blockSize, int shm_size,
                               int size, float *grid_old, float *grid_new) {
  dim3 grid_dim = dim3(gridSize);
  dim3 block_dim = dim3(blockSize);
  optStencil_Kernel<<<grid_dim, block_dim, shm_size>>>(size, grid_old,
                                                       grid_new);
}
|
5250910456e9636703615b7f13dec66ef3b11b59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//
// //////////////////////////////////////////////////////////
// // //
// // hybridMANTIS v1.0 //
// // fastDETECT2 - CUDA code //
// // (optical photons transport) //
// // //
// //////////////////////////////////////////////////////////
//
//
//
//
//
//
// ****Disclaimer****
// This software and documentation (the "Software") were developed at the Food and Drug Administration (FDA) by employees of the Federal Government in
// the course of their official duties. Pursuant to Title 17, Section 105 of the United States Code, this work is not subject to copyright protection
// and is in the public domain. Permission is hereby granted, free of charge, to any person obtaining a copy of the Software, to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, or sell copies of the
// Software or derivatives, and to permit persons to whom the Software is furnished to do so. FDA assumes no responsibility whatsoever for use by other
// parties of the Software, its source code, documentation or compiled executables, and makes no guarantees, expressed or implied, about its quality,
// reliability, or any other characteristic. Further, use of this code in no way implies endorsement by the FDA or confers any advantage in regulatory
// decisions. Although this software can be redistributed and/or modified freely, we ask that any derivative works bear some notice that they are
// derived from it, and any modified versions bear some notice that they have been modified.
//
//
// Associated publication: Sharma Diksha, Badal Andreu and Badano Aldo, "hybridMANTIS: a CPU-GPU Monte Carlo method for modeling indirect x-ray detectors with
// columnar scintillators". Physics in Medicine and Biology, 57(8), pp. 23572372 (2012)
//
//
// File: hybridMANTIS_cuda_ver1_0.cu
// Author: Diksha Sharma (US Food and Drug Administration)
// Email: diksha.sharma@fda.hhs.gov
// Last updated: Apr 18, 2012
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////
//
// Header libraries
//
/////////////////////////////////////////
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#ifdef USING_CUDA
#include <cutil_inline.h>
#include <hip/hip_vector_types.h>
#include <stdint.h>
#endif
/////////////////////////////////////////
//
// Global variables
//
/////////////////////////////////////////
#define max_photon_per_EDE 900000 // maximum number of optical photons that can be generated per energy deposition event (EDE)
#ifdef USING_CUDA
#define gpubufsize 2304000 // GPU buffer size: # of events sent to the GPU
#endif
/////////////////////////////////////////
//
// Include kernel program
//
/////////////////////////////////////////
#include "kernel_cuda_c_ver1_0.cu"
/////////////////////////////////////////
//
// CUDA parameters
//
/////////////////////////////////////////
#ifdef USING_CUDA
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
#define GRIDSIZE 18000 // number of blocks
#define BLOCKSIZE 128 // number of threads
#endif
////////////////////////////////////////////////////////////////////////////
// MAIN PROGRAM //
////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// gpuoptical(): Performs optical transport using GPU
// Input arguments: penctr, myfactGPU
//
// penctr - flag to indicate how optical transport will be run on GPU.
// a value of '99' : calling gpuoptical() first time to initialize the GPU and allocate memories and reset counters.
// a value of '100': calling optical transport kernel
// a value of '101': calling gpuoptical() last time; running optical transport for remaining buffer; copying data from device to host and getting output images.
// myfactGPU - buffer size to be sent to GPU after load balancing
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" void gpuoptical_(int *penctr, int *myfactGPU)
{
	// -----------------------------------------------------------------------------
	// Drives optical-photon transport on the GPU in three phases selected by *penctr:
	//   99  - initialize the device, allocate device/host buffers, reset counters
	//   100 - fill the event buffer from the PENELOPE common block and launch the
	//         transport kernel asynchronously
	//   101 - final launch for the remaining buffer, copy images/counters back to
	//         the host, accumulate them into the PENELOPE output arrays, and
	//         release all GPU resources
	// *myfactGPU is the number of interaction events in this batch (buffer length).
	// -----------------------------------------------------------------------------

	// simulation parameters (filled from PENELOPE common blocks below)
	float xdetector, ydetector, radius, height, n_C, n_IC, top_absfrac, bulk_abscoeff, beta, d_min, lbound_x, lbound_y, ubound_x, ubound_y, d_max, yield, sensorRefl;
	int pixelsize, num_primary, min_optphotons, max_optphotons, num_bins;

	// CUDA variables
	unsigned int hTimer = 0;                // timer handle
	// NOTE(review): totalgpuTime is never accumulated into (the timer handle in
	// hTimer is local and lost between calls), so optstats_.glgputime only ever
	// receives 0 from this routine -- TODO confirm whether timing is wired up elsewhere.
	static float totalgpuTime = 0.0f;       // total time taken
	dim3 threads, blocks;                   // threads and blocks
	int devID;                              // GPU device ID

	// GNU scientific library (gsl) variables
	const gsl_rng_type * Tgsl;
	gsl_rng * rgsl;
	double mu_gsl;

	// Host (CPU) counters
	unsigned long long int host_num_generated = 0;  // total # of photons generated for all the x-ray histories (across all threads)
	unsigned long long int host_num_detect = 0;     // total # of photons detected at the sensor plane of a column
	unsigned long long int host_num_lost = 0;       // total # of photons lost when exiting out of the detector boundaries in x/y direction
	unsigned long long int host_num_abs_top = 0;    // total # of photons absorbed at the top surface of the detector
	unsigned long long int host_num_abs_bulk = 0;   // total # of photons absorbed in the bulk of the detector
	unsigned long long int host_num_outofcol = 0;   // total # of photons killed because they moved out of current column when reflected (due to precision errors)
	unsigned long long int host_num_theta1 = 0;     // total # of photons killed if incidence angle > 1.57 or < 0 radian (after resampling max 100 times)

	// dimensions and indices for the output PRF image
	int xdim = 0;
	int ydim = 0;
	int indexi=0, indexj=0;
	int my_index=0;
	size_t pitch;                                        // pitch used for storing 2D image array in GPU memory
	int nbytes = (*myfactGPU)*sizeof(struct start_info); // total number of bytes for storing interaction events buffer information

	// memory pointers
	unsigned long long int *myimage = 0;     // device memory for output image
	int *num_detected_primary = 0;           // device memory for # detected photons/primary
	struct start_info *h_a = 0;              // pointer to the struct info data in the host memory (page-locked)
	struct start_info *d_a = 0;              // pointer to struct data in the device memory

	// copy to local variables from PENELOPE common block
	xdetector = inputargs_.detx;             // x dimension of detector (in um). x in (0,xdetector)
	ydetector = inputargs_.dety;             // y dimension of detector (in um). y in (0,ydetector)
	height = inputargs_.detheight;           // height of column and thickness of detector (in um). z in range (-H/2, H/2)
	radius = inputargs_.detradius;           // radius of column (in um)
	n_C = inputargs_.detnC;                  // refractive index of columns
	n_IC = inputargs_.detnIC;                // refractive index of intercolumnar material
	top_absfrac = inputargs_.dettop;         // column's top surface absorption fraction (0,1)
	bulk_abscoeff = inputargs_.detbulk;      // column's bulk absorption coefficient (in um^-1)
	beta = inputargs_.detbeta;               // roughness coefficient of column walls (0,0.5)
	d_min = inputargs_.detdmin;              // minimum distance a photon can travel when transmitted from a column
	d_max = inputargs_.detdmax;              // maximum distance a photon can travel when transmitted from a column
	lbound_x = inputargs_.detlboundx;        // x lower bound of region of interest of output PRF image (in um)
	lbound_y = inputargs_.detlboundy;        // y lower bound (in um)
	ubound_x = inputargs_.detuboundx;        // x upper bound (in um)
	ubound_y = inputargs_.detuboundy;        // y upper bound (in um)
	yield = inputargs_.detyield;             // light yield (/eV)
	pixelsize = inputargs_.detpixel;         // 1 pixel = 'pixelsize' microns (in um)
	sensorRefl = inputargs_.detsensorRefl;   // Non-Ideal sensor reflectivity (%) (0,100)
	num_primary = inputargs_.mynumhist;      // total number of primaries to be simulated
	min_optphotons = inputargs_.minphotons;  // minimum number of optical photons detected to be included in PHS
	max_optphotons = inputargs_.maxphotons;  // maximum number of optical photons detected to be included in PHS
	num_bins = inputargs_.mynumbins;         // number of bins for generating PHS

	// set the device with max GFlops
	devID = cutGetMaxGflopsDeviceId();
	hipSetDevice( devID );

	// create a random number generator chosen by the environment variable GSL_RNG_TYPE
	gsl_rng_env_setup();
	Tgsl = gsl_rng_default;
	rgsl = gsl_rng_alloc (Tgsl);

	// dimensions of PRF image
	xdim = ceil((ubound_x - lbound_x)/pixelsize);
	ydim = ceil((ubound_y - lbound_y)/pixelsize);

	if(*penctr == 99)	// initialize GPU; allocate and initialize memories
	{
		// allocate device memory for storing output arrays
		hipMallocPitch((void**)&myimage, &pitch, xdim*sizeof(unsigned long long int), ydim);            // allocate 2D image array (PRF)
		cutilSafeCall( hipMemset2D(myimage, pitch, 0, xdim*sizeof(unsigned long long int), ydim) );     // initialize to 0
		cutilSafeCall( hipMalloc((void**)&num_detected_primary, sizeof(int)*num_primary) );             // create 1D array for outputting # detected/primary
		cutilSafeCall( hipMemset(num_detected_primary, 0, sizeof(int)*num_primary) );                   // initialize to 0

		// allocate host (page-locked) and device memory for transferring buffer information
		cutilSafeCall( hipHostMalloc((void**)&h_a, nbytes) );
		cutilSafeCall( hipMalloc((void**)&d_a, nbytes) );

		// copy address of memory pointers to PENELOPE variables. These variables are used later to
		// point to device and host memories without re-initializing the GPU.
		gpumemaddr_.gpuimage = (unsigned long long int)myimage;
		gpumemaddr_.gpudetect = (unsigned long long int)num_detected_primary;
		gpumemaddr_.hosta = (unsigned long long int)h_a;
		gpumemaddr_.deva = (unsigned long long int)d_a;
		gpumemaddr_.devpitch = (unsigned long long int)pitch;

		// reset the host counters
		host_num_generated=0;
		host_num_detect=0;
		host_num_abs_top=0;
		host_num_abs_bulk=0;
		host_num_lost=0;
		host_num_outofcol=0;
		host_num_theta1=0;

		// reset device counters to zero.
		// BUGFIX: pass the device symbols directly instead of their names as string
		// literals -- string-based symbol lookup was removed in CUDA 5.0 and is not
		// supported by HIP, so the string form fails at runtime (invalid symbol).
		// This also matches the hipMemcpyFromSymbol() calls below.
		cutilSafeCall(hipMemcpyToSymbol(num_detect,&host_num_detect,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpyToSymbol(num_generated,&host_num_generated,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpyToSymbol(num_abs_top,&host_num_abs_top,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpyToSymbol(num_abs_bulk,&host_num_abs_bulk,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpyToSymbol(num_lost,&host_num_lost,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpyToSymbol(num_outofcol,&host_num_outofcol,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));
		cutilSafeCall(hipMemcpyToSymbol(num_theta1,&host_num_theta1,sizeof(unsigned long long int)*1,0,hipMemcpyHostToDevice));

		cutilCheckError( cutCreateTimer(&hTimer) );
	}
	else if(*penctr == 100)	// run optical kernel
	{
		// synchronize to ensure that GPU is not busy processing previous kernel call
		hipDeviceSynchronize();

		// recompute nbytes for this batch
		nbytes = (*myfactGPU)*sizeof(struct start_info);

		// copy memory addresses from PENELOPE variables
		myimage = (unsigned long long int*)gpumemaddr_.gpuimage;
		num_detected_primary = (int*)gpumemaddr_.gpudetect;
		h_a = (struct start_info*)gpumemaddr_.hosta;
		d_a = (struct start_info*)gpumemaddr_.deva;
		pitch = (size_t)gpumemaddr_.devpitch;

		// assign number of threads and blocks
		threads = dim3(BLOCKSIZE,1);
		blocks = dim3(GRIDSIZE,1);

		// reading data from buffer
		for(my_index = 0; my_index < (*myfactGPU); my_index++)	// iterate over buffer length
		{
			// units in the penelope output file are in cm. Convert them to microns.
			h_a[my_index].str_x = optical_.xbufopt[my_index] * 10000.0f;	// x-coordinate of interaction event
			h_a[my_index].str_y = optical_.ybufopt[my_index] * 10000.0f;	// y-coordinate
			h_a[my_index].str_z = optical_.zbufopt[my_index] * 10000.0f;	// z-coordinate
			h_a[my_index].str_E = optical_.debufopt[my_index];		// energy deposited
			h_a[my_index].str_histnum = optical_.nbufopt[my_index];		// x-ray history number

			// sample # optical photons based on light yield and energy deposited for this interaction event (using Poisson distribution)
			mu_gsl = (double)h_a[my_index].str_E * yield;
			h_a[my_index].str_N = gsl_ran_poisson(rgsl,mu_gsl);

			if(h_a[my_index].str_N > max_photon_per_EDE)
			{
				printf("\n\n str_n exceeds max photons. program is exiting - %d !! \n\n", h_a[my_index].str_N);
				exit(0);
			}
		}

		// execute the optical transport kernel
		// asynchronously copy data from host to device (all to stream 0)
		cutilSafeCall( hipMemcpyAsync(d_a, h_a, nbytes, hipMemcpyHostToDevice, 0) );

		// each kernel has BLOCKSIZE threads; each thread transports one event in the buffer (info.str_N optical photons)
		hipLaunchKernelGGL(( algo), dim3(blocks), dim3(threads), 0, 0, d_a, myimage, num_detected_primary, pitch, (*myfactGPU), xdetector, ydetector, radius, height, n_C, n_IC, top_absfrac, bulk_abscoeff, beta, d_min, pixelsize, lbound_x, lbound_y, ubound_x, ubound_y, d_max, sensorRefl);
		cutilCheckMsg("algo() execution failed\n");
	}
	else if(*penctr == 101)	// calling optical transport kernel last time. copy back final data from device to host
	{
		// synchronize to ensure that GPU is not busy processing previous kernel call
		hipDeviceSynchronize();

		// here 'nbytes' is not necessarily equal to 'gpubufsize*sizeof(struct start_info)', because in the last call optical_.myctropt can be <= gpubufsize
		nbytes = (*myfactGPU)*sizeof(struct start_info);

		// copy memory addresses from PENELOPE variables
		myimage = (unsigned long long int*)gpumemaddr_.gpuimage;
		num_detected_primary = (int*)gpumemaddr_.gpudetect;
		h_a = (struct start_info*)gpumemaddr_.hosta;
		d_a = (struct start_info*)gpumemaddr_.deva;
		pitch = (size_t)gpumemaddr_.devpitch;

		// allocate host memory
		unsigned long long int *h_myimage = 0;	// page-locked host memory for asynchronous copying (contains host image for every kernel run)
		cutilSafeCall( hipHostMalloc((void**)&h_myimage, xdim*ydim*sizeof(unsigned long long int)) );

		int *h_num_detected_primary = 0;	// host memory to get # detected/primary
		cutilSafeCall( hipHostMalloc((void**)&h_num_detected_primary, sizeof(int)*num_primary) );
		for(indexj=0; indexj < num_primary; indexj++)
			h_num_detected_primary[indexj] = 0;

		int *h_histogram = 0;			// host memory for storing histogram of # photons detected/primary
		h_histogram = (int*)malloc(sizeof(int)*num_bins);
		for(indexj=0; indexj < num_bins; indexj++)
			h_histogram[indexj] = 0;

		// assign number of threads and blocks
		threads = dim3(BLOCKSIZE,1);
		blocks = dim3(GRIDSIZE,1);

		// reading data from buffer
		for(my_index = 0; my_index < (*myfactGPU); my_index++)	// iterate over x-rays
		{
			// units in the penelope output file are in cm. Convert them to microns.
			h_a[my_index].str_x = optical_.xbufopt[my_index] * 10000.0f;	// x-coordinate
			h_a[my_index].str_y = optical_.ybufopt[my_index] * 10000.0f;	// y-coordinate
			h_a[my_index].str_z = optical_.zbufopt[my_index] * 10000.0f;	// z-coordinate
			h_a[my_index].str_E = optical_.debufopt[my_index];		// energy deposited
			h_a[my_index].str_histnum = optical_.nbufopt[my_index];		// x-ray history number

			// sample # optical photons based on light yield and energy deposited for this interaction event
			mu_gsl = (double)h_a[my_index].str_E * yield;
			h_a[my_index].str_N = gsl_ran_poisson(rgsl,mu_gsl);

			if(h_a[my_index].str_N > max_photon_per_EDE)
			{
				printf("\n\n str_n exceeds max photons. program is exiting - %d !! \n\n", h_a[my_index].str_N);
				exit(0);
			}
		}	// for loop ends

		// execute the kernel
		// asynchronously copy data from host to device (all to stream 0)
		cutilSafeCall( hipMemcpyAsync(d_a, h_a, nbytes, hipMemcpyHostToDevice, 0) );

		// each kernel has BLOCKSIZE threads; each thread transports one event in the buffer (info.str_N optical photons)
		hipLaunchKernelGGL(( algo), dim3(blocks), dim3(threads), 0, 0, d_a, myimage, num_detected_primary, pitch, (*myfactGPU), xdetector, ydetector, radius, height, n_C, n_IC, top_absfrac, bulk_abscoeff, beta, d_min, pixelsize, lbound_x, lbound_y, ubound_x, ubound_y, d_max, sensorRefl);

		// asynchronously copy image data from device to host
		cutilSafeCall( hipMemcpy2DAsync((void*)h_myimage,sizeof(unsigned long long int)*xdim,(void*)myimage,pitch,sizeof(unsigned long long int)*xdim,ydim,hipMemcpyDeviceToHost, 0) );
		cutilSafeCall( hipMemcpyAsync(h_num_detected_primary, num_detected_primary, sizeof(int)*num_primary, hipMemcpyDeviceToHost, 0) );
		cutilCheckMsg("algo() execution failed\n");

		hipDeviceSynchronize();		// ensure that GPU has finished before reading back the final results

		// copy counters from device to host
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_detect,num_detect,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_generated,num_generated,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_abs_top,num_abs_top,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_abs_bulk,num_abs_bulk,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_lost,num_lost,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_outofcol,num_outofcol,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));
		cutilSafeCall(hipMemcpyFromSymbol((void *) &host_num_theta1,num_theta1,sizeof(unsigned long long int)*1,0,hipMemcpyDeviceToHost));

		// add h_myimage to the new_myimage (array in PENELOPE)
		for(indexi = 0; indexi < ydim; indexi++)
			for(indexj = 0; indexj < xdim; indexj++)
				outputimage_.newimageopt[indexi][indexj] = outputimage_.newimageopt[indexi][indexj] + h_myimage[indexi*xdim + indexj];

		// make histogram of number of detected photons/primary for num_bins
		int binsize=0, newbin=0;
		int bincorr=0;
		binsize = floor((max_optphotons-min_optphotons)/num_bins);	// calculate size of each bin. Assuming equally spaced bins.
		// BUGFIX: guard against integer division by zero when num_bins exceeds the
		// (max_optphotons - min_optphotons) range, which makes binsize 0.
		if(binsize < 1)
			binsize = 1;
		bincorr = floor(min_optphotons/binsize);			// correction in bin number if min_optphotons > 0.

		for(indexi = 0; indexi < num_primary; indexi++)
		{
			newbin = floor(h_num_detected_primary[indexi]/binsize) - bincorr;	// find bin #
			if(h_num_detected_primary[indexi] > 0)					// store only non-zero bins
			{
				if(h_num_detected_primary[indexi] <= min_optphotons)		// # detected < minimum photons given by user, add to the first bin
					h_histogram[0]++;
				else if(h_num_detected_primary[indexi] >= max_optphotons)	// # detected > maximum photons given by user, then add to the last bin
					h_histogram[num_bins-1]++;
				else
					h_histogram[newbin]++;
			}
		}

		// add num_detected_primary to gldetprimary array in PENELOPE
		for(indexi = 0; indexi < num_bins; indexi++)
			outputdetprim_.gldetprimary[indexi] = outputdetprim_.gldetprimary[indexi] + h_histogram[indexi];

		// type cast unsigned long long int to double (PENELOPE counters are doubles)
		double cast_host_num_generated;
		double cast_host_num_detect;
		double cast_host_num_abs_top;
		double cast_host_num_abs_bulk;
		double cast_host_num_lost;
		double cast_host_num_outofcol;
		double cast_host_num_theta1;
		double cast_gputime;

		cast_host_num_generated = (double)host_num_generated;
		cast_host_num_detect = (double)host_num_detect;
		cast_host_num_abs_top = (double)host_num_abs_top;
		cast_host_num_abs_bulk = (double)host_num_abs_bulk;
		cast_host_num_lost = (double)host_num_lost;
		cast_host_num_outofcol = (double)host_num_outofcol;
		cast_host_num_theta1 = (double)host_num_theta1;
		cast_gputime = (double)totalgpuTime;

		// save to global counters
		optstats_.glgen = optstats_.glgen + cast_host_num_generated;
		optstats_.gldetect = optstats_.gldetect + cast_host_num_detect;
		optstats_.glabstop = optstats_.glabstop + cast_host_num_abs_top;
		optstats_.glabsbulk = optstats_.glabsbulk + cast_host_num_abs_bulk;
		optstats_.gllost = optstats_.gllost + cast_host_num_lost;
		optstats_.gloutofcol = optstats_.gloutofcol + cast_host_num_outofcol;
		optstats_.gltheta1 = optstats_.gltheta1 + cast_host_num_theta1;
		optstats_.glgputime = optstats_.glgputime + cast_gputime;

		// release resources
		cutilSafeCall(hipFree(d_a));
		cutilSafeCall(hipFree(myimage));
		cutilSafeCall(hipFree(num_detected_primary));
		hipHostFree(h_a);
		hipHostFree(h_myimage);
		hipHostFree(h_num_detected_primary);
		free(h_histogram);
	}	// else ends

	// BUGFIX: free the GSL random number generator allocated at the top of this
	// routine; it was previously leaked on every call.
	gsl_rng_free(rgsl);

	return;
}	// CUDA main() ends
#endif
| 5250910456e9636703615b7f13dec66ef3b11b59.cu | ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
//
// //////////////////////////////////////////////////////////
// // //
// // hybridMANTIS v1.0 //
// // fastDETECT2 - CUDA code //
// // (optical photons transport) //
// // //
// //////////////////////////////////////////////////////////
//
//
//
//
//
//
// ****Disclaimer****
// This software and documentation (the "Software") were developed at the Food and Drug Administration (FDA) by employees of the Federal Government in
// the course of their official duties. Pursuant to Title 17, Section 105 of the United States Code, this work is not subject to copyright protection
// and is in the public domain. Permission is hereby granted, free of charge, to any person obtaining a copy of the Software, to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, or sell copies of the
// Software or derivatives, and to permit persons to whom the Software is furnished to do so. FDA assumes no responsibility whatsoever for use by other
// parties of the Software, its source code, documentation or compiled executables, and makes no guarantees, expressed or implied, about its quality,
// reliability, or any other characteristic. Further, use of this code in no way implies endorsement by the FDA or confers any advantage in regulatory
// decisions. Although this software can be redistributed and/or modified freely, we ask that any derivative works bear some notice that they are
// derived from it, and any modified versions bear some notice that they have been modified.
//
//
// Associated publication: Sharma Diksha, Badal Andreu and Badano Aldo, "hybridMANTIS: a CPU-GPU Monte Carlo method for modeling indirect x-ray detectors with
// columnar scintillators". Physics in Medicine and Biology, 57(8), pp. 2357–2372 (2012)
//
//
// File: hybridMANTIS_cuda_ver1_0.cu
// Author: Diksha Sharma (US Food and Drug Administration)
// Email: diksha.sharma@fda.hhs.gov
// Last updated: Apr 18, 2012
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////
//
// Header libraries
//
/////////////////////////////////////////
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#ifdef USING_CUDA
#include <cutil_inline.h>
#include <vector_types.h>
#include <stdint.h>
#endif
/////////////////////////////////////////
//
// Global variables
//
/////////////////////////////////////////
#define max_photon_per_EDE 900000 // maximum number of optical photons that can be generated per energy deposition event (EDE)
#ifdef USING_CUDA
#define gpubufsize 2304000 // GPU buffer size: # of events sent to the GPU
#endif
/////////////////////////////////////////
//
// Include kernel program
//
/////////////////////////////////////////
#include "kernel_cuda_c_ver1_0.cu"
/////////////////////////////////////////
//
// CUDA parameters
//
/////////////////////////////////////////
#ifdef USING_CUDA
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
#define GRIDSIZE 18000 // number of blocks
#define BLOCKSIZE 128 // number of threads
#endif
////////////////////////////////////////////////////////////////////////////
// MAIN PROGRAM //
////////////////////////////////////////////////////////////////////////////
#ifdef USING_CUDA
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// gpuoptical(): Performs optical transport using GPU
// Input arguments: penctr, myfactGPU
//
// penctr - flag to indicate how optical transport will be run on GPU.
// a value of '99' : calling gpuoptical() first time to initialize the GPU and allocate memories and reset counters.
// a value of '100': calling optical transport kernel
// a value of '101': calling gpuoptical() last time; running optical transport for remaining buffer; copying data from device to host and getting output images.
// myfactGPU - buffer size to be sent to GPU after load balancing
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C" void gpuoptical_(int *penctr, int *myfactGPU)
{
// command line arguments
float xdetector, ydetector, radius, height, n_C, n_IC, top_absfrac, bulk_abscoeff, beta, d_min, lbound_x, lbound_y, ubound_x, ubound_y, d_max, yield, sensorRefl;
int pixelsize, num_primary, min_optphotons, max_optphotons, num_bins;
// CUDA variables
unsigned int hTimer = 0; // timer
static float totalgpuTime = 0.0f; // total time taken
dim3 threads, blocks; // threads and blocks
int devID; // GPU device ID
// GNU scientific library (gsl) variables
const gsl_rng_type * Tgsl;
gsl_rng * rgsl;
double mu_gsl;
// Host (CPU) counters
unsigned long long int host_num_generated = 0; // total # of photons generated for all the x-ray histories (across all threads)
unsigned long long int host_num_detect = 0; // total # of photons detected at the sensor plane of a column
unsigned long long int host_num_lost = 0; // total # of photons lost when exiting out of the detector boundaries in x/y direction
unsigned long long int host_num_abs_top = 0; // total # of photons absorbed at the top surface of the detector
unsigned long long int host_num_abs_bulk = 0; // total # of photons absorbed in the bulk of the detector
unsigned long long int host_num_outofcol = 0; // total # of photons killed because they moved out of current column when reflected (due to precision errors)
unsigned long long int host_num_theta1 = 0; // total # of photons killed if incidence angle > 1.57 or < 0 radian (after resampling max 100 times)
// create 2D array for storing output PRF image
int xdim = 0;
int ydim = 0;
int indexi=0, indexj=0;
int my_index=0;
size_t pitch; // pitch used for storing 2D image array in GPU memory
int nbytes = (*myfactGPU)*sizeof(struct start_info); // total number of bytes for storing interaction events buffer information
// allocate memory pointers
unsigned long long int *myimage = 0; // device memory for output image
int *num_detected_primary = 0; // device memory for # detected photons/primary
struct start_info *h_a = 0; // pointer to the struct info data in the host memory
struct start_info *d_a = 0; // pointers to struct data in the device memory
// copy to local variables from PENELOPE common block
xdetector = inputargs_.detx; // x dimension of detector (in um). x in (0,xdetector)
ydetector = inputargs_.dety; // y dimension of detector (in um). y in (0,ydetector)
height = inputargs_.detheight; // height of column and thickness of detector (in um). z in range (-H/2, H/2)
radius = inputargs_.detradius; // radius of column (in um).
n_C = inputargs_.detnC; // refractive index of columns
n_IC = inputargs_.detnIC; // refractive index of intercolumnar material
top_absfrac = inputargs_.dettop; // column's top surface absorption fraction (0,1)
bulk_abscoeff = inputargs_.detbulk; // column's bulk absorption coefficient (in um^-1)
beta = inputargs_.detbeta; // roughness coefficient of column walls (0,0.5)
d_min = inputargs_.detdmin; // minimum distance a photon can travel when transmitted from a column
d_max = inputargs_.detdmax; // maximum distance a photon can travel when transmitted from a column
lbound_x = inputargs_.detlboundx; // x lower bound of region of interest of output PRF image (in um)
lbound_y = inputargs_.detlboundy; // y lower bound (in um)
ubound_x = inputargs_.detuboundx; // x upper bound (in um)
ubound_y = inputargs_.detuboundy; // y upper bound (in um)
yield = inputargs_.detyield; // light yield (/eV)
pixelsize = inputargs_.detpixel; // 1 pixel = 'pixelsize' microns (in um)
sensorRefl = inputargs_.detsensorRefl; // Non-Ideal sensor reflectivity (%) (0,100)
num_primary = inputargs_.mynumhist; // total number of primaries to be simulated
min_optphotons = inputargs_.minphotons; // minimum number of optical photons detected to be included in PHS
max_optphotons = inputargs_.maxphotons; // maximum number of optical photons detected to be included in PHS
num_bins = inputargs_.mynumbins; // number of bins for genrating PHS
// set the device with max GFlops
devID = cutGetMaxGflopsDeviceId();
cudaSetDevice( devID );
// create a generator chosen by the
// environment variable GSL_RNG_TYPE
gsl_rng_env_setup();
Tgsl = gsl_rng_default;
rgsl = gsl_rng_alloc (Tgsl);
// dimensions of PRF image
xdim = ceil((ubound_x - lbound_x)/pixelsize);
ydim = ceil((ubound_y - lbound_y)/pixelsize);
if(*penctr == 99) // initialize GPU; allocate and initialize memories
{
// allocate device memory for storing output arrays
cudaMallocPitch((void**)&myimage, &pitch, xdim*sizeof(unsigned long long int), ydim); // allocate 2D image array (PRF)
cutilSafeCall( cudaMemset2D(myimage, pitch, 0, xdim*sizeof(unsigned long long int), ydim) ); // initialize to 0
cutilSafeCall( cudaMalloc((void**)&num_detected_primary, sizeof(int)*num_primary) ); // create 1D array for outputting # detected/primary
cutilSafeCall( cudaMemset(num_detected_primary, 0, sizeof(int)*num_primary) ); // initialize to 0
// allocate host and device memory for transferring buffer information
cutilSafeCall( cudaMallocHost((void**)&h_a, nbytes) );
cutilSafeCall( cudaMalloc((void**)&d_a, nbytes) );
// copy address of memory pointers to PENELOPE variables. These variables are used later to point to device and host memories without re-initializing the GPU.
gpumemaddr_.gpuimage = (unsigned long long int)myimage;
gpumemaddr_.gpudetect = (unsigned long long int)num_detected_primary;
gpumemaddr_.hosta = (unsigned long long int)h_a;
gpumemaddr_.deva = (unsigned long long int)d_a;
gpumemaddr_.devpitch = (unsigned long long int)pitch;
// reset the host counters
host_num_generated=0;
host_num_detect=0;
host_num_abs_top=0;
host_num_abs_bulk=0;
host_num_lost=0;
host_num_outofcol=0;
host_num_theta1=0;
// reset device counters to zero
cutilSafeCall(cudaMemcpyToSymbol("num_detect",&host_num_detect,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol("num_generated",&host_num_generated,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol("num_abs_top",&host_num_abs_top,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol("num_abs_bulk",&host_num_abs_bulk,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol("num_lost",&host_num_lost,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol("num_outofcol",&host_num_outofcol,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilSafeCall(cudaMemcpyToSymbol("num_theta1",&host_num_theta1,sizeof(unsigned long long int)*1,0,cudaMemcpyHostToDevice));
cutilCheckError( cutCreateTimer(&hTimer) );
}
else if(*penctr == 100) // run optical kernel
{
// synchronize threads to ensure that GPU is not busy processing previous kernel call
cudaThreadSynchronize();
// allocate nbytes
nbytes = (*myfactGPU)*sizeof(struct start_info);
// copy memory address from PENELOPE variables
myimage = (unsigned long long int*)gpumemaddr_.gpuimage;
num_detected_primary = (int*)gpumemaddr_.gpudetect;
h_a = (struct start_info*)gpumemaddr_.hosta;
d_a = (struct start_info*)gpumemaddr_.deva;
pitch = (size_t)gpumemaddr_.devpitch;
// assign number of threads and blocks
threads = dim3(BLOCKSIZE,1);
blocks = dim3(GRIDSIZE,1);
// reading data from buffer
for(my_index = 0; my_index < (*myfactGPU); my_index++) // iterate over buffer length
{
// units in the penelope output file are in cm. Convert them to microns.
h_a[my_index].str_x = optical_.xbufopt[my_index] * 10000.0f; // x-coordinate of interaction event
h_a[my_index].str_y = optical_.ybufopt[my_index] * 10000.0f; // y-coordinate
h_a[my_index].str_z = optical_.zbufopt[my_index] * 10000.0f; // z-coordinate
h_a[my_index].str_E = optical_.debufopt[my_index]; // energy deposited
h_a[my_index].str_histnum = optical_.nbufopt[my_index]; // x-ray history number
// sample # optical photons based on light yield and energy deposited for this interaction event (using Poisson distribution)
mu_gsl = (double)h_a[my_index].str_E * yield;
h_a[my_index].str_N = gsl_ran_poisson(rgsl,mu_gsl);
if(h_a[my_index].str_N > max_photon_per_EDE)
{
printf("\n\n str_n exceeds max photons. program is exiting - %d !! \n\n", h_a[my_index].str_N);
exit(0);
}
}
// execute the optical transport kernel
// asynchronously copy data from host to device (all to stream 0)
cutilSafeCall( cudaMemcpyAsync(d_a, h_a, nbytes, cudaMemcpyHostToDevice, 0) );
// each kernel has BLOCKSIZE threads; each thread transports one event in the buffer (info.str_N optical photons)
algo<<<blocks, threads, 0, 0>>>(d_a, myimage, num_detected_primary, pitch, (*myfactGPU), xdetector, ydetector, radius, height, n_C, n_IC, top_absfrac, bulk_abscoeff, beta, d_min, pixelsize, lbound_x, lbound_y, ubound_x, ubound_y, d_max, sensorRefl);
cutilCheckMsg("algo() execution failed\n");
}
else if(*penctr == 101) // calling optical transport kernel last time. copy back final data from device to host
{
// synchronize threads to ensure that GPU is not busy processing previous kernel call
cudaThreadSynchronize();
// here 'nbytes' is not necesaarily equal to 'gpubufsize*sizeof(struct start_info)', because in the last call optical_.myctropt can be <= gpubufsize
nbytes = (*myfactGPU)*sizeof(struct start_info);
// copy memory address from PENELOPE variables
myimage = (unsigned long long int*)gpumemaddr_.gpuimage;
num_detected_primary = (int*)gpumemaddr_.gpudetect;
h_a = (struct start_info*)gpumemaddr_.hosta;
d_a = (struct start_info*)gpumemaddr_.deva;
pitch = (size_t)gpumemaddr_.devpitch;
// allocate host memory
unsigned long long int *h_myimage = 0; // page-locked host memory for asynchronous copying (contain host image for evey kernel run)
cutilSafeCall( cudaMallocHost((void**)&h_myimage, xdim*ydim*sizeof(unsigned long long int)) );
int *h_num_detected_primary = 0; // host memory to get # detected/primary
cutilSafeCall( cudaMallocHost((void**)&h_num_detected_primary, sizeof(int)*num_primary) );
for(indexj=0; indexj < num_primary; indexj++)
h_num_detected_primary[indexj] = 0;
int *h_histogram = 0; // host memory for storing histogram of # photons detected/primary
h_histogram = (int*)malloc(sizeof(int)*num_bins);
for(indexj=0; indexj < num_bins; indexj++)
h_histogram[indexj] = 0;
// assign number of threads and blocks
threads = dim3(BLOCKSIZE,1);
blocks = dim3(GRIDSIZE,1);
// reading data from buffer
for(my_index = 0; my_index < (*myfactGPU); my_index++) // iterate over x-rays
{
// units in the penelope output file are in cm. Convert them to microns.
h_a[my_index].str_x = optical_.xbufopt[my_index] * 10000.0f; // x-coordinate
h_a[my_index].str_y = optical_.ybufopt[my_index] * 10000.0f; // y-coordinate
h_a[my_index].str_z = optical_.zbufopt[my_index] * 10000.0f; // z-coordinate
h_a[my_index].str_E = optical_.debufopt[my_index]; // energy deposited
h_a[my_index].str_histnum = optical_.nbufopt[my_index]; // x-ray history number
// sample # optical photons based on light yield and energy deposited for this interaction event
mu_gsl = (double)h_a[my_index].str_E * yield;
h_a[my_index].str_N = gsl_ran_poisson(rgsl,mu_gsl);
if(h_a[my_index].str_N > max_photon_per_EDE)
{
printf("\n\n str_n exceeds max photons. program is exiting - %d !! \n\n", h_a[my_index].str_N);
exit(0);
}
} // for loop ends
// execute the kernel
// asynchronously copy data from host to device (all to stream 0)
cutilSafeCall( cudaMemcpyAsync(d_a, h_a, nbytes, cudaMemcpyHostToDevice, 0) );
// each kernel has BLOCKSIZE threads; each thread transports one event in the buffer (info.str_N optical photons)
algo<<<blocks, threads, 0, 0>>>(d_a, myimage, num_detected_primary, pitch, (*myfactGPU), xdetector, ydetector, radius, height, n_C, n_IC, top_absfrac, bulk_abscoeff, beta, d_min, pixelsize, lbound_x, lbound_y, ubound_x, ubound_y, d_max, sensorRefl);
// asynchronously copy image data from device to host
cutilSafeCall( cudaMemcpy2DAsync((void*)h_myimage,sizeof(unsigned long long int)*xdim,(void*)myimage,pitch,sizeof(unsigned long long int)*xdim,ydim,cudaMemcpyDeviceToHost, 0) );
cutilSafeCall( cudaMemcpyAsync(h_num_detected_primary, num_detected_primary, sizeof(int)*num_primary, cudaMemcpyDeviceToHost, 0) );
cutilCheckMsg("algo() execution failed\n");
cudaThreadSynchronize(); // ensure that GPU has finished before copying back the final results.
// copy counters from device to host
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_detect,num_detect,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_generated,num_generated,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_abs_top,num_abs_top,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_abs_bulk,num_abs_bulk,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_lost,num_lost,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_outofcol,num_outofcol,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpyFromSymbol((void *) &host_num_theta1,num_theta1,sizeof(unsigned long long int)*1,0,cudaMemcpyDeviceToHost));
// add h_myimage to the new_myimage (array in PENELOPE)
for(indexi = 0; indexi < ydim; indexi++)
for(indexj = 0; indexj < xdim; indexj++)
outputimage_.newimageopt[indexi][indexj] = outputimage_.newimageopt[indexi][indexj] + h_myimage[indexi*xdim + indexj];
// make histogram of number of detected photons/primary for num_bins
int binsize=0, newbin=0;
int bincorr=0;
binsize = floor((max_optphotons-min_optphotons)/num_bins); // calculate size of each bin. Assuming equally spaced bins.
bincorr = floor(min_optphotons/binsize); // correction in bin number if min_optphotons > 0.
for(indexi = 0; indexi < num_primary; indexi++)
{
newbin = floor(h_num_detected_primary[indexi]/binsize) - bincorr; // find bin #
if(h_num_detected_primary[indexi] > 0) // store only non-zero bins
{
if(h_num_detected_primary[indexi] <= min_optphotons) // # detected < minimum photons given by user, add to the first bin
h_histogram[0]++;
else if(h_num_detected_primary[indexi] >= max_optphotons) // # detected > maximum photons given by user, then add to the last bin
h_histogram[num_bins-1]++;
else
h_histogram[newbin]++;
}
}
// add num_detected_primary to gldetprimary array in PENELOPE
for(indexi = 0; indexi < num_bins; indexi++)
outputdetprim_.gldetprimary[indexi] = outputdetprim_.gldetprimary[indexi] + h_histogram[indexi];
// type cast unsigned long long int to double
double cast_host_num_generated;
double cast_host_num_detect;
double cast_host_num_abs_top;
double cast_host_num_abs_bulk;
double cast_host_num_lost;
double cast_host_num_outofcol;
double cast_host_num_theta1;
double cast_gputime;
cast_host_num_generated = (double)host_num_generated;
cast_host_num_detect = (double)host_num_detect;
cast_host_num_abs_top = (double)host_num_abs_top;
cast_host_num_abs_bulk = (double)host_num_abs_bulk;
cast_host_num_lost = (double)host_num_lost;
cast_host_num_outofcol = (double)host_num_outofcol;
cast_host_num_theta1 = (double)host_num_theta1;
cast_gputime = (double)totalgpuTime;
// save to global counters
optstats_.glgen = optstats_.glgen + cast_host_num_generated;
optstats_.gldetect = optstats_.gldetect + cast_host_num_detect;
optstats_.glabstop = optstats_.glabstop + cast_host_num_abs_top;
optstats_.glabsbulk = optstats_.glabsbulk + cast_host_num_abs_bulk;
optstats_.gllost = optstats_.gllost + cast_host_num_lost;
optstats_.gloutofcol = optstats_.gloutofcol + cast_host_num_outofcol;
optstats_.gltheta1 = optstats_.gltheta1 + cast_host_num_theta1;
optstats_.glgputime = optstats_.glgputime + cast_gputime;
// release resources
cutilSafeCall(cudaFree(d_a));
cutilSafeCall(cudaFree(myimage));
cutilSafeCall(cudaFree(num_detected_primary));
cudaFreeHost(h_a);
cudaFreeHost(h_myimage);
cudaFreeHost(h_num_detected_primary);
free(h_histogram);
} // else ends
return;
} // CUDA main() ends
#endif
|
09c5d920f184dc391f3f7cf65ef977dbf3f85a76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "svd3_cuda.h"
#include "stdio.h"
#include <chrono>
//#define columnas 640
//#define filas 480
//#define columnas 800
//#define filas 800
#define Threads 16
using namespace std;
// For every interior pixel of an (filas x columnas) image, compute the SVD of
// the 3x3 neighborhood in m1 and store trace(S) — the sum of the three
// singular values — into m3. Border pixels are left untouched.
__global__ void add(float *m1, float *m3, int filas, int columnas)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    // The full 3x3 window centered at (y, x) must lie inside the image.
    const bool interior =
        (x >= 1) && (y >= 1) && (x + 1 < columnas) && (y + 1 < filas);
    if (!interior)
        return;

    // Row base offsets of the three rows of the window.
    const int up   = columnas * (y - 1);
    const int mid  = columnas * y;
    const int down = columnas * (y + 1);

    float u11, u12, u13, u21, u22, u23, u31, u32, u33;  // U (unused here)
    float s11, s12, s13, s21, s22, s23, s31, s32, s33;  // S (diagonal = singular values)
    float v11, v12, v13, v21, v22, v23, v31, v32, v33;  // V (unused here)

    svd(m1[up + x - 1],   m1[up + x],   m1[up + x + 1],
        m1[mid + x - 1],  m1[mid + x],  m1[mid + x + 1],
        m1[down + x - 1], m1[down + x], m1[down + x + 1],
        // output U
        u11, u12, u13, u21, u22, u23, u31, u32, u33,
        // output S
        s11, s12, s13, s21, s22, s23, s31, s32, s33,
        // output V
        v11, v12, v13, v21, v22, v23, v31, v32, v33);

    // Trace of S.
    m3[mid + x] = s11 + s22 + s33;
}
// Benchmark driver: fills a filas x columnas image, runs the per-pixel 3x3
// SVD kernel, copies the result back, and prints the elapsed milliseconds.
int main()
{
    const int filas = 1080;
    const int columnas = 1920;
    const size_t num_elems = static_cast<size_t>(filas) * columnas;
    int cont = 0;
    cout << "craete" << endl;
    // FIX: 1080*1920 floats (~8.3 MB) as a local 2D array would overflow the
    // typical 8 MB stack — allocate one flat buffer on the heap instead.
    float *a = new float[num_elems];
    cout << "craete1" << endl;
    float *dev_a = nullptr;
    float *dev_c = nullptr;
    // Same initialization as before: a[i][j] = cont + i, with cont
    // incremented once per element.
    for (int i = 0; i < filas; i++)
    {
        for (int j = 0; j < columnas; j++)
        {
            a[static_cast<size_t>(i) * columnas + j] = cont + i;
            cont++;
        }
    }
    cout << "init" << endl;
    auto t11 = std::chrono::high_resolution_clock::now();
    hipMalloc((void**) &dev_a, num_elems * sizeof(float));
    hipMalloc((void**) &dev_c, num_elems * sizeof(float));
    hipMemcpy(dev_a, a, num_elems * sizeof(float), hipMemcpyHostToDevice);
    dim3 dimThreadsBloque(Threads, Threads);
    // B blocks per grid axis, sized from the wider dimension (columnas >=
    // filas here), so the B x B grid covers every row and column.
    float BFloat = (float) columnas / (float) Threads;
    int B = (int) ceil(BFloat);
    dim3 dimBloques(B, B);
    hipLaunchKernelGGL(( add), dim3(dimBloques), dim3(dimThreadsBloque), 0, 0, dev_a, dev_c, filas, columnas);
    hipMemcpy(a, dev_c, num_elems * sizeof(float), hipMemcpyDeviceToHost);
    auto t12 = std::chrono::high_resolution_clock::now();
    hipFree(dev_a);
    hipFree(dev_c);
    delete[] a;  // FIX: release the host buffer (was never freed)
    cout << std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count() << endl;
    return 0;
}
| 09c5d920f184dc391f3f7cf65ef977dbf3f85a76.cu | #include <iostream>
#include "svd3_cuda.h"
#include "stdio.h"
#include <chrono>
//#define columnas 640
//#define filas 480
//#define columnas 800
//#define filas 800
#define Threads 16
using namespace std;
// For every interior pixel of an (filas x columnas) image, compute the SVD of
// the 3x3 neighborhood in m1 and store trace(S) — the sum of the three
// singular values — into m3. Border pixels are left untouched.
__global__ void add(float *m1, float *m3, int filas, int columnas)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    // The full 3x3 window centered at (y, x) must lie inside the image.
    const bool interior =
        (x >= 1) && (y >= 1) && (x + 1 < columnas) && (y + 1 < filas);
    if (!interior)
        return;

    // Row base offsets of the three rows of the window.
    const int up   = columnas * (y - 1);
    const int mid  = columnas * y;
    const int down = columnas * (y + 1);

    float u11, u12, u13, u21, u22, u23, u31, u32, u33;  // U (unused here)
    float s11, s12, s13, s21, s22, s23, s31, s32, s33;  // S (diagonal = singular values)
    float v11, v12, v13, v21, v22, v23, v31, v32, v33;  // V (unused here)

    svd(m1[up + x - 1],   m1[up + x],   m1[up + x + 1],
        m1[mid + x - 1],  m1[mid + x],  m1[mid + x + 1],
        m1[down + x - 1], m1[down + x], m1[down + x + 1],
        // output U
        u11, u12, u13, u21, u22, u23, u31, u32, u33,
        // output S
        s11, s12, s13, s21, s22, s23, s31, s32, s33,
        // output V
        v11, v12, v13, v21, v22, v23, v31, v32, v33);

    // Trace of S.
    m3[mid + x] = s11 + s22 + s33;
}
// Benchmark driver: fills a filas x columnas image, runs the per-pixel 3x3
// SVD kernel, copies the result back, and prints the elapsed milliseconds.
int main()
{
    const int filas = 1080;
    const int columnas = 1920;
    const size_t num_elems = static_cast<size_t>(filas) * columnas;
    int cont = 0;
    cout << "craete" << endl;
    // FIX: 1080*1920 floats (~8.3 MB) as a local 2D array would overflow the
    // typical 8 MB stack — allocate one flat buffer on the heap instead.
    float *a = new float[num_elems];
    cout << "craete1" << endl;
    float *dev_a = nullptr;
    float *dev_c = nullptr;
    // Same initialization as before: a[i][j] = cont + i, with cont
    // incremented once per element.
    for (int i = 0; i < filas; i++)
    {
        for (int j = 0; j < columnas; j++)
        {
            a[static_cast<size_t>(i) * columnas + j] = cont + i;
            cont++;
        }
    }
    cout << "init" << endl;
    auto t11 = std::chrono::high_resolution_clock::now();
    cudaMalloc((void**) &dev_a, num_elems * sizeof(float));
    cudaMalloc((void**) &dev_c, num_elems * sizeof(float));
    cudaMemcpy(dev_a, a, num_elems * sizeof(float), cudaMemcpyHostToDevice);
    dim3 dimThreadsBloque(Threads, Threads);
    // B blocks per grid axis, sized from the wider dimension (columnas >=
    // filas here), so the B x B grid covers every row and column.
    float BFloat = (float) columnas / (float) Threads;
    int B = (int) ceil(BFloat);
    dim3 dimBloques(B, B);
    add<<<dimBloques, dimThreadsBloque>>>(dev_a, dev_c, filas, columnas);
    cudaMemcpy(a, dev_c, num_elems * sizeof(float), cudaMemcpyDeviceToHost);
    auto t12 = std::chrono::high_resolution_clock::now();
    cudaFree(dev_a);
    cudaFree(dev_c);
    delete[] a;  // FIX: release the host buffer (was never freed)
    cout << std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count() << endl;
    return 0;
}
|
e3d45378e89a0c6b936d357f2a647a3da90a887f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Blends n candidate RGB images into one output image using per-pixel
// weights: each output channel is the weighted sum over candidates, where the
// weight of candidate idx at a pixel is selection[idx]/selection_sum there.
//
// FIX: accumulate each channel in float and clamp/cast once per pixel.
// The original cast every weighted term to unsigned char before adding,
// which truncated small contributions to 0 and could wrap the 8-bit sum.
// (Closing brace of the kernel is on the following, untouched line.)
__global__ void merge_sum(unsigned char * img_all, unsigned char * img, float * selection, float * selection_sum, int n, int stride)
{
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int width = gridDim.x * TILE_DIM;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    const int pix = (y + j) * width + x;  // flat pixel index for this tile row
    float acc_r = 0.0f;
    float acc_g = 0.0f;
    float acc_b = 0.0f;
    for (int idx = 0; idx < n; idx++) {
      // Normalized blending weight of candidate idx at this pixel.
      const float weight = selection[idx * stride + pix] / selection_sum[pix];
      const int src = idx * stride * 3 + 3 * pix;
      acc_r += img_all[src] * weight;
      acc_g += img_all[src + 1] * weight;
      acc_b += img_all[src + 2] * weight;
    }
    // Clamp to the valid 8-bit range, then narrow once per channel.
    img[3 * pix]     = (unsigned char) fminf(fmaxf(acc_r, 0.0f), 255.0f);
    img[3 * pix + 1] = (unsigned char) fminf(fmaxf(acc_g, 0.0f), 255.0f);
    img[3 * pix + 2] = (unsigned char) fminf(fmaxf(acc_b, 0.0f), 255.0f);
  }
} | e3d45378e89a0c6b936d357f2a647a3da90a887f.cu | #include "includes.h"
// Blends n candidate RGB images into one output image using per-pixel
// weights: each output channel is the weighted sum over candidates, where the
// weight of candidate idx at a pixel is selection[idx]/selection_sum there.
//
// FIX: accumulate each channel in float and clamp/cast once per pixel.
// The original cast every weighted term to unsigned char before adding,
// which truncated small contributions to 0 and could wrap the 8-bit sum.
// (Closing brace of the kernel is on the following, untouched line.)
__global__ void merge_sum(unsigned char * img_all, unsigned char * img, float * selection, float * selection_sum, int n, int stride)
{
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int width = gridDim.x * TILE_DIM;
  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    const int pix = (y + j) * width + x;  // flat pixel index for this tile row
    float acc_r = 0.0f;
    float acc_g = 0.0f;
    float acc_b = 0.0f;
    for (int idx = 0; idx < n; idx++) {
      // Normalized blending weight of candidate idx at this pixel.
      const float weight = selection[idx * stride + pix] / selection_sum[pix];
      const int src = idx * stride * 3 + 3 * pix;
      acc_r += img_all[src] * weight;
      acc_g += img_all[src + 1] * weight;
      acc_b += img_all[src + 2] * weight;
    }
    // Clamp to the valid 8-bit range, then narrow once per channel.
    img[3 * pix]     = (unsigned char) fminf(fmaxf(acc_r, 0.0f), 255.0f);
    img[3 * pix + 1] = (unsigned char) fminf(fmaxf(acc_g, 0.0f), 255.0f);
    img[3 * pix + 2] = (unsigned char) fminf(fmaxf(acc_b, 0.0f), 255.0f);
  }
} |
20e6e15b805ebe8fdd89e8ee4c13f72fc5f039b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/non_max_suppressionv3_impl.cuh"
#include <hipcub/hipcub.hpp>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <vector>
#include <limits>
#include <iostream>
#include <algorithm>
constexpr int kNmsBlockDim = 16;
constexpr int kNmsBlockDimMax = 128;
constexpr int kNmsBoxesPerThread = 8 * sizeof(int);
// Unary predicate for hipcub::DeviceSelect::If: returns true when the value,
// converted to float, is strictly greater than the threshold supplied at
// construction. Used below to drop scores <= score_threshold.
template <typename T>
struct GreaterThanCubOp {
// Threshold the float-converted input is compared against.
float threshold_;
__host__ __device__ __forceinline__ GreaterThanCubOp(float threshold) : threshold_(threshold) {}
__host__ __device__ __forceinline__ bool operator()(const T &val) const {
// Compare in float so half inputs are widened rather than the threshold narrowed.
return (static_cast<float>(val) > threshold_);
}
};
// --- Small device-side utilities shared by the NMS kernels ---

// Exchanges the values pointed to by lhs and rhs.
template <typename T>
__inline__ __device__ void Swap(T *lhs, T *rhs) {
  const T saved = *lhs;
  *lhs = *rhs;
  *rhs = saved;
}

// Returns the larger of x and y.
template <typename T>
__inline__ __device__ T max(T x, T y) {
  return (x > y) ? x : y;
}

// Returns the smaller of x and y.
template <typename T>
__inline__ __device__ T min(T x, T y) {
  return (x < y) ? x : y;
}

// Normalizes a box stored as [x1, y1, x2, y2] so that (x1, y1) is the
// min-corner and (x2, y2) the max-corner.
template <typename T>
__inline__ __device__ void Flipped(T *box) {
  if (box[0] > box[2]) Swap(&box[0], &box[2]);
  if (box[1] > box[3]) Swap(&box[1], &box[3]);
}
// Decides whether box_B overlaps box_A with IoU above IOU_threshold.
// Boxes are [x1, y1, x2, y2] with the min-corner first; a_area is the
// precomputed area of box_A. Degenerate (zero-area) boxes never match.
// The final comparison — intersection > IOU_threshold * union — is done in
// float so that half-typed boxes are widened, not the threshold narrowed.
template <typename T>
__inline__ __device__ bool IouDecision(T *box_A, T *box_B, T a_area, float IOU_threshold) {
  const T b_area = (box_B[2] - box_B[0]) * (box_B[3] - box_B[1]);
  if (a_area == static_cast<T>(0.0) || b_area == static_cast<T>(0.0)) {
    return false;
  }
  // Corners of the intersection rectangle.
  const T left   = max(box_A[0], box_B[0]);
  const T top    = max(box_A[1], box_B[1]);
  const T right  = min(box_A[2], box_B[2]);
  const T bottom = min(box_A[3], box_B[3]);
  // Clamp to zero when the boxes do not overlap at all.
  const T inter_w = max(right - left, T(0));
  const T inter_h = max(bottom - top, T(0));
  const T intersection = inter_w * inter_h;
  const T union_area = a_area + b_area - intersection;
  return static_cast<float>(intersection) > static_cast<float>(union_area) * IOU_threshold;
}
// Copies the 4 coordinates of box i_original from `original` into slot
// i_selected of `selected`, then normalizes the corner order of the copy.
template <typename T>
__inline__ __device__ void SelectHelper(int i_selected, int i_original, T *original, T *selected) {
  T *dst = selected + i_selected * 4;
  T *src = original + i_original * 4;
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  Flipped(dst);
}

// Gathers boxes by index: selected[i] = original[index_buff[i]], one box
// (4 values) per element, using a grid-stride loop.
template <typename T>
__global__ void IndexMultiSelect(const int num_elements, int *index_buff, T *original, T *selected) {
  const int step = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += step) {
    SelectHelper(i, static_cast<int>(index_buff[i]), original, selected);
  }
}
// Converts num_elements scores of type T to float (grid-stride loop).
template <typename T>
__global__ void CastFloat(const int num_elements, T *scores, float *scores_float) {
  const int step = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += step) {
    scores_float[i] = static_cast<float>(scores[i]);
  }
}

// Zero-fills num_elements entries of target (grid-stride loop).
__global__ void SetZeros(const int num_elements, unsigned int *target) {
  const int step = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += step) {
    target[i] = 0u;
  }
}
// Host-side test of the bit for box number `bit` inside one suppression word.
// Only the position within a word (bit mod kNmsBoxesPerThread) is used;
// callers select the word via local[bit / kNmsBoxesPerThread].
template <typename T>
bool CheckBitHost(T bit_mask, int bit) {
  const int pos = bit % kNmsBoxesPerThread;
  return ((bit_mask >> pos) & 1) != 0;
}
// Fills sel_mask with pairwise-overlap bits for score-sorted boxes: for each
// box i, bit (j mod kNmsBoxesPerThread) of word sel_mask[i * u_num + j / kNmsBoxesPerThread]
// is set when a later box j (j > i) overlaps box i with IoU > iou_threshold.
// Launch layout: 2D blocks of kNmsBlockDim x kNmsBlockDim threads; the x
// dimension indexes the "suppressor" box i (cached in shared memory per
// block), the y dimension strides over candidate boxes j in batches of
// kNmsBoxesPerThread.
template <typename T>
__launch_bounds__(kNmsBlockDim *kNmsBlockDim, 4) __global__
void NMSReduce(const int num, int u_num, float iou_threshold, T *boxes_sort, int box_size, unsigned int *sel_mask) {
// Per-block cache of the boxes handled by this block's x-dimension.
__shared__ T shared_i_boxes[kNmsBlockDim * 4];
// Same thing with areas
__shared__ T shared_i_areas[kNmsBlockDim];
// The condition of the for loop is common to all threads in the block.
// This is necessary to be able to call __syncthreads() inside of the loop.
for (int i_block_offset = blockIdx.x * blockDim.x; i_block_offset < num; i_block_offset += blockDim.x * gridDim.x) {
const int i = i_block_offset + threadIdx.x;
if (i < num) {
// One 1D line load the boxes for x-dimension.
if (threadIdx.y == 0) {
shared_i_boxes[threadIdx.x * 4 + 0] = boxes_sort[i * 4 + 0];
shared_i_boxes[threadIdx.x * 4 + 1] = boxes_sort[i * 4 + 1];
shared_i_boxes[threadIdx.x * 4 + 2] = boxes_sort[i * 4 + 2];
shared_i_boxes[threadIdx.x * 4 + 3] = boxes_sort[i * 4 + 3];
T area = (boxes_sort[i * 4 + 2] - boxes_sort[i * 4 + 0]) * (boxes_sort[i * 4 + 3] - boxes_sort[i * 4 + 1]);
shared_i_areas[threadIdx.x] = area;
}
}
// Barrier: the staged boxes/areas must be visible to every thread before use.
__syncthreads();
for (int j_thread_offset = kNmsBoxesPerThread * (blockIdx.y * blockDim.y + threadIdx.y); j_thread_offset < num;
j_thread_offset += kNmsBoxesPerThread * blockDim.y * gridDim.y) {
// Bitmask accumulated for the batch of kNmsBoxesPerThread candidate boxes.
int above_threshold = 0;
// Make sure that threads are within valid domain.
bool valid = false;
// Loop over the next kNmsBoxesPerThread boxes and set corresponding bit
// if it is overlapping with current box
for (int ib = 0; ib < kNmsBoxesPerThread; ++ib) {
const int j = j_thread_offset + ib;
// Only pairs with j > i matter (boxes are score-sorted, i suppresses j).
if (i >= j || i >= num || j >= num) continue;
valid = true;
T *j_box = boxes_sort + j * 4;
T *i_box = shared_i_boxes + threadIdx.x * 4;
if (IouDecision(i_box, j_box, shared_i_areas[threadIdx.x], iou_threshold)) {
// we have score[j] <= score[i]. j > i
above_threshold |= (1U << ib);
}
}
if (valid) {
sel_mask[i * u_num + j_thread_offset / kNmsBoxesPerThread] = above_threshold;
}
}
__syncthreads(); // making sure everyone is done reading shared memory.
}
}
// Host-driven greedy non-max suppression over `num_input` boxes already
// sorted by descending score. NMSReduce fills sel_mask (device) with one
// suppression bit per (i, j) pair; the mask is copied to the host, where the
// greedy walk accepts the best surviving box and clears everything it
// suppresses. Surviving indices are compacted into output_ptr with
// hipcub::DeviceSelect::Flagged (num_keep receives the compacted count).
// Returns the number of kept boxes, clamped to max_output_size.
template <typename T>
int CalNms(const int num_input, int *num_keep, float iou_threshold, int max_output_size, T *boxes_sort, int *index_buff,
           int box_size, unsigned int *sel_mask, bool *sel_boxes, int *output_ptr, const uint32_t &device_id,
           hipStream_t cuda_stream) {
  // Number of 32-bit words holding one suppression bit per box.
  int u_num = (num_input + kNmsBoxesPerThread - 1) / kNmsBoxesPerThread;
  const int max_nms_mask_size = num_input * u_num;
  int thread_num = 256 < num_input ? 256 : num_input;
  hipDeviceProp_t prop;
  (void)hipGetDeviceProperties(&prop, device_id);
  int max_blocks = prop.multiProcessorCount;
  int block_num = min(static_cast<int>(((num_input - 1) / thread_num) + 1), max_blocks);
  hipLaunchKernelGGL(( SetZeros), dim3(block_num), dim3(thread_num), 0, cuda_stream, max_nms_mask_size, sel_mask);
  int num_blocks = (num_input + kNmsBlockDim - 1) / kNmsBlockDim;
  num_blocks = ::max(::min(num_blocks, kNmsBlockDimMax), 1);
  dim3 blocks(num_blocks, num_blocks);
  dim3 threads(kNmsBlockDim, kNmsBlockDim);
  hipLaunchKernelGGL(( NMSReduce), dim3(blocks), dim3(threads), 0, cuda_stream, num_input, u_num, iou_threshold, boxes_sort, box_size, sel_mask);
  std::vector<unsigned int> sel_mask_host(num_input * u_num);
  hipMemcpyAsync(sel_mask_host.data(), sel_mask, num_input * u_num * sizeof(unsigned int), hipMemcpyDeviceToHost,
                 cuda_stream);
  // BUGFIX: the copy above is asynchronous — wait for it before the host walk
  // below reads sel_mask_host (previously read with no synchronization).
  hipStreamSynchronize(cuda_stream);
  std::vector<int> local(u_num);
  std::vector<char> sel_boxes_host(num_input);
  // Every box starts out tentatively selected (all bits set).
  for (int box = 0; box < u_num; box += 1) {
    local[box] = 0xFFFFFFFF;
  }
  int accepted_boxes = 0;
  for (int box = 0; box < num_input - 1; ++box) {
    // Skip boxes already suppressed by an earlier accepted (higher-score) box.
    if (!CheckBitHost(local[box / kNmsBoxesPerThread], box)) {
      continue;
    }
    accepted_boxes += 1;
    // Clear every later box that overlaps the accepted one beyond the threshold.
    int offset = box * u_num;
    for (int b = 0; b < u_num; b += 1) {
      local[b] &= ~sel_mask_host[offset + b];
    }
    if (accepted_boxes > max_output_size) break;
  }
  for (int box = 0; box < num_input; box += 1) {
    sel_boxes_host[box] = CheckBitHost(local[box / kNmsBoxesPerThread], box);
  }
  hipMemcpyAsync(sel_boxes, sel_boxes_host.data(), num_input * sizeof(char), hipMemcpyHostToDevice, cuda_stream);
  // Compact the indices of surviving boxes; num_keep gets the flagged count.
  void *d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  (void)hipcub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, static_cast<int *>(nullptr),
                                      static_cast<char *>(nullptr), static_cast<int *>(nullptr),
                                      static_cast<int *>(nullptr), num_input, cuda_stream);
  (void)hipMalloc(&d_temp_storage, temp_storage_bytes);
  (void)hipcub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, index_buff, sel_boxes, output_ptr, num_keep,
                                      num_input, cuda_stream);
  (void)hipFree(d_temp_storage);
  int num_count = 0;
  hipMemcpyAsync(&num_count, num_keep, sizeof(int), hipMemcpyDeviceToHost, cuda_stream);
  // BUGFIX: synchronize AFTER the async copy of the kept-count (the original
  // synchronized before issuing it and then read num_count unsynchronized).
  // This also guarantees sel_boxes_host outlives the H2D copy above.
  hipStreamSynchronize(cuda_stream);
  num_count = max_output_size < num_count ? max_output_size : num_count;
  return num_count;
}
// End-to-end NMS driver. Pipeline:
//   1. cast scores to float (CastFloat kernel),
//   2. build identity indices and radix-sort (score, index) pairs descending,
//   3. keep only scores strictly above score_threshold
//      (hipcub::DeviceSelect::If); `count` receives how many survived,
//   4. gather the surviving boxes in score order (IndexMultiSelect),
//   5. run the greedy IoU suppression in CalNms.
// Returns the number of kept indices written to output_ptr (0 when nothing
// passes the score filter).
template <typename T, typename M, typename S>
int DoNms(const int num_input, int *count, int *num_keep, T *scores, T *boxes_in, M iou_threshold_, M score_threshold_,
          int *index_buff, S max_output_size_, int box_size, unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
          const uint32_t &device_id, hipStream_t cuda_stream) {
  float iou_threshold = static_cast<float>(iou_threshold_);
  float score_threshold = static_cast<float>(score_threshold_);
  int max_output_size = static_cast<int>(max_output_size_);
  hipMemset(count, 0, sizeof(int));
  float *scores_float = nullptr;
  size_t scores_float_temp_storage_bytes = num_input * sizeof(float);
  (void)hipMalloc(&scores_float, scores_float_temp_storage_bytes);
  int thread_num = 256 < num_input ? 256 : num_input;
  hipDeviceProp_t prop;
  (void)hipGetDeviceProperties(&prop, device_id);
  int max_blocks = prop.multiProcessorCount;
  int block_num = ::min(static_cast<int>(((num_input - 1) / thread_num) + 1), max_blocks);
  hipLaunchKernelGGL(( CastFloat), dim3(block_num), dim3(thread_num), 0, cuda_stream, num_input, scores, scores_float);
  // index_buff = [0, 1, ..., num_input - 1]
  auto policy = thrust::hip::par.on(cuda_stream);
  thrust::device_ptr<int> dev_ptr(index_buff);
  thrust::sequence(policy, dev_ptr, dev_ptr + num_input);
  hipStreamSynchronize(cuda_stream);
  // Size query, then the actual descending sort of (score, index) pairs.
  size_t cub_sort_temp_storage_bytes = 0;
  (void)hipcub::DeviceRadixSort::SortPairsDescending(nullptr, cub_sort_temp_storage_bytes,
                                                     static_cast<float *>(nullptr),  // scores
                                                     static_cast<float *>(nullptr),  // sorted scores
                                                     static_cast<int *>(nullptr),    // input indices
                                                     static_cast<int *>(nullptr),    // sorted indices
                                                     num_input,                      // num items
                                                     0, 8 * sizeof(float),           // sort all bits
                                                     cuda_stream);
  float *scores_sorted = nullptr;
  size_t scores_sorted_temp_storage_bytes = num_input * sizeof(float);
  (void)hipMalloc(&scores_sorted, scores_sorted_temp_storage_bytes);
  int *index_sorted = nullptr;
  size_t index_sorted_temp_storage_bytes = num_input * sizeof(int);
  (void)hipMalloc(&index_sorted, index_sorted_temp_storage_bytes);
  void *sort_temp_buff = nullptr;
  (void)hipMalloc(&sort_temp_buff, cub_sort_temp_storage_bytes);
  (void)hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_buff, cub_sort_temp_storage_bytes, scores_float,
                                                     scores_sorted, index_buff, index_sorted, num_input, 0,
                                                     8 * sizeof(float), cuda_stream);
  hipStreamSynchronize(cuda_stream);
  (void)hipFree(sort_temp_buff);
  // BUGFIX: the predicate filters the float-typed sorted scores, so it must
  // be instantiated with float (GreaterThanCubOp<T> would round-trip each
  // score through half when T == half, changing borderline comparisons).
  GreaterThanCubOp<float> score_limit(score_threshold);
  void *s_temp_storage = nullptr;
  size_t s_temp_storage_bytes = 0;
  (void)hipcub::DeviceSelect::If(nullptr, s_temp_storage_bytes, static_cast<float *>(nullptr),
                                 static_cast<float *>(nullptr), static_cast<int *>(nullptr), num_input, score_limit,
                                 cuda_stream);
  (void)hipMalloc(&s_temp_storage, s_temp_storage_bytes);
  (void)hipcub::DeviceSelect::If(s_temp_storage, s_temp_storage_bytes, scores_sorted, scores_float, count, num_input,
                                 score_limit, cuda_stream);
  (void)hipFree(s_temp_storage);
  (void)hipFree(scores_float);
  (void)hipFree(scores_sorted);
  T *boxes_sort = nullptr;
  size_t boxes_temp_storage_bytes = num_input * box_size * sizeof(T);
  (void)hipMalloc(&boxes_sort, boxes_temp_storage_bytes);
  hipLaunchKernelGGL(( IndexMultiSelect), dim3(block_num), dim3(thread_num), 0, cuda_stream, num_input, index_sorted, boxes_in, boxes_sort);
  int num_count = 0;
  // BUGFIX: issue the async copy of `count` BEFORE synchronizing (the
  // original synchronized first, then read num_count without waiting for the
  // copy — an undefined value).
  hipMemcpyAsync(&num_count, count, sizeof(int), hipMemcpyDeviceToHost, cuda_stream);
  hipStreamSynchronize(cuda_stream);
  const int num_to_keep = num_count;
  if (num_to_keep <= 0) {
    // BUGFIX: release the buffers allocated above before the early return
    // (the original leaked boxes_sort and index_sorted here).
    (void)hipFree(boxes_sort);
    (void)hipFree(index_sorted);
    return 0;
  }
  int output_size = CalNms(num_to_keep, num_keep, iou_threshold, max_output_size, boxes_sort, index_sorted, box_size,
                           sel_mask, sel_boxes, output_ptr, device_id, cuda_stream);
  (void)hipFree(boxes_sort);
  (void)hipFree(index_sorted);
  return output_size;
}
// Explicit instantiations exported from this translation unit: every
// supported combination of box/score type T in {float, half}, threshold type
// M in {float, half}, and max_output_size type S in {int, int64_t}.
template CUDA_LIB_EXPORT int DoNms<float, float, int>(const int num_input, int *count, int *num_keep, float *scores,
float *boxes_in, float iou_threshold_, float score_threshold_,
int *index_buff, int max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<float, float, int64_t>(const int num_input, int *count, int *num_keep, float *scores,
float *boxes_in, float iou_threshold_, float score_threshold_,
int *index_buff, int64_t max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, float, int>(const int num_input, int *count, int *num_keep, half *scores,
half *boxes_in, float iou_threshold_, float score_threshold_,
int *index_buff, int max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, float, int64_t>(const int num_input, int *count, int *num_keep, half *scores,
half *boxes_in, float iou_threshold_, float score_threshold_,
int *index_buff, int64_t max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<float, half, int>(const int num_input, int *count, int *num_keep, float *scores,
float *boxes_in, half iou_threshold_, half score_threshold_,
int *index_buff, int max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<float, half, int64_t>(const int num_input, int *count, int *num_keep, float *scores,
float *boxes_in, half iou_threshold_, half score_threshold_,
int *index_buff, int64_t max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, half, int>(const int num_input, int *count, int *num_keep, half *scores,
half *boxes_in, half iou_threshold_, half score_threshold_,
int *index_buff, int max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, half, int64_t>(const int num_input, int *count, int *num_keep, half *scores,
half *boxes_in, half iou_threshold_, half score_threshold_,
int *index_buff, int64_t max_output_size_, int box_size,
unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
const uint32_t &device_id, hipStream_t cuda_stream);
| 20e6e15b805ebe8fdd89e8ee4c13f72fc5f039b0.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/non_max_suppressionv3_impl.cuh"
#include <cub/cub.cuh>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <vector>
#include <limits>
#include <iostream>
#include <algorithm>
constexpr int kNmsBlockDim = 16;
constexpr int kNmsBlockDimMax = 128;
constexpr int kNmsBoxesPerThread = 8 * sizeof(int);
// Unary predicate for cub::DeviceSelect::If: returns true when the value,
// converted to float, is strictly greater than the threshold supplied at
// construction. Used below to drop scores <= score_threshold.
template <typename T>
struct GreaterThanCubOp {
// Threshold the float-converted input is compared against.
float threshold_;
__host__ __device__ __forceinline__ GreaterThanCubOp(float threshold) : threshold_(threshold) {}
__host__ __device__ __forceinline__ bool operator()(const T &val) const {
// Compare in float so half inputs are widened rather than the threshold narrowed.
return (static_cast<float>(val) > threshold_);
}
};
// --- Small device-side utilities shared by the NMS kernels ---

// Exchanges the values pointed to by lhs and rhs.
template <typename T>
__inline__ __device__ void Swap(T *lhs, T *rhs) {
  const T saved = *lhs;
  *lhs = *rhs;
  *rhs = saved;
}

// Returns the larger of x and y.
template <typename T>
__inline__ __device__ T max(T x, T y) {
  return (x > y) ? x : y;
}

// Returns the smaller of x and y.
template <typename T>
__inline__ __device__ T min(T x, T y) {
  return (x < y) ? x : y;
}

// Normalizes a box stored as [x1, y1, x2, y2] so that (x1, y1) is the
// min-corner and (x2, y2) the max-corner.
template <typename T>
__inline__ __device__ void Flipped(T *box) {
  if (box[0] > box[2]) Swap(&box[0], &box[2]);
  if (box[1] > box[3]) Swap(&box[1], &box[3]);
}
// Decides whether box_B overlaps box_A with IoU above IOU_threshold.
// Boxes are [x1, y1, x2, y2] with the min-corner first; a_area is the
// precomputed area of box_A. Degenerate (zero-area) boxes never match.
// The final comparison — intersection > IOU_threshold * union — is done in
// float so that half-typed boxes are widened, not the threshold narrowed.
template <typename T>
__inline__ __device__ bool IouDecision(T *box_A, T *box_B, T a_area, float IOU_threshold) {
  const T b_area = (box_B[2] - box_B[0]) * (box_B[3] - box_B[1]);
  if (a_area == static_cast<T>(0.0) || b_area == static_cast<T>(0.0)) {
    return false;
  }
  // Corners of the intersection rectangle.
  const T left   = max(box_A[0], box_B[0]);
  const T top    = max(box_A[1], box_B[1]);
  const T right  = min(box_A[2], box_B[2]);
  const T bottom = min(box_A[3], box_B[3]);
  // Clamp to zero when the boxes do not overlap at all.
  const T inter_w = max(right - left, T(0));
  const T inter_h = max(bottom - top, T(0));
  const T intersection = inter_w * inter_h;
  const T union_area = a_area + b_area - intersection;
  return static_cast<float>(intersection) > static_cast<float>(union_area) * IOU_threshold;
}
// Copies the 4 coordinates of box i_original from `original` into slot
// i_selected of `selected`, then normalizes the corner order of the copy.
template <typename T>
__inline__ __device__ void SelectHelper(int i_selected, int i_original, T *original, T *selected) {
  T *dst = selected + i_selected * 4;
  T *src = original + i_original * 4;
  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  Flipped(dst);
}

// Gathers boxes by index: selected[i] = original[index_buff[i]], one box
// (4 values) per element, using a grid-stride loop.
template <typename T>
__global__ void IndexMultiSelect(const int num_elements, int *index_buff, T *original, T *selected) {
  const int step = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += step) {
    SelectHelper(i, static_cast<int>(index_buff[i]), original, selected);
  }
}
// Converts num_elements scores of type T to float (grid-stride loop).
template <typename T>
__global__ void CastFloat(const int num_elements, T *scores, float *scores_float) {
  const int step = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += step) {
    scores_float[i] = static_cast<float>(scores[i]);
  }
}

// Zero-fills num_elements entries of target (grid-stride loop).
__global__ void SetZeros(const int num_elements, unsigned int *target) {
  const int step = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += step) {
    target[i] = 0u;
  }
}
// Host-side test of the bit for box number `bit` inside one suppression word.
// Only the position within a word (bit mod kNmsBoxesPerThread) is used;
// callers select the word via local[bit / kNmsBoxesPerThread].
template <typename T>
bool CheckBitHost(T bit_mask, int bit) {
  const int pos = bit % kNmsBoxesPerThread;
  return ((bit_mask >> pos) & 1) != 0;
}
// Fills sel_mask with pairwise-overlap bits for score-sorted boxes: for each
// box i, bit (j mod kNmsBoxesPerThread) of word sel_mask[i * u_num + j / kNmsBoxesPerThread]
// is set when a later box j (j > i) overlaps box i with IoU > iou_threshold.
// Launch layout: 2D blocks of kNmsBlockDim x kNmsBlockDim threads; the x
// dimension indexes the "suppressor" box i (cached in shared memory per
// block), the y dimension strides over candidate boxes j in batches of
// kNmsBoxesPerThread.
template <typename T>
__launch_bounds__(kNmsBlockDim *kNmsBlockDim, 4) __global__
void NMSReduce(const int num, int u_num, float iou_threshold, T *boxes_sort, int box_size, unsigned int *sel_mask) {
// Per-block cache of the boxes handled by this block's x-dimension.
__shared__ T shared_i_boxes[kNmsBlockDim * 4];
// Same thing with areas
__shared__ T shared_i_areas[kNmsBlockDim];
// The condition of the for loop is common to all threads in the block.
// This is necessary to be able to call __syncthreads() inside of the loop.
for (int i_block_offset = blockIdx.x * blockDim.x; i_block_offset < num; i_block_offset += blockDim.x * gridDim.x) {
const int i = i_block_offset + threadIdx.x;
if (i < num) {
// One 1D line load the boxes for x-dimension.
if (threadIdx.y == 0) {
shared_i_boxes[threadIdx.x * 4 + 0] = boxes_sort[i * 4 + 0];
shared_i_boxes[threadIdx.x * 4 + 1] = boxes_sort[i * 4 + 1];
shared_i_boxes[threadIdx.x * 4 + 2] = boxes_sort[i * 4 + 2];
shared_i_boxes[threadIdx.x * 4 + 3] = boxes_sort[i * 4 + 3];
T area = (boxes_sort[i * 4 + 2] - boxes_sort[i * 4 + 0]) * (boxes_sort[i * 4 + 3] - boxes_sort[i * 4 + 1]);
shared_i_areas[threadIdx.x] = area;
}
}
// Barrier: the staged boxes/areas must be visible to every thread before use.
__syncthreads();
for (int j_thread_offset = kNmsBoxesPerThread * (blockIdx.y * blockDim.y + threadIdx.y); j_thread_offset < num;
j_thread_offset += kNmsBoxesPerThread * blockDim.y * gridDim.y) {
// Bitmask accumulated for the batch of kNmsBoxesPerThread candidate boxes.
int above_threshold = 0;
// Make sure that threads are within valid domain.
bool valid = false;
// Loop over the next kNmsBoxesPerThread boxes and set corresponding bit
// if it is overlapping with current box
for (int ib = 0; ib < kNmsBoxesPerThread; ++ib) {
const int j = j_thread_offset + ib;
// Only pairs with j > i matter (boxes are score-sorted, i suppresses j).
if (i >= j || i >= num || j >= num) continue;
valid = true;
T *j_box = boxes_sort + j * 4;
T *i_box = shared_i_boxes + threadIdx.x * 4;
if (IouDecision(i_box, j_box, shared_i_areas[threadIdx.x], iou_threshold)) {
// we have score[j] <= score[i]. j > i
above_threshold |= (1U << ib);
}
}
if (valid) {
sel_mask[i * u_num + j_thread_offset / kNmsBoxesPerThread] = above_threshold;
}
}
__syncthreads(); // making sure everyone is done reading shared memory.
}
}
// Host-side greedy phase of NMS.
// Zeroes sel_mask, launches NMSReduce to build the pairwise-overlap bit
// masks, copies them to the host and walks them sequentially in
// descending-score order (each accepted box suppresses every later box it
// overlaps too much), then compacts the surviving indices into output_ptr
// with cub::DeviceSelect::Flagged. Returns the number of kept boxes, clipped
// to max_output_size. boxes_sort / index_buff must already be sorted by
// descending score; num_keep is a single-int device buffer.
template <typename T>
int CalNms(const int num_input, int *num_keep, float iou_threshold, int max_output_size, T *boxes_sort, int *index_buff,
           int box_size, unsigned int *sel_mask, bool *sel_boxes, int *output_ptr, const uint32_t &device_id,
           cudaStream_t cuda_stream) {
  // One mask word covers kNmsBoxesPerThread boxes.
  int u_num = (num_input + kNmsBoxesPerThread - 1) / kNmsBoxesPerThread;
  const int max_nms_mask_size = num_input * u_num;
  int thread_num = 256 < num_input ? 256 : num_input;
  cudaDeviceProp prop;
  (void)cudaGetDeviceProperties(&prop, device_id);
  int max_blocks = prop.multiProcessorCount;
  int block_num = min(static_cast<int>(((num_input - 1) / thread_num) + 1), max_blocks);
  SetZeros<<<block_num, thread_num, 0, cuda_stream>>>(max_nms_mask_size, sel_mask);
  // 2-D launch over (i-boxes, j-words); grid clamped to kNmsBlockDimMax.
  int num_blocks = (num_input + kNmsBlockDim - 1) / kNmsBlockDim;
  num_blocks = std::max(std::min(num_blocks, kNmsBlockDimMax), 1);
  dim3 blocks(num_blocks, num_blocks);
  dim3 threads(kNmsBlockDim, kNmsBlockDim);
  NMSReduce<<<blocks, threads, 0, cuda_stream>>>(num_input, u_num, iou_threshold, boxes_sort, box_size, sel_mask);
  std::vector<unsigned int> sel_mask_host(num_input * u_num);
  cudaMemcpyAsync(sel_mask_host.data(), sel_mask, num_input * u_num * sizeof(unsigned int), cudaMemcpyDeviceToHost,
                  cuda_stream);
  // BUGFIX: the D2H copy above is asynchronous; it must complete before the
  // host walks sel_mask_host below (previously there was no sync here).
  cudaStreamSynchronize(cuda_stream);
  // local[] holds one "still alive" bit per box, initially all set.
  std::vector<int> local(u_num);
  std::vector<char> sel_boxes_host(num_input);
  for (int box = 0; box < u_num; box += 1) {
    local[box] = 0xFFFFFFFF;
  }
  // Greedy suppression pass in descending-score order.
  int accepted_boxes = 0;
  for (int box = 0; box < num_input - 1; ++box) {
    if (!CheckBitHost(local[box / kNmsBoxesPerThread], box)) {
      continue;
    }
    accepted_boxes += 1;
    int offset = box * u_num;
    for (int b = 0; b < u_num; b += 1) {
      local[b] &= ~sel_mask_host[offset + b];
    }
    if (accepted_boxes > max_output_size) break;
  }
  for (int box = 0; box < num_input; box += 1) {
    sel_boxes_host[box] = CheckBitHost(local[box / kNmsBoxesPerThread], box);
  }
  // sel_boxes is a device bool buffer; char (1 byte) matches its layout.
  cudaMemcpyAsync(sel_boxes, sel_boxes_host.data(), num_input * sizeof(char), cudaMemcpyHostToDevice, cuda_stream);
  // Compact the indices of surviving boxes; num_keep receives the count.
  void *d_temp_storage = nullptr;
  size_t temp_storage_bytes = 0;
  (void)cub::DeviceSelect::Flagged(nullptr, temp_storage_bytes, static_cast<int *>(nullptr),
                                   static_cast<char *>(nullptr), static_cast<int *>(nullptr),
                                   static_cast<int *>(nullptr), num_input, cuda_stream);
  (void)cudaMalloc(&d_temp_storage, temp_storage_bytes);
  (void)cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, index_buff, sel_boxes, output_ptr, num_keep,
                                   num_input, cuda_stream);
  (void)cudaFree(d_temp_storage);
  int num_count = 0;
  cudaMemcpyAsync(&num_count, num_keep, sizeof(int), cudaMemcpyDeviceToHost, cuda_stream);
  // BUGFIX: synchronize AFTER copying num_keep back (the sync used to come
  // before the copy, so num_count could be read while still in flight).
  cudaStreamSynchronize(cuda_stream);
  num_count = max_output_size < num_count ? max_output_size : num_count;
  return num_count;
}
// End-to-end NMS pipeline.
// Casts scores to float, sorts (score, index) pairs by descending score with
// cub radix sort, counts how many sorted scores exceed score_threshold,
// gathers the boxes into sorted order, and runs CalNms() on the surviving
// prefix. Returns the number of indices written to output_ptr (0 when no
// score passes the threshold). count / num_keep are single-int device
// buffers; index_buff / sel_mask / sel_boxes / output_ptr are caller-owned
// device workspaces.
template <typename T, typename M, typename S>
int DoNms(const int num_input, int *count, int *num_keep, T *scores, T *boxes_in, M iou_threshold_, M score_threshold_,
          int *index_buff, S max_output_size_, int box_size, unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
          const uint32_t &device_id, cudaStream_t cuda_stream) {
  float iou_threshold = static_cast<float>(iou_threshold_);
  float score_threshold = static_cast<float>(score_threshold_);
  int max_output_size = static_cast<int>(max_output_size_);
  cudaMemset(count, 0, sizeof(int));
  // Scores are handled as float so cub/thrust operate on a uniform key type.
  float *scores_float = nullptr;
  size_t scores_float_temp_storage_bytes = num_input * sizeof(float);
  (void)cudaMalloc(&scores_float, scores_float_temp_storage_bytes);
  int thread_num = 256 < num_input ? 256 : num_input;
  cudaDeviceProp prop;
  (void)cudaGetDeviceProperties(&prop, device_id);
  int max_blocks = prop.multiProcessorCount;
  int block_num = std::min(static_cast<int>(((num_input - 1) / thread_num) + 1), max_blocks);
  CastFloat<<<block_num, thread_num, 0, cuda_stream>>>(num_input, scores, scores_float);
  // index_buff := [0, 1, ..., num_input - 1]
  auto policy = thrust::cuda::par.on(cuda_stream);
  thrust::device_ptr<int> dev_ptr(index_buff);
  thrust::sequence(policy, dev_ptr, dev_ptr + num_input);
  cudaStreamSynchronize(cuda_stream);
  // Query temp-storage size, then sort (score, index) pairs descending.
  size_t cub_sort_temp_storage_bytes = 0;
  (void)cub::DeviceRadixSort::SortPairsDescending(nullptr, cub_sort_temp_storage_bytes,
                                                  static_cast<float *>(nullptr),  // scores
                                                  static_cast<float *>(nullptr),  // sorted scores
                                                  static_cast<int *>(nullptr),    // input indices
                                                  static_cast<int *>(nullptr),    // sorted indices
                                                  num_input,                      // num items
                                                  0, 8 * sizeof(float),           // sort all bits
                                                  cuda_stream);
  float *scores_sorted = nullptr;
  size_t scores_sorted_temp_storage_bytes = num_input * sizeof(float);
  (void)cudaMalloc(&scores_sorted, scores_sorted_temp_storage_bytes);
  int *index_sorted = nullptr;
  size_t index_sorted_temp_storage_bytes = num_input * sizeof(int);
  (void)cudaMalloc(&index_sorted, index_sorted_temp_storage_bytes);
  void *sort_temp_buff = nullptr;
  (void)cudaMalloc(&sort_temp_buff, cub_sort_temp_storage_bytes);
  (void)cub::DeviceRadixSort::SortPairsDescending(sort_temp_buff, cub_sort_temp_storage_bytes, scores_float,
                                                  scores_sorted, index_buff, index_sorted, num_input, 0,
                                                  8 * sizeof(float), cuda_stream);
  cudaStreamSynchronize(cuda_stream);
  (void)cudaFree(sort_temp_buff);
  // Count the sorted scores above score_threshold into `count`.
  GreaterThanCubOp<T> score_limit(score_threshold);
  void *s_temp_storage = nullptr;
  size_t s_temp_storage_bytes = 0;
  (void)cub::DeviceSelect::If(nullptr, s_temp_storage_bytes, static_cast<float *>(nullptr),
                              static_cast<float *>(nullptr), static_cast<int *>(nullptr), num_input, score_limit,
                              cuda_stream);
  (void)cudaMalloc(&s_temp_storage, s_temp_storage_bytes);
  (void)cub::DeviceSelect::If(s_temp_storage, s_temp_storage_bytes, scores_sorted, scores_float, count, num_input,
                              score_limit, cuda_stream);
  (void)cudaFree(s_temp_storage);
  (void)cudaFree(scores_float);
  (void)cudaFree(scores_sorted);
  // Gather the boxes into sorted-score order.
  T *boxes_sort = nullptr;
  size_t boxes_temp_storage_bytes = num_input * box_size * sizeof(T);
  (void)cudaMalloc(&boxes_sort, boxes_temp_storage_bytes);
  IndexMultiSelect<<<block_num, thread_num, 0, cuda_stream>>>(num_input, index_sorted, boxes_in, boxes_sort);
  cudaStreamSynchronize(cuda_stream);
  int num_count = 0;
  cudaMemcpyAsync(&num_count, count, sizeof(int), cudaMemcpyDeviceToHost, cuda_stream);
  // BUGFIX: wait for the async D2H copy to land before reading num_count
  // (previously the value could be consumed before the transfer finished).
  cudaStreamSynchronize(cuda_stream);
  const int num_to_keep = num_count;
  if (num_to_keep <= 0) {
    // BUGFIX: release the scratch buffers on the early-out path too
    // (they were previously leaked when nothing passed the score filter).
    (void)cudaFree(boxes_sort);
    (void)cudaFree(index_sorted);
    return 0;
  }
  int output_size = CalNms(num_to_keep, num_keep, iou_threshold, max_output_size, boxes_sort, index_sorted, box_size,
                           sel_mask, sel_boxes, output_ptr, device_id, cuda_stream);
  (void)cudaFree(boxes_sort);
  (void)cudaFree(index_sorted);
  return output_size;
}
// Explicit instantiations exported from this translation unit: every
// supported combination of box/score type T (float, half), threshold type M
// (float, half) and max_output_size type S (int, int64_t).
template CUDA_LIB_EXPORT int DoNms<float, float, int>(const int num_input, int *count, int *num_keep, float *scores,
                                                      float *boxes_in, float iou_threshold_, float score_threshold_,
                                                      int *index_buff, int max_output_size_, int box_size,
                                                      unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                      const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<float, float, int64_t>(const int num_input, int *count, int *num_keep, float *scores,
                                                          float *boxes_in, float iou_threshold_, float score_threshold_,
                                                          int *index_buff, int64_t max_output_size_, int box_size,
                                                          unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                          const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, float, int>(const int num_input, int *count, int *num_keep, half *scores,
                                                     half *boxes_in, float iou_threshold_, float score_threshold_,
                                                     int *index_buff, int max_output_size_, int box_size,
                                                     unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                     const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, float, int64_t>(const int num_input, int *count, int *num_keep, half *scores,
                                                         half *boxes_in, float iou_threshold_, float score_threshold_,
                                                         int *index_buff, int64_t max_output_size_, int box_size,
                                                         unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                         const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<float, half, int>(const int num_input, int *count, int *num_keep, float *scores,
                                                     float *boxes_in, half iou_threshold_, half score_threshold_,
                                                     int *index_buff, int max_output_size_, int box_size,
                                                     unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                     const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<float, half, int64_t>(const int num_input, int *count, int *num_keep, float *scores,
                                                         float *boxes_in, half iou_threshold_, half score_threshold_,
                                                         int *index_buff, int64_t max_output_size_, int box_size,
                                                         unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                         const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, half, int>(const int num_input, int *count, int *num_keep, half *scores,
                                                    half *boxes_in, half iou_threshold_, half score_threshold_,
                                                    int *index_buff, int max_output_size_, int box_size,
                                                    unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                    const uint32_t &device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT int DoNms<half, half, int64_t>(const int num_input, int *count, int *num_keep, half *scores,
                                                        half *boxes_in, half iou_threshold_, half score_threshold_,
                                                        int *index_buff, int64_t max_output_size_, int box_size,
                                                        unsigned int *sel_mask, bool *sel_boxes, int *output_ptr,
                                                        const uint32_t &device_id, cudaStream_t cuda_stream);
|
75d986ec5210460712d6d1635f4a3fb8500aacbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
// One thread per character: store the character's ASCII code into b and echo it.
__global__ void ascii(char *a, int *b){
	int idx = threadIdx.x;
	b[idx] = static_cast<int>(a[idx]);
	printf("%d\t", b[idx]);
	printf("\n");
}
// One thread per element: reverse the decimal digits of b[idx] into c[idx].
// NOTE: b[idx] is consumed in the process and ends up 0, as in the original.
__global__ void reverse(int *b, int *c){
	int idx = threadIdx.x;
	int remaining = b[idx];
	int reversed = 0;
	while (remaining != 0)
	{
		reversed = reversed * 10 + remaining % 10;
		remaining /= 10;
	}
	b[idx] = remaining;
	c[idx] = reversed;
	printf("%d\t", c[idx]);
	printf("\n");
}
// Reads a string, converts each character to its ASCII code on the GPU,
// reverses the decimal digits of each code on the GPU, then prints values
// that land back in the printable range as characters.
int main(void){
	int n, i, size, size1, b[100], *d_b, *d_c, c[100];
	char a[100], *d_a;
	printf("Enter the string to be toggled:\n");
	// NOTE(review): unbounded %s can overflow a[100]; consider "%99s".
	scanf("%s",a);
	n=strlen(a);
	//for(i=0; i<n; i++)
	//{
	//	b[i] = int(a[i]);
	//}
	size = sizeof(char);
	size1 = sizeof(int);
	hipMalloc((void **)&d_a,size*n);
	hipMalloc((void **)&d_b,size1*n);
	hipMalloc((void **)&d_c,size1*n);
	hipMemcpy(d_a,a,size*n,hipMemcpyHostToDevice);
	// One thread per character fills d_b with ASCII codes.
	hipLaunchKernelGGL(( ascii) , dim3(1),dim3(n), 0, 0, d_a, d_b);
	hipMemcpy(b,d_b,size1*n,hipMemcpyDeviceToHost);
	for(i=0; i<n; i++)
	{printf("%d\t",b[i]);}
	printf("\n");
	hipMemcpy(d_b,b,size1*n,hipMemcpyHostToDevice);
	// Reverses the digits of each code; the kernel consumes d_b.
	hipLaunchKernelGGL(( reverse) , dim3(1),dim3(n), 0, 0, d_b, d_c);
	hipMemcpy(c,d_c,size1*n,hipMemcpyDeviceToHost);
	for(i=0; i<n; i++)
	{
	// Print as a character when the reversed value is in the printable range.
	if(c[i]>26 && c[i]<126)
	{printf("%c\t",c[i]);}
	else
	{printf("%d\t",c[i]);}
	}
	printf("\n");
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_c);
	return 0;
} | 75d986ec5210460712d6d1635f4a3fb8500aacbc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
// Per-thread: write the character's ASCII code into b and print it.
__global__ void ascii(char *a, int *b){
	int i = threadIdx.x;
	b[i] = (int)a[i];
	printf("%d\t", b[i]);
	printf("\n");
}
// Per-thread: reverse the decimal digits of b[i] into c[i].
// NOTE: matches the original's destructive update — b[i] ends up 0.
__global__ void reverse(int *b, int *c){
	int i = threadIdx.x;
	int rev = 0;
	int n = b[i];
	while (n != 0)
	{
		rev = rev * 10 + n % 10;
		n /= 10;
	}
	b[i] = n;
	c[i] = rev;
	printf("%d\t", c[i]);
	printf("\n");
}
// CUDA twin of the HIP program above: ASCII-encode a string on the GPU,
// reverse each code's decimal digits on the GPU, print the results.
int main(void){
	int n, i, size, size1, b[100], *d_b, *d_c, c[100];
	char a[100], *d_a;
	printf("Enter the string to be toggled:\n");
	// NOTE(review): unbounded %s can overflow a[100]; consider "%99s".
	scanf("%s",a);
	n=strlen(a);
	//for(i=0; i<n; i++)
	//{
	//	b[i] = int(a[i]);
	//}
	size = sizeof(char);
	size1 = sizeof(int);
	cudaMalloc((void **)&d_a,size*n);
	cudaMalloc((void **)&d_b,size1*n);
	cudaMalloc((void **)&d_c,size1*n);
	cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice);
	// One thread per character fills d_b with ASCII codes.
	ascii <<<1,n>>> (d_a, d_b);
	cudaMemcpy(b,d_b,size1*n,cudaMemcpyDeviceToHost);
	for(i=0; i<n; i++)
	{printf("%d\t",b[i]);}
	printf("\n");
	cudaMemcpy(d_b,b,size1*n,cudaMemcpyHostToDevice);
	// Reverses the digits of each code; the kernel consumes d_b.
	reverse <<<1,n>>> (d_b, d_c);
	cudaMemcpy(c,d_c,size1*n,cudaMemcpyDeviceToHost);
	for(i=0; i<n; i++)
	{
	// Print as a character when the reversed value is in the printable range.
	if(c[i]>26 && c[i]<126)
	{printf("%c\t",c[i]);}
	else
	{printf("%d\t",c[i]);}
	}
	printf("\n");
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
} |
3261d83cf61122cdd280940e4c422ec93c0f11ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__constant__ float *c_Kernel;
// Row convolution combined with 2x horizontal upsampling: each thread emits
// two adjacent destination pixels (even/odd polyphase taps of c_Kernel) per
// result step. A row segment of d_Src plus Halo_steps halo blocks on each
// side is staged into dynamic shared memory first.
// Launch: ROWS_BLOCKDIM_X x ROWS_BLOCKDIM_Y threads with
// ROWS_BLOCKDIM_Y * (ROWS_RESULT_STEPS + 2*Halo_steps) * ROWS_BLOCKDIM_X
// floats of dynamic shared memory.
// NOTE(review): tap indexing assumes c_Kernel holds 2*filter_Rad+1 entries
// centred at index filter_Rad — confirm against the host-side upload.
__global__ void convolutionRowsKernel_up_smp( float *d_Dst, float *d_Src, int imageW, int n_imageW, int imageH, int filter_Rad, int Halo_steps )
{
	extern __shared__ float s_Data[];
	//Offset to the left halo edge
	const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - Halo_steps) * ROWS_BLOCKDIM_X + threadIdx.x;
	const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
	// Output x position in the upsampled (n_imageW-wide) destination image.
	const int baseX1 = blockIdx.x * ROWS_RESULT_STEPS * 2 * ROWS_BLOCKDIM_X + 2 * threadIdx.x;
	if (baseY < imageH)
	{
		d_Src += baseY * imageW + baseX;
		d_Dst += baseY * n_imageW + baseX1;
		//Load left halo
		//#pragma unroll
		for (int i = 0; i < Halo_steps; ++i)
		{
			s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
		}
		//Load right halo and main data
		//#pragma unroll
		for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS + Halo_steps; ++i)
		{
			s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
		}
		//Compute and store results
		__syncthreads();
		//#pragma unroll
		for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS; ++i)
		{
			int pos_x = (baseX1 + 2 * (i - Halo_steps) * ROWS_BLOCKDIM_X);
			if (pos_x < n_imageW)
			{
				float sum_1 = 0.0f, sum_2 = 0.0f;
				//#pragma unroll
				// Even taps accumulate into sum_1, odd taps into sum_2.
				for (int l = -(filter_Rad / 2); l <= filter_Rad / 2; ++l)
				{
					int t = 2 * l;
					float temp = s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + l];
					sum_1 += c_Kernel[filter_Rad + t] * temp *2.0f;
					sum_2 += c_Kernel[filter_Rad + t - 1] * temp *2.0f;
				}
				sum_2 += c_Kernel[2 * filter_Rad] * 2.0f * s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + filter_Rad / 2 + 1];
				d_Dst[2 * (i - Halo_steps)* ROWS_BLOCKDIM_X] = sum_1;
				if (pos_x + 1 < n_imageW)	d_Dst[2 * (i - Halo_steps) * ROWS_BLOCKDIM_X + 1] = sum_2;
			}
		}
	}
} | 3261d83cf61122cdd280940e4c422ec93c0f11ce.cu | #include "includes.h"
__constant__ float *c_Kernel;
// Row convolution combined with 2x horizontal upsampling (CUDA twin of the
// HIP kernel above): each thread emits two adjacent destination pixels using
// the even/odd polyphase taps of c_Kernel, after staging a source row segment
// plus halos into dynamic shared memory.
// NOTE(review): tap indexing assumes c_Kernel holds 2*filter_Rad+1 entries
// centred at index filter_Rad — confirm against the host-side upload.
__global__ void convolutionRowsKernel_up_smp( float *d_Dst, float *d_Src, int imageW, int n_imageW, int imageH, int filter_Rad, int Halo_steps )
{
	extern __shared__ float s_Data[];
	//Offset to the left halo edge
	const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - Halo_steps) * ROWS_BLOCKDIM_X + threadIdx.x;
	const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
	// Output x position in the upsampled (n_imageW-wide) destination image.
	const int baseX1 = blockIdx.x * ROWS_RESULT_STEPS * 2 * ROWS_BLOCKDIM_X + 2 * threadIdx.x;
	if (baseY < imageH)
	{
		d_Src += baseY * imageW + baseX;
		d_Dst += baseY * n_imageW + baseX1;
		//Load left halo
		//#pragma unroll
		for (int i = 0; i < Halo_steps; ++i)
		{
			s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
		}
		//Load right halo and main data
		//#pragma unroll
		for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS + Halo_steps; ++i)
		{
			s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
		}
		//Compute and store results
		__syncthreads();
		//#pragma unroll
		for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS; ++i)
		{
			int pos_x = (baseX1 + 2 * (i - Halo_steps) * ROWS_BLOCKDIM_X);
			if (pos_x < n_imageW)
			{
				float sum_1 = 0.0f, sum_2 = 0.0f;
				//#pragma unroll
				// Even taps accumulate into sum_1, odd taps into sum_2.
				for (int l = -(filter_Rad / 2); l <= filter_Rad / 2; ++l)
				{
					int t = 2 * l;
					float temp = s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + l];
					sum_1 += c_Kernel[filter_Rad + t] * temp *2.0f;
					sum_2 += c_Kernel[filter_Rad + t - 1] * temp *2.0f;
				}
				sum_2 += c_Kernel[2 * filter_Rad] * 2.0f * s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + filter_Rad / 2 + 1];
				d_Dst[2 * (i - Halo_steps)* ROWS_BLOCKDIM_X] = sum_1;
				if (pos_x + 1 < n_imageW)	d_Dst[2 * (i - Halo_steps) * ROWS_BLOCKDIM_X + 1] = sum_2;
			}
		}
	}
} |
c01dfe23201fc3eb556edd8604418e0488f29903.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#define CHECK_CUBLAS_ERROR(err) \
if (err != HIPBLAS_STATUS_SUCCESS) \
{\
printf("[%s:%d] CUBLAS error %d\n", __FILE__, __LINE__, err);\
exit(EXIT_FAILURE);\
}
// Square matrix multiply C = A * B, intended to use the BLAS library
// included above. Currently a stub: it only validates the square-shape
// precondition (ROW_A == COL_A == COL_B) and aborts otherwise; the actual
// GEMM call is still TODO.
void mat_mul_cublas(float *A, float *B, float *C,
int ROW_A, int COL_A, int COL_B) {
int N = ROW_A;  // square dimension for the eventual GEMM call
if(ROW_A != COL_A || ROW_A != COL_B)
{
printf("Support Square Matrix Only!\n");
exit(EXIT_FAILURE);
}
/******************** TODO *********************/
}
| c01dfe23201fc3eb556edd8604418e0488f29903.cu | #include <stdio.h>
#include <stdlib.h>
#include <cublas_v2.h>
#define CHECK_CUBLAS_ERROR(err) \
if (err != CUBLAS_STATUS_SUCCESS) \
{\
printf("[%s:%d] CUBLAS error %d\n", __FILE__, __LINE__, err);\
exit(EXIT_FAILURE);\
}
// Square matrix multiply C = A * B, intended to use cuBLAS (included above).
// Currently a stub: it only validates the square-shape precondition
// (ROW_A == COL_A == COL_B) and aborts otherwise; the GEMM call is TODO.
void mat_mul_cublas(float *A, float *B, float *C,
int ROW_A, int COL_A, int COL_B) {
int N = ROW_A;  // square dimension for the eventual GEMM call
if(ROW_A != COL_A || ROW_A != COL_B)
{
printf("Support Square Matrix Only!\n");
exit(EXIT_FAILURE);
}
/******************** TODO *********************/
}
|
c2d9631bd5bed431b502bc6db7f5ba2f404d77a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ROWWISE_WEIGHTS_NORMS_LAYER_INSTANTIATE
#include "lbann/layers/misc/rowwise_weights_norms_impl.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
// Accumulates the sum of squares of each row of `mat` (height x width,
// leading dimension mat_ldim) into row_sqsums[row]. The output must be
// zero-initialized by the caller (see row_sqsums() below).
template <typename T>
__global__ void row_sqsums_kernel(
  size_t height,
  size_t width,
  const T* __restrict__ mat,
  size_t mat_ldim,
  T* __restrict__ row_sqsums) {
  // Indices
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // Accumulate sum of squares for each matrix row
  for (size_t row=gidx; row<height; row+=nthreadsx) {
    T sqsum{0};
    for (size_t col=gidy; col<width; col+=nthreadsy) {
      const auto& x = mat[row+col*mat_ldim];
      sqsum += x*x;
    }
    // Multiple y-lanes/y-blocks may target the same row: combine atomically.
    gpu_lib::atomic_add(&row_sqsums[row], sqsum);
  }
}
} // namespace <anon>
// Computes per-row sums of squares of `mat` into `row_sqsums`
// (zeroing the accumulator first, since the kernel adds atomically).
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_sqsums(
  const El::Matrix<TensorDataType, Device>& mat,
  El::Matrix<TensorDataType, Device>& row_sqsums) {
  // Launch kernel
  El::Zero(row_sqsums);
  if (!mat.IsEmpty()) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(row_sqsums),
                                       gpu::get_sync_info(mat));
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    block_dims.y = 1;
    grid_dims.x = (mat.Height() + block_dims.x - 1) / block_dims.x;
    // NOTE(review): y-grid sized as Width()/64, i.e. each y-block handles
    // roughly 64 columns — confirm this heuristic is intentional.
    grid_dims.y = (mat.Width()/64 + block_dims.y - 1) / block_dims.y;
    grid_dims.y = El::Min(El::Max(grid_dims.y, 1), 65536);
    hydrogen::gpu::LaunchKernel(
      row_sqsums_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      static_cast<size_t>(mat.Height()),
      static_cast<size_t>(mat.Width()),
      mat.LockedBuffer(),
      static_cast<size_t>(mat.LDim()),
      row_sqsums.Buffer());
  }
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
// In-place elementwise square root over a contiguous buffer of `size`
// elements. Grid-stride loop, so any launch configuration covers the buffer.
template <typename T>
__global__ void sqrt_kernel(
  size_t size,
  T* __restrict__ buf) {
  const size_t stride = blockDim.x * gridDim.x;
  for (size_t pos = threadIdx.x + blockIdx.x * blockDim.x; pos < size; pos += stride) {
    buf[pos] = gpu_lib::sqrt(buf[pos]);
  }
}
} // namespace <anon>
// Applies an in-place elementwise square root to `mat`.
// Requires a contiguous matrix (the kernel treats it as a flat buffer).
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::sqrt(
  El::Matrix<TensorDataType, Device>& mat) {
  // Check that matrix is valid
  if (!mat.Contiguous()) {
    LBANN_ERROR("matrix is not contiguous");
  }
  // Launch kernel
  if (!mat.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (mat.Height()*mat.Width() + block_size - 1) / block_size;
    hydrogen::gpu::LaunchKernel(
      sqrt_kernel<TensorDataType>,
      grid_dims, block_dims, 0, gpu::get_sync_info(mat),
      static_cast<size_t>(mat.Height()*mat.Width()),
      mat.Buffer());
  }
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
// Elementwise numer[i] /= denom[i] over `size` contiguous elements.
// Non-finite quotients (division by zero, inf/NaN operands) are replaced
// with 0 instead of being propagated.
template <typename T>
__global__ void divide_kernel(
  size_t size,
  T* __restrict__ numer,
  const T* __restrict__ denom) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t nthreads = blockDim.x * gridDim.x;
  for (size_t i=gid; i<size; i+=nthreads) {
    auto& x = numer[i];
    const auto& y = denom[i];
    const auto& z = x / y;
    x = gpu_lib::isfinite(z) ? z : T{0};
  }
}
} // namespace <anon>
// In-place elementwise division numer /= denom, with non-finite quotients
// mapped to 0 (see divide_kernel). Both matrices must be contiguous and
// have identical dimensions.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::divide(
  El::Matrix<TensorDataType, Device>& numer,
  const El::Matrix<TensorDataType, Device>& denom) {
  // Check that matrices are valid
  if (numer.Height() != denom.Height()
      || numer.Width() != denom.Width()) {
    LBANN_ERROR("numerator and denominator do not have same dims");
  }
  if (!numer.Contiguous() || !denom.Contiguous()) {
    LBANN_ERROR("matrices are not contiguous");
  }
  // Launch kernel
  if (!numer.IsEmpty()) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(numer),
                                       gpu::get_sync_info(denom));
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (numer.Height()*numer.Width() + block_size - 1) / block_size;
    hydrogen::gpu::LaunchKernel(
      divide_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      numer.Height()*numer.Width(),
      numer.Buffer(),
      denom.LockedBuffer());
  }
}
namespace {
// Row-wise axpy: y(row, col) = alpha * a_vec[row] * x(row, col)
//                              + beta * y(row, col).
// x_mat and y_mat are height x width with leading dimensions x_ldim/y_ldim;
// a_vec holds one scale per row.
template <typename T>
__global__ void row_axpy_kernel(
  size_t height,
  size_t width,
  T alpha,
  const T* __restrict__ a_vec,
  const T* __restrict__ x_mat,
  size_t x_ldim,
  T beta,
  T* __restrict__ y_mat,
  size_t y_ldim) {
  // Indices
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // Blend row i of x (scaled by alpha * a_vec[i]) into row i of y.
  for (size_t row=gidx; row<height; row+=nthreadsx) {
    const auto& alpha_a = alpha * a_vec[row];
    for (size_t col=gidy; col<width; col+=nthreadsy) {
      const auto& x = x_mat[row+col*x_ldim];
      auto& y = y_mat[row+col*y_ldim];
      y = alpha_a * x + beta * y;
    }
  }
}
} // namespace <anon>
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
// Computes y_mat := alpha * diag(a_vec) * x_mat + beta * y_mat row-wise:
// row r of x_mat is scaled by alpha * a_vec(r) and blended into row r of
// y_mat. a_vec must be a column vector with one entry per row of y_mat.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_axpy(
  TensorDataType alpha,
  const El::Matrix<TensorDataType, Device>& a_vec,
  const El::Matrix<TensorDataType, Device>& x_mat,
  TensorDataType beta,
  El::Matrix<TensorDataType, Device>& y_mat) {
  // Check that matrices are valid
  if (x_mat.Height() != y_mat.Height()
      || x_mat.Width() != y_mat.Width()
      || a_vec.Height() != y_mat.Height()
      || a_vec.Width() != 1) {
    LBANN_ERROR("matrix dims do not match");
  }
  // Launch kernel
  if (!y_mat.IsEmpty()) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(y_mat),
                                       gpu::get_sync_info(a_vec),
                                       gpu::get_sync_info(x_mat));
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    // BUGFIX: this line previously re-assigned block_dims.x = 1, clobbering
    // block_size and launching single-thread blocks; the y dimension is the
    // one meant to be pinned to 1 (cf. row_sqsums above). Results were still
    // correct thanks to the grid-stride kernel, but the launch config was not
    // the intended one.
    block_dims.y = 1;
    grid_dims.x = (y_mat.Height() + block_dims.x - 1) / block_dims.x;
    grid_dims.y = (y_mat.Width() + block_dims.y - 1) / block_dims.y;
    grid_dims.y = El::Min(grid_dims.y, 65536);
    hydrogen::gpu::LaunchKernel(
      row_axpy_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      static_cast<size_t>(y_mat.Height()),
      static_cast<size_t>(y_mat.Width()),
      alpha,
      a_vec.LockedBuffer(),
      x_mat.LockedBuffer(),
      static_cast<size_t>(x_mat.LDim()),
      beta,
      y_mat.Buffer(),
      static_cast<size_t>(y_mat.LDim()));
  }
}
#define PROTO(T) \
template class rowwise_weights_norms_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class rowwise_weights_norms_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| c2d9631bd5bed431b502bc6db7f5ba2f404d77a3.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ROWWISE_WEIGHTS_NORMS_LAYER_INSTANTIATE
#include "lbann/layers/misc/rowwise_weights_norms_impl.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bdimx x bdimy x 1
*
* Grid dimensions: (height/bdimx) x (width/bdimy) x 1
*/
// Accumulates the sum of squares of each row of `mat` (height x width,
// leading dimension mat_ldim) into row_sqsums[row]. The output must be
// zero-initialized by the caller (see row_sqsums() below).
template <typename T>
__global__ void row_sqsums_kernel(
  size_t height,
  size_t width,
  const T* __restrict__ mat,
  size_t mat_ldim,
  T* __restrict__ row_sqsums) {
  // Indices
  const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
  const size_t nthreadsx = blockDim.x * gridDim.x;
  const size_t nthreadsy = blockDim.y * gridDim.y;
  // Accumulate sum of squares for each matrix row
  for (size_t row=gidx; row<height; row+=nthreadsx) {
    T sqsum{0};
    for (size_t col=gidy; col<width; col+=nthreadsy) {
      const auto& x = mat[row+col*mat_ldim];
      sqsum += x*x;
    }
    // Multiple y-lanes/y-blocks may target the same row: combine atomically.
    gpu_lib::atomic_add(&row_sqsums[row], sqsum);
  }
}
} // namespace <anon>
// Computes per-row sums of squares of `mat` into `row_sqsums`
// (zeroing the accumulator first, since the kernel adds atomically).
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_sqsums(
  const El::Matrix<TensorDataType, Device>& mat,
  El::Matrix<TensorDataType, Device>& row_sqsums) {
  // Launch kernel
  El::Zero(row_sqsums);
  if (!mat.IsEmpty()) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(row_sqsums),
                                       gpu::get_sync_info(mat));
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    block_dims.y = 1;
    grid_dims.x = (mat.Height() + block_dims.x - 1) / block_dims.x;
    // NOTE(review): y-grid sized as Width()/64, i.e. each y-block handles
    // roughly 64 columns — confirm this heuristic is intentional.
    grid_dims.y = (mat.Width()/64 + block_dims.y - 1) / block_dims.y;
    grid_dims.y = El::Min(El::Max(grid_dims.y, 1), 65536);
    hydrogen::gpu::LaunchKernel(
      row_sqsums_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      static_cast<size_t>(mat.Height()),
      static_cast<size_t>(mat.Width()),
      mat.LockedBuffer(),
      static_cast<size_t>(mat.LDim()),
      row_sqsums.Buffer());
  }
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
// In-place elementwise square root over a contiguous buffer of `size`
// elements. Grid-stride loop, so any launch configuration covers the buffer.
template <typename T>
__global__ void sqrt_kernel(
  size_t size,
  T* __restrict__ buf) {
  const size_t stride = blockDim.x * gridDim.x;
  for (size_t pos = threadIdx.x + blockIdx.x * blockDim.x; pos < size; pos += stride) {
    buf[pos] = gpu_lib::sqrt(buf[pos]);
  }
}
} // namespace <anon>
// Applies an in-place elementwise square root to `mat`.
// Requires a contiguous matrix (the kernel treats it as a flat buffer).
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::sqrt(
  El::Matrix<TensorDataType, Device>& mat) {
  // Check that matrix is valid
  if (!mat.Contiguous()) {
    LBANN_ERROR("matrix is not contiguous");
  }
  // Launch kernel
  if (!mat.IsEmpty()) {
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (mat.Height()*mat.Width() + block_size - 1) / block_size;
    hydrogen::gpu::LaunchKernel(
      sqrt_kernel<TensorDataType>,
      grid_dims, block_dims, 0, gpu::get_sync_info(mat),
      static_cast<size_t>(mat.Height()*mat.Width()),
      mat.Buffer());
  }
}
namespace {
/**
* Block dimensions: bdim x 1 x 1
*
* Grid dimensions: (size/bdim) x 1 x 1
*/
// Elementwise numer[i] /= denom[i] over `size` contiguous elements.
// Non-finite quotients (division by zero, inf/NaN operands) are replaced
// with 0 instead of being propagated.
template <typename T>
__global__ void divide_kernel(
  size_t size,
  T* __restrict__ numer,
  const T* __restrict__ denom) {
  const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
  const size_t nthreads = blockDim.x * gridDim.x;
  for (size_t i=gid; i<size; i+=nthreads) {
    auto& x = numer[i];
    const auto& y = denom[i];
    const auto& z = x / y;
    x = gpu_lib::isfinite(z) ? z : T{0};
  }
}
} // namespace <anon>
// In-place elementwise division numer /= denom, with non-finite quotients
// mapped to 0 (see divide_kernel). Both matrices must be contiguous and
// have identical dimensions.
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::divide(
  El::Matrix<TensorDataType, Device>& numer,
  const El::Matrix<TensorDataType, Device>& denom) {
  // Check that matrices are valid
  if (numer.Height() != denom.Height()
      || numer.Width() != denom.Width()) {
    LBANN_ERROR("numerator and denominator do not have same dims");
  }
  if (!numer.Contiguous() || !denom.Contiguous()) {
    LBANN_ERROR("matrices are not contiguous");
  }
  // Launch kernel
  if (!numer.IsEmpty()) {
    auto multisync = El::MakeMultiSync(gpu::get_sync_info(numer),
                                       gpu::get_sync_info(denom));
    constexpr size_t block_size = 256;
    dim3 block_dims, grid_dims;
    block_dims.x = block_size;
    grid_dims.x = (numer.Height()*numer.Width() + block_size - 1) / block_size;
    hydrogen::gpu::LaunchKernel(
      divide_kernel<TensorDataType>,
      grid_dims, block_dims, 0, multisync,
      numer.Height()*numer.Width(),
      numer.Buffer(),
      denom.LockedBuffer());
  }
}
namespace {
/**
 * Compute y_mat := alpha * diag(a_vec) * x_mat + beta * y_mat, i.e. an
 * axpy in which each row of x_mat is pre-scaled by alpha * a_vec[row].
 * Matrices are column-major with leading dimensions x_ldim / y_ldim.
 *
 * Block dimensions: bdimx x bdimy x 1
 *
 * Grid dimensions: (height/bdimx) x (width/bdimy) x 1
 */
template <typename T>
__global__ void row_axpy_kernel(
size_t height,
size_t width,
T alpha,
const T* __restrict__ a_vec,
const T* __restrict__ x_mat,
size_t x_ldim,
T beta,
T* __restrict__ y_mat,
size_t y_ldim) {
// Indices
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
// Grid-stride loops: x dimension covers rows, y dimension covers columns.
// (The previous comment about "sum of squares" described a different kernel.)
for (size_t row=gidx; row<height; row+=nthreadsx) {
const auto& alpha_a = alpha * a_vec[row];
for (size_t col=gidy; col<width; col+=nthreadsy) {
const auto& x = x_mat[row+col*x_ldim];
auto& y = y_mat[row+col*y_ldim];
y = alpha_a * x + beta * y;
}
}
}
} // namespace <anon>
/**
 * Compute y_mat := alpha * diag(a_vec) * x_mat + beta * y_mat, where
 * a_vec is a column vector with one entry per row of x_mat/y_mat.
 *
 * Dimensions must match (a_vec must be a Height x 1 column vector);
 * launches row_axpy_kernel with a 2D grid (rows along x, columns along y).
 */
template <typename TensorDataType, data_layout Layout, El::Device Device>
void rowwise_weights_norms_layer<TensorDataType, Layout, Device>::row_axpy(
TensorDataType alpha,
const El::Matrix<TensorDataType, Device>& a_vec,
const El::Matrix<TensorDataType, Device>& x_mat,
TensorDataType beta,
El::Matrix<TensorDataType, Device>& y_mat) {
// Check that matrices are valid
if (x_mat.Height() != y_mat.Height()
|| x_mat.Width() != y_mat.Width()
|| a_vec.Height() != y_mat.Height()
|| a_vec.Width() != 1) {
LBANN_ERROR("matrix dims do not match");
}
// Launch kernel
if (!y_mat.IsEmpty()) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(y_mat),
gpu::get_sync_info(a_vec),
gpu::get_sync_info(x_mat));
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
// Bug fix: this line previously re-assigned block_dims.x = 1, clobbering
// the 256-thread block size and launching one-thread blocks (one per row).
// block_dims.y is set explicitly so the grid math below is self-evident.
block_dims.y = 1;
grid_dims.x = (y_mat.Height() + block_dims.x - 1) / block_dims.x;
grid_dims.y = (y_mat.Width() + block_dims.y - 1) / block_dims.y;
grid_dims.y = El::Min(grid_dims.y, 65536);
hydrogen::gpu::LaunchKernel(
row_axpy_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
static_cast<size_t>(y_mat.Height()),
static_cast<size_t>(y_mat.Width()),
alpha,
a_vec.LockedBuffer(),
x_mat.LockedBuffer(),
static_cast<size_t>(x_mat.LDim()),
beta,
y_mat.Buffer(),
static_cast<size_t>(y_mat.LDim()));
}
}
#define PROTO(T) \
template class rowwise_weights_norms_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class rowwise_weights_norms_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
a948a044c3ff04226ebc1daa387eabf6db722ffe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>
// Abort with a readable message when a HIP runtime call does not return
// hipSuccess. Wrap every runtime API call with this macro.
#define CHECK_CUDA_ERR(cudaerr) \
{ \
auto err = cudaerr; \
if (err != hipSuccess) { \
printf("kernel launch failed with error \"%s\".\n",\
hipGetErrorString(err)); \
exit(1); \
} \
}
// Escape-time coloring for one pixel of the Mandelbrot set.
// Iterates z <- z^2 + c until |z|^2 >= 4 or max_iter is reached, then
// copies the RGB triple for that escape count (or the "inside" color,
// colors[0..2]) into pixels[3 * global_index .. 3 * global_index + 2].
__device__ void color_pixel(
char *colors, char *pixels,
double c_re, double c_im,
int global_index, int max_iter) {
    double re = 0.0, im = 0.0;
    int it = 0;
    while (it < max_iter && re * re + im * im < 4.0) {
        const double re_next = re * re - im * im + c_re;
        im = 2.0 * re * im + c_im;
        re = re_next;
        ++it;
    }
    // Palette entry for iteration k lives at colors[3 + 3*k].
    char *dst = pixels + 3 * global_index;
    const char *src = (it < max_iter) ? colors + 3 + it * 3 : colors;
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
}
// One thread per pixel: maps the flat thread index to (x, y) image
// coordinates and a complex-plane point c, then colors the pixel.
// The bounds check guards the ragged final block.
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter) {
int global_index = threadIdx.x + blockDim.x * blockIdx.x;
int x = global_index % width;
int y = global_index / width;
// Both axes are scaled by width so the sampling grid stays isotropic.
double c_re = ((double)x - (double)width / 2.0) * 4.0 / width;
double c_im = ((double)y - (double)height / 2.0) * 4.0 / width;
if (global_index < height * width)
color_pixel(colors, pixels, c_re, c_im, global_index, max_iter);
}
// Build the iteration->RGB palette consumed by color_pixel().
// colors[0..2] is the "inside the set" color; colors[3 + 3*k .. 5 + 3*k]
// is the color for escape-iteration k, k in [0, max_iter).
void fill_colors(char *colors, int max_iter) {
    colors[0] = 200;
    colors[1] = 200;
    colors[2] = 200;
    int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
    // Bug fix: the loop bound was `i < max_iter`, which initialized only the
    // first max_iter/3 palette entries; color_pixel() then read uninitialized
    // bytes for iteration counts >= max_iter/3. The palette holds 3*max_iter
    // color bytes after the 3-byte header, so i must cover [0, 3*max_iter).
    for (int i = 0; i < max_iter * 3; i += 3) {
        if (j % 50 == 0)
            shade <<= 1;
        int red = colors[0] + i * speed1 - j;
        int green = colors[1] + i * speed2;
        int blue = colors[2] + i * speed3 - j;
        if (red < 0) red = 0;
        if (green < 0) green = 0;
        if (blue < 0) blue = 0;
        // Guard the divisor: for very large max_iter, shade keeps doubling
        // until 256 / shade reaches 0, which would be a modulo by zero.
        int div = 256 / shade;
        if (div < 1) div = 1;
        colors[3 + i] = (red) % div;
        colors[3 + i + 1] = (green) % div;
        colors[3 + i + 2] = (blue) % div;
        j += 1;
    }
}
// Renders a fixed-size Mandelbrot image on the GPU, reports kernel and
// read-back times on stdout, and optionally dumps the result as a BMP.
// Usage: prog <write_to_file_flag>
int main(int argc, char **argv) {
// NOTE(review): argv[1] is dereferenced without checking argc; running
// with no arguments is undefined behavior.
int write_to_file_flag = std::atoi(argv[1]);
int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
int n_pixels = x_pixels * y_pixels;
char *host_pixels, *device_pixels, *host_colors, *device_colors;
size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB
// This allocates pinned memory to speed-up memory transfers
CHECK_CUDA_ERR(hipHostMalloc(&host_pixels, pixel_size));
CHECK_CUDA_ERR(hipMalloc(&device_pixels, pixel_size));
size_t color_size = sizeof(char) * (max_iter * 3 + 3);
CHECK_CUDA_ERR(hipHostMalloc(&host_colors, color_size));
CHECK_CUDA_ERR(hipMalloc(&device_colors, color_size));
fill_colors(host_colors, max_iter);
CHECK_CUDA_ERR(hipMemcpy(device_colors, host_colors, color_size, hipMemcpyHostToDevice));
CHECK_CUDA_ERR(hipDeviceSynchronize());
auto start = std::chrono::steady_clock::now();
// One thread per pixel, 32 threads per block; (32 + n) / 32 over-covers so
// the kernel's bounds check handles the tail.
hipLaunchKernelGGL(( mandelbrot), dim3((32 + n_pixels) / 32), dim3(32), 0, 0,
/*colors=*/device_colors,
/*pixels=*/device_pixels,
/*height=*/y_pixels,
/*width=*/x_pixels,
/*max_iter*/max_iter);
CHECK_CUDA_ERR(hipDeviceSynchronize());
auto end = std::chrono::steady_clock::now();
std::cout << "RUN "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
start = std::chrono::steady_clock::now();
CHECK_CUDA_ERR(hipMemcpy(host_pixels, device_pixels, pixel_size, hipMemcpyDeviceToHost));
CHECK_CUDA_ERR(hipDeviceSynchronize());
end = std::chrono::steady_clock::now();
std::cout << "READ "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
if (write_to_file_flag) {
long long current_time = time(nullptr);
std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
// Hand-rolled BMP with a 12-byte BITMAPCOREHEADER; dimensions are
// hardcoded little-endian byte pairs.
// NOTE(review): the height bytes 0xA2,0x45 encode 0x45A2 = 17826, but
// y_pixels is 13730 (0x35A2) -- confirm the intended header height.
image <<
(uint8_t)0x42 <<
(uint8_t)0x4D <<
(uint8_t)0x7C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x1A <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x0C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 << // Image Width
(uint8_t)0x4E << // Image Width
(uint8_t)0xA2 << // Image Height
(uint8_t)0x45 << // Image height
(uint8_t)0x01 <<
(uint8_t)0x00 <<
(uint8_t)0x18 <<
(uint8_t)0x00;
for (int i = 0; i < n_pixels * 3; i++)
image << host_pixels[i];
// NOTE(review): 0x00 is an int here, so operator<< writes the text "00",
// not two zero padding bytes.
image << 0x00 << 0x00;
}
CHECK_CUDA_ERR(hipHostFree(host_pixels));
CHECK_CUDA_ERR(hipHostFree(host_colors));
CHECK_CUDA_ERR(hipFree(device_pixels));
CHECK_CUDA_ERR(hipFree(device_colors));
return 0;
}
| a948a044c3ff04226ebc1daa387eabf6db722ffe.cu | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <fstream>
#include <iostream>
#include <chrono>
// Abort with a readable message when a CUDA runtime call does not return
// cudaSuccess. Wrap every runtime API call with this macro.
#define CHECK_CUDA_ERR(cudaerr) \
{ \
cudaError_t err_ = (cudaerr); \
if (err_ != cudaSuccess) { \
printf("kernel launch failed with error \"%s\".\n",\
cudaGetErrorString(err_)); \
exit(1); \
} \
}
// Escape-time coloring for one pixel: iterate z <- z^2 + c (re part i,
// im part j) until |z|^2 >= 4 or max_iter, then write the RGB triple for
// the escape count into pixels[3 * global_index ...].
__device__ void color_pixel(
char *colors, char *pixels,
double c_re, double c_im,
int global_index, int max_iter) {
double i = 0, j = 0;
int iteration = 0;
while ( i*i + j*j < 4 && iteration < max_iter) {
double i_new = i*i - j*j + c_re;
j = 2*i*j + c_im;
i = i_new;
iteration++;
}
int color_index = global_index * 3;
if (iteration < max_iter) {
// Escaped: look up the palette entry for this iteration count.
pixels[color_index] = colors[3 + iteration * 3];
pixels[color_index + 1] = colors[3 + iteration * 3 + 1];
pixels[color_index + 2] = colors[3 + iteration * 3 + 2];
} else {
// Did not escape within max_iter: use the "inside the set" color.
pixels[color_index] = colors[0];
pixels[color_index + 1] = colors[1];
pixels[color_index + 2] = colors[2];
}
}
// One thread per pixel; maps the flat thread index to (x, y) image
// coordinates and the complex-plane point c, then colors the pixel.
// Threads past the last pixel exit without touching memory.
__global__ void mandelbrot(char *colors, char* pixels, int height, int width, int max_iter) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= height * width) return;
    const int px = idx % width;
    const int py = idx / width;
    // Both axes are scaled by width so pixels stay square.
    const double re = ((double)px - (double)width / 2.0) * 4.0 / width;
    const double im = ((double)py - (double)height / 2.0) * 4.0 / width;
    color_pixel(colors, pixels, re, im, idx, max_iter);
}
// Build the iteration->RGB palette consumed by color_pixel().
// colors[0..2] is the "inside the set" color; colors[3 + 3*k .. 5 + 3*k]
// is the color for escape-iteration k, k in [0, max_iter).
void fill_colors(char *colors, int max_iter) {
    colors[0] = 200;
    colors[1] = 200;
    colors[2] = 200;
    int shade = 1, speed1 = 0, speed2 = 10, speed3 = 0, j = 1;
    // Bug fix: the loop bound was `i < max_iter`, which initialized only the
    // first max_iter/3 palette entries; color_pixel() then read uninitialized
    // bytes for iteration counts >= max_iter/3. The palette holds 3*max_iter
    // color bytes after the 3-byte header, so i must cover [0, 3*max_iter).
    for (int i = 0; i < max_iter * 3; i += 3) {
        if (j % 50 == 0)
            shade <<= 1;
        int red = colors[0] + i * speed1 - j;
        int green = colors[1] + i * speed2;
        int blue = colors[2] + i * speed3 - j;
        if (red < 0) red = 0;
        if (green < 0) green = 0;
        if (blue < 0) blue = 0;
        // Guard the divisor: for very large max_iter, shade keeps doubling
        // until 256 / shade reaches 0, which would be a modulo by zero.
        int div = 256 / shade;
        if (div < 1) div = 1;
        colors[3 + i] = (red) % div;
        colors[3 + i + 1] = (green) % div;
        colors[3 + i + 2] = (blue) % div;
        j += 1;
    }
}
// Renders a fixed-size Mandelbrot image on the GPU, reports kernel and
// read-back times on stdout, and optionally dumps the result as a BMP.
// Usage: prog <write_to_file_flag>
int main(int argc, char **argv) {
// NOTE(review): argv[1] is dereferenced without checking argc; running
// with no arguments is undefined behavior.
int write_to_file_flag = std::atoi(argv[1]);
int x_pixels = 19968, y_pixels = 13730, max_iter = 150;
int n_pixels = x_pixels * y_pixels;
char *host_pixels, *device_pixels, *host_colors, *device_colors;
size_t pixel_size = sizeof(char) * n_pixels * 3; // * 3 for RGB
// This allocates pinned memory to speed-up memory transfers
CHECK_CUDA_ERR(cudaMallocHost(&host_pixels, pixel_size));
CHECK_CUDA_ERR(cudaMalloc(&device_pixels, pixel_size));
size_t color_size = sizeof(char) * (max_iter * 3 + 3);
CHECK_CUDA_ERR(cudaMallocHost(&host_colors, color_size));
CHECK_CUDA_ERR(cudaMalloc(&device_colors, color_size));
fill_colors(host_colors, max_iter);
CHECK_CUDA_ERR(cudaMemcpy(device_colors, host_colors, color_size, cudaMemcpyHostToDevice));
CHECK_CUDA_ERR(cudaDeviceSynchronize());
auto start = std::chrono::steady_clock::now();
// One thread per pixel, 32 threads per block; (32 + n) / 32 over-covers so
// the kernel's bounds check handles the tail.
mandelbrot<<<(32 + n_pixels) / 32, 32>>>(
/*colors=*/device_colors,
/*pixels=*/device_pixels,
/*height=*/y_pixels,
/*width=*/x_pixels,
/*max_iter*/max_iter);
CHECK_CUDA_ERR(cudaDeviceSynchronize());
auto end = std::chrono::steady_clock::now();
std::cout << "RUN "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
start = std::chrono::steady_clock::now();
CHECK_CUDA_ERR(cudaMemcpy(host_pixels, device_pixels, pixel_size, cudaMemcpyDeviceToHost));
CHECK_CUDA_ERR(cudaDeviceSynchronize());
end = std::chrono::steady_clock::now();
std::cout << "READ "
<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
<< std::endl;
if (write_to_file_flag) {
long long current_time = time(nullptr);
std::ofstream image (std::to_string(current_time).append("-gpu.bmp"), std::ofstream::binary);
// Hand-rolled BMP with a 12-byte BITMAPCOREHEADER; dimensions are
// hardcoded little-endian byte pairs.
// NOTE(review): the height bytes 0xA2,0x45 encode 0x45A2 = 17826, but
// y_pixels is 13730 (0x35A2) -- confirm the intended header height.
image <<
(uint8_t)0x42 <<
(uint8_t)0x4D <<
(uint8_t)0x7C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x1A <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x0C <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 <<
(uint8_t)0x00 << // Image Width
(uint8_t)0x4E << // Image Width
(uint8_t)0xA2 << // Image Height
(uint8_t)0x45 << // Image height
(uint8_t)0x01 <<
(uint8_t)0x00 <<
(uint8_t)0x18 <<
(uint8_t)0x00;
for (int i = 0; i < n_pixels * 3; i++)
image << host_pixels[i];
// NOTE(review): 0x00 is an int here, so operator<< writes the text "00",
// not two zero padding bytes.
image << 0x00 << 0x00;
}
CHECK_CUDA_ERR(cudaFreeHost(host_pixels));
CHECK_CUDA_ERR(cudaFreeHost(host_colors));
CHECK_CUDA_ERR(cudaFree(device_pixels));
CHECK_CUDA_ERR(cudaFree(device_colors));
return 0;
}
|
b6e71fbf3371bed98442794b1a6fa32e90da8c8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereolite.h"
// error[p] = |i0[p] - i1[p]| for every in-bounds pixel (stride-indexed rows).
__global__
void LitePlaneSweepGetErrorKernel(float* i0, float* i1, int width, int height, int stride, float *error)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if ((col < width) && (row < height)) {
        const int idx = col + row * stride;
        error[idx] = fabsf(i0[idx] - i1[idx]);
    }
}
// Invalidate pixels whose best window-SAD error is more than standardDev
// BELOW the running mean error: error, disparity and the final warp are
// zeroed. (Note: the test is one-sided; it is not "closeness to the mean".)
__global__
void LitePlaneSweepMeanCleanup(float* error, float* meanError, float standardDev, float* disparity, float2* finalWarp,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
if ((meanError[pos] - error[pos]) > standardDev) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
finalWarp[pos] = make_float2(0.0f, 0.0f);
}
/*if (abs(meanError[pos] - error[pos]) < 1.0f ) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
finalWarp[pos] = make_float2(0.0f, 0.0f);
}*/
}
// Overload without a warp buffer: same one-sided mean-error cleanup, but
// only error and disparity are zeroed.
__global__
void LitePlaneSweepMeanCleanup(float* error, float* meanError, float standardDev, float* disparity,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
if ((meanError[pos] - error[pos]) > standardDev) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
}
}
// Window-based SAD: averages the per-pixel absolute error over a
// windowSize x windowSize box (clipped at image borders), folds it into
// the running per-pixel mean (sweepDistance doubles as the count of
// sweeps already accumulated), and keeps (error, disparity) for the best
// sweep seen so far.
// NOTE(review): maxDisparity is only referenced from commented-out code.
__global__
void LitePlaneSweepCorrelationKernel(float* imError, float* disparity, float sweepDistance, int maxDisparity,
int windowSize, int width, int height, int stride, float *error, float *meanError)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += imError[col + stride * row];
windowCount++;
}
}
}
currError = currError / windowCount;
// Incremental running mean over all sweep distances processed so far.
meanError[pos] = ((float)sweepDistance * meanError[pos] + currError) / ((float)sweepDistance + 1.0f);
if (currError < error[pos]) {
/*if (sweepDistance == maxDisparity) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
}
else {*/
error[pos] = currError;
disparity[pos] = (float)sweepDistance;
//}
}
}
// One plane-sweep step: build the per-pixel |i0 - i1| error image, run the
// window-SAD best-disparity update for this sweep distance, then zero out
// pixels whose best error falls far below the running mean.
// Uses the member scratch buffers ps_errorHolder / ps_meanError.
void StereoLite::PlaneSweepCorrelation(float *i0, float *i1, float* disparity, float sweepDistance, int windowSize,
int w, int h, int s, float *error)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
//LitePlaneSweepCorrelationKernel << <blocks, threads >> > (i0, i1, disparity, sweepDistance, windowSize, w, h, s, error);
LitePlaneSweepGetErrorKernel << < blocks, threads >> > (i0, i1, w, h, s, ps_errorHolder);
LitePlaneSweepCorrelationKernel << <blocks, threads >> > (ps_errorHolder, disparity, sweepDistance,
planeSweepMaxDisparity, windowSize, w, h, s, error, ps_meanError);
LitePlaneSweepMeanCleanup << < blocks, threads >> > (error, ps_meanError, planeSweepStandardDev, disparity,
w, h, s);
}
// Window-based SAD for one sweep step that also advances each pixel's
// accumulated warp by sweepStride * tv and remembers the warp of the best
// sweep so far (consumed later by the left-right consistency check).
// totalSweep = sweepDistance / sweepStride is the number of sweeps already
// folded into the running mean error.
// NOTE(review): maxDisparity is only referenced from commented-out code.
__global__
void LitePlaneSweepCorrelationGetWarpKernel(float* imError, float* disparity, float sweepDistance, float sweepStride, int maxDisparity,
int windowSize, float2* currentWarp, float2 * finalWarp, float2* tv,
int width, int height, int stride, float *error, float *meanError)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
// Advance the accumulated warp along the translation vector.
float2 currWarp;
currWarp.x = currentWarp[pos].x + sweepStride * tv[pos].x;
currWarp.y = currentWarp[pos].y + sweepStride * tv[pos].y;
currentWarp[pos] = currWarp;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += imError[col + stride * row];
windowCount++;
}
}
}
currError = currError / windowCount;
float totalSweep = sweepDistance / sweepStride;
/*if ((error[pos] != 1000.0f) && (currError > error[pos])) {
meanError[pos] = currError;
}*/
meanError[pos] = (totalSweep * meanError[pos] + currError) / (totalSweep + 1.0f);
//meanError[pos] = (meanError[pos] + currError) / (1.0f + 1.0f);
if (currError < error[pos]) {
/*if (sweepDistance == maxDisparity) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
finalWarp[pos] = make_float2(0.0f, 0.0f);
}
else {*/
error[pos] = currError;
disparity[pos] = sweepDistance;
finalWarp[pos] = currWarp;
//}
}
}
// Plane-sweep step that also tracks the warp of the best sweep so far:
// builds the |i0 - i1| error image, then runs the warp-tracking SAD
// kernel. The final mean-error cleanup stage is currently disabled.
void StereoLite::PlaneSweepCorrelationGetWarp(float *i0, float *i1, float* disparity,
float sweepDistance, float sweepStride, int windowSize,
float2* currentWarp, float2* finalWarp, float2 * translationVector, int w, int h, int s, float *error)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
/*LitePlaneSweepCorrelationGetWarpKernel << <blocks, threads >> > (i0, i1, disparity, sweepDistance, windowSize,
currentWarp, finalWarp, translationVector, w, h, s, error);*/
LitePlaneSweepGetErrorKernel << < blocks, threads >> > (i0, i1, w, h, s, ps_errorHolder);
LitePlaneSweepCorrelationGetWarpKernel << <blocks, threads >> > (ps_errorHolder, disparity,
sweepDistance, sweepStride,
planeSweepMaxDisparity, windowSize, currentWarp, finalWarp,
translationVector, w, h, s, error, ps_meanError);
/*LitePlaneSweepMeanCleanup << < blocks, threads >> > (error, ps_meanError, planeSweepStandardDev, disparity, finalWarp,
w, h, s);*/
}
// Left to Right Consistency
texture<float, hipTextureType2D, hipReadModeElementType> disparityTex;
// Accept a pixel only if the mean forward/backward disparity difference
// over a fixed 3x3 window is <= epsilon. Rejected pixels get zero
// disparity and a zero warp; accepted pixels keep their forward
// disparity and warp.
__global__
void LiteLeftRightConsistencyKernel(float *disparityForward, float2* warpingVector, float* leftRightDiff,
float epsilon, float* disparityFinal, float2* finalWarpForward, int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float2 warpUV = warpingVector[pos];
int windowSize = 3;
bool isConsistent = true;
float currDiff = 0.0f;
int windowCnt = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
/*if (leftRightDiff[col + stride * row] > epsilon) {
isConsistent = false;
}*/
currDiff += leftRightDiff[col + stride * row];
windowCnt++;
}
}
}
currDiff = currDiff / (float)windowCnt;
if (currDiff > epsilon) {
isConsistent = false;
}
if (!isConsistent){
disparityFinal[pos] = 0.0f;
finalWarpForward[pos] = make_float2(0.0f, 0.0f);
}
else {
disparityFinal[pos] = disparityForward[pos];
finalWarpForward[pos] = warpUV;
}
}
// Per-pixel left-right disparity difference: sample the backward
// disparity map through disparityTex (normalized coords, bilinear
// filtering) at the position warped by warpingVector, and store the
// absolute difference to the forward disparity.
__global__
void LiteLeftRightDiffKernel(float *disparityForward, float2* warpingVector, float* leftRightDiff,
int width, int height, int stride)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pos = ix + iy * stride;
    if (ix >= width || iy >= height) return;
    float2 warpUV = warpingVector[pos];
    // Normalized texture coordinates of the warped position (+0.5 centers
    // the sample on the texel).
    float x = ((float)ix + warpUV.x + 0.5f) / (float)width;
    float y = ((float)iy + warpUV.y + 0.5f) / (float)height;
    // Cleanup: removed the unused dx/dy locals, and use the float overload
    // fabsf instead of abs.
    float dispBackward = tex2D(disparityTex, x, y);
    leftRightDiff[pos] = fabsf(dispBackward - disparityForward[pos]);
}
// Filter the forward disparity by left-right consistency: bind the
// backward disparity map to disparityTex, compute the per-pixel
// forward/backward difference, then invalidate inconsistent pixels.
void StereoLite::LeftRightConsistency(float *disparityForward, float* disparityBackward, float2* warpingVector,
float epsilon, float* disparityFinal, float2* finalWarpForward,
int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
disparityTex.addressMode[0] = hipAddressModeMirror;
disparityTex.addressMode[1] = hipAddressModeMirror;
disparityTex.filterMode = hipFilterModeLinear;
disparityTex.normalized = true;
// NOTE(review): `desc` is created but never passed anywhere; the bind call
// below appears to rely on the texture reference's own channel
// descriptor -- verify and remove if so.
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, disparityTex, disparityBackward, w, h, s * sizeof(float));
LiteLeftRightDiffKernel << < blocks, threads >> > (disparityForward, warpingVector, ps_leftRightDiff, w, h, s);
LiteLeftRightConsistencyKernel << <blocks, threads >> > (disparityForward, warpingVector, ps_leftRightDiff,
epsilon, disparityFinal, finalWarpForward, w, h, s);
}
// [Hirata] Upsampling/Propagation
// Densify a sparse lidar depth map: for each pixel, find the nearest
// nonzero lidar sample in a (2*radius+1)^2 window, then blend all nonzero
// samples in the window, weighted by spatial distance, similarity to the
// nearest sample, and the inverse of the maximum image gradient along the
// axis-aligned segments (column, then row) between pixel and sample --
// so strong edges block propagation. The blended depth goes to depthOut.
// NOTE(review): propagatedUV (the blended warp) is computed but never
// stored, and the warpUVOut parameter is never written; dnearest_idx and
// countPoint are also unused. Confirm whether warp output was intended.
__global__ void LitePropagateColorOnlyKernel(float* grad, float* lidar, float2* warpUV, float2 * warpUVOut, float* depthOut, int radius,
int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
int maxRad = radius;
int kernelSize = maxRad * 2 + 1;
int shift = maxRad;
// Find closest lidar point
float dnearest = 0.0f;
float2 uvnearest;
int dnearest_idx;
float r0 = 10000.0f;
for (int j = 0; j < kernelSize; j++) {
for (int i = 0; i < kernelSize; i++) {
int col = (ix + i - shift);
int row = (iy + j - shift);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
//col + stride*row
float currLidar = lidar[col + stride * row];
float2 currUV = warpUV[col + stride * row];
if (currLidar != 0.0f) {
float r = sqrtf((ix - col)*(ix - col) + (iy - row)*(iy - row));
if (r < r0) {
r0 = r;
dnearest_idx = col + stride * row;
dnearest = currLidar;
uvnearest = currUV;
}
}
}
}
}
// Propagation
float sum = 0.0f;
float sumU = 0.0f;
float sumV = 0.0f;
float count = 0.0f;
int countPoint = 0;
for (int j = 0; j < kernelSize; j++) {
for (int i = 0; i < kernelSize; i++) {
int col = (ix + i - shift);
int row = (iy + j - shift);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
//col + stride*row
float currLidar = lidar[col + stride * row];
float2 currUV = warpUV[col + stride * row];
if (currLidar != 0.0f) {
countPoint++;
// gs: spatial weight; gr/grU/grV: similarity to the nearest sample.
float gs = 1.0f / (1.0f + sqrtf((ix - col)*(ix - col) + (iy - row)*(iy - row)));
float gr = 1.0f / (1.0f + fabsf(dnearest - currLidar));
float grU = 1.0f / (1.0f + fabsf(uvnearest.x - currUV.x));
float grV = 1.0f / (1.0f + fabsf(uvnearest.y - currUV.y));
// Find maximum gradient in between the current lidar point and the current pixel
float gmax = grad[pos];
// y-direction
if (iy < row) {
for (int gy = iy; gy <= row; gy++) {
if (grad[ix + stride * gy] > gmax) {
gmax = grad[ix + stride * gy];
}
}
}
else if (iy > row) {
for (int gy = row; gy <= iy; gy++) {
if (grad[ix + stride * gy] > gmax) {
gmax = grad[ix + stride * gy];
}
}
}
// x-direction
if (ix < col) {
for (int gx = ix; gx <= col; gx++) {
if (grad[gx + stride * iy] > gmax) {
gmax = grad[gx + stride * iy];
}
}
}
else if (ix > col) {
for (int gx = col; gx <= ix; gx++) {
if (grad[gx + stride * iy] > gmax) {
gmax = grad[gx + stride * iy];
}
}
}
sum += currLidar * gs * gr * (1.0f / (gmax + 0.001f));
sumU += warpUV[col + stride * row].x * gs * grU * (1.0f / (gmax + 0.001f));
sumV += warpUV[col + stride * row].y * gs * grV * (1.0f / (gmax + 0.001f));
count += gs * gr * (1.0f / (gmax + 0.001f));
}
}
}
}
// Normalize the weighted sums; zero output where no lidar sample was found.
float propagatedDepth;
float2 propagatedUV;
if (count != 0.0f) {
propagatedDepth = sum / count;
propagatedUV.x = sumU / count;
propagatedUV.y = sumV / count;
}
else {
propagatedDepth = 0.0f;
propagatedUV.x = 0.0f;
propagatedUV.y = 0.0f;
}
depthOut[pos] = propagatedDepth;
}
}
// Launch the lidar depth propagation kernel over the member-sized image
// (width/height/stride come from the StereoLite instance).
void StereoLite::PropagateColorOnly(float* grad, float* lidar, float2 * warpUV, float2* warpUVOut, float* depthOut, int radius)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y));
LitePropagateColorOnlyKernel << < blocks, threads >> > (grad, lidar, warpUV, warpUVOut, depthOut, radius, width, height, stride);
}
texture<float, 2, hipReadModeElementType> texForGradient;
// Image gradient magnitude from forward differences fetched through
// texForGradient (normalized coordinates, bilinear filtering).
// NOTE(review): in each derivative the first forward difference is
// overwritten by the second (`t0 = ...` instead of `t0 += ...`), so only
// the offset row/column difference contributes -- this looks like it was
// meant to average the two differences; confirm intent.
__global__ void LiteGradientKernel(float* output, int width, int height, int stride) {
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float2 grad;
float t0;
// x derivative
t0 = tex2D(texForGradient, x + 1.0f * dx, y);
t0 -= tex2D(texForGradient, x, y);
t0 = tex2D(texForGradient, x + 1.0f * dx, y + 1.0f * dy);
t0 -= tex2D(texForGradient, x, y + 1.0f * dy);
grad.x = t0;
// y derivative
t0 = tex2D(texForGradient, x, y + 1.0f * dy);
t0 -= tex2D(texForGradient, x, y);
t0 = tex2D(texForGradient, x + 1.0f * dx, y + 1.0f * dy);
t0 -= tex2D(texForGradient, x + 1.0f * dx, y);
grad.y = t0;
output[pos] = sqrtf(grad.x * grad.x + grad.y * grad.y);
}
}
// Bind `input` to texForGradient (mirror addressing, bilinear filtering)
// and write its gradient magnitude into `output`, using member dims.
void StereoLite::Gradient(float* input, float* output) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y));
// mirror if a coordinate value is out-of-range
texForGradient.addressMode[0] = hipAddressModeMirror;
texForGradient.addressMode[1] = hipAddressModeMirror;
texForGradient.filterMode = hipFilterModeLinear;
texForGradient.normalized = true;
// NOTE(review): `desc` is created but never used -- candidate for removal.
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture2D(0, texForGradient, input, width, height, stride * sizeof(float));
LiteGradientKernel << < blocks, threads >> > (output, width, height, stride);
}
// output[p] = 1.0f where input[p] > 0, else 0.0f (binary positive mask).
__global__ void GetMaskPositiveKernel(float* input, float* output, int width, int height, int stride) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height) return;
    const int idx = col + row * stride;
    output[idx] = (input[idx] > 0.0f) ? 1.0f : 0.0f;
}
// output[p] = 0.0f where input[p] > 0, else 1.0f (complement mask).
__global__ void GetMaskNegativeKernel(float* input, float* output, int width, int height, int stride) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height) return;
    const int idx = col + row * stride;
    output[idx] = (input[idx] > 0.0f) ? 0.0f : 1.0f;
}
// Write a binary {0,1} mask of input's sign into output: 1 where
// input > 0 when isPositive is true, otherwise the complement.
// Bug fix: the launch grid was sized from the member fields width/height
// instead of the w/h arguments, so calls with w > width (or h > height)
// left pixels unprocessed, and smaller calls launched excess blocks.
void StereoLite::GetMask(float* input, float* output, bool isPositive, int w, int h, int s) {
    dim3 threads(BlockWidth, BlockHeight);
    dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
    if (isPositive) {
        GetMaskPositiveKernel << < blocks, threads >> > (input, output, w, h, s);
    }
    else {
        GetMaskNegativeKernel << < blocks, threads >> > (input, output, w, h, s);
    }
}
// OLD KERNELS
// Deprecated variant kept for reference: computes the window SAD directly
// from i0/i1 (no shared error image, no running mean) and keeps the best
// (error, disparity) pair.
__global__
void LitePlaneSweepCorrelationKernel(float* i0, float* i1, float* disparity, int sweepDistance,
int windowSize, int width, int height, int stride, float *error)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += fabsf(i0[col + stride * row] - i1[col + stride * row]);
windowCount++;
}
}
}
currError = currError / windowCount;
if (currError < error[pos]) {
error[pos] = currError;
disparity[pos] = (float)sweepDistance;
}
}
__global__
void LitePlaneSweepCorrelationGetWarpKernel(float* i0, float* i1, float* disparity, int sweepDistance,
int windowSize, float2* currentWarp, float2 * finalWarp, float2* tv,
int width, int height, int stride, float *error)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
currentWarp[pos].x = currentWarp[pos].x + tv[pos].x;
currentWarp[pos].y = currentWarp[pos].y + tv[pos].y;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += fabsf(i0[col + stride * row] - i1[col + stride * row]);
windowCount++;
}
}
}
currError = currError / windowCount;
if (currError < error[pos]) {
error[pos] = currError;
disparity[pos] = (float)sweepDistance;
finalWarp[pos] = currentWarp[pos];
}
} | b6e71fbf3371bed98442794b1a6fa32e90da8c8d.cu | #include "stereolite.h"
// Per-pixel absolute difference: error[p] = |i0[p] - i1[p]|.
// (CUDA twin of the HIP variant earlier in this dump.)
__global__
void LitePlaneSweepGetErrorKernel(float* i0, float* i1, int width, int height, int stride, float *error)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
error[pos] = fabsf(i0[pos] - i1[pos]);
}
// Invalidate pixels whose best window-SAD error is more than standardDev
// BELOW the running mean error: error, disparity and the final warp are
// zeroed. (Note: the test is one-sided; it is not "closeness to the mean".)
__global__
void LitePlaneSweepMeanCleanup(float* error, float* meanError, float standardDev, float* disparity, float2* finalWarp,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
if ((meanError[pos] - error[pos]) > standardDev) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
finalWarp[pos] = make_float2(0.0f, 0.0f);
}
/*if (abs(meanError[pos] - error[pos]) < 1.0f ) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
finalWarp[pos] = make_float2(0.0f, 0.0f);
}*/
}
// Overload without a warp buffer: same one-sided mean-error cleanup, but
// only error and disparity are zeroed.
__global__
void LitePlaneSweepMeanCleanup(float* error, float* meanError, float standardDev, float* disparity,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
if ((meanError[pos] - error[pos]) > standardDev) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
}
}
// Window-based SAD: averages the per-pixel absolute error over a
// windowSize x windowSize box (clipped at borders), folds it into the
// running per-pixel mean (sweepDistance doubles as the sweep count), and
// keeps (error, disparity) for the best sweep seen so far.
// NOTE(review): maxDisparity is only referenced from commented-out code.
__global__
void LitePlaneSweepCorrelationKernel(float* imError, float* disparity, float sweepDistance, int maxDisparity,
int windowSize, int width, int height, int stride, float *error, float *meanError)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += imError[col + stride * row];
windowCount++;
}
}
}
currError = currError / windowCount;
// Incremental running mean over all sweep distances processed so far.
meanError[pos] = ((float)sweepDistance * meanError[pos] + currError) / ((float)sweepDistance + 1.0f);
if (currError < error[pos]) {
/*if (sweepDistance == maxDisparity) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
}
else {*/
error[pos] = currError;
disparity[pos] = (float)sweepDistance;
//}
}
}
// One plane-sweep step: build the |i0 - i1| error image, run the
// window-SAD best-disparity update, then zero out pixels whose best error
// falls far below the running mean. Uses ps_errorHolder / ps_meanError.
void StereoLite::PlaneSweepCorrelation(float *i0, float *i1, float* disparity, float sweepDistance, int windowSize,
int w, int h, int s, float *error)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
//LitePlaneSweepCorrelationKernel << <blocks, threads >> > (i0, i1, disparity, sweepDistance, windowSize, w, h, s, error);
LitePlaneSweepGetErrorKernel << < blocks, threads >> > (i0, i1, w, h, s, ps_errorHolder);
LitePlaneSweepCorrelationKernel << <blocks, threads >> > (ps_errorHolder, disparity, sweepDistance,
planeSweepMaxDisparity, windowSize, w, h, s, error, ps_meanError);
LitePlaneSweepMeanCleanup << < blocks, threads >> > (error, ps_meanError, planeSweepStandardDev, disparity,
w, h, s);
}
// Window-based SAD with warping vector fetch for left-right consistency calculation
__global__
void LitePlaneSweepCorrelationGetWarpKernel(float* imError, float* disparity, float sweepDistance, float sweepStride, int maxDisparity,
int windowSize, float2* currentWarp, float2 * finalWarp, float2* tv,
int width, int height, int stride, float *error, float *meanError)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float2 currWarp;
currWarp.x = currentWarp[pos].x + sweepStride * tv[pos].x;
currWarp.y = currentWarp[pos].y + sweepStride * tv[pos].y;
currentWarp[pos] = currWarp;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += imError[col + stride * row];
windowCount++;
}
}
}
currError = currError / windowCount;
float totalSweep = sweepDistance / sweepStride;
/*if ((error[pos] != 1000.0f) && (currError > error[pos])) {
meanError[pos] = currError;
}*/
meanError[pos] = (totalSweep * meanError[pos] + currError) / (totalSweep + 1.0f);
//meanError[pos] = (meanError[pos] + currError) / (1.0f + 1.0f);
if (currError < error[pos]) {
/*if (sweepDistance == maxDisparity) {
error[pos] = 0.0f;
disparity[pos] = 0.0f;
finalWarp[pos] = make_float2(0.0f, 0.0f);
}
else {*/
error[pos] = currError;
disparity[pos] = sweepDistance;
finalWarp[pos] = currWarp;
//}
}
}
void StereoLite::PlaneSweepCorrelationGetWarp(float *i0, float *i1, float* disparity,
float sweepDistance, float sweepStride, int windowSize,
float2* currentWarp, float2* finalWarp, float2 * translationVector, int w, int h, int s, float *error)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
/*LitePlaneSweepCorrelationGetWarpKernel << <blocks, threads >> > (i0, i1, disparity, sweepDistance, windowSize,
currentWarp, finalWarp, translationVector, w, h, s, error);*/
LitePlaneSweepGetErrorKernel << < blocks, threads >> > (i0, i1, w, h, s, ps_errorHolder);
LitePlaneSweepCorrelationGetWarpKernel << <blocks, threads >> > (ps_errorHolder, disparity,
sweepDistance, sweepStride,
planeSweepMaxDisparity, windowSize, currentWarp, finalWarp,
translationVector, w, h, s, error, ps_meanError);
/*LitePlaneSweepMeanCleanup << < blocks, threads >> > (error, ps_meanError, planeSweepStandardDev, disparity, finalWarp,
w, h, s);*/
}
// Left to Right Consistency
texture<float, cudaTextureType2D, cudaReadModeElementType> disparityTex;
__global__
void LiteLeftRightConsistencyKernel(float *disparityForward, float2* warpingVector, float* leftRightDiff,
float epsilon, float* disparityFinal, float2* finalWarpForward, int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float2 warpUV = warpingVector[pos];
int windowSize = 3;
bool isConsistent = true;
float currDiff = 0.0f;
int windowCnt = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
/*if (leftRightDiff[col + stride * row] > epsilon) {
isConsistent = false;
}*/
currDiff += leftRightDiff[col + stride * row];
windowCnt++;
}
}
}
currDiff = currDiff / (float)windowCnt;
if (currDiff > epsilon) {
isConsistent = false;
}
if (!isConsistent){
disparityFinal[pos] = 0.0f;
finalWarpForward[pos] = make_float2(0.0f, 0.0f);
}
else {
disparityFinal[pos] = disparityForward[pos];
finalWarpForward[pos] = warpUV;
}
}
__global__
void LiteLeftRightDiffKernel(float *disparityForward, float2* warpingVector, float* leftRightDiff,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float2 warpUV = warpingVector[pos];
float x = ((float)ix + warpUV.x + 0.5f) / (float)width;
float y = ((float)iy + warpUV.y + 0.5f) / (float)height;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float dispBackward = tex2D(disparityTex, x, y);
leftRightDiff[pos] = abs(dispBackward - disparityForward[pos]);
}
void StereoLite::LeftRightConsistency(float *disparityForward, float* disparityBackward, float2* warpingVector,
float epsilon, float* disparityFinal, float2* finalWarpForward,
int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
// mirror if a coordinate value is out-of-range
disparityTex.addressMode[0] = cudaAddressModeMirror;
disparityTex.addressMode[1] = cudaAddressModeMirror;
disparityTex.filterMode = cudaFilterModeLinear;
disparityTex.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, disparityTex, disparityBackward, w, h, s * sizeof(float));
LiteLeftRightDiffKernel << < blocks, threads >> > (disparityForward, warpingVector, ps_leftRightDiff, w, h, s);
LiteLeftRightConsistencyKernel << <blocks, threads >> > (disparityForward, warpingVector, ps_leftRightDiff,
epsilon, disparityFinal, finalWarpForward, w, h, s);
}
// [Hirata] Upsampling/Propagation
__global__ void LitePropagateColorOnlyKernel(float* grad, float* lidar, float2* warpUV, float2 * warpUVOut, float* depthOut, int radius,
int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
int maxRad = radius;
int kernelSize = maxRad * 2 + 1;
int shift = maxRad;
// Find closest lidar point
float dnearest = 0.0f;
float2 uvnearest;
int dnearest_idx;
float r0 = 10000.0f;
for (int j = 0; j < kernelSize; j++) {
for (int i = 0; i < kernelSize; i++) {
int col = (ix + i - shift);
int row = (iy + j - shift);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
//col + stride*row
float currLidar = lidar[col + stride * row];
float2 currUV = warpUV[col + stride * row];
if (currLidar != 0.0f) {
float r = sqrtf((ix - col)*(ix - col) + (iy - row)*(iy - row));
if (r < r0) {
r0 = r;
dnearest_idx = col + stride * row;
dnearest = currLidar;
uvnearest = currUV;
}
}
}
}
}
// Propagation
float sum = 0.0f;
float sumU = 0.0f;
float sumV = 0.0f;
float count = 0.0f;
int countPoint = 0;
for (int j = 0; j < kernelSize; j++) {
for (int i = 0; i < kernelSize; i++) {
int col = (ix + i - shift);
int row = (iy + j - shift);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
//col + stride*row
float currLidar = lidar[col + stride * row];
float2 currUV = warpUV[col + stride * row];
if (currLidar != 0.0f) {
countPoint++;
float gs = 1.0f / (1.0f + sqrtf((ix - col)*(ix - col) + (iy - row)*(iy - row)));
float gr = 1.0f / (1.0f + fabsf(dnearest - currLidar));
float grU = 1.0f / (1.0f + fabsf(uvnearest.x - currUV.x));
float grV = 1.0f / (1.0f + fabsf(uvnearest.y - currUV.y));
// Find maximum gradient in between the current lidar point and the current pixel
float gmax = grad[pos];
// y-direction
if (iy < row) {
for (int gy = iy; gy <= row; gy++) {
if (grad[ix + stride * gy] > gmax) {
gmax = grad[ix + stride * gy];
}
}
}
else if (iy > row) {
for (int gy = row; gy <= iy; gy++) {
if (grad[ix + stride * gy] > gmax) {
gmax = grad[ix + stride * gy];
}
}
}
// x-direction
if (ix < col) {
for (int gx = ix; gx <= col; gx++) {
if (grad[gx + stride * iy] > gmax) {
gmax = grad[gx + stride * iy];
}
}
}
else if (ix > col) {
for (int gx = col; gx <= ix; gx++) {
if (grad[gx + stride * iy] > gmax) {
gmax = grad[gx + stride * iy];
}
}
}
sum += currLidar * gs * gr * (1.0f / (gmax + 0.001f));
sumU += warpUV[col + stride * row].x * gs * grU * (1.0f / (gmax + 0.001f));
sumV += warpUV[col + stride * row].y * gs * grV * (1.0f / (gmax + 0.001f));
count += gs * gr * (1.0f / (gmax + 0.001f));
}
}
}
}
float propagatedDepth;
float2 propagatedUV;
if (count != 0.0f) {
propagatedDepth = sum / count;
propagatedUV.x = sumU / count;
propagatedUV.y = sumV / count;
}
else {
propagatedDepth = 0.0f;
propagatedUV.x = 0.0f;
propagatedUV.y = 0.0f;
}
depthOut[pos] = propagatedDepth;
}
}
void StereoLite::PropagateColorOnly(float* grad, float* lidar, float2 * warpUV, float2* warpUVOut, float* depthOut, int radius)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y));
LitePropagateColorOnlyKernel << < blocks, threads >> > (grad, lidar, warpUV, warpUVOut, depthOut, radius, width, height, stride);
}
texture<float, 2, cudaReadModeElementType> texForGradient;
__global__ void LiteGradientKernel(float* output, int width, int height, int stride) {
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float2 grad;
float t0;
// x derivative
t0 = tex2D(texForGradient, x + 1.0f * dx, y);
t0 -= tex2D(texForGradient, x, y);
t0 = tex2D(texForGradient, x + 1.0f * dx, y + 1.0f * dy);
t0 -= tex2D(texForGradient, x, y + 1.0f * dy);
grad.x = t0;
// y derivative
t0 = tex2D(texForGradient, x, y + 1.0f * dy);
t0 -= tex2D(texForGradient, x, y);
t0 = tex2D(texForGradient, x + 1.0f * dx, y + 1.0f * dy);
t0 -= tex2D(texForGradient, x + 1.0f * dx, y);
grad.y = t0;
output[pos] = sqrtf(grad.x * grad.x + grad.y * grad.y);
}
}
void StereoLite::Gradient(float* input, float* output) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y));
// mirror if a coordinate value is out-of-range
texForGradient.addressMode[0] = cudaAddressModeMirror;
texForGradient.addressMode[1] = cudaAddressModeMirror;
texForGradient.filterMode = cudaFilterModeLinear;
texForGradient.normalized = true;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(0, texForGradient, input, width, height, stride * sizeof(float));
LiteGradientKernel << < blocks, threads >> > (output, width, height, stride);
}
__global__ void GetMaskPositiveKernel(float* input, float* output, int width, int height, int stride) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
if (input[pos] > 0.0f) {
output[pos] = 1.0f;
}
else {
output[pos] = 0.0f;
}
}
__global__ void GetMaskNegativeKernel(float* input, float* output, int width, int height, int stride) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
if (input[pos] > 0.0f) {
output[pos] = 0.0f;
}
else {
output[pos] = 1.0f;
}
}
void StereoLite::GetMask(float* input, float* output, bool isPositive, int w, int h, int s) {
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y));
if (isPositive) {
GetMaskPositiveKernel << < blocks, threads >> > (input, output, w, h, s);
}
else {
GetMaskNegativeKernel << < blocks, threads >> > (input, output, w, h, s);
}
}
// OLD KERNELS
__global__
void LitePlaneSweepCorrelationKernel(float* i0, float* i1, float* disparity, int sweepDistance,
int windowSize, int width, int height, int stride, float *error)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += fabsf(i0[col + stride * row] - i1[col + stride * row]);
windowCount++;
}
}
}
currError = currError / windowCount;
if (currError < error[pos]) {
error[pos] = currError;
disparity[pos] = (float)sweepDistance;
}
}
__global__
void LitePlaneSweepCorrelationGetWarpKernel(float* i0, float* i1, float* disparity, int sweepDistance,
int windowSize, float2* currentWarp, float2 * finalWarp, float2* tv,
int width, int height, int stride, float *error)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
currentWarp[pos].x = currentWarp[pos].x + tv[pos].x;
currentWarp[pos].y = currentWarp[pos].y + tv[pos].y;
float currError = 0.0f;
int windowCount = 0;
for (int j = 0; j < windowSize; j++) {
for (int i = 0; i < windowSize; i++) {
//get values
int col = (ix + i - (windowSize - 1) / 2);
int row = (iy + j - (windowSize - 1) / 2);
if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
currError += fabsf(i0[col + stride * row] - i1[col + stride * row]);
windowCount++;
}
}
}
currError = currError / windowCount;
if (currError < error[pos]) {
error[pos] = currError;
disparity[pos] = (float)sweepDistance;
finalWarp[pos] = currentWarp[pos];
}
} |
1523fb22266baba4669ed50712b6917d683b4fb6.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
void thrust_local_sort(const complex<float>* values_in,
complex<float>* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
hipStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
| 1523fb22266baba4669ed50712b6917d683b4fb6.cu | /* Copyright 2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/sort/thrust_sort.cuh"
namespace cunumeric {
void thrust_local_sort(const complex<float>* values_in,
complex<float>* values_out,
const int64_t* indices_in,
int64_t* indices_out,
const size_t volume,
const size_t sort_dim_size,
const bool stable,
cudaStream_t stream)
{
detail::thrust_local_sort(
values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream);
}
} // namespace cunumeric
|
83f803eb183936d9e85dba581fc3c7a0a178d8f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv/cv.h>
#include <opencv/highgui.h>
//#include <opencv2/core/core.hpp>
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/opencv.hpp>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string>
#include "utils.h"
#include "timer.h"
cv::Mat imageRGBA;
cv::Mat imageGrey;
uchar4 *d_rgbaImage__;
unsigned char *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
//returns a pointer to an RGBA version of the input image
//and a pointer to the single channel grey-scale output
//on both the host and device
void preProcess(uchar4 **inputImage, unsigned char **greyImage,
uchar4 **d_rgbaImage, unsigned char **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
checkCudaErrors(hipFree(0));
cv::Mat image;
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
//cv::namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
//cv::imshow( "Display Image", image );
//cv::waitKey(0);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = imageGrey.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
checkCudaErrors(hipMalloc(d_rgbaImage, sizeof(uchar4) * numPixels));
checkCudaErrors(hipMalloc(d_greyImage, sizeof(unsigned char) * numPixels));
checkCudaErrors(hipMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char))); //make sure no memory is left laying around
//copy input array to the GPU
std::cout << typeid(d_rgbaImage).name() << "\n" << typeid(inputImage).name();
checkCudaErrors(hipMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice));
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file) {
const int numPixels = numRows() * numCols();
//copy the output back to the host
checkCudaErrors(hipMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, sizeof(unsigned char) * numPixels, hipMemcpyDeviceToHost));
//output the image
cv::namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
cv::imshow( "Display Image", imageGrey );
cv::waitKey(0);
cv::imwrite(output_file.c_str(), imageGrey);
//cleanup
hipFree(d_rgbaImage__);
hipFree(d_greyImage__);
}
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols) {
int r = blockIdx.x; //row
int c = threadIdx.y; //col
uchar4 rgba = rgbaImage[r*numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, numCols, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
void referenceCalculation(const uchar4* const rgbaImage, unsigned char *const greyImage, size_t numRows, size_t numCols)
{
for (size_t r = 0; r < numRows; ++r) {
for (size_t c = 0; c < numCols; ++c) {
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
}
//include the definitions of the above functions for this homework
int main(int argc, char **argv) {
#include "timer.h"
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);
GpuTimer timer;
timer.Start();
//call the students' code
//referenceCalculation(h_rgbaImage, h_greyImage, numRows(), numCols());
your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
timer.Stop();
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
printf("\n");
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the grey image
postProcess(output_file);
return 0;
}
| 83f803eb183936d9e85dba581fc3c7a0a178d8f4.cu | #include <opencv/cv.h>
#include <opencv/highgui.h>
//#include <opencv2/core/core.hpp>
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/opencv.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include "utils.h"
#include "timer.h"
cv::Mat imageRGBA;
cv::Mat imageGrey;
uchar4 *d_rgbaImage__;
unsigned char *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
//return types are void since any internal error will be handled by quitting
//no point in returning error codes...
//returns a pointer to an RGBA version of the input image
//and a pointer to the single channel grey-scale output
//on both the host and device
void preProcess(uchar4 **inputImage, unsigned char **greyImage,
uchar4 **d_rgbaImage, unsigned char **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
checkCudaErrors(cudaFree(0));
cv::Mat image;
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
//cv::namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
//cv::imshow( "Display Image", image );
//cv::waitKey(0);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = imageGrey.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
checkCudaErrors(cudaMalloc(d_rgbaImage, sizeof(uchar4) * numPixels));
checkCudaErrors(cudaMalloc(d_greyImage, sizeof(unsigned char) * numPixels));
checkCudaErrors(cudaMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char))); //make sure no memory is left laying around
//copy input array to the GPU
std::cout << typeid(d_rgbaImage).name() << "\n" << typeid(inputImage).name();
checkCudaErrors(cudaMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice));
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file) {
const int numPixels = numRows() * numCols();
//copy the output back to the host
checkCudaErrors(cudaMemcpy(imageGrey.ptr<unsigned char>(0), d_greyImage__, sizeof(unsigned char) * numPixels, cudaMemcpyDeviceToHost));
//output the image
cv::namedWindow( "Display Image", CV_WINDOW_AUTOSIZE );
cv::imshow( "Display Image", imageGrey );
cv::waitKey(0);
cv::imwrite(output_file.c_str(), imageGrey);
//cleanup
cudaFree(d_rgbaImage__);
cudaFree(d_greyImage__);
}
__global__ void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols) {
int r = blockIdx.x; //row
int c = threadIdx.y; //col
uchar4 rgba = rgbaImage[r*numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, numCols, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
void referenceCalculation(const uchar4* const rgbaImage, unsigned char *const greyImage, size_t numRows, size_t numCols)
{
for (size_t r = 0; r < numRows; ++r) {
for (size_t c = 0; c < numCols; ++c) {
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
}
//include the definitions of the above functions for this homework
int main(int argc, char **argv) {
#include "timer.h"
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
std::string input_file;
std::string output_file;
if (argc == 3) {
input_file = std::string(argv[1]);
output_file = std::string(argv[2]);
}
else {
std::cerr << "Usage: ./hw input_file output_file" << std::endl;
exit(1);
}
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);
GpuTimer timer;
timer.Start();
//call the students' code
//referenceCalculation(h_rgbaImage, h_greyImage, numRows(), numCols());
your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
timer.Stop();
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
printf("\n");
int err = printf("%f msecs.\n", timer.Elapsed());
if (err < 0) {
//Couldn't print! Probably the student closed stdout - bad news
std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
exit(1);
}
//check results and output the grey image
postProcess(output_file);
return 0;
}
|
13500a59a1c23acb9dd0a112899b00cf7698348f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include "utils/cuda/errors.cuh"
#include "utils/tsdf/voxel_mem.cuh"
__global__ static void heap_init_kernel(int* heap) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < NUM_BLOCK) {
heap[idx] = idx;
}
}
VoxelMemPool::VoxelMemPool() {
// initialize free block counter
CUDA_SAFE_CALL(hipMalloc(&num_free_blocks_, sizeof(int)));
const int tmp = NUM_BLOCK;
CUDA_SAFE_CALL(hipMemcpy(num_free_blocks_, &tmp, sizeof(int), hipMemcpyHostToDevice));
// intialize voxel data buffer
CUDA_SAFE_CALL(hipMalloc(&voxels_rgbw_, sizeof(VoxelRGBW) * NUM_BLOCK * BLOCK_VOLUME));
CUDA_SAFE_CALL(hipMalloc(&voxels_tsdf_, sizeof(VoxelTSDF) * NUM_BLOCK * BLOCK_VOLUME));
CUDA_SAFE_CALL(hipMalloc(&voxels_segm_, sizeof(VoxelSEGM) * NUM_BLOCK * BLOCK_VOLUME));
// initialize heap array
CUDA_SAFE_CALL(hipMalloc(&heap_, sizeof(int) * NUM_BLOCK));
hipLaunchKernelGGL(( heap_init_kernel), dim3(NUM_BLOCK / 256), dim3(256), 0, 0, heap_);
CUDA_CHECK_ERROR;
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
void VoxelMemPool::ReleaseMemory() {
CUDA_SAFE_CALL(hipFree(voxels_rgbw_));
CUDA_SAFE_CALL(hipFree(voxels_tsdf_));
CUDA_SAFE_CALL(hipFree(voxels_segm_));
CUDA_SAFE_CALL(hipFree(num_free_blocks_));
CUDA_SAFE_CALL(hipFree(heap_));
}
__device__ int VoxelMemPool::AquireBlock() {
const int idx = atomicSub(num_free_blocks_, 1);
assert(idx >= 1);
const VoxelBlock block(heap_[idx - 1]);
#pragma unroll
for (int i = 0; i < BLOCK_VOLUME; ++i) {
VoxelRGBW& voxel_rgbw = GetVoxel<VoxelRGBW>(i, block);
VoxelTSDF& voxel_tsdf = GetVoxel<VoxelTSDF>(i, block);
VoxelSEGM& voxel_segm = GetVoxel<VoxelSEGM>(i, block);
voxel_rgbw.weight = 0;
voxel_tsdf.tsdf = -1;
voxel_segm.probability = .5;
}
return block.idx;
}
__device__ void VoxelMemPool::ReleaseBlock(const int block_idx) {
const int idx = atomicAdd(num_free_blocks_, 1);
assert(idx < NUM_BLOCK);
heap_[idx] = block_idx;
}
__host__ int VoxelMemPool::NumFreeBlocks() const {
int tmp;
CUDA_SAFE_CALL(hipMemcpy(&tmp, num_free_blocks_, sizeof(int), hipMemcpyDeviceToHost));
return tmp;
}
| 13500a59a1c23acb9dd0a112899b00cf7698348f.cu | #include <cassert>
#include "utils/cuda/errors.cuh"
#include "utils/tsdf/voxel_mem.cuh"
__global__ static void heap_init_kernel(int* heap) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < NUM_BLOCK) {
heap[idx] = idx;
}
}
VoxelMemPool::VoxelMemPool() {
// initialize free block counter
CUDA_SAFE_CALL(cudaMalloc(&num_free_blocks_, sizeof(int)));
const int tmp = NUM_BLOCK;
CUDA_SAFE_CALL(cudaMemcpy(num_free_blocks_, &tmp, sizeof(int), cudaMemcpyHostToDevice));
// intialize voxel data buffer
CUDA_SAFE_CALL(cudaMalloc(&voxels_rgbw_, sizeof(VoxelRGBW) * NUM_BLOCK * BLOCK_VOLUME));
CUDA_SAFE_CALL(cudaMalloc(&voxels_tsdf_, sizeof(VoxelTSDF) * NUM_BLOCK * BLOCK_VOLUME));
CUDA_SAFE_CALL(cudaMalloc(&voxels_segm_, sizeof(VoxelSEGM) * NUM_BLOCK * BLOCK_VOLUME));
// initialize heap array
CUDA_SAFE_CALL(cudaMalloc(&heap_, sizeof(int) * NUM_BLOCK));
heap_init_kernel<<<NUM_BLOCK / 256, 256>>>(heap_);
CUDA_CHECK_ERROR;
CUDA_SAFE_CALL(cudaDeviceSynchronize());
}
void VoxelMemPool::ReleaseMemory() {
CUDA_SAFE_CALL(cudaFree(voxels_rgbw_));
CUDA_SAFE_CALL(cudaFree(voxels_tsdf_));
CUDA_SAFE_CALL(cudaFree(voxels_segm_));
CUDA_SAFE_CALL(cudaFree(num_free_blocks_));
CUDA_SAFE_CALL(cudaFree(heap_));
}
__device__ int VoxelMemPool::AquireBlock() {
const int idx = atomicSub(num_free_blocks_, 1);
assert(idx >= 1);
const VoxelBlock block(heap_[idx - 1]);
#pragma unroll
for (int i = 0; i < BLOCK_VOLUME; ++i) {
VoxelRGBW& voxel_rgbw = GetVoxel<VoxelRGBW>(i, block);
VoxelTSDF& voxel_tsdf = GetVoxel<VoxelTSDF>(i, block);
VoxelSEGM& voxel_segm = GetVoxel<VoxelSEGM>(i, block);
voxel_rgbw.weight = 0;
voxel_tsdf.tsdf = -1;
voxel_segm.probability = .5;
}
return block.idx;
}
__device__ void VoxelMemPool::ReleaseBlock(const int block_idx) {
const int idx = atomicAdd(num_free_blocks_, 1);
assert(idx < NUM_BLOCK);
heap_[idx] = block_idx;
}
__host__ int VoxelMemPool::NumFreeBlocks() const {
int tmp;
CUDA_SAFE_CALL(cudaMemcpy(&tmp, num_free_blocks_, sizeof(int), cudaMemcpyDeviceToHost));
return tmp;
}
|
9b0b19f96c6319b7db8fa248950156884ab56c6d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "PIDControllerKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps matrix sizes x block shapes, timing 1000 launches
// of PIDControllerKernel per configuration and printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per line.
// NOTE: the closing braces of the outer loop and of main live on the
// following line of the file.
int main(int argc, char **argv) {
  hipSetDevice(0);
  // Guard the required command-line argument (number of sizes to sweep).
  if (argc < 2) {
    cout << "usage: benchmark <matrix_len>" << endl;
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: the buffers were allocated as XSIZE*YSIZE *bytes*; the kernel
      // operates on floats, so each needs XSIZE*YSIZE*sizeof(float) bytes.
      const size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
      float *input = NULL;
      hipMalloc(&input, bytes);
      float *goal = NULL;
      hipMalloc(&goal, bytes);
      float *output = NULL;
      hipMalloc(&output, bytes);
      float *previousError = NULL;
      hipMalloc(&previousError, bytes);
      float *integral = NULL;
      hipMalloc(&integral, bytes);
      // Round the launch domain up to a multiple of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);  // force runtime/context initialization before timing
      hipLaunchKernelGGL((PIDControllerKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, goal, output, previousError, integral);
      hipDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((PIDControllerKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, goal, output, previousError, integral);
      }
      // BUG FIX: drain the warm-up work so it is not billed to the timed
      // section below.
      hipDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((PIDControllerKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, input, goal, output, previousError, integral);
      }
      // BUG FIX: kernel launches are asynchronous; synchronize before reading
      // the clock so the measurement covers execution, not just launch cost.
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
      // BUG FIX: the original leaked every buffer on every loop iteration.
      hipFree(input);
      hipFree(goal);
      hipFree(output);
      hipFree(previousError);
      hipFree(integral);
    }
}} | 9b0b19f96c6319b7db8fa248950156884ab56c6d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "PIDControllerKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Thread-block shapes swept by the benchmark: four square blocks, then 1-D
// blocks from 64 up to the 1024-thread per-block limit.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem (matrix) sizes swept by the benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver (CUDA variant): sweeps matrix sizes x block shapes, timing
// 1000 launches of PIDControllerKernel per configuration and printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per line.
// NOTE: the closing braces of the outer loop and of main live on the
// following line of the file.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  // Guard the required command-line argument (number of sizes to sweep).
  if (argc < 2) {
    cout << "usage: benchmark <matrix_len>" << endl;
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: the buffers were allocated as XSIZE*YSIZE *bytes*; the kernel
      // operates on floats, so each needs XSIZE*YSIZE*sizeof(float) bytes.
      const size_t bytes = (size_t)XSIZE * YSIZE * sizeof(float);
      float *input = NULL;
      cudaMalloc(&input, bytes);
      float *goal = NULL;
      cudaMalloc(&goal, bytes);
      float *output = NULL;
      cudaMalloc(&output, bytes);
      float *previousError = NULL;
      cudaMalloc(&previousError, bytes);
      float *integral = NULL;
      cudaMalloc(&integral, bytes);
      // Round the launch domain up to a multiple of the block shape.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) {
        iXSIZE++;
      }
      while (iYSIZE % BLOCKY != 0) {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);  // force context creation before timing
      PIDControllerKernel<<<gridBlock, threadBlock>>>(input, goal, output, previousError, integral);
      cudaDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        PIDControllerKernel<<<gridBlock, threadBlock>>>(input, goal, output, previousError, integral);
      }
      // BUG FIX: drain the warm-up work so it is not billed to the timed
      // section below.
      cudaDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        PIDControllerKernel<<<gridBlock, threadBlock>>>(input, goal, output, previousError, integral);
      }
      // BUG FIX: kernel launches are asynchronous; synchronize before reading
      // the clock so the measurement covers execution, not just launch cost.
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
      // BUG FIX: the original leaked every buffer on every loop iteration.
      cudaFree(input);
      cudaFree(goal);
      cudaFree(output);
      cudaFree(previousError);
      cudaFree(integral);
    }
e909ee0e5ba7e5b1c01ff89b109a137bf1f1c33b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <map>
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
// Area of a box stored as [xmin, ymin, xmax, ymax].
// Malformed boxes (xmax < xmin or ymax < ymin) report zero size.
// Unnormalized boxes are in inclusive pixel coordinates, hence the +1 terms.
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
    const bool normalized) {
  if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
    return Dtype(0.);
  }
  const Dtype w = bbox[2] - bbox[0];
  const Dtype h = bbox[3] - bbox[1];
  return normalized ? w * h : (w + 1) * (h + 1);
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
    const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
    const bool normalized);
// Intersection-over-union of two boxes stored as [xmin, ymin, xmax, ymax].
// Disjoint boxes yield 0. Box sizes come from BBoxSizeGPU, called with its
// default `normalized` argument (declared elsewhere).
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
    const Dtype* bbox2) {
  // No overlap at all: bail out early.
  if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
      bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
    return Dtype(0.);
  }
  // Intersection rectangle; non-empty thanks to the guard above.
  const Dtype ix0 = max(bbox1[0], bbox2[0]);
  const Dtype iy0 = max(bbox1[1], bbox2[1]);
  const Dtype ix1 = min(bbox1[2], bbox2[2]);
  const Dtype iy1 = min(bbox1[3], bbox2[3]);
  const Dtype inter = (ix1 - ix0) * (iy1 - iy0);
  const Dtype size1 = BBoxSizeGPU(bbox1);
  const Dtype size2 = BBoxSizeGPU(bbox2);
  // IoU = intersection / union.
  return inter / (size1 + size2 - inter);
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
    const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
    const double* bbox2);
// Device-side minimum of two values (kept local so kernels avoid host headers).
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
  if (x < y) {
    return x;
  }
  return y;
}
// Device-side maximum of two values (kept local so kernels avoid host headers).
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
  if (x > y) {
    return x;
  }
  return y;
}
// Clamp all four box coordinates into the unit square [0, 1].
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
  for (int k = 0; k < 4; ++k) {
    clip_bbox[k] = Max(Min(bbox[k], Dtype(1.)), Dtype(0.));
  }
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
// Decodes predicted location offsets (`loc_data`) against prior boxes and
// their variances into absolute box coordinates in `bbox_data`.
// `prior_data` layout: num_priors*4 prior coordinates followed by
// num_priors*4 variances. One thread per box coordinate: `index` maps to
// (image n, prior d, loc class c, coordinate i in [0,4)).
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
// NOTE(review): this `return` ends the thread entirely; if
// CUDA_KERNEL_LOOP is a grid-stride loop, the thread's remaining
// iterations are skipped too -- confirm intended.
return;
}
// pi: offset of prior d's coordinates; vi: offset of its variances.
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
// Offsets encode (dx, dy, log dw, log dh) relative to the prior's
// center and size.
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
// index - i is the first coordinate of this box's 4-value prediction.
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to retore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
// Each thread writes only its own coordinate of the decoded box.
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
// Corner offsets scaled by the prior's width (x coords) or height (y).
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
// Clamp the decoded coordinate into [0, 1].
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
// Oriented-box variant of DecodeBBoxesKernel: predictions carry 6 values per
// box -- 4 coordinates plus an orientation (cos, sin) pair that is copied
// through undecoded in the CENTER_SIZE branch. Priors still have 4 values.
// NOTE(review): in the CORNER/CORNER_SIZE branches `i` runs up to 5 and
// indexes prior_data[pi + i] past the 4 stored prior coordinates -- looks
// wrong for the 6-value layout; confirm those code types are never used
// with oriented boxes.
template <typename Dtype>
__global__ void DecodeBBoxesOriKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 6;
const int c = (index / 6) % num_loc_classes;
const int d = (index / 6 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
// pi: offset of prior d's coordinates; vi: offset of its variances.
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
// index - i is the first of this box's 6 predicted values.
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
const Dtype angle_cos = loc_data[index - i + 4];
const Dtype angle_sin = loc_data[index - i + 5];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to retore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
// Orientation channels pass through unchanged.
case 4:
bbox_data[index] = angle_cos;
break;
case 5:
bbox_data[index] = angle_sin;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
// NOTE(review): this clamp also applies to the cos/sin channels
// (i == 4, 5), squashing negative values into [0, 1] -- confirm
// intended.
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
// Second-stage (cascade) decoder: like DecodeBBoxesKernel, but offsets are
// applied IN PLACE to previously decoded boxes already stored in
// `bbox_data`, rather than to the raw priors. Variances still come from
// `prior_data`.
template <typename Dtype>
__global__ void CasRegDecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
// pi/vi index prior_data; only vi (variances) is used for decoding here.
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = bbox_data[index] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
bbox_data[index] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
// The "prior" here is the box decoded by the previous stage.
const Dtype p_xmin = bbox_data[index - i];
const Dtype p_ymin = bbox_data[index - i + 1];
const Dtype p_xmax = bbox_data[index - i + 2];
const Dtype p_ymax = bbox_data[index - i + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to retore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = bbox_data[index - i];
const Dtype p_ymin = bbox_data[index - i + 1];
const Dtype p_xmax = bbox_data[index - i + 2];
const Dtype p_ymax = bbox_data[index - i + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = bbox_data[index] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
bbox_data[index] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
// Host-side launcher for DecodeBBoxesKernel. `nthreads` is the total number
// of box coordinates to decode (4 per box per loc class per image).
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location ,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
// Host-side launcher for DecodeBBoxesOriKernel (oriented boxes, 6 values
// per box: 4 coordinates plus cos/sin).
template <typename Dtype>
void DecodeBBoxesOriGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location ,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesOriKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
// Explicit float/double instantiations so the definitions above link from
// other translation units.
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location ,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
template void DecodeBBoxesOriGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesOriGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location ,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
// Cascade decode in two launches: first decode `arm_loc_data` against the
// raw priors into `bbox_data`, then refine those boxes in place with
// `loc_data` (presumably RefineDet-style ARM -> ODM refinement -- confirm).
template <typename Dtype>
void CasRegDecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data, const Dtype* arm_loc_data) {
// Stage 1: decode the ARM offsets against the priors.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, arm_loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
// Stage 2: refine the stage-1 boxes (held in bbox_data) with loc_data.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CasRegDecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void CasRegDecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data, const float* arm_loc_data);
template void CasRegDecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data, const double* arm_loc_data);
// Reorders data interpreted as [num, num_data, num_classes, num_dim] into
// [num, num_classes, num_data, num_dim] (layout inferred from the index
// math). One thread per scalar element.
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int dim_idx = index % num_dim;
const int cls_idx = (index / num_dim) % num_classes;
const int dat_idx = (index / num_dim / num_classes) % num_data;
const int num_idx = index / num_dim / num_classes / num_data;
const int dst =
((num_idx * num_classes + cls_idx) * num_data + dat_idx) * num_dim
+ dim_idx;
new_data[dst] = data[index];
}
}
// Host-side launcher for PermuteDataKernel; `nthreads` is the total number
// of scalar elements to move.
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
// Objectness-filtered permutation: same layout change as PermuteDataKernel,
// but for priors whose second channel of the 2-class `arm_data` (presumably
// the ARM objectness score -- confirm) falls below `objectness_score`, the
// output is forced to background: class 0 gets probability 1, all other
// classes 0.
template <typename Dtype>
__global__ void OSPermuteDataKernel(const int nthreads,
const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data, float objectness_score) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
// arm_data holds 2 values per prior; "* 2 + 1" selects the second one.
const int arm_index = ((n * num_data + d) * 2 + 1) * num_dim + i;
if (arm_data[arm_index] < objectness_score) {
if (c == 0)
new_data[new_index] = 1.0;
else
new_data[new_index] = 0.0;
} else {
new_data[new_index] = data[index];
}
}
}
// Host-side launcher for OSPermuteDataKernel.
template <typename Dtype>
void OSPermuteDataGPU(const int nthreads,
const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data, float objectness_score) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( OSPermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, arm_data, num_classes, num_data,
num_dim, new_data, objectness_score);
CUDA_POST_KERNEL_CHECK;
}
template void OSPermuteDataGPU(const int nthreads,
const float* data, const float* arm_data, const int num_classes, const int num_data,
const int num_dim, float* new_data, float objectness_score);
template void OSPermuteDataGPU(const int nthreads,
const double* data, const double* arm_data, const int num_classes, const int num_data,
const int num_dim, double* new_data, float objectness_score);
// For data laid out as [num, channels, spatial_dim], writes the per-(n, s)
// maximum over channels into `out` ([num, spatial_dim]). One thread per
// (n, s) pair; used to stabilize the softmax.
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
// data[index] = channel_data[index] - channel_max[n, s]: subtracts the
// per-(n, s) channel maximum, broadcast over channels (softmax shift).
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
// Elementwise exponential: out[i] = exp(data[i]). Pure map, so `data` and
// `out` may point to the same buffer.
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(i, count) {
out[i] = exp(data[i]);
}
}
// For data laid out as [num, channels, spatial_dim], writes the per-(n, s)
// sum over channels into `channel_sum` ([num, spatial_dim]). One thread per
// (n, s) pair; softmax normalization denominator.
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
// In-place divide: data[index] /= channel_sum[n, s], broadcast over
// channels (final softmax normalization).
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
// Channel-wise softmax over `data` laid out as [outer_num, channels,
// inner_num]; the normalized result is written to `prob` (may not alias
// `data` until the subtract step copies it over). A temporary Blob holds
// the per-(n, s) max and, later, the per-(n, s) sum.
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
scale_data, prob);
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
// Marks overlapped_data[n, c, i, j] = true when boxes i and j of class c in
// image n overlap by more than `overlap_threshold`. Only `true` is ever
// written, so the caller must zero-initialize `overlapped_data`.
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
// Host-side launcher for ComputeOverlappedKernel; `nthreads` is
// num * num_classes * num_bboxes^2 (one thread per ordered box pair).
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
// Same as ComputeOverlappedKernel, but the candidate boxes are selected by
// the index list `idx` (each entry addressing a 4-value box in bbox_data).
// Writes only `true`, so `overlapped_data` must be zero-initialized.
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
// Host-side launcher for ComputeOverlappedByIdxKernel; `nthreads` is
// num_idx^2 (one thread per ordered pair of kept boxes).
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedByIdxKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
// Greedy non-maximum suppression with the pairwise-overlap matrix computed
// on the GPU: filter by confidence, sort by score, compute overlaps among
// the top_k survivors on the device, then suppress on the host (ApplyNMS).
// Pointer contract: conf_data is dereferenced on the host, so it must be a
// host pointer; bbox_data is only passed to the device-side overlap kernel,
// so it must be a device pointer. Surviving indices are appended to
// *indices in descending score order.
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
// (thrust on raw host pointers runs on the host here.)
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
// Fresh Blob data is used as the zero-initialized overlap matrix --
// NOTE(review): relies on Blob allocations starting zeroed; confirm.
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
// Writes one 7-value detection record [image_id, label, score, xmin, ymin,
// xmax, ymax] per selected box.  One thread per selected index; `indices`
// maps the output row to the source box in bbox_data/conf_data.
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
    const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
    const int label, const int* indices, const bool clip_bbox,
    Dtype* detection_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int src = indices[index];
    Dtype* out = detection_data + index * 7;
    const Dtype* box = bbox_data + src * 4;
    out[0] = image_id;
    out[1] = label;
    out[2] = conf_data[src];
    if (clip_bbox) {
      // Clamp the four coordinates into [0, 1] while copying.
      ClipBBoxGPU(box, out + 3);
    } else {
      for (int k = 0; k < 4; ++k) {
        out[3 + k] = box[k];
      }
    }
  }
}
// Copies the selected detections into a freshly shaped 1x1xNx7 blob on the
// GPU; each row is [image_id, label, score, xmin, ymin, xmax, ymax].
// Leaves detection_blob untouched when `indices` is empty.
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
    const int image_id, const int label, const vector<int>& indices,
    const bool clip_bbox, Blob<Dtype>* detection_blob) {
  // Store selected indices in array.
  int num_det = indices.size();
  if (num_det == 0) {
    return;
  }
  // Stage the indices on the host; gpu_data() below uploads them.
  Blob<int> idx_blob(1, 1, 1, num_det);
  int* idx_data = idx_blob.mutable_cpu_data();
  std::copy(indices.begin(), indices.end(), idx_data);
  // Prepare detection_blob.
  // Reshape must happen before mutable_gpu_data() so the device buffer has
  // the right size.
  detection_blob->Reshape(1, 1, num_det, 7);
  Dtype* detection_data = detection_blob->mutable_gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( GetDetectionsKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_det)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_det, bbox_data, conf_data, image_id, label,
      idx_blob.gpu_data(), clip_bbox, detection_data);
  CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
// Per-prior confidence loss; one thread per (image, prior) pair.
// conf_data layout: [num, num_preds_per_class, num_classes].  For SOFTMAX the
// caller must already have converted scores to probabilities (see
// ComputeConfLossGPU).  match_data holds the matched class label per prior
// (background label when unmatched), stored as Dtype.
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
    const Dtype* conf_data, const int num_preds_per_class,
    const int num_classes, const ConfLossType loss_type,
    const Dtype* match_data, Dtype* conf_loss_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int label = match_data[index];  // Dtype -> int truncation; labels are integral.
    int num = index / num_preds_per_class;
    int p = index % num_preds_per_class;
    int start_idx = (num * num_preds_per_class + p) * num_classes;
    Dtype loss = 0;
    if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
      // Compute softmax probability.
      Dtype prob = conf_data[start_idx + label];
      // Clamp to FLT_MIN so log never sees 0.
      loss = -log(Max(prob, Dtype(FLT_MIN)));
    } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
      // One-vs-all cross-entropy over every class, using the overflow-safe
      // formulation of log(1 + exp(x)) (x - 2*x*(x>=0) is always -|x|).
      int target = 0;
      for (int c = 0; c < num_classes; ++c) {
        if (c == label) {
          target = 1;
        } else {
          target = 0;
        }
        Dtype input = conf_data[start_idx + c];
        loss -= input * (target - (input >= 0)) -
            log(1 + exp(input - 2 * input * (input >= 0)));
      }
    }
    conf_loss_data[index] = loss;
  }
}
// Computes the per-prior confidence loss on the GPU.
// For each image i and prior p, looks up the matched ground-truth label (or
// background_label_id when unmatched), uploads the label map, and launches
// ComputeConfLossKernel.  For SOFTMAX loss the raw scores are first converted
// to probabilities.  Per-image losses are returned in all_conf_loss.
// Fix: added CUDA_POST_KERNEL_CHECK after the kernel launch -- every other
// launch wrapper in this file checks, and without it launch errors were
// silently deferred to the cpu_data() sync below.
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
    const int num_preds_per_class, const int num_classes,
    const int background_label_id, const ConfLossType loss_type,
    const vector<map<int, vector<int> > >& all_match_indices,
    const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
    vector<vector<float> >* all_conf_loss) {
  CHECK_LT(background_label_id, num_classes);
  // Build the per-prior label map on the host.
  Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
  Dtype* match_data = match_blob.mutable_cpu_data();
  for (int i = 0; i < num; ++i) {
    const map<int, vector<int> >& match_indices = all_match_indices[i];
    for (int p = 0; p < num_preds_per_class; ++p) {
      // Get the label index.
      int label = background_label_id;
      for (map<int, vector<int> >::const_iterator it =
           match_indices.begin(); it != match_indices.end(); ++it) {
        const vector<int>& match_index = it->second;
        CHECK_EQ(match_index.size(), num_preds_per_class);
        if (match_index[p] > -1) {
          CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
          const vector<NormalizedBBox>& gt_bboxes =
              all_gt_bboxes.find(i)->second;
          CHECK_LT(match_index[p], gt_bboxes.size());
          label = gt_bboxes[match_index[p]].label();
          CHECK_GE(label, 0);
          CHECK_NE(label, background_label_id);
          CHECK_LT(label, num_classes);
          // A prior can only be matched to one gt bbox.
          break;
        }
      }
      match_data[i * num_preds_per_class + p] = label;
    }
  }
  // Get probability data.
  const Dtype* conf_gpu_data = conf_blob.gpu_data();
  Blob<Dtype> prob_blob;
  prob_blob.ReshapeLike(conf_blob);
  if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
    // SOFTMAX loss expects probabilities, not raw scores.
    Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
    SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
               prob_gpu_data);
    conf_gpu_data = prob_blob.gpu_data();
  }
  // Compute the loss.
  Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
  Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
  const int num_threads = num * num_preds_per_class;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( ComputeConfLossKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, conf_gpu_data, num_preds_per_class,
      num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
  CUDA_POST_KERNEL_CHECK;
  // Save the loss.
  all_conf_loss->clear();
  const Dtype* loss_data = conf_loss_blob.cpu_data();  // device -> host sync
  for (int i = 0; i < num; ++i) {
    vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
    all_conf_loss->push_back(conf_loss);
    loss_data += num_preds_per_class;
  }
}
// Explicit initialization.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
} // namespace caffe
| e909ee0e5ba7e5b1c01ff89b109a137bf1f1c33b.cu | #include <algorithm>
#include <functional>
#include <map>
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
// Area of a [xmin, ymin, xmax, ymax] box.  With `normalized` (unit-square)
// coordinates the raw width*height is returned; otherwise pixel coordinates
// are assumed and the inclusive +1 convention applies.  Boxes with
// xmax < xmin or ymax < ymin are defined to have size 0.
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
    const bool normalized) {
  if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
    // Degenerate/invalid box.
    return Dtype(0.);
  }
  const Dtype w = bbox[2] - bbox[0];
  const Dtype h = bbox[3] - bbox[1];
  return normalized ? w * h : (w + 1) * (h + 1);
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
// Intersection-over-union of two [xmin, ymin, xmax, ymax] boxes; returns 0
// when the boxes do not overlap at all.
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
    const Dtype* bbox2) {
  const bool disjoint = bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
      bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1];
  if (disjoint) {
    return Dtype(0.);
  }
  // Intersection rectangle (non-empty, thanks to the check above).
  const Dtype ix0 = max(bbox1[0], bbox2[0]);
  const Dtype iy0 = max(bbox1[1], bbox2[1]);
  const Dtype ix1 = min(bbox1[2], bbox2[2]);
  const Dtype iy1 = min(bbox1[3], bbox2[3]);
  const Dtype inter = (ix1 - ix0) * (iy1 - iy0);
  const Dtype size1 = BBoxSizeGPU(bbox1);
  const Dtype size2 = BBoxSizeGPU(bbox2);
  // IoU = intersection / union.
  return inter / (size1 + size2 - inter);
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
// Device-side minimum.  Comparison order matches the original `x < y ? x : y`,
// so ties (and a NaN x) yield y.
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
  if (x < y) {
    return x;
  }
  return y;
}
// Device-side maximum.  Comparison order matches the original `x > y ? x : y`,
// so ties (and a NaN x) yield y.
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
  if (x > y) {
    return x;
  }
  return y;
}
// Clamps all four box coordinates into the unit interval [0, 1].
// `bbox` and `clip_bbox` may alias element-for-element safely.
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
  const Dtype lo = Dtype(0.);
  const Dtype hi = Dtype(1.);
  for (int i = 0; i < 4; ++i) {
    clip_bbox[i] = Max(Min(bbox[i], hi), lo);
  }
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
// Decodes location predictions back into bounding boxes; one thread per
// output coordinate (nthreads = num * num_priors * num_loc_classes * 4).
// prior_data layout per image: num_priors * 4 prior corners followed by
// num_priors * 4 variances (`vi` indexes the variance half).
// Encodings: CORNER (per-corner offset), CENTER_SIZE (center/size with
// exp-encoded dimensions), CORNER_SIZE (offset scaled by prior width/height).
// An unknown code type leaves the output value untouched.
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
    const Dtype* loc_data, const Dtype* prior_data,
    const CodeType code_type, const bool variance_encoded_in_target,
    const int num_priors, const bool share_location,
    const int num_loc_classes, const int background_label_id,
    const bool clip_bbox, Dtype* bbox_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // index decomposes as [n][d: prior][c: loc class][i: coordinate 0..3].
    const int i = index % 4;
    const int c = (index / 4) % num_loc_classes;
    const int d = (index / 4 / num_loc_classes) % num_priors;
    if (!share_location && c == background_label_id) {
      // Ignore background class if not share_location.
      return;
    }
    const int pi = d * 4;                // start of this prior's corners
    const int vi = pi + num_priors * 4;  // start of this prior's variances
    if (code_type == PriorBoxParameter_CodeType_CORNER) {
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to add the offset
        // predictions.
        bbox_data[index] = prior_data[pi + i] + loc_data[index];
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        bbox_data[index] =
            prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
      }
    } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
      const Dtype p_xmin = prior_data[pi];
      const Dtype p_ymin = prior_data[pi + 1];
      const Dtype p_xmax = prior_data[pi + 2];
      const Dtype p_ymax = prior_data[pi + 3];
      const Dtype prior_width = p_xmax - p_xmin;
      const Dtype prior_height = p_ymax - p_ymin;
      const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
      const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
      // index - i is the first of this box's 4 predictions; the names below
      // follow the storage order (they actually hold dx, dy, dw, dh).
      const Dtype xmin = loc_data[index - i];
      const Dtype ymin = loc_data[index - i + 1];
      const Dtype xmax = loc_data[index - i + 2];
      const Dtype ymax = loc_data[index - i + 3];
      Dtype decode_bbox_center_x, decode_bbox_center_y;
      Dtype decode_bbox_width, decode_bbox_height;
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
        // predictions.
        decode_bbox_center_x = xmin * prior_width + prior_center_x;
        decode_bbox_center_y = ymin * prior_height + prior_center_y;
        decode_bbox_width = exp(xmax) * prior_width;
        decode_bbox_height = exp(ymax) * prior_height;
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        decode_bbox_center_x =
            prior_data[vi] * xmin * prior_width + prior_center_x;
        decode_bbox_center_y =
            prior_data[vi + 1] * ymin * prior_height + prior_center_y;
        decode_bbox_width =
            exp(prior_data[vi + 2] * xmax) * prior_width;
        decode_bbox_height =
            exp(prior_data[vi + 3] * ymax) * prior_height;
      }
      // Each thread emits only its own coordinate of the decoded box.
      switch (i) {
        case 0:
          bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
          break;
        case 1:
          bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
          break;
        case 2:
          bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
          break;
        case 3:
          bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
          break;
      }
    } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
      const Dtype p_xmin = prior_data[pi];
      const Dtype p_ymin = prior_data[pi + 1];
      const Dtype p_xmax = prior_data[pi + 2];
      const Dtype p_ymax = prior_data[pi + 3];
      const Dtype prior_width = p_xmax - p_xmin;
      const Dtype prior_height = p_ymax - p_ymin;
      // x-coordinates scale with the prior width, y-coordinates with height.
      Dtype p_size;
      if (i == 0 || i == 2) {
        p_size = prior_width;
      } else {
        p_size = prior_height;
      }
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to add the offset
        // predictions.
        bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        bbox_data[index] =
            prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
      }
    } else {
      // Unknown code type.
    }
    if (clip_bbox) {
      // Clamp the decoded coordinate into [0, 1].
      bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
    }
  }
}
// Variant of DecodeBBoxesKernel for oriented boxes: each prediction carries
// 6 values [xmin, ymin, xmax, ymax, cos, sin] while priors still carry 4
// corner values plus 4 variances.  One thread per output value.
// NOTE(review): in the CORNER / CORNER_SIZE branches the value index i runs
// up to 5 but priors only have 4 entries, so prior_data[pi + i] reads into
// the next prior's data for i >= 4 -- presumably only CENTER_SIZE is used
// with this kernel; confirm against callers.
// NOTE(review): when clip_bbox is set, the angle components (i == 4, 5) are
// clamped to [0, 1] too, which would corrupt negative cos/sin values --
// verify callers pass clip_bbox == false here.
template <typename Dtype>
__global__ void DecodeBBoxesOriKernel(const int nthreads,
    const Dtype* loc_data, const Dtype* prior_data,
    const CodeType code_type, const bool variance_encoded_in_target,
    const int num_priors, const bool share_location,
    const int num_loc_classes, const int background_label_id,
    const bool clip_bbox, Dtype* bbox_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // index decomposes as [n][d: prior][c: loc class][i: value 0..5].
    const int i = index % 6;
    const int c = (index / 6) % num_loc_classes;
    const int d = (index / 6 / num_loc_classes) % num_priors;
    if (!share_location && c == background_label_id) {
      // Ignore background class if not share_location.
      return;
    }
    const int pi = d * 4;                // start of this prior's corners
    const int vi = pi + num_priors * 4;  // start of this prior's variances
    if (code_type == PriorBoxParameter_CodeType_CORNER) {
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to add the offset
        // predictions.
        bbox_data[index] = prior_data[pi + i] + loc_data[index];
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        bbox_data[index] =
            prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
      }
    } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
      const Dtype p_xmin = prior_data[pi];
      const Dtype p_ymin = prior_data[pi + 1];
      const Dtype p_xmax = prior_data[pi + 2];
      const Dtype p_ymax = prior_data[pi + 3];
      const Dtype prior_width = p_xmax - p_xmin;
      const Dtype prior_height = p_ymax - p_ymin;
      const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
      const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
      // index - i is the first of this box's 6 predictions.
      const Dtype xmin = loc_data[index - i];
      const Dtype ymin = loc_data[index - i + 1];
      const Dtype xmax = loc_data[index - i + 2];
      const Dtype ymax = loc_data[index - i + 3];
      const Dtype angle_cos = loc_data[index - i + 4];
      const Dtype angle_sin = loc_data[index - i + 5];
      Dtype decode_bbox_center_x, decode_bbox_center_y;
      Dtype decode_bbox_width, decode_bbox_height;
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
        // predictions.
        decode_bbox_center_x = xmin * prior_width + prior_center_x;
        decode_bbox_center_y = ymin * prior_height + prior_center_y;
        decode_bbox_width = exp(xmax) * prior_width;
        decode_bbox_height = exp(ymax) * prior_height;
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        decode_bbox_center_x =
            prior_data[vi] * xmin * prior_width + prior_center_x;
        decode_bbox_center_y =
            prior_data[vi + 1] * ymin * prior_height + prior_center_y;
        decode_bbox_width =
            exp(prior_data[vi + 2] * xmax) * prior_width;
        decode_bbox_height =
            exp(prior_data[vi + 3] * ymax) * prior_height;
      }
      // Each thread emits only its own value; angles pass through unchanged.
      switch (i) {
        case 0:
          bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
          break;
        case 1:
          bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
          break;
        case 2:
          bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
          break;
        case 3:
          bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
          break;
        case 4:
          bbox_data[index] = angle_cos;
          break;
        case 5:
          bbox_data[index] = angle_sin;
          break;
      }
    } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
      const Dtype p_xmin = prior_data[pi];
      const Dtype p_ymin = prior_data[pi + 1];
      const Dtype p_xmax = prior_data[pi + 2];
      const Dtype p_ymax = prior_data[pi + 3];
      const Dtype prior_width = p_xmax - p_xmin;
      const Dtype prior_height = p_ymax - p_ymin;
      // x-coordinates scale with the prior width, y-coordinates with height.
      Dtype p_size;
      if (i == 0 || i == 2) {
        p_size = prior_width;
      } else {
        p_size = prior_height;
      }
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to add the offset
        // predictions.
        bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        bbox_data[index] =
            prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
      }
    } else {
      // Unknown code type.
    }
    if (clip_bbox) {
      // Clamp the decoded value into [0, 1] (see NOTE above about angles).
      bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
    }
  }
}
// Second-stage decode for cascade regression: same box arithmetic as
// DecodeBBoxesKernel, but the "priors" are the already-decoded boxes stored
// in bbox_data (written by a preceding DecodeBBoxesKernel launch), which are
// then overwritten in place with the refined boxes.  Variances still come
// from prior_data.  One thread per output coordinate.
// NOTE(review): threads of the same box read all 4 stage-1 coordinates from
// bbox_data and then write their own back to the same locations; this relies
// on the 4 sibling threads (same warp) issuing their reads before any write
// -- under independent thread scheduling (Volta+) that ordering is not
// guaranteed.  Verify on target hardware.
template <typename Dtype>
__global__ void CasRegDecodeBBoxesKernel(const int nthreads,
    const Dtype* loc_data, const Dtype* prior_data,
    const CodeType code_type, const bool variance_encoded_in_target,
    const int num_priors, const bool share_location,
    const int num_loc_classes, const int background_label_id,
    const bool clip_bbox, Dtype* bbox_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // index decomposes as [n][d: prior][c: loc class][i: coordinate 0..3].
    const int i = index % 4;
    const int c = (index / 4) % num_loc_classes;
    const int d = (index / 4 / num_loc_classes) % num_priors;
    if (!share_location && c == background_label_id) {
      // Ignore background class if not share_location.
      return;
    }
    const int pi = d * 4;                // unused in-branch except variances
    const int vi = pi + num_priors * 4;  // start of this prior's variances
    if (code_type == PriorBoxParameter_CodeType_CORNER) {
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to add the offset
        // predictions.
        bbox_data[index] = bbox_data[index] + loc_data[index];
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        bbox_data[index] =
            bbox_data[index] + loc_data[index] * prior_data[vi + i];
      }
    } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
      // Stage-1 box acts as the prior here.
      const Dtype p_xmin = bbox_data[index - i];
      const Dtype p_ymin = bbox_data[index - i + 1];
      const Dtype p_xmax = bbox_data[index - i + 2];
      const Dtype p_ymax = bbox_data[index - i + 3];
      const Dtype prior_width = p_xmax - p_xmin;
      const Dtype prior_height = p_ymax - p_ymin;
      const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
      const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
      const Dtype xmin = loc_data[index - i];
      const Dtype ymin = loc_data[index - i + 1];
      const Dtype xmax = loc_data[index - i + 2];
      const Dtype ymax = loc_data[index - i + 3];
      Dtype decode_bbox_center_x, decode_bbox_center_y;
      Dtype decode_bbox_width, decode_bbox_height;
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
        // predictions.
        decode_bbox_center_x = xmin * prior_width + prior_center_x;
        decode_bbox_center_y = ymin * prior_height + prior_center_y;
        decode_bbox_width = exp(xmax) * prior_width;
        decode_bbox_height = exp(ymax) * prior_height;
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        decode_bbox_center_x =
            prior_data[vi] * xmin * prior_width + prior_center_x;
        decode_bbox_center_y =
            prior_data[vi + 1] * ymin * prior_height + prior_center_y;
        decode_bbox_width =
            exp(prior_data[vi + 2] * xmax) * prior_width;
        decode_bbox_height =
            exp(prior_data[vi + 3] * ymax) * prior_height;
      }
      // Each thread overwrites only its own coordinate of the refined box.
      switch (i) {
        case 0:
          bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
          break;
        case 1:
          bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
          break;
        case 2:
          bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
          break;
        case 3:
          bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
          break;
      }
    } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
      // Stage-1 box acts as the prior here.
      const Dtype p_xmin = bbox_data[index - i];
      const Dtype p_ymin = bbox_data[index - i + 1];
      const Dtype p_xmax = bbox_data[index - i + 2];
      const Dtype p_ymax = bbox_data[index - i + 3];
      const Dtype prior_width = p_xmax - p_xmin;
      const Dtype prior_height = p_ymax - p_ymin;
      // x-coordinates scale with the prior width, y-coordinates with height.
      Dtype p_size;
      if (i == 0 || i == 2) {
        p_size = prior_width;
      } else {
        p_size = prior_height;
      }
      if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to add the offset
        // predictions.
        bbox_data[index] = bbox_data[index] + loc_data[index] * p_size;
      } else {
        // variance is encoded in bbox, we need to scale the offset accordingly.
        bbox_data[index] =
            bbox_data[index] + loc_data[index] * prior_data[vi + i] * p_size;
      }
    } else {
      // Unknown code type.
    }
    if (clip_bbox) {
      // Clamp the refined coordinate into [0, 1].
      bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
    }
  }
}
// Host wrapper: launches DecodeBBoxesKernel over every output coordinate
// (nthreads = num * num_priors * num_loc_classes * 4) and checks the launch.
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
    const Dtype* loc_data, const Dtype* prior_data,
    const CodeType code_type, const bool variance_encoded_in_target,
    const int num_priors, const bool share_location,
    const int num_loc_classes, const int background_label_id,
    const bool clip_bbox, Dtype* bbox_data) {
  const int grid_size = CAFFE_GET_BLOCKS(nthreads);
  // NOLINT_NEXT_LINE(whitespace/operators)
  DecodeBBoxesKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, loc_data, prior_data, code_type, variance_encoded_in_target,
      num_priors, share_location, num_loc_classes, background_label_id,
      clip_bbox, bbox_data);
  CUDA_POST_KERNEL_CHECK;
}
// Host wrapper: launches DecodeBBoxesOriKernel (6 values per box) over every
// output value and checks the launch.
template <typename Dtype>
void DecodeBBoxesOriGPU(const int nthreads,
    const Dtype* loc_data, const Dtype* prior_data,
    const CodeType code_type, const bool variance_encoded_in_target,
    const int num_priors, const bool share_location,
    const int num_loc_classes, const int background_label_id,
    const bool clip_bbox, Dtype* bbox_data) {
  const int grid_size = CAFFE_GET_BLOCKS(nthreads);
  // NOLINT_NEXT_LINE(whitespace/operators)
  DecodeBBoxesOriKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, loc_data, prior_data, code_type, variance_encoded_in_target,
      num_priors, share_location, num_loc_classes, background_label_id,
      clip_bbox, bbox_data);
  CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location ,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
template void DecodeBBoxesOriGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesOriGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location ,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
// Two-stage decode for cascade regression: first decode the ARM offsets
// (arm_loc_data) against the priors into bbox_data, then decode the second
// stage's offsets (loc_data) against those boxes in place.
template <typename Dtype>
void CasRegDecodeBBoxesGPU(const int nthreads,
    const Dtype* loc_data, const Dtype* prior_data,
    const CodeType code_type, const bool variance_encoded_in_target,
    const int num_priors, const bool share_location,
    const int num_loc_classes, const int background_label_id,
    const bool clip_bbox, Dtype* bbox_data, const Dtype* arm_loc_data) {
  const int grid_size = CAFFE_GET_BLOCKS(nthreads);
  // Stage 1: priors + ARM offsets -> intermediate boxes in bbox_data.
  // NOLINT_NEXT_LINE(whitespace/operators)
  DecodeBBoxesKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, arm_loc_data, prior_data, code_type,
      variance_encoded_in_target, num_priors, share_location, num_loc_classes,
      background_label_id, clip_bbox, bbox_data);
  CUDA_POST_KERNEL_CHECK;
  // Stage 2: intermediate boxes + loc offsets -> final boxes, in place.
  // NOLINT_NEXT_LINE(whitespace/operators)
  CasRegDecodeBBoxesKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, loc_data, prior_data, code_type,
      variance_encoded_in_target, num_priors, share_location, num_loc_classes,
      background_label_id, clip_bbox, bbox_data);
  CUDA_POST_KERNEL_CHECK;
}
template void CasRegDecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data, const float* arm_loc_data);
template void CasRegDecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data, const double* arm_loc_data);
// Permutes predictions from layout [num][num_data][num_classes][num_dim] to
// [num][num_classes][num_data][num_dim].  One thread per element.
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
    const Dtype* data, const int num_classes, const int num_data,
    const int num_dim, Dtype* new_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat source index into [n][d][c][i].
    int rem = index;
    const int i = rem % num_dim;
    rem /= num_dim;
    const int c = rem % num_classes;
    rem /= num_classes;
    const int d = rem % num_data;
    const int n = rem / num_data;
    // Destination swaps the class and box axes: [n][c][d][i].
    new_data[((n * num_classes + c) * num_data + d) * num_dim + i] =
        data[index];
  }
}
// Host wrapper for PermuteDataKernel; nthreads must equal
// num * num_data * num_classes * num_dim.
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
    const Dtype* data, const int num_classes, const int num_data,
    const int num_dim, Dtype* new_data) {
  const int grid_size = CAFFE_GET_BLOCKS(nthreads);
  // NOLINT_NEXT_LINE(whitespace/operators)
  PermuteDataKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, data, num_classes, num_data, num_dim, new_data);
  CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
// Like PermuteDataKernel, but additionally filters by objectness: anchors
// whose ARM objectness (second of two channels in arm_data) falls below
// objectness_score are forced to background (class 0 probability 1, rest 0).
template <typename Dtype>
__global__ void OSPermuteDataKernel(const int nthreads,
    const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data,
    const int num_dim, Dtype* new_data, float objectness_score) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat source index into [n][d][c][i].
    int rem = index;
    const int i = rem % num_dim;
    rem /= num_dim;
    const int c = rem % num_classes;
    rem /= num_classes;
    const int d = rem % num_data;
    const int n = rem / num_data;
    const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
    // Objectness of anchor d: channel 1 of the ARM's two-class scores.
    const int arm_index = ((n * num_data + d) * 2 + 1) * num_dim + i;
    if (arm_data[arm_index] < objectness_score) {
      // Filtered anchor: hard background assignment.
      new_data[new_index] = (c == 0) ? Dtype(1.0) : Dtype(0.0);
    } else {
      new_data[new_index] = data[index];
    }
  }
}
// Host wrapper for OSPermuteDataKernel (objectness-filtered permute).
template <typename Dtype>
void OSPermuteDataGPU(const int nthreads,
    const Dtype* data, const Dtype* arm_data, const int num_classes, const int num_data,
    const int num_dim, Dtype* new_data, float objectness_score) {
  const int grid_size = CAFFE_GET_BLOCKS(nthreads);
  // NOLINT_NEXT_LINE(whitespace/operators)
  OSPermuteDataKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, data, arm_data, num_classes, num_data, num_dim, new_data,
      objectness_score);
  CUDA_POST_KERNEL_CHECK;
}
template void OSPermuteDataGPU(const int nthreads,
const float* data, const float* arm_data, const int num_classes, const int num_data,
const int num_dim, float* new_data, float objectness_score);
template void OSPermuteDataGPU(const int nthreads,
const double* data, const double* arm_data, const int num_classes, const int num_data,
const int num_dim, double* new_data, float objectness_score);
// Channel-wise maximum: for each (n, s) position, scans all channels of
// `data` ([num, channels, spatial_dim]) and writes the max to out[n * spatial_dim + s].
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    Dtype best = -FLT_MAX;
    for (int c = 0; c < channels; ++c) {
      const Dtype v = data[(n * channels + c) * spatial_dim + s];
      best = max(v, best);
    }
    out[index] = best;
  }
}
// Subtracts the per-(n, s) channel maximum from every element:
// data[n][c][s] = channel_data[n][c][s] - channel_max[n][s].
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
    const int num, const int channels,
    const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
    Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    const int n = index / (channels * spatial_dim);
    const int s = index % spatial_dim;
    data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
  }
}
// Elementwise exponential; safe to call in place (out == data).
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(i, count) {
    out[i] = exp(data[i]);
  }
}
// Channel-wise sum: for each (n, s) position, accumulates all channels of
// `data` ([num, channels, spatial_dim]) into channel_sum[n * spatial_dim + s].
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    Dtype total = 0;
    for (int c = 0; c < channels; ++c) {
      total += data[(n * channels + c) * spatial_dim + s];
    }
    channel_sum[index] = total;
  }
}
// Divides every element by its position's channel sum:
// data[n][c][s] /= channel_sum[n][s].
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
    const int num, const int channels,
    const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
  CUDA_KERNEL_LOOP(index, count) {
    const int n = index / (channels * spatial_dim);
    const int s = index % spatial_dim;
    data[index] /= channel_sum[n * spatial_dim + s];
  }
}
// Channel-wise softmax over `data` ([outer_num, channels, inner_num]) into
// `prob`, using the standard max-subtraction trick for numerical stability.
// Fix: every kernel launch is now followed by CUDA_POST_KERNEL_CHECK, so
// launch/execution errors are reported here instead of surfacing at some
// unrelated later CUDA call -- matching the convention of the other launch
// wrappers in this file.
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
    const int channels, const int inner_num, Dtype* prob) {
  vector<int> shape(4, 1);
  shape[0] = outer_num;
  shape[1] = channels;
  shape[2] = inner_num;
  // Per-(n, s) scratch buffer: holds the channel max, then the channel sum.
  Blob<Dtype> scale(shape);
  Dtype* scale_data = scale.mutable_gpu_data();
  int count = outer_num * channels * inner_num;
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // compute max
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
      CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, data,
      scale_data);
  CUDA_POST_KERNEL_CHECK;
  // subtract
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
      CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
      data, scale_data, prob);
  CUDA_POST_KERNEL_CHECK;
  // exponentiate
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, prob, prob);
  CUDA_POST_KERNEL_CHECK;
  // sum after exp
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
      CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, prob,
      scale_data);
  CUDA_POST_KERNEL_CHECK;
  // divide
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
      CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
      scale_data, prob);
  CUDA_POST_KERNEL_CHECK;
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
// Fills a pairwise overlap matrix: overlapped_data index decomposes as
// [n][c][i][j] and is set true when IoU(box_i, box_j) exceeds the threshold.
// The diagonal (i == j) is never written.  Relies on the launch providing at
// least one thread per element (CAFFE_GET_BLOCKS), since `return` ends the
// whole thread.
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
    const Dtype* bbox_data, const int num_bboxes, const int num_classes,
    const Dtype overlap_threshold, bool* overlapped_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int j = index % num_bboxes;
    const int i = (index / num_bboxes) % num_bboxes;
    if (i == j) {
      // A bbox never overlaps itself.
      return;
    }
    const int c = (index / num_bboxes / num_bboxes) % num_classes;
    const int n = index / num_bboxes / num_bboxes / num_classes;
    const Dtype* box_i =
        bbox_data + ((n * num_bboxes + i) * num_classes + c) * 4;
    const Dtype* box_j =
        bbox_data + ((n * num_bboxes + j) * num_classes + c) * 4;
    if (JaccardOverlapGPU<Dtype>(box_i, box_j) > overlap_threshold) {
      overlapped_data[index] = true;
    }
  }
}
// Host wrapper for ComputeOverlappedKernel; nthreads must equal
// num * num_classes * num_bboxes * num_bboxes.
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
    const Dtype* bbox_data, const int num_bboxes, const int num_classes,
    const Dtype overlap_threshold, bool* overlapped_data) {
  const int grid_size = CAFFE_GET_BLOCKS(nthreads);
  // NOLINT_NEXT_LINE(whitespace/operators)
  ComputeOverlappedKernel<Dtype><<<grid_size, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, bbox_data, num_bboxes, num_classes, overlap_threshold,
      overlapped_data);
  CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
// Marks overlapped_data[i * num_idx + j] = true when the boxes selected by
// idx[i] and idx[j] (i != j) overlap by more than overlap_threshold.
// NOTE(review): entries are only ever set to true -- caller must zero the
// output buffer first.
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
    const Dtype* bbox_data, const Dtype overlap_threshold,
    const int* idx, const int num_idx, bool* overlapped_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int j = index % num_idx;
    const int i = (index / num_idx);
    if (i == j) {
      // Ignore same bbox. FIX: continue (not return) -- under the
      // grid-stride CUDA_KERNEL_LOOP a thread may own several indices and
      // must keep iterating after skipping the diagonal.
      continue;
    }
    // Compute overlap between i-th bbox and j-th bbox (4 coords per box).
    const int start_loc_i = idx[i] * 4;
    const int start_loc_j = idx[j] * 4;
    const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
        bbox_data + start_loc_j);
    if (overlap > overlap_threshold) {
      overlapped_data[index] = true;
    }
  }
}
// Host-side launcher for ComputeOverlappedByIdxKernel: one logical thread
// per (i, j) pair of selected boxes; nthreads == num_idx * num_idx.
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
    const Dtype* bbox_data, const Dtype overlap_threshold,
    const int* idx, const int num_idx, bool* overlapped_data) {
  const int num_blocks = CAFFE_GET_BLOCKS(nthreads);
  // NOLINT_NEXT_LINE(whitespace/operators)
  ComputeOverlappedByIdxKernel<Dtype><<<num_blocks, CAFFE_CUDA_NUM_THREADS>>>(
      nthreads, bbox_data, overlap_threshold, idx, num_idx, overlapped_data);
  CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
// Non-maximum suppression with GPU-assisted overlap computation.
// Pointer conventions (visible from the accesses below): conf_data is
// dereferenced on the host, so it must be a HOST pointer; bbox_data is only
// forwarded to ComputeOverlappedByIdxGPU, so it must be a DEVICE pointer.
// Selected (original) box indices are appended to *indices, best score first.
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
    const int num_bboxes, const float confidence_threshold,
    const int top_k, const float nms_threshold, vector<int>* indices) {
  // Keep part of detections whose scores are higher than confidence threshold.
  vector<int> idx;
  vector<Dtype> confidences;
  for (int i = 0; i < num_bboxes; ++i) {
    if (conf_data[i] > confidence_threshold) {
      idx.push_back(i);
      confidences.push_back(conf_data[i]);
    }
  }
  int num_remain = confidences.size();
  if (num_remain == 0) {
    return;  // nothing survived the confidence filter
  }
  // Sort detections based on score (descending); idx is permuted alongside.
  thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
      thrust::greater<Dtype>());
  // top_k < 0 means "keep all"; otherwise cap the candidate list.
  if (top_k > -1 && top_k < num_remain) {
    num_remain = top_k;
  }
  // Compute overlap between remaining detections.
  // Stage the surviving indices through a Blob so the GPU kernel can read
  // them; overlapped holds one bool per (i, j) pair.
  Blob<int> idx_blob(1, 1, 1, num_remain);
  int* idx_data = idx_blob.mutable_cpu_data();
  std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
  Blob<bool> overlapped(1, 1, num_remain, num_remain);
  const int total_bboxes = overlapped.count();
  bool* overlapped_data = overlapped.mutable_gpu_data();
  ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
      idx_blob.gpu_data(), num_remain, overlapped_data);
  // Do non-maximum suppression based on overlapped results (on the host;
  // cpu_data() syncs the overlap matrix back).
  const bool* overlapped_results = overlapped.cpu_data();
  vector<int> selected_indices;
  ApplyNMS(overlapped_results, num_remain, &selected_indices);
  // Put back the selected information: map positions in the sorted candidate
  // list back to original box indices.
  for (int i = 0; i < selected_indices.size(); ++i) {
    indices->push_back(idx[selected_indices[i]]);
  }
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
// Packs the selected detections into 7-value rows:
// [image_id, label, score, xmin, ymin, xmax, ymax].
// indices[index] selects which box/score to copy; one thread per detection.
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
    const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
    const int label, const int* indices, const bool clip_bbox,
    Dtype* detection_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int det_idx = indices[index];
    detection_data[index * 7] = image_id;
    detection_data[index * 7 + 1] = label;
    detection_data[index * 7 + 2] = conf_data[det_idx];
    if (clip_bbox) {
      // Clip the 4 coordinates while copying them into the output row.
      ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
    } else {
      // Copy the 4 coordinates verbatim.
      for (int i = 0; i < 4; ++i) {
        detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
      }
    }
  }
}
// Host-side wrapper for GetDetectionsKernel.
// Reshapes detection_blob to (1, 1, num_det, 7) and fills it on the GPU;
// returns without touching the blob when indices is empty.
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
    const int image_id, const int label, const vector<int>& indices,
    const bool clip_bbox, Blob<Dtype>* detection_blob) {
  // Store selected indices in array (staged through a Blob so the kernel
  // can read them from device memory).
  int num_det = indices.size();
  if (num_det == 0) {
    return;
  }
  Blob<int> idx_blob(1, 1, 1, num_det);
  int* idx_data = idx_blob.mutable_cpu_data();
  std::copy(indices.begin(), indices.end(), idx_data);
  // Prepare detection_blob: one 7-value row per detection.
  detection_blob->Reshape(1, 1, num_det, 7);
  Dtype* detection_data = detection_blob->mutable_gpu_data();
  // NOLINT_NEXT_LINE(whitespace/operators)
  GetDetectionsKernel<Dtype><<<CAFFE_GET_BLOCKS(num_det),
      CAFFE_CUDA_NUM_THREADS>>>(num_det, bbox_data, conf_data, image_id, label,
      idx_blob.gpu_data(), clip_bbox, detection_data);
  CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
// Per-(image, prior) confidence loss; one thread per prior.
// match_data holds the target label for each prior (stored as Dtype,
// truncated to int here). For SOFTMAX, conf_data is expected to already
// contain probabilities (the caller in this file runs SoftMaxGPU first) and
// the loss is -log(p[label]). For LOGISTIC, conf_data holds raw logits and
// the loss is a numerically-stable sigmoid cross-entropy summed over all
// classes with a one-hot target at `label`.
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
    const Dtype* conf_data, const int num_preds_per_class,
    const int num_classes, const ConfLossType loss_type,
    const Dtype* match_data, Dtype* conf_loss_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int label = match_data[index];
    int num = index / num_preds_per_class;   // image index
    int p = index % num_preds_per_class;     // prior index within the image
    int start_idx = (num * num_preds_per_class + p) * num_classes;
    Dtype loss = 0;
    if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
      // Compute softmax probability.
      // Clamp to FLT_MIN so log() never sees zero.
      Dtype prob = conf_data[start_idx + label];
      loss = -log(Max(prob, Dtype(FLT_MIN)));
    } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
      int target = 0;
      for (int c = 0; c < num_classes; ++c) {
        if (c == label) {
          target = 1;
        } else {
          target = 0;
        }
        Dtype input = conf_data[start_idx + c];
        // Stable form of -[t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))]:
        // the (input >= 0) terms avoid exp() overflow for large |input|.
        loss -= input * (target - (input >= 0)) -
            log(1 + exp(input - 2 * input * (input >= 0)));
      }
    }
    conf_loss_data[index] = loss;
  }
}
// Computes the confidence loss for every prior of every image on the GPU.
// Steps: (1) build a (num x num_preds_per_class) target-label map on the
// host from the match indices (background_label_id where unmatched);
// (2) for SOFTMAX loss, convert logits to probabilities with SoftMaxGPU;
// (3) evaluate the loss per prior in ComputeConfLossKernel; (4) copy the
// losses back into all_conf_loss, one vector<float> per image.
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
    const int num_preds_per_class, const int num_classes,
    const int background_label_id, const ConfLossType loss_type,
    const vector<map<int, vector<int> > >& all_match_indices,
    const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
    vector<vector<float> >* all_conf_loss) {
  CHECK_LT(background_label_id, num_classes);
  // Per-prior target labels, staged in a Blob so the kernel can read them.
  Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
  Dtype* match_data = match_blob.mutable_cpu_data();
  for (int i = 0; i < num; ++i) {
    const map<int, vector<int> >& match_indices = all_match_indices[i];
    for (int p = 0; p < num_preds_per_class; ++p) {
      // Get the label index; defaults to background when no gt box matched.
      int label = background_label_id;
      for (map<int, vector<int> >::const_iterator it =
           match_indices.begin(); it != match_indices.end(); ++it) {
        const vector<int>& match_index = it->second;
        CHECK_EQ(match_index.size(), num_preds_per_class);
        if (match_index[p] > -1) {
          CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
          const vector<NormalizedBBox>& gt_bboxes =
              all_gt_bboxes.find(i)->second;
          CHECK_LT(match_index[p], gt_bboxes.size());
          label = gt_bboxes[match_index[p]].label();
          CHECK_GE(label, 0);
          CHECK_NE(label, background_label_id);
          CHECK_LT(label, num_classes);
          // A prior can only be matched to one gt bbox.
          break;
        }
      }
      match_data[i * num_preds_per_class + p] = label;
    }
  }
  // Get probability data. For SOFTMAX the kernel consumes probabilities,
  // so run softmax over the class dimension first.
  const Dtype* conf_gpu_data = conf_blob.gpu_data();
  Blob<Dtype> prob_blob;
  prob_blob.ReshapeLike(conf_blob);
  if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
    Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
    SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
        prob_gpu_data);
    conf_gpu_data = prob_blob.gpu_data();
  }
  // Compute the loss: one thread per (image, prior).
  Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
  Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
  const int num_threads = num * num_preds_per_class;
  // NOTE(review): no CUDA_POST_KERNEL_CHECK after this launch, unlike the
  // other launchers in this file -- confirm whether that is intentional.
  // NOLINT_NEXT_LINE(whitespace/operators)
  ComputeConfLossKernel<Dtype><<<CAFFE_GET_BLOCKS(num_threads),
      CAFFE_CUDA_NUM_THREADS>>>(num_threads, conf_gpu_data, num_preds_per_class,
      num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
  // Save the loss (cpu_data() syncs the device results back to the host).
  all_conf_loss->clear();
  const Dtype* loss_data = conf_loss_blob.cpu_data();
  for (int i = 0; i < num; ++i) {
    vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
    all_conf_loss->push_back(conf_loss);
    loss_data += num_preds_per_class;
  }
}
// Explicit initialization.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
} // namespace caffe
|
47c974b17e830ad3c8621b7a8c658faf5cd94284.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
// Box filter kernel (HIP build): each thread computes one output pixel as
// the mean of a (2*kernelsizex+1) x (2*kernelsizey+1) RGB neighborhood,
// clamping reads to the image border. Expects a 2-D launch whose grid
// covers imagesizex x imagesizey; out-of-range threads do nothing.
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
	// map from blockIdx to pixel position
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	int dy, dx;
	unsigned int sumx, sumy, sumz;
	int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only!
	if (x < imagesizex && y < imagesizey) // If inside image
	{
		// Filter kernel (simple box filter): accumulate the R, G, B channels
		// separately, then divide by the window area.
		sumx=0;sumy=0;sumz=0;
		for(dy=-kernelsizey;dy<=kernelsizey;dy++)
			for(dx=-kernelsizex;dx<=kernelsizex;dx++)
			{
				// Use max and min to avoid branching! Clamps the sample
				// coordinates to the image, replicating edge pixels.
				int yy = min(max(y+dy, 0), imagesizey-1);
				int xx = min(max(x+dx, 0), imagesizex-1);
				sumx += image[((yy)*imagesizex+(xx))*3+0];
				sumy += image[((yy)*imagesizex+(xx))*3+1];
				sumz += image[((yy)*imagesizex+(xx))*3+2];
			}
		out[(y*imagesizex+x)*3+0] = sumx/divby;
		out[(y*imagesizex+x)*3+1] = sumy/divby;
		out[(y*imagesizex+x)*3+2] = sumz/divby;
	}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// main computation function
////////////////////////////////////////////////////////////////////////////////
// Runs the box filter on the GPU (HIP build) and leaves the result in the
// global 'pixels' buffer for Draw(). Reads globals: image, imagesizex,
// imagesizey. Allocates 'pixels' (never freed; lives for the program) and
// temporary device buffers dev_input/dev_bitmap (freed before returning).
void computeImages(int kernelsizex, int kernelsizey)
{
	if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
	{
		printf("Kernel size out of bounds!\n");
		return;
	}
	// 3 bytes per pixel (RGB, 1 byte per channel).
	pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
	hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
	hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice );
	hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
	// One block per pixel with a single thread each -- correct but poor
	// occupancy, as the original comment on the launch line admits.
	dim3 grid(imagesizex,imagesizey);
	int start = GetMicroseconds();
	hipLaunchKernelGGL(( filter), dim3(grid),dim3(1), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // Awful load balance
	hipDeviceSynchronize();
	int end = GetMicroseconds();
	printf("Time: %i us\n", end-start);
	// Check for errors!
	hipError_t err = hipGetLastError();
	if (err != hipSuccess)
		printf("Error: %s\n", hipGetErrorString(err));
	hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost );
	hipFree( dev_bitmap );
	hipFree( dev_input );
}
// Display images
// GLUT display callback: shows the original image and the filtered result
// together -- side by side for tall/square images, stacked for wide ones.
// Reads globals: image (original), pixels (filtered), imagesizex/imagesizey.
void Draw()
{
	// Dump the whole picture onto the screen.
	glClearColor( 0.0, 0.0, 0.0, 1.0 );
	glClear( GL_COLOR_BUFFER_BIT );
	if (imagesizey >= imagesizex)
	{	// Not wide - probably square. Original left, result right.
		glRasterPos2f(-1, -1);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
		glRasterPos2i(0, -1);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
	}
	else
	{	// Wide image! Original on top, result below.
		glRasterPos2f(-1, -1);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
		glRasterPos2i(-1, 0);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
	}
	glFlush();
}
// Main program, inits
// Program entry: loads a PPM image (argv[1] or the bundled maskros512.ppm),
// sizes the window to fit original + result, runs the GPU filter once with
// a fixed 5x5 box (kernel radius 2,2), then hands control to GLUT.
int main( int argc, char** argv)
{
	glutInit(&argc, argv);
	glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
	if (argc > 1)
		image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
	else
		image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey);
	// Double the window along the axis used to place the second copy
	// (matches the layout chosen in Draw()).
	if (imagesizey >= imagesizex)
		glutInitWindowSize( imagesizex*2, imagesizey );
	else
		glutInitWindowSize( imagesizex, imagesizey*2 );
	glutCreateWindow("Lab 5");
	glutDisplayFunc(Draw);
	ResetMilli();
	computeImages(2, 2);
	// You can save the result to a file like this:
	//	writeppm("out.ppm", imagesizey, imagesizex, pixels);
	glutMainLoop();
	return 0;
}
| 47c974b17e830ad3c8621b7a8c658faf5cd94284.cu | // Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
// Box filter kernel: every thread averages a
// (2*kernelsizex+1) x (2*kernelsizey+1) RGB window around its pixel,
// replicating edge pixels via coordinate clamping. Requires a 2-D launch
// covering imagesizex x imagesizey; threads outside the image exit early.
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
	// Pixel owned by this thread.
	const int px = blockIdx.x * blockDim.x + threadIdx.x;
	const int py = blockIdx.y * blockDim.y + threadIdx.y;
	if (px >= imagesizex || py >= imagesizey)
		return; // outside the image
	// Window area; correct for box filters only.
	const int divby = (2 * kernelsizex + 1) * (2 * kernelsizey + 1);
	unsigned int accR = 0, accG = 0, accB = 0;
	for (int ky = -kernelsizey; ky <= kernelsizey; ++ky)
	{
		// Clamp with min/max instead of branches (edge replication).
		const int yy = min(max(py + ky, 0), (int)imagesizey - 1);
		for (int kx = -kernelsizex; kx <= kernelsizex; ++kx)
		{
			const int xx = min(max(px + kx, 0), (int)imagesizex - 1);
			const unsigned char *src = image + (yy * imagesizex + xx) * 3;
			accR += src[0];
			accG += src[1];
			accB += src[2];
		}
	}
	unsigned char *dst = out + (py * imagesizex + px) * 3;
	dst[0] = accR / divby;
	dst[1] = accG / divby;
	dst[2] = accB / divby;
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// main computation function
////////////////////////////////////////////////////////////////////////////////
// Runs the box filter on the GPU and leaves the result in the global
// 'pixels' buffer for Draw(). Reads globals: image, imagesizex, imagesizey.
// Allocates 'pixels' (lives for the program) and temporary device buffers
// dev_input/dev_bitmap (freed before returning).
void computeImages(int kernelsizex, int kernelsizey)
{
	if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
	{
		printf("Kernel size out of bounds!\n");
		return;
	}
	// 3 bytes per pixel (RGB, 1 byte per channel).
	const size_t imageBytes = (size_t)imagesizex * imagesizey * 3;
	pixels = (unsigned char *) malloc(imageBytes);
	cudaMalloc( (void**)&dev_input, imageBytes);
	cudaMemcpy( dev_input, image, imageBytes, cudaMemcpyHostToDevice );
	cudaMalloc( (void**)&dev_bitmap, imageBytes);
	// FIX: 16x16 threads per block instead of the original one-thread blocks
	// ("awful load balance"). The kernel bounds-checks its pixel, so a
	// ceil-div grid produces identical output with far better occupancy.
	dim3 block(16, 16);
	dim3 grid((imagesizex + block.x - 1) / block.x,
	          (imagesizey + block.y - 1) / block.y);
	int start = GetMicroseconds();
	filter<<<grid, block>>>(dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey);
	// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	int end = GetMicroseconds();
	printf("Time: %i us\n", end-start);
	// Check for errors!
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
		printf("Error: %s\n", cudaGetErrorString(err));
	cudaMemcpy( pixels, dev_bitmap, imageBytes, cudaMemcpyDeviceToHost );
	cudaFree( dev_bitmap );
	cudaFree( dev_input );
}
// Display images
// GLUT display callback: shows the original image and the filtered result
// together -- side by side for tall/square images, stacked for wide ones.
// Reads globals: image (original), pixels (filtered), imagesizex/imagesizey.
void Draw()
{
	// Dump the whole picture onto the screen.
	glClearColor( 0.0, 0.0, 0.0, 1.0 );
	glClear( GL_COLOR_BUFFER_BIT );
	if (imagesizey >= imagesizex)
	{	// Not wide - probably square. Original left, result right.
		glRasterPos2f(-1, -1);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
		glRasterPos2i(0, -1);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
	}
	else
	{	// Wide image! Original on top, result below.
		glRasterPos2f(-1, -1);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
		glRasterPos2i(-1, 0);
		glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
	}
	glFlush();
}
// Main program, inits
// Program entry: loads a PPM image (argv[1] or the bundled maskros512.ppm),
// sizes the window to fit original + result, runs the GPU filter once with
// a fixed 5x5 box (kernel radius 2,2), then hands control to GLUT.
int main( int argc, char** argv)
{
	glutInit(&argc, argv);
	glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
	if (argc > 1)
		image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
	else
		image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey);
	// Double the window along the axis used to place the second copy
	// (matches the layout chosen in Draw()).
	if (imagesizey >= imagesizex)
		glutInitWindowSize( imagesizex*2, imagesizey );
	else
		glutInitWindowSize( imagesizex, imagesizey*2 );
	glutCreateWindow("Lab 5");
	glutDisplayFunc(Draw);
	ResetMilli();
	computeImages(2, 2);
	// You can save the result to a file like this:
	//	writeppm("out.ppm", imagesizey, imagesizex, pixels);
	glutMainLoop();
	return 0;
}
|
b61eb969f1d3658f4a64a5bf1a096d248ddfbdaf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kTranspose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks the kTranspose kernel (HIP build) over a sweep of matrix sizes
// and launch configurations, printing one line per run:
// [time_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
int main(int argc, char **argv) {
  hipSetDevice(0);
  if (argc < 2) { // avoid reading argv[1] when no size count was given
    fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: the buffers hold XSIZE*YSIZE floats, so the allocation must
      // be scaled by sizeof(float). The original allocated only one byte per
      // element, so the kernel read/wrote past the end of the allocation.
      float *a = NULL;
      hipMalloc(&a, (size_t)XSIZE * YSIZE * sizeof(float));
      float *dest = NULL;
      hipMalloc(&dest, (size_t)XSIZE * YSIZE * sizeof(float));
      int width = XSIZE;
      int height = YSIZE;
      // Round the launch area up to a multiple of the block size.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) iXSIZE++;
      while (iYSIZE % BLOCKY != 0) iYSIZE++;
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0); // force context creation before timing
      hipLaunchKernelGGL(kTranspose, gridBlock, threadBlock, 0, 0, a, dest, width, height);
      hipDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(kTranspose, gridBlock, threadBlock, 0, 0, a, dest, width, height);
      }
      // BUG FIX: launches are asynchronous -- drain the warm-ups before
      // starting the clock, and drain the timed launches before stopping it,
      // so the measured interval actually covers the kernel work.
      hipDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(kTranspose, gridBlock, threadBlock, 0, 0, a, dest, width, height);
      }
      hipDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: free the per-iteration buffers; the original leaked device
      // memory across every matrix/block combination.
      hipFree(a);
      hipFree(dest);
    }
  }
  return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kTranspose.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmarks the kTranspose kernel over a sweep of matrix sizes and launch
// configurations, printing one line per run:
// [time_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]
int main(int argc, char **argv) {
  cudaSetDevice(0);
  if (argc < 2) { // avoid reading argv[1] when no size count was given
    fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
    return 1;
  }
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      // BUG FIX: the buffers hold XSIZE*YSIZE floats, so the allocation must
      // be scaled by sizeof(float). The original allocated only one byte per
      // element, so the kernel read/wrote past the end of the allocation.
      float *a = NULL;
      cudaMalloc(&a, (size_t)XSIZE * YSIZE * sizeof(float));
      float *dest = NULL;
      cudaMalloc(&dest, (size_t)XSIZE * YSIZE * sizeof(float));
      int width = XSIZE;
      int height = YSIZE;
      // Round the launch area up to a multiple of the block size.
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) iXSIZE++;
      while (iYSIZE % BLOCKY != 0) iYSIZE++;
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0); // force context creation before timing
      kTranspose<<<gridBlock, threadBlock>>>(a, dest, width, height);
      cudaDeviceSynchronize();
      // Warm-up launches.
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        kTranspose<<<gridBlock, threadBlock>>>(a, dest, width, height);
      }
      // BUG FIX: launches are asynchronous -- drain the warm-ups before
      // starting the clock, and drain the timed launches before stopping it,
      // so the measured interval actually covers the kernel work.
      cudaDeviceSynchronize();
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        kTranspose<<<gridBlock, threadBlock>>>(a, dest, width, height);
      }
      cudaDeviceSynchronize();
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
      // BUG FIX: free the per-iteration buffers; the original leaked device
      // memory across every matrix/block combination.
      cudaFree(a);
      cudaFree(dest);
    }
  }
  return 0;
}
da22684a0c6b9daf79b1722988960806f32f4c86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Created on Mon Feb 10 10:00:00 2014
Oren Freifeld
Email: freifeld@csail.mit.edu
*/
#ifndef DIM
#define DIM 2
#endif
#ifndef TESS_TYPE
#define TESS_TYPE 2
#endif
// Returns a when b >= a (or b is NaN), otherwise b rounded to the nearest
// integer -- i.e. min(a, round(b)) with the double operand rounded only
// when it is the smaller one.
__device__ inline int mymin(int a,double b){
    return (b < a) ? (int)round(b) : a;
}
// Applies a 2x3 affine matrix (row-major: [a00 a01 t0; a10 a11 t1]) to the
// 2-D point b, writing the transformed point into x.
__device__ inline void A_times_b_affine(double x[], double A[], double b[])
{
	// Result is computed inside x.
	x[0] = A[0]*b[0] + A[1]*b[1] + A[2];
	x[1] = A[3]*b[0] + A[4]*b[1] + A[5];
};
// Applies only the linear 2x2 part of the same 2x3 affine matrix layout
// (translation entries A[2], A[5] are ignored), writing the result into x.
__device__ inline void A_times_b_linear(double x[], double A[], double b[])
{
	// Result is computed inside x.
	x[0] = A[0]*b[0] + A[1]*b[1];
	x[1] = A[3]*b[0] + A[4]*b[1];
};
// Maps a 2-D point p (possibly outside the domain) to the index of its
// tessellation cell. TESS_TYPE == 2: a regular nC0 x nC1 row-major grid of
// rectangles, clamped at the borders. Any other TESS_TYPE: each rectangle
// is split into 4 triangles (0 top, 1 right, 2 bottom, 3 left -- see the
// diagram below) and out-of-domain points are assigned to the border
// triangle they face. nC2 is unused in 2-D.
__device__ inline int compute_cell_idx(double* p,
                              int nC0, int nC1, int nC2,
                              double inc_x,double inc_y)
{
    int cell_idx=0;

    if (TESS_TYPE == 2){
        // Rectangular tessellation: clamp each coordinate's cell number to
        // [0, nC-1] and combine row-major.
        cell_idx = round(min(double(nC0-1),max(0.0,(p[0] - fmod(p[0] , inc_x))/inc_x)))  +
                   round(min(double(nC1-1),max(0.0,(p[1] - fmod(p[1] , inc_y))/inc_y))) * nC0;
    }
    else
    {
        // Triangular tessellation. Clamp p into the open domain so the
        // fmod-based cell computation below is well defined.
        double p0 = min((nC0*inc_x-0.0000000001),max(0.0,p[0])) ;
        double p1 = min((nC1*inc_y-0.0000000001),max(0.0,p[1])) ;
        // BAD IDEA: This fails.
        //double p0 = min(((nC0-1)*inc_x),max(0.0,p[0])) ;
        //double p1 = min(((nC1-1)*inc_y),max(0.0,p[1])) ;

        double xmod = fmod(p0,inc_x);
        double ymod = fmod(p1,inc_y);

        // (x, y) in [0,1)^2: position of p within its rectangle, used below
        // to pick one of the 4 triangles.
        double x = xmod/inc_x ;
        double y = ymod/inc_y ;

        // We already took care of the case of negative values.
        // But for values that are too high we still need to check
        // since above we used nC0 and nC1, and not nC0-1 and nC1-1.
        //cell_idx = round(min(double(nC0-1),((p0 - xmod)/inc_x))) +
        //           round(min(double(nC1-1),((p1 - ymod)/inc_y))) * nC0;
        cell_idx = mymin(nC0-1,(p0 - xmod)/inc_x)  +
                   mymin(nC1-1,(p1 - ymod)/inc_y) * nC0;

        cell_idx *=4; // every rect consists of 4 triangles
        /*
        Recall the order of triangles is
            0
           3 1
            2
        */

        // Out of bounds (left): pick the triangle facing the point, using
        // the diagonal directions at the domain corners.
        if (p[0]<=0){
            if (p[1]<=0 && p[1]/inc_y<p[0]/inc_x){
                // Nothing to do here.
                //cell_idx += 0;
            }
            else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>-p[0]/inc_x){
                cell_idx += 2;
            }
            else{
                cell_idx += 3;
            }
            return cell_idx;
        }
        // Out of bounds (right): mirror image of the left-border logic.
        if (p[0]>=nC0*inc_x){
            if (p[1]<=0 && -p[1]/inc_y>p[0]/inc_x-nC0){
                // Nothing to do here.
                //cell_idx += 0;
            }
            else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>p[0]/inc_x-nC0){
                cell_idx += 2;
            }
            else{
                cell_idx += 1;
            }
            return cell_idx;
        }
        // Out of bounds (up): top triangle (offset 0).
        if (p[1]<=0){
            return cell_idx;
        }
        // Out of bounds (bottom): bottom triangle.
        if (p[1]>=nC1*inc_y){
            cell_idx+=2;
            return cell_idx;
        }
        // OK, we are inbound: classify against the two rectangle diagonals.
        if (x<y){
            if (1-x<y) {
                cell_idx+=2;
            }
            else {
                cell_idx+=3;
            }
        }
        else if (1-x<y) {
            cell_idx+=1;
        }
        /* This does nothing... I leave it for clarity
        else  {
            cell_idx+=0;
        }
        */
    }
    return cell_idx;
}
// True iff p lies inside the half-open box
// [bbs[0], bbs[1]) x [bbs[2], bbs[3]) (bbs holds min/max per dimension).
__device__ inline bool inBoundary(double *p, double *bbs)
{
    const bool insideX = bbs[0] <= p[0] && p[0] < bbs[1];
    const bool insideY = bbs[2] <= p[1] && p[1] < bbs[3];
    return insideX && insideY;
}
// Integrates dp/dt = A_{cell(p)} * (p, 1) for nStepsOdeSolver steps of size
// h with the explicit midpoint method (RK2), modifying p in place. As holds
// one row-major 2x3 affine matrix per cell; the matrix is looked up once
// per step, at the step's start point. nC2 is unused in 2-D.
__device__ void solveODE(double *p, double* As, const double h,
                         const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
                         const double inc_x, const double inc_y)
{
    //modifies p
    double v[DIM];
    double pMid[DIM];
    int cell_idx;
    for(int t=0; t<nStepsOdeSolver; ++t)
    {
        cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
        int mi = cell_idx*DIM*(DIM+1); // index of As
        // compute at the current location
        A_times_b_affine(v,As+mi,p);
        // compute mid point
        pMid[0] = p[0] + h*v[0]/2.;
        pMid[1] = p[1] + h*v[1]/2.;
        // compute velocity at mid point (reuses v; the start velocity is no
        // longer needed)
        A_times_b_affine(v,As+mi,pMid);
        // update p with the midpoint velocity
        p[0] += v[0]*h;
        p[1] += v[1]*h;
    }
}
// Jointly integrates the trajectory p(t) (midpoint method, as in solveODE)
// and its sensitivities q_j = dT/dtheta_j for all d basis matrices Bs.
// Each sensitivity follows dq/dt = B_j * (p, 1) + A * q, with A, B_j taken
// from the cell of the current p. Results: p is updated in place; the
// final q_j for this point are written into grad_per_point, laid out as
// (nPts, DIM, d) -> grad_per_point[idx*DIM*d + i*d + j].
__device__ void solveODE2(double *p, double* As, double* Bs,
                          double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
                          int idx,
                          int d,
                          int nPts,
                          const double h,
                          const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
                          const double inc_x, const double inc_y)
{
    //modifies p
    double v[DIM];
    double pMid[DIM];
    double vMid[DIM];
    double q[DIM];
    double qMid[DIM];
    double u[DIM];     // dq/dt at the step start
    double uMid[DIM];  // dq/dt at the midpoint
    double B_times_T[DIM];
    double A_times_dTdtheta[DIM];
    int cell_idx;

    int nEntries = DIM*(DIM+1);  // entries per affine matrix

    // set to zero: sensitivities start at dT/dtheta = 0
    for (int j=0; j<d; j++){
        #pragma unroll
        for(int i=0; i<DIM; ++i){
            // nPts,dim_range,d
            grad_per_point[idx*DIM*d + i * d + j] = 0;
        }
    }

    for(int t=0; t<nStepsOdeSolver; ++t)
    {
        cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
        int mi = cell_idx*nEntries; // index of As
        // compute at the current location
        A_times_b_affine(v,As+mi,p);
        // compute mid point
        #pragma unroll
        for(int i=0; i<DIM; ++i){
            pMid[i] = p[i] + h*v[i]/2.;
        }
        // compute velocity at mid point
        A_times_b_affine(vMid,As+mi,pMid);

        // Advance every sensitivity q_j with its own midpoint step, using
        // the SAME cell (hence the same A, B_j) as the trajectory step.
        for (int j=0; j<d; j++){
            int bi = j * nEntries*N_CELLS + mi ; // index of the Bs
            // copy q
            #pragma unroll
            for(int i=0; i<DIM; ++i){
                // nPts,dim_range,d
                q[i] = grad_per_point[idx*DIM*d + i * d + j];
            }
            // Step 1: Compute u using the old location
            // Find current RHS (term1 + term2)
            // Term1
            A_times_b_affine(B_times_T,Bs+ bi , p);
            // Term2
            A_times_b_linear(A_times_dTdtheta,As+mi , q);
            // Sum both terms
            #pragma unroll
            for(int i=0; i<DIM; ++i){
                u[i] = B_times_T[i] + A_times_dTdtheta[i] ;
            }
            // Step 2: Compute mid "point"
            #pragma unroll
            for(int i=0; i<DIM; ++i){
                qMid[i] = q[i] + h*u[i]/2.;
            }
            // Step 3: compute uMid
            // Term1
            A_times_b_affine(B_times_T,Bs+ bi , pMid);
            // Term2
            A_times_b_linear(A_times_dTdtheta,As+mi , qMid);
            // Sum both terms
            #pragma unroll
            for(int i=0; i<DIM; ++i){
                uMid[i] = B_times_T[i] + A_times_dTdtheta[i] ;
            }
            // update q
            #pragma unroll
            for(int i=0; i<DIM; ++i){
                q[i] += uMid[i]*h;
            }
            // write the advanced sensitivity back
            #pragma unroll
            for(int i=0; i<DIM; ++i){
                // nPts,dim_range,d
                grad_per_point[idx*DIM*d + i * d + j] = q[i];
            }
        }
        // update p (after the sensitivities, which need the pre-step p)
        p[0] += vMid[0]*h;
        p[1] += vMid[1]*h;
    }
}
// For each 2-D point in pts (layout: (nPts, DIM)), writes the index of the
// tessellation cell containing it into cell_idx. One thread per point;
// inc_z/nC2 are unused in 2-D.
__global__ void calc_cell_idx(double* pts,
                              int* cell_idx,
                              const int nPts,const int nC0, const int nC1, const int nC2,
                              double inc_x,double inc_y,double inc_z){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    // FIX: removed the __syncthreads() that used to sit here (its own
    // comment asked "do we still need the command below?"). This kernel
    // uses no shared memory, so the barrier was pure dead weight.
    if(idx >= nPts)
        return;
    double p[DIM];
    p[0] = pts[idx*DIM+0];
    p[1] = pts[idx*DIM+1];
    cell_idx[idx] = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
}
// Integrates each point of pos0 through the piecewise-affine flow for
// nTimeSteps coarse steps of size dt, writing the result to pos. Per coarse
// step it first tries the precomputed closed-form step Trels; only when
// that step would leave the current cell does it fall back to the RK2
// solver (solveODE) with nStepsOdeSolver sub-steps.
// NOTE(review): the staging loop below copies row `tid` for tid < N_CELLS
// only, so the shared arrays are fully populated only when
// blockDim.x >= N_CELLS -- confirm the launch configuration guarantees this.
__global__ void calc_T(const double* pos0,double* pos ,const double* Trels, const double* As,
                       const double dt, const int nTimeSteps, const int nStepsOdeSolver,
                       const int nPts , const int nC0, const int nC1, const int nC2,
                       const double inc_x,const double inc_y, const double inc_z)
{
    int tid = threadIdx.x;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    // Per-cell affine matrices cached in shared memory for the whole block.
    __shared__ double Trels_[N_CELLS*DIM*(DIM+1)];
    __shared__ double As_[N_CELLS*DIM*(DIM+1)];
    if(tid < N_CELLS)
    {
        // copy from GPU RAM into grid-cell shared memory
        #pragma unroll
        for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i){
            Trels_[i] = Trels[i];
            As_[i] = As[i];
        }
    }
    __syncthreads();
    if(idx < nPts)
    {
        double p[DIM];
        double pNew[DIM];
        #pragma unroll
        for(int i=0; i<DIM; ++i)
        {
            pos[idx*DIM+i]=pos0[idx*DIM+i]; // copy the initial location
            p[i] = pos[idx*DIM+i];
        }
        // Sub-step size used when falling back to the numeric solver.
        double h = dt/double(nStepsOdeSolver);
        int cell_idx=0;
        int cell_idx_new =0;
        for (int t=0; t<nTimeSteps; ++t)
        {
            cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
            // Candidate step via the cell's precomputed transform.
            A_times_b_affine(pNew,Trels_ + cell_idx*DIM*(DIM+1),p);
            cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
            if (cell_idx_new == cell_idx){
                // great, we didn't leave the cell
                #pragma unroll
                for(int i=0; i<DIM; ++i){
                    p[i] = pNew[i];
                }
            }
            else{
                // compute using ODE solver (p is advanced in place)
                solveODE(p, As_, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
            }
        }
        pos[idx*DIM  ] = p[0];
        pos[idx*DIM+1] = p[1];
    }
}
// Like calc_T, but always integrates numerically: each point is advanced
// with solveODE for nStepsOdeSolver*nTimeSteps RK2 sub-steps of size
// dt/nStepsOdeSolver (no closed-form Trels shortcut).
// NOTE(review): as in calc_T, the shared-memory staging assumes
// blockDim.x >= N_CELLS -- confirm the launch configuration.
__global__ void calc_T_simple(const double* pos0,double* pos , const double* As,
                              const double dt, const int nTimeSteps, const int nStepsOdeSolver,
                              const int nPts , const int nC0, const int nC1, const int nC2,
                              const double inc_x,const double inc_y, const double inc_z)
{
    int tid = threadIdx.x;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    // Per-cell affine matrices cached in shared memory for the whole block.
    __shared__ double As_[N_CELLS*DIM*(DIM+1)];
    if(tid < N_CELLS)
    {
        // copy from GPU RAM into grid-cell shared memory
        #pragma unroll
        for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
            As_[i] = As[i];
    }
    __syncthreads();
    if(idx < nPts)
    {
        double p[DIM];
        #pragma unroll
        for(int i=0; i<DIM; ++i)
        {
            p[i]=pos0[idx*DIM+i]; // copy the initial location
        }
        double h = dt/double(nStepsOdeSolver);
        solveODE(p, As_, h, nStepsOdeSolver * nTimeSteps,
                 nC0,nC1,nC2,inc_x,inc_y);
        pos[idx*DIM  ] = p[0];
        pos[idx*DIM+1] = p[1];
    }
}
__global__ void calc_grad_theta(const double* pos0,double* pos ,
const double* As,
double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
const int d,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
// copy from GPU RAM into grid-cell shared memory
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE2(p, As_, Bs,
grad_per_point,
idx,
d,
nPts,
h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
#pragma unroll
for(int i=0; i<DIM; ++i)
pos[idx*DIM+i] = p[i];
}
}
__global__ void calc_trajectory(double* pos,
const double* Trels, const double* As, double dt, int nTimeSteps, int nStepsOdeSolver,
const int nPts,const int nC0,const int nC1,const int nC2,
const double inc_x, const double inc_y, const double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double Trels_[N_CELLS*DIM*(DIM+1)];
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
Trels_[i] = Trels[i];
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i]; // copy initial location
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
A_times_b_affine(pNew,Trels_ + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell. So we can use pNew.
p[0] = pNew[0];
p[1] = pNew[1];
}
else{// We stepped outside the cell. So discard pNew
// and compute using ODE solver instead.
solveODE(p, As_, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
pos[(idx+t*nPts)*DIM+0] = p[0];
pos[(idx+t*nPts)*DIM+1] = p[1];
}
}
}
__global__ void calc_v(double* pos, double* vel,
double* As, int nPts,int nC0,int nC1,int nC2,double inc_x,
double inc_y, double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
// copy from GPU RAM into grid-cell shared memory
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); i++)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
int cell_idx=0;
double p[DIM];
double v[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i];
v[i] = vel[idx*DIM+i];
}
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
A_times_b_affine(v,As_ + cell_idx*DIM*(DIM+1),p);
vel[idx*DIM ] = v[0];
vel[idx*DIM+1] = v[1];
}
}
| da22684a0c6b9daf79b1722988960806f32f4c86.cu | /*Created on Mon Feb 10 10:00:00 2014
Oren Freifeld
Email: freifeld@csail.mit.edu
*/
#ifndef DIM
#define DIM 2
#endif
#ifndef TESS_TYPE
#define TESS_TYPE 2
#endif
__device__ inline int mymin(int a,double b){
return !(b<a)?a:round(b);
}
__device__ inline void A_times_b_affine(double x[], double A[], double b[])
{
// Result is computed inside x.
x[0] = A[0]*b[0] + A[1]*b[1] + A[2];
x[1] = A[3]*b[0] + A[4]*b[1] + A[5];
};
__device__ inline void A_times_b_linear(double x[], double A[], double b[])
{
// Result is computed inside x.
x[0] = A[0]*b[0] + A[1]*b[1];
x[1] = A[3]*b[0] + A[4]*b[1];
};
__device__ inline int compute_cell_idx(double* p,
int nC0, int nC1, int nC2,
double inc_x,double inc_y)
{
int cell_idx=0;
if (TESS_TYPE == 2){
cell_idx = round(min(double(nC0-1),max(0.0,(p[0] - fmod(p[0] , inc_x))/inc_x))) +
round(min(double(nC1-1),max(0.0,(p[1] - fmod(p[1] , inc_y))/inc_y))) * nC0;
}
else
{
double p0 = min((nC0*inc_x-0.0000000001),max(0.0,p[0])) ;
double p1 = min((nC1*inc_y-0.0000000001),max(0.0,p[1])) ;
// BAD IDEA: This fails.
//double p0 = min(((nC0-1)*inc_x),max(0.0,p[0])) ;
//double p1 = min(((nC1-1)*inc_y),max(0.0,p[1])) ;
double xmod = fmod(p0,inc_x);
double ymod = fmod(p1,inc_y);
double x = xmod/inc_x ;
double y = ymod/inc_y ;
// We already took care of the case of negative values.
// But for values that are too high we still need to check
// since above we used nC0 and nC1, and not nC0-1 and nC1-1.
//cell_idx = round(min(double(nC0-1),((p0 - xmod)/inc_x))) +
// round(min(double(nC1-1),((p1 - ymod)/inc_y))) * nC0;
cell_idx = mymin(nC0-1,(p0 - xmod)/inc_x) +
mymin(nC1-1,(p1 - ymod)/inc_y) * nC0;
cell_idx *=4; // every rect consists of 4 triangles
/*
Recall the order of triangles is
0
3 1
2
*/
// Out of bounds (left)
if (p[0]<=0){
if (p[1]<=0 && p[1]/inc_y<p[0]/inc_x){
// Nothing to do here.
//cell_idx += 0;
}
else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>-p[0]/inc_x){
cell_idx += 2;
}
else{
cell_idx += 3;
}
return cell_idx;
}
// Out of bounds (right)
if (p[0]>=nC0*inc_x){
if (p[1]<=0 && -p[1]/inc_y>p[0]/inc_x-nC0){
// Nothing to do here.
//cell_idx += 0;
}
else if (p[1]>=nC1*inc_y && p[1]/inc_y-nC1>p[0]/inc_x-nC0){
cell_idx += 2;
}
else{
cell_idx += 1;
}
return cell_idx;
}
// Out of bounds (up)
if (p[1]<=0){
return cell_idx;
}
// Out of bounds (bottom)
if (p[1]>=nC1*inc_y){
cell_idx+=2;
return cell_idx;
}
// OK, we are inbound
if (x<y){
if (1-x<y) {
cell_idx+=2;
}
else {
cell_idx+=3;
}
}
else if (1-x<y) {
cell_idx+=1;
}
/* This does nothing... I leave it for clarity
else {
cell_idx+=0;
}
*/
}
return cell_idx;
};
__device__ inline bool inBoundary(double *p, double *bbs)
{
return (bbs[0*2] <= p[0] && p[0] < bbs[0*2+1]) &&
(bbs[1*2] <= p[1] && p[1] < bbs[1*2+1]);
}
__device__ void solveODE(double *p, double* As, const double h,
const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
const double inc_x, const double inc_y)
{
//modifies p
double v[DIM];
double pMid[DIM];
int cell_idx;
for(int t=0; t<nStepsOdeSolver; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
int mi = cell_idx*DIM*(DIM+1); // index of As
// compute at the current location
A_times_b_affine(v,As+mi,p);
// compute mid point
pMid[0] = p[0] + h*v[0]/2.;
pMid[1] = p[1] + h*v[1]/2.;
// compute velocity at mid point
A_times_b_affine(v,As+mi,pMid);
// update p
p[0] += v[0]*h;
p[1] += v[1]*h;
}
}
__device__ void solveODE2(double *p, double* As, double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
int idx,
int d,
int nPts,
const double h,
const int nStepsOdeSolver, const int nC0, const int nC1, const int nC2,
const double inc_x, const double inc_y)
{
//modifies p
double v[DIM];
double pMid[DIM];
double vMid[DIM];
double q[DIM];
double qMid[DIM];
double u[DIM];
double uMid[DIM];
double B_times_T[DIM];
double A_times_dTdtheta[DIM];
int cell_idx;
int nEntries = DIM*(DIM+1);
// set to zero
for (int j=0; j<d; j++){
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
grad_per_point[idx*DIM*d + i * d + j] = 0;
}
}
for(int t=0; t<nStepsOdeSolver; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
int mi = cell_idx*nEntries; // index of As
// compute at the current location
A_times_b_affine(v,As+mi,p);
// compute mid point
#pragma unroll
for(int i=0; i<DIM; ++i){
pMid[i] = p[i] + h*v[i]/2.;
}
// compute velocity at mid point
A_times_b_affine(vMid,As+mi,pMid);
for (int j=0; j<d; j++){
int bi = j * nEntries*N_CELLS + mi ; // index of the Bs
// copy q
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
q[i] = grad_per_point[idx*DIM*d + i * d + j];
}
// Step 1: Compute u using the old location
// Find current RHS (term1 + term2)
// Term1
A_times_b_affine(B_times_T,Bs+ bi , p);
// Term2
A_times_b_linear(A_times_dTdtheta,As+mi , q);
// Sum both terms
#pragma unroll
for(int i=0; i<DIM; ++i){
u[i] = B_times_T[i] + A_times_dTdtheta[i] ;
}
// Step 2: Compute mid "point"
#pragma unroll
for(int i=0; i<DIM; ++i){
qMid[i] = q[i] + h*u[i]/2.;
}
// Step 3: compute uMid
// Term1
A_times_b_affine(B_times_T,Bs+ bi , pMid);
// Term2
A_times_b_linear(A_times_dTdtheta,As+mi , qMid);
// Sum both terms
#pragma unroll
for(int i=0; i<DIM; ++i){
uMid[i] = B_times_T[i] + A_times_dTdtheta[i] ;
}
// update q
#pragma unroll
for(int i=0; i<DIM; ++i){
q[i] += uMid[i]*h;
}
//
#pragma unroll
for(int i=0; i<DIM; ++i){
// nPts,dim_range,d
grad_per_point[idx*DIM*d + i * d + j] = q[i];
}
}
// update p
p[0] += vMid[0]*h;
p[1] += vMid[1]*h;
}
}
__global__ void calc_cell_idx(double* pts,
int* cell_idx,
const int nPts,const int nC0, const int nC1, const int nC2,
double inc_x,double inc_y,double inc_z){
//int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
// Do we still need the command below?
__syncthreads();
if(idx >= nPts)
return;
double p[DIM];
p[0] = pts[idx*DIM+0];
p[1] = pts[idx*DIM+1];
cell_idx[idx] = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
}
__global__ void calc_T(const double* pos0,double* pos ,const double* Trels, const double* As,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double Trels_[N_CELLS*DIM*(DIM+1)];
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
// copy from GPU RAM into grid-cell shared memory
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i){
Trels_[i] = Trels[i];
As_[i] = As[i];
}
}
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
pos[idx*DIM+i]=pos0[idx*DIM+i]; // copy the initial location
p[i] = pos[idx*DIM+i];
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
A_times_b_affine(pNew,Trels_ + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pNew[i];
}
}
else{
// compute using ODE solver
solveODE(p, As_, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
}
pos[idx*DIM ] = p[0];
pos[idx*DIM+1] = p[1];
}
}
__global__ void calc_T_simple(const double* pos0,double* pos , const double* As,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
// copy from GPU RAM into grid-cell shared memory
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE(p, As_, h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
pos[idx*DIM ] = p[0];
pos[idx*DIM+1] = p[1];
}
}
__global__ void calc_grad_theta(const double* pos0,double* pos ,
const double* As,
double* Bs,
double* grad_per_point, // shape: (nPts,dim_range,d=len(BasMats)),
const int d,
const double dt, const int nTimeSteps, const int nStepsOdeSolver,
const int nPts , const int nC0, const int nC1, const int nC2,
const double inc_x,const double inc_y, const double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
// copy from GPU RAM into grid-cell shared memory
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
double p[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i)
{
p[i]=pos0[idx*DIM+i]; // copy the initial location
}
double h = dt/double(nStepsOdeSolver);
solveODE2(p, As_, Bs,
grad_per_point,
idx,
d,
nPts,
h, nStepsOdeSolver * nTimeSteps,
nC0,nC1,nC2,inc_x,inc_y);
#pragma unroll
for(int i=0; i<DIM; ++i)
pos[idx*DIM+i] = p[i];
}
}
__global__ void calc_trajectory(double* pos,
const double* Trels, const double* As, double dt, int nTimeSteps, int nStepsOdeSolver,
const int nPts,const int nC0,const int nC1,const int nC2,
const double inc_x, const double inc_y, const double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double Trels_[N_CELLS*DIM*(DIM+1)];
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
Trels_[i] = Trels[i];
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); ++i)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
double p[DIM];
double pNew[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i]; // copy initial location
}
double h = dt/double(nStepsOdeSolver);
int cell_idx=0;
int cell_idx_new =0;
for (int t=0; t<nTimeSteps; ++t)
{
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
A_times_b_affine(pNew,Trels_ + cell_idx*DIM*(DIM+1),p);
cell_idx_new = compute_cell_idx(pNew,nC0,nC1,nC2,inc_x,inc_y);
if (cell_idx_new == cell_idx){
// great, we didn't leave the cell. So we can use pNew.
p[0] = pNew[0];
p[1] = pNew[1];
}
else{// We stepped outside the cell. So discard pNew
// and compute using ODE solver instead.
solveODE(p, As_, h, nStepsOdeSolver,nC0,nC1,nC2,inc_x,inc_y);
}
pos[(idx+t*nPts)*DIM+0] = p[0];
pos[(idx+t*nPts)*DIM+1] = p[1];
}
}
}
__global__ void calc_v(double* pos, double* vel,
double* As, int nPts,int nC0,int nC1,int nC2,double inc_x,
double inc_y, double inc_z)
{
int tid = threadIdx.x;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double As_[N_CELLS*DIM*(DIM+1)];
if(tid < N_CELLS)
{
// copy from GPU RAM into grid-cell shared memory
#pragma unroll
for(int i=tid*DIM*(DIM+1); i<(tid+1)*DIM*(DIM+1); i++)
As_[i] = As[i];
}
__syncthreads();
if(idx < nPts)
{
int cell_idx=0;
double p[DIM];
double v[DIM];
#pragma unroll
for(int i=0; i<DIM; ++i){
p[i] = pos[idx*DIM+i];
v[i] = vel[idx*DIM+i];
}
cell_idx = compute_cell_idx(p,nC0,nC1,nC2,inc_x,inc_y);
A_times_b_affine(v,As_ + cell_idx*DIM*(DIM+1),p);
vel[idx*DIM ] = v[0];
vel[idx*DIM+1] = v[1];
}
}
|
36b1a41e539103c6fb9d7a2c8cb21d0e7326c315.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/NumericUtils.h>
#include <ATen/OpMathType.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/core/Scalar.h>
#include <c10/util/complex.h>
namespace at::native {
void bitwise_not_kernel_cuda(TensorIteratorBase& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
constexpr char exp_name[] = "exp_kernel";
void exp_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto exp_string = jiterator_stringify(
template <typename T>
T exp_kernel(T x) {
return ::exp(x);
}); // exp_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "exp_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/exp_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, exp_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "exp_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::exp(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, common_dtype, "exp_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp(a);
});
});
}
}
void expm1_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::BFloat16, ScalarType::Half,
iter.common_dtype(), "expm1_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
// We manually overload rsqrt because std::rsqrt does not work with complex types.
template<typename scalar_t>
C10_HOST_DEVICE static inline scalar_t rsqrt_wrapper(scalar_t v) {
return ::rsqrt(v);
}
template<typename T>
C10_HOST_DEVICE static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
const c10::complex<T> one = c10::complex<T>(1.0, 0);
// std::sqrt for c10::complex is overloaded in c10/util/complex_math.h
return one / ::sqrt(v);
}
constexpr char rsqrt_name[] = "rsqrt_kernel";
void rsqrt_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto rsqrt_string = jiterator_stringify(
template <typename T>
T rsqrt_kernel(T x) {
const T one = T{1};
return one / std::sqrt(x);
}); // rsqrt_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "rsqrt_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/rsqrt_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, rsqrt_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return rsqrt_wrapper(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::BFloat16, ScalarType::Half,
iter.common_dtype(), "rsqrt_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return rsqrt_wrapper(a);
});
});
}
}
constexpr char sqrt_name[] = "sqrt_kernel";
void sqrt_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sqrt_string = jiterator_stringify(
template <typename T>
T sqrt_kernel(T x) {
return std::sqrt(x);
}); // sqrt_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sqrt_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/sqrt_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sqrt_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sqrt(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, common_dtype, "sqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::sqrt(a);
});
});
}
}
void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() {
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void nan_to_num_kernel_cuda(
TensorIteratorBase& iter,
c10::optional<double> nan,
c10::optional<double> pos_inf,
c10::optional<double> neg_inf) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "nan_to_num_cuda", [&]() {
scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.));
scalar_t pos_inf_replacement = pos_inf.has_value()
? static_cast<scalar_t>(pos_inf.value())
: std::numeric_limits<scalar_t>::max();
scalar_t neg_inf_replacement = neg_inf.has_value()
? static_cast<scalar_t>(neg_inf.value())
: std::numeric_limits<scalar_t>::lowest();
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (
at::_isnan(a)
? nan_replacement
: (a == std::numeric_limits<scalar_t>::infinity()
? pos_inf_replacement
: (a == -std::numeric_limits<scalar_t>::infinity()
? neg_inf_replacement
: a)));
});
});
}
void frexp_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half,
// The iter.dtype() here is the dtype of mantissa output.
// It's a floating point type and must be the same as the input's dtype.
iter.dtype(),
"frexp_cuda", [&]() {
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> {
int32_t exponent;
scalar_t mantissa = ::frexp(a, &exponent);
return {mantissa, exponent};
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda);
REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda);
} // namespace at::native
| 36b1a41e539103c6fb9d7a2c8cb21d0e7326c315.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/NumericUtils.h>
#include <ATen/OpMathType.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/core/Scalar.h>
#include <c10/util/complex.h>
namespace at::native {
void bitwise_not_kernel_cuda(TensorIteratorBase& iter) {
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a) {
return !a;
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ~a;
});
});
}
}
constexpr char exp_name[] = "exp_kernel";
void exp_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto exp_string = jiterator_stringify(
template <typename T>
T exp_kernel(T x) {
return std::exp(x);
}); // exp_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "exp_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/exp_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, exp_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "exp_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return std::exp(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, common_dtype, "exp_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::exp(a);
});
});
}
}
void expm1_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::BFloat16, ScalarType::Half,
iter.common_dtype(), "expm1_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::expm1(a);
});
});
}
// We manually overload rsqrt because std::rsqrt does not work with complex types.
template<typename scalar_t>
C10_HOST_DEVICE static inline scalar_t rsqrt_wrapper(scalar_t v) {
return ::rsqrt(v);
}
template<typename T>
C10_HOST_DEVICE static inline c10::complex<T> rsqrt_wrapper(c10::complex<T> v) {
const c10::complex<T> one = c10::complex<T>(1.0, 0);
// std::sqrt for c10::complex is overloaded in c10/util/complex_math.h
return one / ::sqrt(v);
}
constexpr char rsqrt_name[] = "rsqrt_kernel";
void rsqrt_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto rsqrt_string = jiterator_stringify(
template <typename T>
T rsqrt_kernel(T x) {
const T one = T{1};
return one / std::sqrt(x);
}); // rsqrt_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "rsqrt_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/rsqrt_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, rsqrt_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "rsqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return rsqrt_wrapper(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::BFloat16, ScalarType::Half,
iter.common_dtype(), "rsqrt_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// In CUDA, ::rsqrt is overloaded for float and at::Half here is implicitly cast to float.
return rsqrt_wrapper(a);
});
});
}
}
constexpr char sqrt_name[] = "sqrt_kernel";
void sqrt_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sqrt_string = jiterator_stringify(
template <typename T>
T sqrt_kernel(T x) {
return std::sqrt(x);
}); // sqrt_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sqrt_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/sqrt_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sqrt_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sqrt(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, common_dtype, "sqrt_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return std::sqrt(a);
});
});
}
}
void clamp_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value, const Scalar& max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_cuda(TensorIteratorBase& iter, const Scalar& min_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_min_cuda", [&]() {
auto lower = min_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_cuda(TensorIteratorBase& iter, const Scalar& max_value) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "clamp_max_cuda", [&]() {
auto upper = max_value.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void nan_to_num_kernel_cuda(
TensorIteratorBase& iter,
c10::optional<double> nan,
c10::optional<double> pos_inf,
c10::optional<double> neg_inf) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "nan_to_num_cuda", [&]() {
scalar_t nan_replacement = static_cast<scalar_t>(nan.value_or(0.));
scalar_t pos_inf_replacement = pos_inf.has_value()
? static_cast<scalar_t>(pos_inf.value())
: std::numeric_limits<scalar_t>::max();
scalar_t neg_inf_replacement = neg_inf.has_value()
? static_cast<scalar_t>(neg_inf.value())
: std::numeric_limits<scalar_t>::lowest();
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (
at::_isnan(a)
? nan_replacement
: (a == std::numeric_limits<scalar_t>::infinity()
? pos_inf_replacement
: (a == -std::numeric_limits<scalar_t>::infinity()
? neg_inf_replacement
: a)));
});
});
}
void frexp_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half,
// The iter.dtype() here is the dtype of mantissa output.
// It's a floating point type and must be the same as the input's dtype.
iter.dtype(),
"frexp_cuda", [&]() {
gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t a) -> thrust::tuple<scalar_t, int32_t> {
int32_t exponent;
scalar_t mantissa = std::frexp(a, &exponent);
return {mantissa, exponent};
});
});
}
REGISTER_DISPATCH(bitwise_not_stub, &bitwise_not_kernel_cuda);
REGISTER_DISPATCH(exp_stub, &exp_kernel_cuda);
REGISTER_DISPATCH(expm1_stub, &expm1_kernel_cuda);
REGISTER_DISPATCH(rsqrt_stub, &rsqrt_kernel_cuda);
REGISTER_DISPATCH(sqrt_stub, &sqrt_kernel_cuda);
REGISTER_DISPATCH(nan_to_num_stub, &nan_to_num_kernel_cuda);
REGISTER_DISPATCH(frexp_stub, &frexp_kernel_cuda);
} // namespace at::native
|
42ef0b9c36e28a6c68fdfb478ca00275324a204f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "reference_calc.cpp"
#include "utils.h"
#include <iostream>
using namespace std;
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
// Two-bin histogram over a single bit: histVals[0] counts elements whose
// bitLocation bit is clear, histVals[1] counts elements whose bit is set.
// histVals must be zeroed before launch (done by the host driver).
__global__ void hist(unsigned int * inputVals, unsigned int * histVals, unsigned int bitLocation, int numElems)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < numElems) {
        // Bin 1 when the selected bit is set, bin 0 otherwise.
        const int bin = (inputVals[gid] & bitLocation) ? 1 : 0;
        atomicAdd(&histVals[bin], 1);
    }
}
// Builds a 0/1 predicate array for one radix pass:
//   predicate[i] = 1 iff (inputVals[i] & bitLocation) == compactVal.
// Pass compactVal == 0 to select clear bits, or the mask itself to select
// set bits.  Expected launch: 1-D grid with >= numElems threads.
__global__ void pred(unsigned int * inputVals, unsigned int * predicate, unsigned int bitLocation, int compactVal, int numElems)
{
    int index = threadIdx.x + blockDim.x*blockIdx.x;
    if(index >= numElems)
        return;
    // No shared memory is used, so no barrier is needed here.  The original
    // __syncthreads() was also undefined behavior: out-of-range threads
    // returned early, making the barrier divergent.
    predicate[index] = ((inputVals[index] & bitLocation) == compactVal) ? 1 : 0;
}
// In-place Hillis–Steele scan over one tile of `hist`, converted to an
// exclusive result by subtracting the original input value (`oldVals`
// holds the unscanned predicate array).  Launched as <<<1, numBins>>>
// once per tile index j; tile j covers indices [j*numBins, (j+1)*numBins).
// `lastVal` carries in the inclusive total of all previous tiles, chaining
// the per-tile scans into a scan of the whole array.
//
// NOTE(review): threads with index >= numElems return before the
// __syncthreads() calls below — a divergent barrier; confirm the launch
// configuration, or guard the body instead of returning early.
__global__ void exclusiveScan(unsigned int * hist, unsigned int * oldVals, int numBins, int j, int numElems) // Hillis Steele Scan
{
// Global index of this thread's element within tile j.
int index = threadIdx.x + j*numBins;
if(index >= numElems)
return;
for(int i=1;i<=numBins;i <<= 1)
{
// Partner element `i` positions to the left; only used when it lies
// inside this tile.
int otherVal = index-i;
unsigned int val = 0;
if(otherVal-j*numBins >= 0)
val=hist[otherVal];
__syncthreads();
// Read-then-barrier-then-write keeps this step race-free.
if(otherVal-j*numBins >= 0)
hist[index]+=val;
__syncthreads();
}
// Carry from previous tiles: last scanned entry of tile j-1 plus its
// original input value (i.e. the inclusive total of everything before j).
int lastVal=0;
if(j>0)
lastVal = hist[j*numBins-1]+oldVals[j*numBins-1];
__syncthreads();
int a = hist[index]+lastVal;
__syncthreads();
// Subtract this element's own input to make the inclusive scan exclusive.
hist[index] = a - oldVals[index];
}
// Scatters each (value, position) pair to its sorted slot for the current
// bit: zero-bit elements go to scannedPredicate0[i]; one-bit elements go
// after all zeros, to scannedPredicate1[i] + histVals[0] (histVals[0] is
// the zero-bit count from the histogram pass).
__global__ void move(unsigned int * inputVals, unsigned int * inputPos, unsigned int * outputVals, unsigned int * outputPos, unsigned int * predicate0, unsigned int * scannedPredicate0, unsigned int * predicate1, unsigned int * scannedPredicate1, unsigned int * histVals, int numElems)
{
    int index = threadIdx.x + blockDim.x*blockIdx.x;
    if(index >= numElems)
        return;
    // Offset of the first one-bit element = number of zero-bit elements.
    int HV = histVals[0];
    // Every thread writes a distinct output slot and no shared memory is
    // used, so no synchronization is required.  The original __syncthreads()
    // calls after the early return were undefined behavior (divergent
    // barrier) and served no purpose.
    if(predicate0[index] == 1) {
        outputVals[scannedPredicate0[index]] = inputVals[index];
        outputPos[scannedPredicate0[index]] = inputPos[index];
    }
    else if(predicate1[index] == 1) {
        outputVals[scannedPredicate1[index]+HV] = inputVals[index];
        outputPos[scannedPredicate1[index]+HV] = inputPos[index];
    }
}
/*void testScan()
{
int threads=1024;
int blocks = 4000/threads + 1;
int a[4000];
unsigned int *b;
unsigned int *c;
hipMalloc(&b, sizeof(int)*4000);
hipMalloc(&c, sizeof(int)*4000);
for(int i=0;i<4000;i++)
a[i]=1;
hipMemcpy(b,a,sizeof(int)*4000,hipMemcpyHostToDevice);
hipMemcpy(c,a,sizeof(int)*4000,hipMemcpyHostToDevice);
for(int i=0;i<blocks;i++)
hipLaunchKernelGGL(( exclusiveScan), dim3(1),dim3(threads), 0, 0, b,c,1024,i,4000);
hipMemcpy(a,b,sizeof(int)*4000,hipMemcpyDeviceToHost);
for(int i=0;i<4000;i++)
cout << a[i] << endl;
}*/
// Host driver for a 32-pass LSB radix sort over (value, position) pairs.
// Copies the caller's device buffers into private scratch buffers, then for
// each bit runs: histogram -> 0/1 predicates -> per-tile exclusive scans ->
// scatter, ping-ponging between the scratch input and output buffers.  The
// final sorted data is copied into outVals/outPos (device pointers).
//
// NOTE(review): the hipMalloc calls are unchecked, and on an early failure
// nothing is freed; the initial scratch copies could be avoided by
// ping-ponging the caller's buffers directly.
void testMove(unsigned int * inVals, unsigned int * inPos, unsigned int * outVals, unsigned int * outPos, int numElems)
{
//int numElems = 30000;
/*unsigned int h_a[numElems];
unsigned int h_b[numElems];
int a = numElems*200;
for(int i=0;i<numElems;i++)
{
h_a[i] = a;
h_b[i] = a;
a--;
}*/
// Size of one device buffer in bytes.
int aSize = sizeof(unsigned int)*numElems;
unsigned int * d_inputVals;
unsigned int * d_inputPos;
unsigned int * d_outputVals;
unsigned int * d_outputPos;
hipMalloc(&d_inputVals, aSize);
hipMalloc(&d_inputPos, aSize);
hipMalloc(&d_outputVals, aSize);
hipMalloc(&d_outputPos, aSize);
checkCudaErrors(hipMemcpy(d_inputVals, inVals, aSize, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_inputPos, inPos, aSize, hipMemcpyDeviceToDevice));
// Scratch: 2-bin histogram, predicate arrays and their scanned copies.
unsigned int * histVals;
unsigned int * predicate0;
unsigned int * scannedPredicate0;
unsigned int * predicate1;
unsigned int * scannedPredicate1;
hipMalloc(&histVals, sizeof(unsigned int)*2);
hipMalloc(&predicate0, aSize);
hipMalloc(&scannedPredicate0, aSize);
hipMalloc(&predicate1, aSize);
hipMalloc(&scannedPredicate1, aSize);
int threads = 1024;
int blocks = numElems/threads + 1;
// One pass per bit of the 32-bit keys, least significant first.
for(int i=0;i<32;i++)
{
unsigned int bitLoc = 1 << i;
// 1) Histogram of number of occurences of each bit
checkCudaErrors(hipMemset(histVals, 0, 2*sizeof(unsigned int)));
hipLaunchKernelGGL(( hist), dim3(blocks),dim3(threads), 0, 0, d_inputVals, histVals, bitLoc, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Using compaction to compute a predicate array for 0's
hipLaunchKernelGGL(( pred), dim3(blocks),dim3(threads), 0, 0, d_inputVals, predicate0, bitLoc, 0, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(scannedPredicate0, predicate0, aSize, hipMemcpyDeviceToDevice));
// Use compaction to compute predicate array for 1's
hipLaunchKernelGGL(( pred), dim3(blocks),dim3(threads), 0, 0, d_inputVals, predicate1, bitLoc, bitLoc, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(scannedPredicate1, predicate1,aSize, hipMemcpyDeviceToDevice));
// Tiles are scanned sequentially so each carries the previous totals.
for(int j=0;j<blocks;j++) {
// Exclusive scan on predicate array to get index values of 0's
hipLaunchKernelGGL(( exclusiveScan), dim3(1),dim3(threads), 0, 0, scannedPredicate0, predicate0, threads, j, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Exclusive scan on predicate array to get index values of 1's
hipLaunchKernelGGL(( exclusiveScan), dim3(1),dim3(threads), 0, 0, scannedPredicate1, predicate1, threads, j, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
// Move input data into output data based on index specified in predicate array
hipLaunchKernelGGL(( move), dim3(blocks),dim3(threads), 0, 0, d_inputVals, d_inputPos, d_outputVals, d_outputPos, predicate0, scannedPredicate0, predicate1, scannedPredicate1, histVals, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Copy output values into input values to update the sorted list
checkCudaErrors(hipMemcpy(d_inputVals, d_outputVals, aSize, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_inputPos, d_outputPos, aSize, hipMemcpyDeviceToDevice));
}
/* unsigned int pv[numElems];
hipMemcpy(pv, d_outputVals, aSize, hipMemcpyDeviceToHost);
for(int i=0;i<numElems;i++)
cout << pv[i] << endl;*/
// Publish the sorted result to the caller's output buffers.
checkCudaErrors(hipMemcpy(outVals, d_outputVals, aSize, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(outPos, d_outputPos, aSize, hipMemcpyDeviceToDevice));
hipFree(d_inputVals);
hipFree(d_inputPos);
hipFree(d_outputVals);
hipFree(d_outputPos);
hipFree(histVals);
hipFree(scannedPredicate0);
hipFree(predicate0);
hipFree(scannedPredicate1);
hipFree(predicate1);
}
// Assignment entry point: delegates to testMove(), which performs the full
// 32-pass LSB radix sort and leaves the sorted result in the output buffers.
void your_sort(unsigned int* const d_inputVals,
               unsigned int* const d_inputPos,
               unsigned int* const d_outputVals,
               unsigned int* const d_outputPos,
               const size_t numElems)
{
    // Debug hook for exercising the scan kernel alone:
    //testScan();
    testMove(d_inputVals, d_inputPos, d_outputVals, d_outputPos, numElems);
}
| 42ef0b9c36e28a6c68fdfb478ca00275324a204f.cu | //Udacity HW 4
//Radix Sorting
#include "reference_calc.cpp"
#include "utils.h"
#include <iostream>
using namespace std;
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
// Two-bin histogram over a single bit: histVals[0] counts elements whose
// bitLocation bit is clear, histVals[1] counts elements whose bit is set.
// histVals must be zeroed before launch (done by the host driver).
__global__ void hist(unsigned int * inputVals, unsigned int * histVals, unsigned int bitLocation, int numElems)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < numElems) {
        // Bin 1 when the selected bit is set, bin 0 otherwise.
        const int bin = (inputVals[gid] & bitLocation) ? 1 : 0;
        atomicAdd(&histVals[bin], 1);
    }
}
// Builds a 0/1 predicate array for one radix pass:
//   predicate[i] = 1 iff (inputVals[i] & bitLocation) == compactVal.
// Pass compactVal == 0 to select clear bits, or the mask itself to select
// set bits.  Expected launch: 1-D grid with >= numElems threads.
__global__ void pred(unsigned int * inputVals, unsigned int * predicate, unsigned int bitLocation, int compactVal, int numElems)
{
    int index = threadIdx.x + blockDim.x*blockIdx.x;
    if(index >= numElems)
        return;
    // No shared memory is used, so no barrier is needed here.  The original
    // __syncthreads() was also undefined behavior: out-of-range threads
    // returned early, making the barrier divergent.
    predicate[index] = ((inputVals[index] & bitLocation) == compactVal) ? 1 : 0;
}
// In-place Hillis–Steele scan over one tile of `hist`, converted to an
// exclusive result by subtracting the original input value (`oldVals`
// holds the unscanned predicate array).  Launched as <<<1, numBins>>>
// once per tile index j; tile j covers indices [j*numBins, (j+1)*numBins).
// `lastVal` carries in the inclusive total of all previous tiles, chaining
// the per-tile scans into a scan of the whole array.
//
// NOTE(review): threads with index >= numElems return before the
// __syncthreads() calls below — a divergent barrier; confirm the launch
// configuration, or guard the body instead of returning early.
__global__ void exclusiveScan(unsigned int * hist, unsigned int * oldVals, int numBins, int j, int numElems) // Hillis Steele Scan
{
// Global index of this thread's element within tile j.
int index = threadIdx.x + j*numBins;
if(index >= numElems)
return;
for(int i=1;i<=numBins;i <<= 1)
{
// Partner element `i` positions to the left; only used when it lies
// inside this tile.
int otherVal = index-i;
unsigned int val = 0;
if(otherVal-j*numBins >= 0)
val=hist[otherVal];
__syncthreads();
// Read-then-barrier-then-write keeps this step race-free.
if(otherVal-j*numBins >= 0)
hist[index]+=val;
__syncthreads();
}
// Carry from previous tiles: last scanned entry of tile j-1 plus its
// original input value (i.e. the inclusive total of everything before j).
int lastVal=0;
if(j>0)
lastVal = hist[j*numBins-1]+oldVals[j*numBins-1];
__syncthreads();
int a = hist[index]+lastVal;
__syncthreads();
// Subtract this element's own input to make the inclusive scan exclusive.
hist[index] = a - oldVals[index];
}
// Scatters each (value, position) pair to its sorted slot for the current
// bit: zero-bit elements go to scannedPredicate0[i]; one-bit elements go
// after all zeros, to scannedPredicate1[i] + histVals[0] (histVals[0] is
// the zero-bit count from the histogram pass).
__global__ void move(unsigned int * inputVals, unsigned int * inputPos, unsigned int * outputVals, unsigned int * outputPos, unsigned int * predicate0, unsigned int * scannedPredicate0, unsigned int * predicate1, unsigned int * scannedPredicate1, unsigned int * histVals, int numElems)
{
    int index = threadIdx.x + blockDim.x*blockIdx.x;
    if(index >= numElems)
        return;
    // Offset of the first one-bit element = number of zero-bit elements.
    int HV = histVals[0];
    // Every thread writes a distinct output slot and no shared memory is
    // used, so no synchronization is required.  The original __syncthreads()
    // calls after the early return were undefined behavior (divergent
    // barrier) and served no purpose.
    if(predicate0[index] == 1) {
        outputVals[scannedPredicate0[index]] = inputVals[index];
        outputPos[scannedPredicate0[index]] = inputPos[index];
    }
    else if(predicate1[index] == 1) {
        outputVals[scannedPredicate1[index]+HV] = inputVals[index];
        outputPos[scannedPredicate1[index]+HV] = inputPos[index];
    }
}
/*void testScan()
{
int threads=1024;
int blocks = 4000/threads + 1;
int a[4000];
unsigned int *b;
unsigned int *c;
cudaMalloc(&b, sizeof(int)*4000);
cudaMalloc(&c, sizeof(int)*4000);
for(int i=0;i<4000;i++)
a[i]=1;
cudaMemcpy(b,a,sizeof(int)*4000,cudaMemcpyHostToDevice);
cudaMemcpy(c,a,sizeof(int)*4000,cudaMemcpyHostToDevice);
for(int i=0;i<blocks;i++)
exclusiveScan<<<1,threads>>>(b,c,1024,i,4000);
cudaMemcpy(a,b,sizeof(int)*4000,cudaMemcpyDeviceToHost);
for(int i=0;i<4000;i++)
cout << a[i] << endl;
}*/
// Host driver for a 32-pass LSB radix sort over (value, position) pairs.
// Copies the caller's device buffers into private scratch buffers, then for
// each bit runs: histogram -> 0/1 predicates -> per-tile exclusive scans ->
// scatter, ping-ponging between the scratch input and output buffers.  The
// final sorted data is copied into outVals/outPos (device pointers).
//
// NOTE(review): the cudaMalloc calls are unchecked, and on an early failure
// nothing is freed; the initial scratch copies could be avoided by
// ping-ponging the caller's buffers directly.
void testMove(unsigned int * inVals, unsigned int * inPos, unsigned int * outVals, unsigned int * outPos, int numElems)
{
//int numElems = 30000;
/*unsigned int h_a[numElems];
unsigned int h_b[numElems];
int a = numElems*200;
for(int i=0;i<numElems;i++)
{
h_a[i] = a;
h_b[i] = a;
a--;
}*/
// Size of one device buffer in bytes.
int aSize = sizeof(unsigned int)*numElems;
unsigned int * d_inputVals;
unsigned int * d_inputPos;
unsigned int * d_outputVals;
unsigned int * d_outputPos;
cudaMalloc(&d_inputVals, aSize);
cudaMalloc(&d_inputPos, aSize);
cudaMalloc(&d_outputVals, aSize);
cudaMalloc(&d_outputPos, aSize);
checkCudaErrors(cudaMemcpy(d_inputVals, inVals, aSize, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_inputPos, inPos, aSize, cudaMemcpyDeviceToDevice));
// Scratch: 2-bin histogram, predicate arrays and their scanned copies.
unsigned int * histVals;
unsigned int * predicate0;
unsigned int * scannedPredicate0;
unsigned int * predicate1;
unsigned int * scannedPredicate1;
cudaMalloc(&histVals, sizeof(unsigned int)*2);
cudaMalloc(&predicate0, aSize);
cudaMalloc(&scannedPredicate0, aSize);
cudaMalloc(&predicate1, aSize);
cudaMalloc(&scannedPredicate1, aSize);
int threads = 1024;
int blocks = numElems/threads + 1;
// One pass per bit of the 32-bit keys, least significant first.
for(int i=0;i<32;i++)
{
unsigned int bitLoc = 1 << i;
// 1) Histogram of number of occurences of each bit
checkCudaErrors(cudaMemset(histVals, 0, 2*sizeof(unsigned int)));
hist<<<blocks,threads>>>(d_inputVals, histVals, bitLoc, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Using compaction to compute a predicate array for 0's
pred<<<blocks,threads>>>(d_inputVals, predicate0, bitLoc, 0, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(scannedPredicate0, predicate0, aSize, cudaMemcpyDeviceToDevice));
// Use compaction to compute predicate array for 1's
pred<<<blocks,threads>>>(d_inputVals, predicate1, bitLoc, bitLoc, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(scannedPredicate1, predicate1,aSize, cudaMemcpyDeviceToDevice));
// Tiles are scanned sequentially so each carries the previous totals.
for(int j=0;j<blocks;j++) {
// Exclusive scan on predicate array to get index values of 0's
exclusiveScan<<<1,threads>>>(scannedPredicate0, predicate0, threads, j, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Exclusive scan on predicate array to get index values of 1's
exclusiveScan<<<1,threads>>>(scannedPredicate1, predicate1, threads, j, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Move input data into output data based on index specified in predicate array
move<<<blocks,threads>>>(d_inputVals, d_inputPos, d_outputVals, d_outputPos, predicate0, scannedPredicate0, predicate1, scannedPredicate1, histVals, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Copy output values into input values to update the sorted list
checkCudaErrors(cudaMemcpy(d_inputVals, d_outputVals, aSize, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_inputPos, d_outputPos, aSize, cudaMemcpyDeviceToDevice));
}
/* unsigned int pv[numElems];
cudaMemcpy(pv, d_outputVals, aSize, cudaMemcpyDeviceToHost);
for(int i=0;i<numElems;i++)
cout << pv[i] << endl;*/
// Publish the sorted result to the caller's output buffers.
checkCudaErrors(cudaMemcpy(outVals, d_outputVals, aSize, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(outPos, d_outputPos, aSize, cudaMemcpyDeviceToDevice));
cudaFree(d_inputVals);
cudaFree(d_inputPos);
cudaFree(d_outputVals);
cudaFree(d_outputPos);
cudaFree(histVals);
cudaFree(scannedPredicate0);
cudaFree(predicate0);
cudaFree(scannedPredicate1);
cudaFree(predicate1);
}
// Assignment entry point: delegates to testMove(), which performs the full
// 32-pass LSB radix sort and leaves the sorted result in the output buffers.
void your_sort(unsigned int* const d_inputVals,
               unsigned int* const d_inputPos,
               unsigned int* const d_outputVals,
               unsigned int* const d_outputPos,
               const size_t numElems)
{
    // Debug hook for exercising the scan kernel alone:
    //testScan();
    testMove(d_inputVals, d_inputPos, d_outputVals, d_outputPos, numElems);
}
|
17d771384dcbb97cc7746877816aa07954d048e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Computes one entry of the Canberra distance matrix per block:
//   d[y * pitch_d + x] = sum_k |a_xk - b_yk| / |a_xk + b_yk|,
// skipping terms whose denominator is zero.  Grid is (n_a, n_b); threads
// stride over the k dimension and combine partials with a shared-memory
// tree reduction.  Parameter p is unused in this kernel.
__global__ void canberra_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p)
{
size_t x = blockIdx.x;
size_t y = blockIdx.y;
// If all element is to be computed
if(x < n_a && y < n_b) {
// One partial sum per thread; blockDim.x must equal NUM_THREADS.
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
float num = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
float den = abs(vg_a[x * pitch_a + offset] + vg_b[y * pitch_b + offset]);
if(den != 0.0) {
temp[threadIdx.x] += num / den;
}
}
// Sync with other threads
__syncthreads();
// Reduce
// NOTE(review): this tree reduction assumes blockDim.x is a power of two.
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride) {
temp[threadIdx.x] += temp[threadIdx.x + stride];
}
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
d[y * pitch_d + x] = temp[0];
}
}
} | 17d771384dcbb97cc7746877816aa07954d048e7.cu | #include "includes.h"
// Computes one entry of the Canberra distance matrix per block:
//   d[y * pitch_d + x] = sum_k |a_xk - b_yk| / |a_xk + b_yk|,
// skipping terms whose denominator is zero.  Grid is (n_a, n_b); threads
// stride over the k dimension and combine partials with a shared-memory
// tree reduction.  Parameter p is unused in this kernel.
__global__ void canberra_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p)
{
size_t x = blockIdx.x;
size_t y = blockIdx.y;
// If all element is to be computed
if(x < n_a && y < n_b) {
// One partial sum per thread; blockDim.x must equal NUM_THREADS.
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
float num = abs(vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset]);
float den = abs(vg_a[x * pitch_a + offset] + vg_b[y * pitch_b + offset]);
if(den != 0.0) {
temp[threadIdx.x] += num / den;
}
}
// Sync with other threads
__syncthreads();
// Reduce
// NOTE(review): this tree reduction assumes blockDim.x is a power of two.
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride) {
temp[threadIdx.x] += temp[threadIdx.x + stride];
}
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
d[y * pitch_d + x] = temp[0];
}
}
} |
0693e82613d5a0ed396d2bd1ce5e5842d3c29d7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <thrust/sort.h>
// Two-bin histogram of bit `shift`: d_out[0] accumulates the number of
// elements whose bit is clear, d_out[1] those whose bit is set.
// d_out must be zeroed before launch (done by the host driver).
__global__ void histo_kernel(unsigned int * d_out, unsigned int* const d_in,
unsigned int shift, const unsigned int numElems)
{
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < numElems) {
        // The bit value (0 or 1) is used directly as the bin index.
        const int bin = (d_in[gid] >> shift) & 1u;
        atomicAdd(&d_out[bin], 1);
    }
}
// In-place exclusive prefix sum (Hillis–Steele scan) over the first
// numBins entries of d_in.  Must be launched as a single block with
// numBins threads and numBins * sizeof(unsigned int) dynamic shared memory.
//
// Fixes in this revision: the step loop tested an undeclared variable `d`
// (did not compile — intended `i < numBins; i <<= 1`); the in-place
// `s_data[tid] += s_data[tid - i]` raced with the partner thread's write
// (now read, barrier, write); the shared buffer was typed float although
// it holds unsigned counts; and the early `return` made every barrier
// divergent (now replaced by an `active` predicate).
__global__ void sumscan_kernel(unsigned int * d_in, const size_t numBins, const unsigned int numElems)
{
    extern __shared__ unsigned int s_data[];
    const unsigned int tid = threadIdx.x;
    const bool active = (tid < numBins && tid < numElems);
    if (active) {
        s_data[tid] = d_in[tid];
    }
    __syncthreads();
    // Hillis–Steele inclusive scan: read the partner value, barrier, then
    // write, so no step reads a value the partner is concurrently updating.
    for (unsigned int i = 1; i < numBins; i <<= 1) {
        unsigned int addend = 0;
        if (active && tid >= i) {
            addend = s_data[tid - i];
        }
        __syncthreads();
        if (active && tid >= i) {
            s_data[tid] += addend;
        }
        __syncthreads();
    }
    // Shift right by one element to turn the inclusive scan exclusive.
    if (active) {
        d_in[tid] = (tid == 0) ? 0 : s_data[tid - 1];
    }
}
// Writes the zero-bit predicate for bit `shift`:
//   d_scan[i] = 1 when bit `shift` of d_in[i] is clear, 0 when it is set.
// The host later exclusive-scans this array to obtain scatter ranks.
__global__ void makescan_kernel(unsigned int * d_in, unsigned int *d_scan,
unsigned int shift, const unsigned int numElems)
{
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= numElems) {
        return;
    }
    // Branchless form of the original if/else: invert the extracted bit.
    d_scan[gid] = ((d_in[gid] >> shift) & 1u) ? 0u : 1u;
}
// Scatters each (value, position) pair to its sorted slot for bit `shift`:
//   bit == 0 -> d_scaned[i]                    (rank among zero-bit elements)
//   bit == 1 -> i + d_histogram[1] - d_scaned[i]
//               (total zeros + ones before i; d_histogram holds the
//                exclusive-scanned bit histogram, so d_histogram[1] is the
//                number of zero-bit elements)
// d_scaned must be the exclusive scan of the zero-bit predicate array.
__global__ void move_kernel(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const unsigned int numElems,
unsigned int* const d_histogram,
unsigned int* const d_scaned,
unsigned int shift)
{
    // Hoisted global index (the original recomputed it on every use).
    const unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= numElems)
        return;
    unsigned int desid;
    // The original condition contained a stray ';' inside the parentheses
    // ("& 1 << shift;"), which did not compile.
    if ((d_inputVals[idx] & (1u << shift)) >> shift) {
        desid = idx + d_histogram[1] - d_scaned[idx];
    } else {
        desid = d_scaned[idx];
    }
    d_outputPos[desid] = d_inputPos[idx];
    d_outputVals[desid] = d_inputVals[idx];
}
// Host driver: 32-pass LSB radix sort over (value, position) pairs, one
// pass per bit.  Per pass: 2-bin histogram, exclusive scan of the
// histogram, zero-bit predicate + thrust exclusive scan for per-element
// ranks, then scatter; the output buffers are copied back to the input
// buffers so the next pass sorts the partially-sorted data.
//
// NOTE(review): kernel launches are not followed by a hipGetLastError()
// check, hipMalloc/hipMemcpy returns are unchecked, and m = 1024
// threads/block is assumed to be supported by the device.
void sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
// One bit per pass -> two histogram bins.
const int numBits = 1;
const int numBins = 1 << numBits;
const int m = 1 << 10;
int blocks = ceil((float)numElems / m);
printf("m %d blocks %d\n", m ,blocks);
unsigned int *d_binHistogram;
hipMalloc(&d_binHistogram, sizeof(unsigned int)* numBins);
// Per-element zero-bit predicate / rank buffer.
thrust::device_vector<unsigned int> d_scan(numElems);
for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i++) {
checkCudaErrors(hipMemset(d_binHistogram, 0, sizeof(unsigned int)* numBins));
histo_kernel << <blocks, m >> >(d_binHistogram, d_inputVals, i, numElems);
hipDeviceSynchronize();
sumscan_kernel << <1, numBins, sizeof(unsigned int)* numBins>> >(d_binHistogram, numBins, numElems);
makescan_kernel << <blocks, m >> >(d_inputVals, thrust::raw_pointer_cast(&d_scan[0]), i, numElems);
hipDeviceSynchronize();
// Rank of each zero-bit element = count of zero-bit elements before it.
thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin())
;
hipDeviceSynchronize();
move_kernel << <blocks, m >> >(d_inputVals, d_inputPos, d_outputVals, d_outputPos,
numElems, d_binHistogram, thrust::raw_pointer_cast(&d_scan[0]), i);
hipDeviceSynchronize();
// Feed this pass's output into the next pass.
hipMemcpy(d_inputVals, d_outputVals, numElems * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipMemcpy(d_inputPos, d_outputPos, numElems * sizeof(unsigned int), hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
}
checkCudaErrors(hipFree(d_binHistogram));
}
| 0693e82613d5a0ed396d2bd1ce5e5842d3c29d7b.cu |
#include "utils.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <thrust/sort.h>
// Two-bin histogram of bit `shift`: d_out[0] accumulates the number of
// elements whose bit is clear, d_out[1] those whose bit is set.
// d_out must be zeroed before launch (done by the host driver).
__global__ void histo_kernel(unsigned int * d_out, unsigned int* const d_in,
unsigned int shift, const unsigned int numElems)
{
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < numElems) {
        // The bit value (0 or 1) is used directly as the bin index.
        const int bin = (d_in[gid] >> shift) & 1u;
        atomicAdd(&d_out[bin], 1);
    }
}
// In-place exclusive prefix sum (Hillis–Steele scan) over the first
// numBins entries of d_in.  Must be launched as a single block with
// numBins threads and numBins * sizeof(unsigned int) dynamic shared memory.
//
// Fixes in this revision: the step loop tested an undeclared variable `d`
// (did not compile — intended `i < numBins; i <<= 1`); the in-place
// `s_data[tid] += s_data[tid - i]` raced with the partner thread's write
// (now read, barrier, write); the shared buffer was typed float although
// it holds unsigned counts; and the early `return` made every barrier
// divergent (now replaced by an `active` predicate).
__global__ void sumscan_kernel(unsigned int * d_in, const size_t numBins, const unsigned int numElems)
{
    extern __shared__ unsigned int s_data[];
    const unsigned int tid = threadIdx.x;
    const bool active = (tid < numBins && tid < numElems);
    if (active) {
        s_data[tid] = d_in[tid];
    }
    __syncthreads();
    // Hillis–Steele inclusive scan: read the partner value, barrier, then
    // write, so no step reads a value the partner is concurrently updating.
    for (unsigned int i = 1; i < numBins; i <<= 1) {
        unsigned int addend = 0;
        if (active && tid >= i) {
            addend = s_data[tid - i];
        }
        __syncthreads();
        if (active && tid >= i) {
            s_data[tid] += addend;
        }
        __syncthreads();
    }
    // Shift right by one element to turn the inclusive scan exclusive.
    if (active) {
        d_in[tid] = (tid == 0) ? 0 : s_data[tid - 1];
    }
}
// Writes the zero-bit predicate for bit `shift`:
//   d_scan[i] = 1 when bit `shift` of d_in[i] is clear, 0 when it is set.
// The host later exclusive-scans this array to obtain scatter ranks.
__global__ void makescan_kernel(unsigned int * d_in, unsigned int *d_scan,
unsigned int shift, const unsigned int numElems)
{
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= numElems) {
        return;
    }
    // Branchless form of the original if/else: invert the extracted bit.
    d_scan[gid] = ((d_in[gid] >> shift) & 1u) ? 0u : 1u;
}
// Scatters each (value, position) pair to its sorted slot for bit `shift`:
//   bit == 0 -> d_scaned[i]                    (rank among zero-bit elements)
//   bit == 1 -> i + d_histogram[1] - d_scaned[i]
//               (total zeros + ones before i; d_histogram holds the
//                exclusive-scanned bit histogram, so d_histogram[1] is the
//                number of zero-bit elements)
// d_scaned must be the exclusive scan of the zero-bit predicate array.
__global__ void move_kernel(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const unsigned int numElems,
unsigned int* const d_histogram,
unsigned int* const d_scaned,
unsigned int shift)
{
    // Hoisted global index (the original recomputed it on every use).
    const unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= numElems)
        return;
    unsigned int desid;
    // The original condition contained a stray ';' inside the parentheses
    // ("& 1 << shift;"), which did not compile.
    if ((d_inputVals[idx] & (1u << shift)) >> shift) {
        desid = idx + d_histogram[1] - d_scaned[idx];
    } else {
        desid = d_scaned[idx];
    }
    d_outputPos[desid] = d_inputPos[idx];
    d_outputVals[desid] = d_inputVals[idx];
}
// Host driver: 32-pass LSB radix sort over (value, position) pairs, one
// pass per bit.  Per pass: 2-bin histogram, exclusive scan of the
// histogram, zero-bit predicate + thrust exclusive scan for per-element
// ranks, then scatter; the output buffers are copied back to the input
// buffers so the next pass sorts the partially-sorted data.
//
// NOTE(review): kernel launches are not followed by a cudaGetLastError()
// check, cudaMalloc/cudaMemcpy returns are unchecked, and m = 1024
// threads/block is assumed to be supported by the device.
void sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
// One bit per pass -> two histogram bins.
const int numBits = 1;
const int numBins = 1 << numBits;
const int m = 1 << 10;
int blocks = ceil((float)numElems / m);
printf("m %d blocks %d\n", m ,blocks);
unsigned int *d_binHistogram;
cudaMalloc(&d_binHistogram, sizeof(unsigned int)* numBins);
// Per-element zero-bit predicate / rank buffer.
thrust::device_vector<unsigned int> d_scan(numElems);
for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i++) {
checkCudaErrors(cudaMemset(d_binHistogram, 0, sizeof(unsigned int)* numBins));
histo_kernel << <blocks, m >> >(d_binHistogram, d_inputVals, i, numElems);
cudaDeviceSynchronize();
sumscan_kernel << <1, numBins, sizeof(unsigned int)* numBins>> >(d_binHistogram, numBins, numElems);
makescan_kernel << <blocks, m >> >(d_inputVals, thrust::raw_pointer_cast(&d_scan[0]), i, numElems);
cudaDeviceSynchronize();
// Rank of each zero-bit element = count of zero-bit elements before it.
thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
cudaDeviceSynchronize();
move_kernel << <blocks, m >> >(d_inputVals, d_inputPos, d_outputVals, d_outputPos,
numElems, d_binHistogram, thrust::raw_pointer_cast(&d_scan[0]), i);
cudaDeviceSynchronize();
// Feed this pass's output into the next pass.
cudaMemcpy(d_inputVals, d_outputVals, numElems * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_inputPos, d_outputPos, numElems * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
}
checkCudaErrors(cudaFree(d_binHistogram));
}
|
825fdb51e8f5465cb21b91ba59d523de160b970e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* VolumeEvaluator.cu
*
* Created on: Dec 20, 2015
* Author: Karl Haubenwallner
*/
#include "combined_header.cuh"
#define EPSILON 0.0000001f
// One mesh instance: a contiguous range of triangles plus the index of its
// transform matrix in the matrices array.
struct Object {
// Index of this object's first triangle in the triangle array.
unsigned int triangle_offset;
// Number of consecutive triangles belonging to this object.
unsigned int num_triangles;
// Index of this object's transform in the matrices array.
unsigned int matrix_offset;
};
// A triangle given by its three vertices in object space.
struct Triangle {
math::float3 v0;
math::float3 v1;
math::float3 v2;
};
// Möller–Trumbore ray/triangle intersection test.
// Returns true when the ray O + t*D (t >= 0) hits triangle (v0, v1, v2);
// rays whose determinant is within EPSILON of zero (near-parallel to the
// triangle plane) are treated as misses.
__device__ bool ray_triangle_intersect(const math::float3& O, const math::float3& D, const math::float3& v0, const math::float3& v1, const math::float3& v2)
{
    const math::float3 edge1 = v1 - v0;
    const math::float3 edge2 = v2 - v0;
    const math::float3 pvec = cross(D, edge2);
    const float det = dot(edge1, pvec);
    // Near-zero determinant: no stable intersection.
    if (det > -EPSILON && det < EPSILON) {
        return false;
    }
    const math::float3 tvec = O - v0;
    const math::float3 qvec = cross(tvec, edge1);
    // (t, u, v) scaled by det; one division normalizes all three.
    math::float3 tuv(dot(edge2, qvec), dot(tvec, pvec), dot(D, qvec));
    tuv = tuv * 1.0f/det;
    // Miss when t < 0 or barycentrics (u, v) fall outside the triangle.
    bool miss = (tuv.y < 0.0f || tuv.y > 1.0f);
    miss = miss || (tuv.z < 0.0f || (tuv.y + tuv.z) > 1.0f);
    miss = miss || (tuv.x < 0.0f);
    return !miss;
}
#ifdef not_used
// Dead code: excluded from compilation by the not_used guard.
// Legacy ray-stabbing voxelizer: one thread per (x, y) column shoots a +z
// ray, marks every voxel containing a surface hit, then flood-fills the
// column in both directions with parity counting.
//
// NOTE(review): this calls ray_triangle_intersect with 6 arguments
// (including an `intersection` out-parameter) but the current function
// above takes only 5 — this block would not compile if re-enabled.
__global__ void d_calc_voxels_raystabbing( const Triangle* triangles,
const unsigned int num_triangles,
unsigned int* voxel_outline,
unsigned int* voxel_volume,
math::VoxelCoords *vc)
{
math::int3 voxel;
voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
voxel.z = 0;
// find all triangles that intersect with this voxel line
math::float3 origin = vc->toWorld(voxel);
math::float3 direction(0.0f, 0.0f, 1.0f);
__syncthreads();
for (int i = 0; i < num_triangles; ++i) {
math::float3 intersection;
if (ray_triangle_intersect(origin, direction, triangles[i].v0, triangles[i].v1, triangles[i].v2, intersection)) {
int index = vc->toIndex(vc->toVoxel(intersection));
voxel_outline[index] = 1;
}
}
__syncthreads();
// do first flood-fill
unsigned short val = 0;
for (voxel.z = 0; voxel.z < vc->voxelPerDimension().z; voxel.z++) {
int index = vc->toIndex(voxel);
val += voxel_outline[index];
voxel_volume[index] = clamp(val, 0, 1);
val = val % 2;
}
// second flood fill from the other direction, to prevent single-voxel-triangles from flood-filling to the end
val = 0;
for (voxel.z = vc->voxelPerDimension().z -1; voxel.z >= 0; voxel.z--) {
int index = vc->toIndex(voxel);
val += voxel_outline[index];
voxel_volume[index] += clamp(val, 0, 1);
val = val % 2;
}
}
#endif
// Voxelizes one object instance per block (blockIdx.x selects the instance
// matrix).  The block first computes the instance's voxel-space bounding
// box in shared memory, then every thread strides over the voxels inside
// that box and does a point-in-mesh parity test: a +y ray from the voxel
// center toggles `hits` on every triangle intersection, so an odd hit
// count means "inside".  Inside voxels increment voxel_volume atomically;
// voxels falling outside the voxel grid increment *outside_voxels.
//
// NOTE(review): the object-selection loop compares triangle_offset against
// blockIdx.x (a matrix/instance index) — presumably matrix_offset was
// intended; verify against the host-side layout of `objects`.
__global__ void d_calc_voxels_objects(
const Object* objects,
int num_objects,
Triangle* triangles,
math::float3x4* matrices,
unsigned int* voxel_volume,
int *outside_voxels,
math::VoxelCoords *vc)
{
// Local copy of the grid descriptor avoids repeated global reads.
math::VoxelCoords local_vc = *vc;
math::float3x4 instance = matrices[blockIdx.x];
// Pick the object whose triangle range this instance belongs to.
Object object = objects[0];
for (int i = 0; i < num_objects; ++i) {
if (objects[i].triangle_offset < blockIdx.x) {
object = objects[i];
}
}
// Voxel-space bounding box of the transformed object.
__shared__ math::int3 s_min;
__shared__ math::int3 s_max;
Triangle triangle = triangles[object.triangle_offset];
// apply the matrix to the triangle
triangle.v0 = instance * math::float4(triangle.v0, 1.0f);
triangle.v1 = instance * math::float4(triangle.v1, 1.0f);
triangle.v2 = instance * math::float4(triangle.v2, 1.0f);
// Seed the bounds with the first vertex before the atomic min/max pass.
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
s_min = local_vc.toVoxel(triangle.v0);
s_max = local_vc.toVoxel(triangle.v0);
}
__syncthreads();
// calc the bounding box of the object
if (threadIdx.y == 0) {
for (int offset = threadIdx.x; offset < object.num_triangles; offset += blockDim.x) {
int triangle_index = object.triangle_offset + offset;
Triangle t = triangles[triangle_index];
// apply the matrix to the triangle
t.v0 = instance * math::float4(t.v0, 1.0f);
t.v1 = instance * math::float4(t.v1, 1.0f);
t.v2 = instance * math::float4(t.v2, 1.0f);
math::int3 v0 = local_vc.toVoxel(t.v0);
math::int3 v1 = local_vc.toVoxel(t.v1);
math::int3 v2 = local_vc.toVoxel(t.v2);
atomicMin(&s_min.x, min(v0.x, min(v1.x, v2.x)));
atomicMin(&s_min.y, min(v0.y, min(v1.y, v2.y)));
atomicMin(&s_min.z, min(v0.z, min(v1.z, v2.z)));
atomicMax(&s_max.x, max(v0.x, max(v1.x, v2.x)));
atomicMax(&s_max.y, max(v0.y, max(v1.y, v2.y)));
atomicMax(&s_max.z, max(v0.z, max(v1.z, v2.z)));
}
}
__syncthreads();
// find out how many voxels we have to check
math::int3 span = (s_max - s_min) + math::int3(1, 1, 1);
int num_voxels = local_vc.calcNumVoxels(span);
// Parity-test ray direction (+y).
math::float3 ray_direction(0.0f, 1.0f, 0.0f);
//if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
// printf("span(%d, %d, %d), num voxels = %d, s_min(%d, %d, %d)\n", span.x, span.y, span.z, num_voxels, s_min.x, s_min.y, s_min.z);
//}
for (int index = threadIdx.x; index < num_voxels; index += blockDim.x) {
math::int3 voxel_coord = local_vc.getVoxelCoordsInsideBox(index, span) + s_min;
if (local_vc.check(voxel_coord) == false) {
atomicAdd(outside_voxels, 1);
}
else {
//int voxel_index = local_vc.toIndex(voxel_coord);
math::float3 voxel_center = local_vc.toWorld(voxel_coord);
bool hits = false;
for (int i = 0; i < object.num_triangles; ++i) {
Triangle t = triangles[object.triangle_offset + i];
// apply the matrix to the triangle
t.v0 = instance * math::float4(t.v0, 1.0f);
t.v1 = instance * math::float4(t.v1, 1.0f);
t.v2 = instance * math::float4(t.v2, 1.0f);
// count triangle check if the voxel is inside the object
if (ray_triangle_intersect(voxel_center, ray_direction, t.v0, t.v1, t.v2) == true) {
hits = !hits;
}
}
if (hits) {
//printf("%d %d %d -> %d\n", voxel_coord.x, voxel_coord.y, voxel_coord.z, local_vc.toIndex(voxel_coord));
atomicAdd(&voxel_volume[local_vc.toIndex(voxel_coord)], 1);
}
}
}
// __syncthreads();
}
// Counts occupied voxels (value > 0) in the voxel volume.  One thread per
// voxel (3-D launch); each block accumulates a partial count in shared
// memory and flushes it to *num_voxel once.  *num_voxel must start at 0.
__global__ void d_count_voxels(unsigned int* voxels, unsigned int* num_voxel, math::VoxelCoords* vc)
{
    math::int3 voxel;
    voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
    voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
    voxel.z = threadIdx.z + blockIdx.z * blockDim.z;
    __shared__ int num;
    // Shared memory is uninitialized; without this reset each block added
    // garbage to *num_voxel.  (Same init pattern as d_compare_voxels.)
    if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
        num = 0;
    }
    __syncthreads();
    int index = vc->toIndex(voxel);
    if (voxels[index] > 0) {
        atomicAdd(&num, 1);
    }
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
        atomicAdd(num_voxel, num);
    }
}
// Rates an object voxelization against a target voxelization, one thread
// per voxel.  Accumulates into ratings: [0] voxels occupied in both
// ("good"), [1] occupied only in the object ("bad"), [2] object voxels
// with count > 1 ("overlap").  Also clears voxels_object as it goes so the
// buffer is ready for the next evaluation.  Per-block partials live in
// shared memory and are flushed once per block.
__global__ void d_compare_voxels(unsigned int* voxels_target, unsigned int* voxels_object, int* ratings, math::VoxelCoords* vc)
{
math::int3 voxel;
voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
voxel.z = threadIdx.z + blockIdx.z * blockDim.z;
__shared__ int good;
__shared__ int bad;
__shared__ int overlap;
// Shared counters are uninitialized until thread (0,0,0) clears them.
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
good = 0;
bad = 0;
overlap = 0;
}
__syncthreads();
int index = vc->toIndex(voxel);
bool v_t = (voxels_target[index] != 0);
bool v_o = (voxels_object[index] != 0);
bool v_overlap = (voxels_object[index] > 1);
// Reset the object volume in the same pass.
voxels_object[index] = 0;
if (v_t && v_o) {
atomicAdd(&good, 1);
}
else if (v_o && !v_t) {
atomicAdd(&bad, 1);
}
if (v_overlap) {
atomicAdd(&overlap, 1);
}
__syncthreads();
// One flush of the block's partial counts into the global ratings.
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
atomicAdd(&ratings[0], good);
atomicAdd(&ratings[1], bad);
atomicAdd(&ratings[2], overlap);
}
}
// Compacts all occupied voxels into a flat array: for each voxel with a
// non-zero count, appends (world x, world y, world z, count) to `voxels`.
// *num_voxels serves as an atomic append cursor and must start at 0, so
// the output order is nondeterministic.
__global__ void d_export_voxels(unsigned int* voxel_space, math::float4* voxels, unsigned int* num_voxels, math::VoxelCoords* vc)
{
    math::int3 coord;
    coord.x = threadIdx.x + blockIdx.x * blockDim.x;
    coord.y = threadIdx.y + blockIdx.y * blockDim.y;
    coord.z = threadIdx.z + blockIdx.z * blockDim.z;
    const unsigned int count = voxel_space[vc->toIndex(coord)];
    if (count > 0) {
        // Reserve the next free output slot.
        const unsigned int slot = atomicAdd(num_voxels, 1);
        const math::float3 center = vc->toWorld(coord);
        math::float4 entry;
        entry.x = center.x;
        entry.y = center.y;
        entry.z = center.z;
        entry.w = count * 1.0f;
        voxels[slot] = entry;
    }
}
// C-linkage kernel entry points: thin wrappers so the host-side launcher
// can resolve these kernels by unmangled name.  Each forwards directly to
// the corresponding d_* implementation above.
extern "C" {
__global__ void _fct_count_voxels(unsigned int* voxels, unsigned int* num_voxel, math::VoxelCoords *vc) {
d_count_voxels(voxels, num_voxel, vc);
}
__global__ void _fct_compare_voxels(unsigned int* voxels_target, unsigned int* voxels_object, int* ratings, math::VoxelCoords *vc) {
d_compare_voxels(voxels_target, voxels_object, ratings, vc);
}
__global__ void _fct_export_voxels(unsigned int* voxels, math::float4* voxel_centers, unsigned int* num_voxels, math::VoxelCoords *vc) {
d_export_voxels(voxels, voxel_centers, num_voxels, vc);
}
__global__ void _fct_calc_voxels(
const Object* objects,
int num_objects,
Triangle* triangles,
math::float3x4* matrices,
unsigned int* voxel_volume,
int *outside_voxels,
math::VoxelCoords* vc)
{
d_calc_voxels_objects(objects, num_objects, triangles, matrices, voxel_volume, outside_voxels, vc);
}
}
| 825fdb51e8f5465cb21b91ba59d523de160b970e.cu | /*
* VolumeEvaluator.cu
*
* Created on: Dec 20, 2015
* Author: Karl Haubenwallner
*/
#include "combined_header.cuh"
#define EPSILON 0.0000001f
struct Object {
unsigned int triangle_offset;
unsigned int num_triangles;
unsigned int matrix_offset;
};
struct Triangle {
math::float3 v0;
math::float3 v1;
math::float3 v2;
};
__device__ bool ray_triangle_intersect(const math::float3& O, const math::float3& D, const math::float3& v0, const math::float3& v1, const math::float3& v2)
{
// Moeller / Trumbore ray-triangle intersection algorithm
math::float3 E1 = v1 - v0;
math::float3 E2 = v2 - v0;
// q = P
math::float3 P = cross(D, E2);
// a = det
float det = dot(E1, P);
if ((det > -EPSILON && det < EPSILON)) {
return false;;
}
math::float3 T = (O - v0);
math::float3 Q = cross(T, E1);
math::float3 result(dot(E2, Q), dot(T, P), dot(D, Q));
result = result * 1.0f/det;
bool miss = false;
miss = (result.y < 0.0f || result.y > 1.0f);
miss |= (result.z < 0.0f || (result.y + result.z) > 1.0f);
miss |= (result.x < 0.0f);
return !miss;
}
#ifdef not_used
__global__ void d_calc_voxels_raystabbing( const Triangle* triangles,
const unsigned int num_triangles,
unsigned int* voxel_outline,
unsigned int* voxel_volume,
math::VoxelCoords *vc)
{
math::int3 voxel;
voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
voxel.z = 0;
// find all triangles that intersect with this voxel line
math::float3 origin = vc->toWorld(voxel);
math::float3 direction(0.0f, 0.0f, 1.0f);
__syncthreads();
for (int i = 0; i < num_triangles; ++i) {
math::float3 intersection;
if (ray_triangle_intersect(origin, direction, triangles[i].v0, triangles[i].v1, triangles[i].v2, intersection)) {
int index = vc->toIndex(vc->toVoxel(intersection));
voxel_outline[index] = 1;
}
}
__syncthreads();
// do first flood-fill
unsigned short val = 0;
for (voxel.z = 0; voxel.z < vc->voxelPerDimension().z; voxel.z++) {
int index = vc->toIndex(voxel);
val += voxel_outline[index];
voxel_volume[index] = clamp(val, 0, 1);
val = val % 2;
}
// second flood fill from the other direction, to prevent single-voxel-triangles from flood-filling to the end
val = 0;
for (voxel.z = vc->voxelPerDimension().z -1; voxel.z >= 0; voxel.z--) {
int index = vc->toIndex(voxel);
val += voxel_outline[index];
voxel_volume[index] += clamp(val, 0, 1);
val = val % 2;
}
}
#endif
__global__ void d_calc_voxels_objects(
const Object* objects,
int num_objects,
Triangle* triangles,
math::float3x4* matrices,
unsigned int* voxel_volume,
int *outside_voxels,
math::VoxelCoords *vc)
{
math::VoxelCoords local_vc = *vc;
math::float3x4 instance = matrices[blockIdx.x];
Object object = objects[0];
for (int i = 0; i < num_objects; ++i) {
if (objects[i].triangle_offset < blockIdx.x) {
object = objects[i];
}
}
__shared__ math::int3 s_min;
__shared__ math::int3 s_max;
Triangle triangle = triangles[object.triangle_offset];
// apply the matrix to the triangle
triangle.v0 = instance * math::float4(triangle.v0, 1.0f);
triangle.v1 = instance * math::float4(triangle.v1, 1.0f);
triangle.v2 = instance * math::float4(triangle.v2, 1.0f);
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
s_min = local_vc.toVoxel(triangle.v0);
s_max = local_vc.toVoxel(triangle.v0);
}
__syncthreads();
// calc the bounding box of the object
if (threadIdx.y == 0) {
for (int offset = threadIdx.x; offset < object.num_triangles; offset += blockDim.x) {
int triangle_index = object.triangle_offset + offset;
Triangle t = triangles[triangle_index];
// apply the matrix to the triangle
t.v0 = instance * math::float4(t.v0, 1.0f);
t.v1 = instance * math::float4(t.v1, 1.0f);
t.v2 = instance * math::float4(t.v2, 1.0f);
math::int3 v0 = local_vc.toVoxel(t.v0);
math::int3 v1 = local_vc.toVoxel(t.v1);
math::int3 v2 = local_vc.toVoxel(t.v2);
atomicMin(&s_min.x, min(v0.x, min(v1.x, v2.x)));
atomicMin(&s_min.y, min(v0.y, min(v1.y, v2.y)));
atomicMin(&s_min.z, min(v0.z, min(v1.z, v2.z)));
atomicMax(&s_max.x, max(v0.x, max(v1.x, v2.x)));
atomicMax(&s_max.y, max(v0.y, max(v1.y, v2.y)));
atomicMax(&s_max.z, max(v0.z, max(v1.z, v2.z)));
}
}
__syncthreads();
// find out how many voxels we have to check
math::int3 span = (s_max - s_min) + math::int3(1, 1, 1);
int num_voxels = local_vc.calcNumVoxels(span);
math::float3 ray_direction(0.0f, 1.0f, 0.0f);
//if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
// printf("span(%d, %d, %d), num voxels = %d, s_min(%d, %d, %d)\n", span.x, span.y, span.z, num_voxels, s_min.x, s_min.y, s_min.z);
//}
for (int index = threadIdx.x; index < num_voxels; index += blockDim.x) {
math::int3 voxel_coord = local_vc.getVoxelCoordsInsideBox(index, span) + s_min;
if (local_vc.check(voxel_coord) == false) {
atomicAdd(outside_voxels, 1);
}
else {
//int voxel_index = local_vc.toIndex(voxel_coord);
math::float3 voxel_center = local_vc.toWorld(voxel_coord);
bool hits = false;
for (int i = 0; i < object.num_triangles; ++i) {
Triangle t = triangles[object.triangle_offset + i];
// apply the matrix to the triangle
t.v0 = instance * math::float4(t.v0, 1.0f);
t.v1 = instance * math::float4(t.v1, 1.0f);
t.v2 = instance * math::float4(t.v2, 1.0f);
// count triangle check if the voxel is inside the object
if (ray_triangle_intersect(voxel_center, ray_direction, t.v0, t.v1, t.v2) == true) {
hits = !hits;
}
}
if (hits) {
//printf("%d %d %d -> %d\n", voxel_coord.x, voxel_coord.y, voxel_coord.z, local_vc.toIndex(voxel_coord));
atomicAdd(&voxel_volume[local_vc.toIndex(voxel_coord)], 1);
}
}
}
// __syncthreads();
}
__global__ void d_count_voxels(unsigned int* voxels, unsigned int* num_voxel, math::VoxelCoords* vc)
{
math::int3 voxel;
voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
voxel.z = threadIdx.z + blockIdx.z * blockDim.z;
__shared__ int num;
int index = vc->toIndex(voxel);
if (voxels[index] > 0) {
atomicAdd(&num, 1);
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
atomicAdd(num_voxel, num);
}
}
__global__ void d_compare_voxels(unsigned int* voxels_target, unsigned int* voxels_object, int* ratings, math::VoxelCoords* vc)
{
math::int3 voxel;
voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
voxel.z = threadIdx.z + blockIdx.z * blockDim.z;
__shared__ int good;
__shared__ int bad;
__shared__ int overlap;
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
good = 0;
bad = 0;
overlap = 0;
}
__syncthreads();
int index = vc->toIndex(voxel);
bool v_t = (voxels_target[index] != 0);
bool v_o = (voxels_object[index] != 0);
bool v_overlap = (voxels_object[index] > 1);
voxels_object[index] = 0;
if (v_t && v_o) {
atomicAdd(&good, 1);
}
else if (v_o && !v_t) {
atomicAdd(&bad, 1);
}
if (v_overlap) {
atomicAdd(&overlap, 1);
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) {
atomicAdd(&ratings[0], good);
atomicAdd(&ratings[1], bad);
atomicAdd(&ratings[2], overlap);
}
}
__global__ void d_export_voxels(unsigned int* voxel_space, math::float4* voxels, unsigned int* num_voxels, math::VoxelCoords* vc)
{
math::int3 voxel;
voxel.x = threadIdx.x + blockIdx.x * blockDim.x;
voxel.y = threadIdx.y + blockIdx.y * blockDim.y;
voxel.z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned int index = vc->toIndex(voxel);
unsigned int voxel_val = voxel_space[index];
if (voxel_val > 0) {
unsigned int offset = atomicAdd(num_voxels, 1);
math::float3 world_coords = vc->toWorld(voxel);
math::float4 voxel_data;
voxel_data.x = world_coords.x;
voxel_data.y = world_coords.y;
voxel_data.z = world_coords.z;
voxel_data.w = voxel_val * 1.0f;
voxels[offset] = voxel_data;
}
}
extern "C" {
__global__ void _fct_count_voxels(unsigned int* voxels, unsigned int* num_voxel, math::VoxelCoords *vc) {
d_count_voxels(voxels, num_voxel, vc);
}
__global__ void _fct_compare_voxels(unsigned int* voxels_target, unsigned int* voxels_object, int* ratings, math::VoxelCoords *vc) {
d_compare_voxels(voxels_target, voxels_object, ratings, vc);
}
__global__ void _fct_export_voxels(unsigned int* voxels, math::float4* voxel_centers, unsigned int* num_voxels, math::VoxelCoords *vc) {
d_export_voxels(voxels, voxel_centers, num_voxels, vc);
}
__global__ void _fct_calc_voxels(
const Object* objects,
int num_objects,
Triangle* triangles,
math::float3x4* matrices,
unsigned int* voxel_volume,
int *outside_voxels,
math::VoxelCoords* vc)
{
d_calc_voxels_objects(objects, num_objects, triangles, matrices, voxel_volume, outside_voxels, vc);
}
}
|
0c7f0441a0484b1c94046db0fefe8350c6eea3e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
// Apoorv Vyas <avyas@idiap.ch>
//
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 1, torch::RestrictPtrTraits> int32_accessor_1d;
typedef torch::PackedTensorAccessor32<int32_t, 3, torch::RestrictPtrTraits> int32_accessor_3d;
typedef torch::PackedTensorAccessor32<int32_t, 4, torch::RestrictPtrTraits> int32_accessor_4d;
typedef torch::PackedTensorAccessor32<int64_t, 3, torch::RestrictPtrTraits> int64_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
/**
* Compute hamming distances
*/
__device__
int hamming_distance(int64_t a, int64_t b) {
return __popcll(a ^ b);
}
/**
* Set up the kernel to generate cuda random numbers
*/
__global__
void setup_kernel(hiprandState_t *state) {
int idx = threadIdx.x+blockDim.x*blockIdx.x;
hiprand_init(1234, idx, 0, &state[idx]);
}
/**
* This kernel assigns datapoints to the closest centroids based on the hamming
* distance
*
* Arguments:
* ---------
* Inputs:
* hash_codes : hash codes tensor to be clustered
* lengths : sequence lengths array
* centroids : current estimate of the centroids
* n_blocks_per_sequence : number of blocks allocated per sequence
* MAX : MAX distance possible (64 int_64 hamming)
*
* Outputs:
* labels : labels to be assigned to each data point
* distances : distances to the closest cluster
*/
__global__
void assign_clusters_kernel(
const int64_accessor_3d hash_codes,
const int32_accessor_1d lengths,
const int64_accessor_3d centroids,
int32_accessor_3d labels,
int32_accessor_3d distances,
const int n_blocks_per_sequence,
int MAX=65
) {
int H = centroids.size(1);
int L = hash_codes.size(2);
int K = centroids.size(2);
// Load the shared memory
const int sequence_index = blockIdx.x / n_blocks_per_sequence;
const int n = sequence_index / H;
const int h = sequence_index % H;
extern __shared__ int64_t shared_means[];
if (threadIdx.x < K) {
shared_means[threadIdx.x] = centroids[n][h][threadIdx.x];
}
__syncthreads();
// Extract the indexes
const int l = ((blockIdx.x % n_blocks_per_sequence)*blockDim.x) + threadIdx.x;
// Each block is only responsible for one sequence
if(l >= L) {
return;
}
// Beyond the sequence length set the cluster label to (K+1) where K is the clusters
if(l >= lengths[n]) {
labels[n][h][l] = K+1;
distances[n][h][l] = -1;
return;
}
// Make global loads once.
const int64_t x = hash_codes[n][h][l];
// update the cluster assingments
// 64 bit hashcodes can have maximum hamming distance as 64
int best_distance = MAX;
int best_cluster = 0;
int dist = 0;
for (int cluster = 0; cluster < K; ++cluster) {
dist = hamming_distance(x, shared_means[cluster]);
if (dist < best_distance) {
best_distance = dist;
best_cluster = cluster;
}
}
labels[n][h][l] = best_cluster;
distances[n][h][l] = best_distance;
}
/**
* This kernel counts the number of data points belonging to each cluster and
* also updates cluster_bit_counts for each cluster cluster_bit_counts for any
* cluster is an array with size [B x 1]. Each position stores the
* difference of number of data points with ones and number of data points with
* zeros at that position in the binary representation of the number.
*
* Arguments:
* ---------
* Inputs:
* labels : labels to be assigned to each data point
* hash_codes : hash codes to be clustered
*
* Outputs:
* counts : array to store the number of datapoints
* belonging to any cluster
* cluster_bit_counts : array containing the bit counts
*/
__global__
void bit_count_kernel(
const int32_accessor_3d labels,
const int64_accessor_3d hash_codes,
int32_accessor_3d counts,
int32_accessor_4d cluster_bit_counts
) {
const int N = labels.size(0);
const int H = labels.size(1);
const int L = labels.size(2);
const int K = counts.size(2);
const int B = cluster_bit_counts.size(3);
const int hl = H*L;
// Extract the indices
int full_idx = (blockDim.x * blockIdx.x) + threadIdx.x;
const int sequence_index = full_idx / L;
const int n = sequence_index / H;
const int h = sequence_index % H;
const int l = full_idx - n*hl - h*L;
if (n >= N)
return;
const int64_t x = hash_codes[n][h][l];
int val_to_add = -1;
const int best_cluster = labels[n][h][l];
if(best_cluster == (K+1)) {
return;
}
for (int i=0; i<B; i++) {
int64_t bit= 1L << i;
if((x & bit) > 0) {
val_to_add = 1;
}
else {
val_to_add = -1;
}
atomicAdd(&cluster_bit_counts[n][h][best_cluster][i], val_to_add);
}
atomicAdd(&counts[n][h][best_cluster], 1);
}
/**
* This kernel computes the new means based on the cluster_bit_counts
* Arguments:
* ---------
* Inputs:
* state : cuda randome state for the random number generation
* counts : array to store the number of datapoints
* belonging to any cluster
* Outputs:
* centroids : centroids to be updated
* cluster_bit_counts : array containing the bit counts
*/
__global__
void compute_means_kernel(
const int32_accessor_3d counts,
int32_accessor_4d cluster_bit_counts,
int64_accessor_3d centroids,
hiprandState_t* state
) {
const int N = counts.size(0);
const int H = counts.size(1);
const int K = counts.size(2);
const int B = cluster_bit_counts.size(3);
// Extract indices
const int full_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if( full_idx >= (K*N*H))
return;
const int sequence_idx = full_idx / K;
const int n = sequence_idx / H;
const int h = sequence_idx % H;
const int k = full_idx % K;
int64_t mean_k = 0;
const int64_t MAX = (1L << (B));
// if the counts for the current cluster is 0 set mean to random
if(counts[n][h][k] == 0) {
centroids[n][h][k] = int64_t(hiprand(state + k) % MAX);
return;
}
//update otherwise
for( int i=0; i<B; i++) {
if(cluster_bit_counts[n][h][k][i] == 0) {
cluster_bit_counts[n][h][k][i] =
(hiprand(state + k) & 1L);
}
if(cluster_bit_counts[n][h][k][i] > 0) {
mean_k = mean_k | (1L << i);
}
}
centroids[n][h][k] = mean_k;
}
/**
* Kmeans runs lloyd iteratively to first assign the points and then update
* the clusters
* Arguments:
* ---------
* Inputs:
* hash_codes : the hash codes to be clustered
* lengths : sequence lengths array
* centroids : centroid buffer
* distances : distances buffer
* cluster_bit_counts : bit counts buffer
* iterations : number of iterations of Lloyd
*
* Outputs:
* labels : array to store the labels assigned to each point
* counts : array to store the number of datapoints belonging
* to any cluster
* Size (L*NH*K)
* [0..K-1] are counts for 1st sequence
* [K..2K-1] are counts for 2nd sequence.
*/
void kmeans(
const torch::Tensor hash_codes,
const torch::Tensor lengths,
torch::Tensor centroids,
torch::Tensor distances,
torch::Tensor cluster_bit_counts,
torch::Tensor labels,
torch::Tensor counts,
int iterations
) {
const int64_accessor_3d hash_codes_acc = hash_codes.packed_accessor32<int64_t, 3, torch::RestrictPtrTraits>();
const int32_accessor_1d lengths_acc = lengths.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>();
int64_accessor_3d centroids_acc = centroids.packed_accessor32<int64_t, 3, torch::RestrictPtrTraits>();
int32_accessor_3d distances_acc = distances.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
int32_accessor_4d cluster_bit_counts_acc = cluster_bit_counts.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>();
int32_accessor_3d labels_acc = labels.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
int32_accessor_3d counts_acc = counts.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
const int N = hash_codes.size(0);
const int H = hash_codes.size(1);
const int NH = N*H;
const int L = hash_codes.size(2);
const int K = centroids.size(2);
const int B = cluster_bit_counts.size(3);
// allocate the temporary arrays we will need
hiprandState_t *d_state;
hipMalloc(&d_state, sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_kernel), dim3(1),dim3(K), 0, 0, d_state);
// Estimate the number of threads we will need
const int n_blocks_per_sequence = (L-1)/1024 + 1;
// Dividing the number of threads roughly equally among blocks
// Max because each blocks needs K threads to load shared memory
const int n_threads_assign = max((L-1)/n_blocks_per_sequence + 1, K);
const int n_blocks_assign = NH * n_blocks_per_sequence;
const int shared_mem_assign = K * sizeof(int64_t);
const int n_threads_cnt = 1024;
const int n_blocks_cnt = ((L*NH)-1)/n_threads_cnt + 1;
const int n_threads_centroids = 1024;
const int n_blocks_centroids = ((K*NH)-1)/n_threads_cnt + 1;
//Lloyd iterations
for (size_t itr = 0; itr < iterations; ++itr) {
hipLaunchKernelGGL(( assign_clusters_kernel), dim3(n_blocks_assign),
dim3(n_threads_assign),
shared_mem_assign, 0,
hash_codes_acc,
lengths_acc,
centroids_acc,
labels_acc,
distances_acc,
n_blocks_per_sequence
);
counts.zero_();
cluster_bit_counts.zero_();
hipLaunchKernelGGL(( bit_count_kernel), dim3(n_blocks_cnt),
dim3(n_threads_cnt), 0, 0,
labels_acc,
hash_codes_acc,
counts_acc,
cluster_bit_counts_acc
);
hipLaunchKernelGGL(( compute_means_kernel), dim3(n_blocks_centroids),
dim3(n_threads_centroids), 0, 0,
counts_acc,
cluster_bit_counts_acc,
centroids_acc,
d_state
);
}
hipLaunchKernelGGL(( assign_clusters_kernel), dim3(n_blocks_assign),
dim3(n_threads_assign),
shared_mem_assign, 0,
hash_codes_acc,
lengths_acc,
centroids_acc,
labels_acc,
distances_acc,
n_blocks_per_sequence
);
counts.zero_();
cluster_bit_counts.zero_();
hipLaunchKernelGGL(( bit_count_kernel), dim3(n_blocks_cnt),
dim3(n_threads_cnt), 0, 0,
labels_acc,
hash_codes_acc,
counts_acc,
cluster_bit_counts_acc
);
hipFree(d_state);
return;
}
/**
* Cluster the hash codes H using Llyod's K-Means clustering
* Inputs:
*
* Arguments:
* ---------
* Inputs:
* hashes : hashes to be clustered
*
* Buffers:
* centroids : centroids buffer
* distances : distances buffer
* bitcounts : cluster_bit_counts buffer
*
* Outputs:
* clusters : Store the groups/labels/assignments
* counts : Store the counts of the number of points in each cluster
*/
void cluster(
const torch::Tensor hashes,
const torch::Tensor lengths,
torch::Tensor centroids,
torch::Tensor distances,
torch::Tensor bitcounts,
torch::Tensor clusters,
torch::Tensor counts,
int n_iterations,
int B
) {
int K = centroids.size(2);
int N = hashes.size(0);
int H = hashes.size(1);
int L = hashes.size(2);
// initialize the centroids
//centroids.view({-1, K}) = hashes.view({-1, L}).narrow(1, 0, K);
kmeans(
hashes,
lengths,
centroids,
distances,
bitcounts,
clusters,
counts,
n_iterations
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("cluster", &cluster, "Cluster the hashed vectors by "
"performing a few iterations of k-means");
}
| 0c7f0441a0484b1c94046db0fefe8350c6eea3e4.cu | //
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
// Apoorv Vyas <avyas@idiap.ch>
//
#include <curand.h>
#include <curand_kernel.h>
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 1, torch::RestrictPtrTraits> int32_accessor_1d;
typedef torch::PackedTensorAccessor32<int32_t, 3, torch::RestrictPtrTraits> int32_accessor_3d;
typedef torch::PackedTensorAccessor32<int32_t, 4, torch::RestrictPtrTraits> int32_accessor_4d;
typedef torch::PackedTensorAccessor32<int64_t, 3, torch::RestrictPtrTraits> int64_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
/**
* Compute hamming distances
*/
__device__
int hamming_distance(int64_t a, int64_t b) {
return __popcll(a ^ b);
}
/**
* Set up the kernel to generate cuda random numbers
*/
__global__
void setup_kernel(curandState *state) {
int idx = threadIdx.x+blockDim.x*blockIdx.x;
curand_init(1234, idx, 0, &state[idx]);
}
/**
* This kernel assigns datapoints to the closest centroids based on the hamming
* distance
*
* Arguments:
* ---------
* Inputs:
* hash_codes : hash codes tensor to be clustered
* lengths : sequence lengths array
* centroids : current estimate of the centroids
* n_blocks_per_sequence : number of blocks allocated per sequence
* MAX : MAX distance possible (64 int_64 hamming)
*
* Outputs:
* labels : labels to be assigned to each data point
* distances : distances to the closest cluster
*/
__global__
void assign_clusters_kernel(
const int64_accessor_3d hash_codes,
const int32_accessor_1d lengths,
const int64_accessor_3d centroids,
int32_accessor_3d labels,
int32_accessor_3d distances,
const int n_blocks_per_sequence,
int MAX=65
) {
int H = centroids.size(1);
int L = hash_codes.size(2);
int K = centroids.size(2);
// Load the shared memory
const int sequence_index = blockIdx.x / n_blocks_per_sequence;
const int n = sequence_index / H;
const int h = sequence_index % H;
extern __shared__ int64_t shared_means[];
if (threadIdx.x < K) {
shared_means[threadIdx.x] = centroids[n][h][threadIdx.x];
}
__syncthreads();
// Extract the indexes
const int l = ((blockIdx.x % n_blocks_per_sequence)*blockDim.x) + threadIdx.x;
// Each block is only responsible for one sequence
if(l >= L) {
return;
}
// Beyond the sequence length set the cluster label to (K+1) where K is the clusters
if(l >= lengths[n]) {
labels[n][h][l] = K+1;
distances[n][h][l] = -1;
return;
}
// Make global loads once.
const int64_t x = hash_codes[n][h][l];
// update the cluster assingments
// 64 bit hashcodes can have maximum hamming distance as 64
int best_distance = MAX;
int best_cluster = 0;
int dist = 0;
for (int cluster = 0; cluster < K; ++cluster) {
dist = hamming_distance(x, shared_means[cluster]);
if (dist < best_distance) {
best_distance = dist;
best_cluster = cluster;
}
}
labels[n][h][l] = best_cluster;
distances[n][h][l] = best_distance;
}
/**
* This kernel counts the number of data points belonging to each cluster and
* also updates cluster_bit_counts for each cluster cluster_bit_counts for any
* cluster is an array with size [B x 1]. Each position stores the
* difference of number of data points with ones and number of data points with
* zeros at that position in the binary representation of the number.
*
* Arguments:
* ---------
* Inputs:
* labels : labels to be assigned to each data point
* hash_codes : hash codes to be clustered
*
* Outputs:
* counts : array to store the number of datapoints
* belonging to any cluster
* cluster_bit_counts : array containing the bit counts
*/
__global__
void bit_count_kernel(
const int32_accessor_3d labels,
const int64_accessor_3d hash_codes,
int32_accessor_3d counts,
int32_accessor_4d cluster_bit_counts
) {
const int N = labels.size(0);
const int H = labels.size(1);
const int L = labels.size(2);
const int K = counts.size(2);
const int B = cluster_bit_counts.size(3);
const int hl = H*L;
// Extract the indices
int full_idx = (blockDim.x * blockIdx.x) + threadIdx.x;
const int sequence_index = full_idx / L;
const int n = sequence_index / H;
const int h = sequence_index % H;
const int l = full_idx - n*hl - h*L;
if (n >= N)
return;
const int64_t x = hash_codes[n][h][l];
int val_to_add = -1;
const int best_cluster = labels[n][h][l];
if(best_cluster == (K+1)) {
return;
}
for (int i=0; i<B; i++) {
int64_t bit= 1L << i;
if((x & bit) > 0) {
val_to_add = 1;
}
else {
val_to_add = -1;
}
atomicAdd(&cluster_bit_counts[n][h][best_cluster][i], val_to_add);
}
atomicAdd(&counts[n][h][best_cluster], 1);
}
/**
* This kernel computes the new means based on the cluster_bit_counts
* Arguments:
* ---------
* Inputs:
* state : cuda randome state for the random number generation
* counts : array to store the number of datapoints
* belonging to any cluster
* Outputs:
* centroids : centroids to be updated
* cluster_bit_counts : array containing the bit counts
*/
__global__
void compute_means_kernel(
const int32_accessor_3d counts,
int32_accessor_4d cluster_bit_counts,
int64_accessor_3d centroids,
curandState* state
) {
const int N = counts.size(0);
const int H = counts.size(1);
const int K = counts.size(2);
const int B = cluster_bit_counts.size(3);
// Extract indices
const int full_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if( full_idx >= (K*N*H))
return;
const int sequence_idx = full_idx / K;
const int n = sequence_idx / H;
const int h = sequence_idx % H;
const int k = full_idx % K;
int64_t mean_k = 0;
const int64_t MAX = (1L << (B));
// if the counts for the current cluster is 0 set mean to random
if(counts[n][h][k] == 0) {
centroids[n][h][k] = int64_t(curand(state + k) % MAX);
return;
}
//update otherwise
for( int i=0; i<B; i++) {
if(cluster_bit_counts[n][h][k][i] == 0) {
cluster_bit_counts[n][h][k][i] =
(curand(state + k) & 1L);
}
if(cluster_bit_counts[n][h][k][i] > 0) {
mean_k = mean_k | (1L << i);
}
}
centroids[n][h][k] = mean_k;
}
/**
* Kmeans runs lloyd iteratively to first assign the points and then update
* the clusters
* Arguments:
* ---------
* Inputs:
* hash_codes : the hash codes to be clustered
* lengths : sequence lengths array
* centroids : centroid buffer
* distances : distances buffer
* cluster_bit_counts : bit counts buffer
* iterations : number of iterations of Lloyd
*
* Outputs:
* labels : array to store the labels assigned to each point
* counts : array to store the number of datapoints belonging
* to any cluster
* Size (L*NH*K)
* [0..K-1] are counts for 1st sequence
* [K..2K-1] are counts for 2nd sequence.
*/
void kmeans(
const torch::Tensor hash_codes,
const torch::Tensor lengths,
torch::Tensor centroids,
torch::Tensor distances,
torch::Tensor cluster_bit_counts,
torch::Tensor labels,
torch::Tensor counts,
int iterations
) {
const int64_accessor_3d hash_codes_acc = hash_codes.packed_accessor32<int64_t, 3, torch::RestrictPtrTraits>();
const int32_accessor_1d lengths_acc = lengths.packed_accessor32<int32_t, 1, torch::RestrictPtrTraits>();
int64_accessor_3d centroids_acc = centroids.packed_accessor32<int64_t, 3, torch::RestrictPtrTraits>();
int32_accessor_3d distances_acc = distances.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
int32_accessor_4d cluster_bit_counts_acc = cluster_bit_counts.packed_accessor32<int32_t, 4, torch::RestrictPtrTraits>();
int32_accessor_3d labels_acc = labels.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
int32_accessor_3d counts_acc = counts.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>();
const int N = hash_codes.size(0);
const int H = hash_codes.size(1);
const int NH = N*H;
const int L = hash_codes.size(2);
const int K = centroids.size(2);
const int B = cluster_bit_counts.size(3);
// allocate the temporary arrays we will need
curandState *d_state;
cudaMalloc(&d_state, sizeof(curandState));
setup_kernel<<<1,K>>>(d_state);
// Estimate the number of threads we will need
const int n_blocks_per_sequence = (L-1)/1024 + 1;
// Dividing the number of threads roughly equally among blocks
// Max because each blocks needs K threads to load shared memory
const int n_threads_assign = max((L-1)/n_blocks_per_sequence + 1, K);
const int n_blocks_assign = NH * n_blocks_per_sequence;
const int shared_mem_assign = K * sizeof(int64_t);
const int n_threads_cnt = 1024;
const int n_blocks_cnt = ((L*NH)-1)/n_threads_cnt + 1;
const int n_threads_centroids = 1024;
const int n_blocks_centroids = ((K*NH)-1)/n_threads_cnt + 1;
//Lloyd iterations
for (size_t itr = 0; itr < iterations; ++itr) {
assign_clusters_kernel<<<n_blocks_assign,
n_threads_assign,
shared_mem_assign>>>(
hash_codes_acc,
lengths_acc,
centroids_acc,
labels_acc,
distances_acc,
n_blocks_per_sequence
);
counts.zero_();
cluster_bit_counts.zero_();
bit_count_kernel<<<n_blocks_cnt,
n_threads_cnt>>>(
labels_acc,
hash_codes_acc,
counts_acc,
cluster_bit_counts_acc
);
compute_means_kernel<<<n_blocks_centroids,
n_threads_centroids>>>(
counts_acc,
cluster_bit_counts_acc,
centroids_acc,
d_state
);
}
assign_clusters_kernel<<<n_blocks_assign,
n_threads_assign,
shared_mem_assign>>>(
hash_codes_acc,
lengths_acc,
centroids_acc,
labels_acc,
distances_acc,
n_blocks_per_sequence
);
counts.zero_();
cluster_bit_counts.zero_();
bit_count_kernel<<<n_blocks_cnt,
n_threads_cnt>>>(
labels_acc,
hash_codes_acc,
counts_acc,
cluster_bit_counts_acc
);
cudaFree(d_state);
return;
}
/**
* Cluster the hash codes H using Llyod's K-Means clustering
* Inputs:
*
* Arguments:
* ---------
* Inputs:
* hashes : hashes to be clustered
*
* Buffers:
* centroids : centroids buffer
* distances : distances buffer
* bitcounts : cluster_bit_counts buffer
*
* Outputs:
* clusters : Store the groups/labels/assignments
* counts : Store the counts of the number of points in each cluster
*/
void cluster(
const torch::Tensor hashes,
const torch::Tensor lengths,
torch::Tensor centroids,
torch::Tensor distances,
torch::Tensor bitcounts,
torch::Tensor clusters,
torch::Tensor counts,
int n_iterations,
int B
) {
int K = centroids.size(2);
int N = hashes.size(0);
int H = hashes.size(1);
int L = hashes.size(2);
// initialize the centroids
//centroids.view({-1, K}) = hashes.view({-1, L}).narrow(1, 0, K);
kmeans(
hashes,
lengths,
centroids,
distances,
bitcounts,
clusters,
counts,
n_iterations
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("cluster", &cluster, "Cluster the hashed vectors by "
"performing a few iterations of k-means");
}
|
6277575facfb35f900de6ed6103814c49f8b2104.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "codegen/embedding_backward_template_helpers.cuh"
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
enum {
DEVICE = 0,
MANAGED = 1,
MANAGED_CACHING = 2,
};
constexpr size_t kBackwardMaxThreads = 512;
using namespace at;
using namespace fbgemm_gpu;
template <
typename emb_t,
typename cache_t,
size_t kMaxVecsPerThread>
__global__ void
__launch_bounds__(kMaxThreads)
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1(
const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
grad_output,
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights,
{% if not dense %}
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights,
PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
weights_placements,
{% endif %}
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
hash_size_cumsum,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
sorted_linear_indices_run,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos,
{% if not dense %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights,
{% endif %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_num_runs,
int32_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights,
{% endif %}
FixedDivisor fd,
{{ args.split_kernel_args | join(", ") }}) {
int32_t T = D_offsets.size(0) - 1;
const int32_t B = grad_output.size(0);
const int32_t num_runs = sorted_linear_indices_num_runs[0];
for (int32_t run_id = blockIdx.x * kWarpSize; run_id < num_runs;
run_id += kWarpSize * gridDim.x) {
const int32_t candidate_run_id = run_id + threadIdx.x;
int candidate_run_active = candidate_run_id < num_runs &&
sorted_linear_indices_run_lengths[candidate_run_id] >=
max_segment_length_per_warp;
uint32_t candidate_mask = __ballot_sync(0xFFFFFFFF, candidate_run_active);
while (candidate_mask != 0) {
int32_t current_thread_id = __ffs(candidate_mask) - 1;
candidate_mask ^= (static_cast<uint32_t>(1) << current_thread_id);
int32_t current_run_id = run_id + current_thread_id;
const int64_t linear_index = sorted_linear_indices_run[current_run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[current_run_id];
const int32_t segment_end =
sorted_linear_indices_cumulative_run_lengths[current_run_id + 1];
const int32_t SL = segment_end - segment_start;
// TODO: should never be hit!
if (SL < max_segment_length_per_warp) {
return;
}
const int32_t warp_id = threadIdx.y;
const int32_t lane_id = threadIdx.x;
// Note that with shared embedding tables we can have multiple tables
// (i.e. different values of `t` sharing the same segment).
//
const auto info_0 = sorted_infos[segment_start];
int32_t t_0 = fd.Div(info_0); //info_0 / B;
int64_t hash_size = hash_size_cumsum[t_0];
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = SL_per_warp * warp_id;
const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL);
Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) {
int32_t sl_j = sl + threadIdx.x;
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0;
{% if weighted %}
acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) {
int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j);
int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j);
{% if weighted %}
acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].acc.x += grad_out_vec.acc.x;
grad_sum[i].acc.y += grad_out_vec.acc.y;
grad_sum[i].acc.z += grad_out_vec.acc.z;
grad_sum[i].acc.w += grad_out_vec.acc.w;
{% endif %}
}
}
}
// do shared memory reduction only if we used multiple blocks.
if (SL > SL_per_warp) {
struct SharedMemory<Vec4T<acc_type<cache_t, true>>> smem;
Vec4T<acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer();
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] = grad_sum[i];
}
__syncthreads();
if (blockDim.y >= 32) {
if (warp_id < 16) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 16) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 16) {
if (warp_id < 8) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 8) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 8) {
if (warp_id < 4) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 4) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 4) {
if (warp_id < 2) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 2) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (warp_id == 0) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
grad_sum[i] = vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 1) * kMaxVecsPerThread * kWarpSize]);
}
}
}
if (warp_id == 0) {
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = weights_placements[t_0];
if (weights_placement == DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0];
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
{{ split_precomputation }}
struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + current_run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams_template = weight_row_template.load_qparams();
}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[lane_id + i * kWarpSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate qparams from updated weight row
qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(shared_weight_update_row, D);
weight_row_template.store_qparams(qparams_new);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
weight_row_template.store(shared_weight_update_row[lane_id + i * kWarpSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
}
}
}
}
template <
typename emb_t,
typename cache_t,
size_t kMaxVecsPerThread>
__global__
__launch_bounds__(kBackwardMaxThreads)
void
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1(
const PackedTensorAccessor32<acc_type<cache_t,true>, 2, RestrictPtrTraits>
grad_output,
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights,
{% if not dense %}
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights,
PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
weights_placements,
{% endif %}
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
hash_size_cumsum,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
sorted_linear_indices_run,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos,
{% if not dense %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights,
{% endif %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_num_runs,
int32_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights,
{% endif %}
FixedDivisor fd,
{{ args.split_kernel_args | join(", ") }}) {
const int32_t T = D_offsets.size(0) - 1;
const int32_t B = grad_output.size(0);
const int32_t run_id = blockIdx.x * blockDim.y + threadIdx.y;
if (run_id >= sorted_linear_indices_run.size(0)) {
return;
}
if (run_id >= sorted_linear_indices_num_runs[0]) {
return;
}
const int64_t linear_index = sorted_linear_indices_run[run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[run_id];
const int32_t segment_end =
sorted_linear_indices_cumulative_run_lengths[run_id + 1];
const int32_t SL = segment_end - segment_start;
if (SL >= max_segment_length_per_warp) {
return;
}
// now, each segment corresponds to exactly one table `t` and row in
// that table (`idx`). Thus, we can hoist out some of the book-keeping.
const auto info_0 = sorted_infos[segment_start];
int32_t t_0 = fd.Div(info_0); // info_0 / B;
int64_t hash_size = hash_size_cumsum[t_0];
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = 0;
const int32_t sl_end = SL;
Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) {
int32_t sl_j = sl + threadIdx.x;
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = D_offsets[t];
{% if weighted %}
acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) {
int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j);
int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j);
{% if weighted %}
acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].acc.x += grad_out_vec.acc.x;
grad_sum[i].acc.y += grad_out_vec.acc.y;
grad_sum[i].acc.z += grad_out_vec.acc.z;
grad_sum[i].acc.w += grad_out_vec.acc.w;
{% endif %}
}
}
}
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = weights_placements[t_0];
if (weights_placement == DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0];
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
{{ split_precomputation }}
struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights){
qparams_template = weight_row_template.load_qparams();
}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate new qparams after row update
qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kWarpSize], D);
weight_row_template.store_qparams(qparams_new);
// fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
weight_row_template.store(shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
}
template <typename cache_t, typename emb_t>
__global__ void __launch_bounds__(kMaxThreads) grad_mean_kernel(
const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
grad_output,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> offsets,
PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
grad_output_mean) {
int32_t B = grad_output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t b = b_t % B;
int32_t t = b_t / B;
if (b_t >= B * T) {
return;
}
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
int64_t indices_start = offsets[t * B + b];
int64_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
if (L != 0) {
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]);
grad_out_vec.acc.x /= L;
grad_out_vec.acc.y /= L;
grad_out_vec.acc.z /= L;
grad_out_vec.acc.w /= L;
grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]);
}
} else {
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]);
grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]);
}
}
}
{{ "void" if not dense else "Tensor" }} split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_exact_cuda(
Tensor grad_output,
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
Tensor D_offsets,
int64_t max_D,
Tensor hash_size_cumsum,
int64_t total_hash_size_bits,
Tensor indices,
Tensor offsets,
int64_t pooling_mode,
{% if weighted %}
Tensor indice_weights,
{% endif %}
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
int64_t unused_,
int64_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
{% endif %}
{{ args.split_function_args | join(", ") }}) {
at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
device_guard.set_index(dev_weights.get_device());
{% if dense %}
auto grad_dev_weights = zeros_like(dev_weights);
{% endif %}
// short-circuit if there are zero indices.
if (indices.numel() == 0) {
return {{ "grad_dev_weights" if dense else "" }};
}
int32_t T = D_offsets.numel() - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
const auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
auto BT_block_size = kMaxThreads / kWarpSize;
TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads);
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
auto infos = at::empty_like(indices, indices.options().dtype(kInt));
auto infos_sorted = at::empty_like(infos);
auto linear_indices = at::empty_like(indices);
auto linear_indices_sorted = at::empty_like(indices);
hipLaunchKernelGGL(( linearize_index_kernel),
dim3(div_round_up(B * T, kMaxThreads)),
dim3(kMaxThreads),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
infos.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
linear_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
infos.data_ptr<int32_t>(),
infos_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
infos.data_ptr<int32_t>(),
infos_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
}
{% if not dense %}
auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations);
if (lxu_cache_locations.size(0) > 0) {
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
}
{% endif %}
auto sorted_linear_indices_run = at::empty_like(indices);
auto sorted_linear_indices_run_lengths =
at::zeros_like(indices, indices.options().dtype(kInt));
auto sorted_linear_indices_num_runs =
at::zeros({1}, indices.options().dtype(kInt));
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode(
nullptr,
temp_storage_bytes,
linear_indices_sorted.data_ptr<int64_t>(),
sorted_linear_indices_run.data_ptr<int64_t>(),
sorted_linear_indices_run_lengths.data_ptr<int32_t>(),
sorted_linear_indices_num_runs.data_ptr<int32_t>(),
linear_indices_sorted.numel(),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
// Allocate temporary storage
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
// Run encoding
AT_CUDA_CHECK(hipcub::DeviceRunLengthEncode::Encode(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices_sorted.data_ptr<int64_t>(),
sorted_linear_indices_run.data_ptr<int64_t>(),
sorted_linear_indices_run_lengths.data_ptr<int32_t>(),
sorted_linear_indices_num_runs.data_ptr<int32_t>(),
linear_indices_sorted.numel(),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
}
auto sorted_linear_indices_cumulative_run_lengths =
asynchronous_complete_cumsum(sorted_linear_indices_run_lengths);
{% if not dense %}
DISPATCH_EMB_CACHE_TYPES(
{% else %}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
{% endif %}
dev_weights.type(),
{% if not dense %}
lxu_cache_weights.type(),
{% endif %}
"split_embedding_backward_{{ optimizer }}_exact_kernel",
([&] {
{% if weighted %}
auto indice_weights_sorted = at::empty_like(indice_weights);
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
{% if not dense %}
indice_weights.data_ptr<acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(),
{% else %}
indice_weights.data_ptr<acc_type<scalar_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(),
{% endif %}
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
AT_CUDA_CHECK(hipcub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
{% if not dense %}
indice_weights.data_ptr<acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(),
{% else %}
indice_weights.data_ptr<acc_type<scalar_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(),
{% endif %}
linear_indices.numel(),
0,
total_hash_size_bits,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
false));
}
{% endif %}
auto grad_output_accessor = grad_output.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>();
Tensor grad_output_mean;
if (pooling_mode == MEAN) {
grad_output_mean = at::empty_like(grad_output);
hipLaunchKernelGGL(( grad_mean_kernel<{{ "scalar_t, scalar_t" if dense else "cache_t, emb_t" }}>)
, dim3(div_round_up((B * T), kMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kMaxThreads / kWarpSize)),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_accessor,
D_offsets
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
offsets
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
grad_output_mean.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>());
C10_HIP_KERNEL_LAUNCH_CHECK();
grad_output_accessor = grad_output_mean.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>();
}
{% if not dense %}
PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs =
at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
{% endif %}
{% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %}
if (max_D <= {{ 128 * kMaxVecsPerThread }}) {
// Stay under 64K of shared memory (96K in total), BT_block_size must be a power of two.
// B
while(BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }} >= 64 * 1024) {
BT_block_size /= 2;
}
if (std::is_same<{{ "scalar_t" if dense else "emb_t" }}, double>::value) {
// Otherwise we see CUDA kernel launch failures despite the above checks.
BT_block_size = 1;
}
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }hipLaunchKernelGGL((}_kernel_cta_per_row_1<
{% if not dense %}
emb_t,
cache_t,
{% else %}
scalar_t,
scalar_t,
{% endif %}
{{ kMaxVecsPerThread }}>)
, dim3(div_round_up(linear_indices.numel(), 32 * kWarpSize)),
dim3(dim3(kWarpSize, BT_block_size)),
BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize *
{{ kMaxVecsPerThread }},
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(),
{% endif %}
sorted_linear_indices_num_runs
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
max_segment_length_per_warp,
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
FixedDivisor(B),
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_HIP_KERNEL_LAUNCH_CHECK();
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }hipLaunchKernelGGL((}_kernel_warp_per_row_1<
{% if not dense %}
emb_t,
cache_t,
{% else %}
scalar_t,
scalar_t,
{% endif %}
{{ kMaxVecsPerThread }}>)
, dim3(div_round_up(linear_indices.numel(), kBackwardMaxThreads / kWarpSize)),
dim3(dim3(kWarpSize, kBackwardMaxThreads / kWarpSize)),
BT_block_size * sizeof(
acc_type<
{% if not dense %}
cache_t
{% else %}
scalar_t
{% endif %},
true>) * 4 * kWarpSize *
{{ kMaxVecsPerThread }},
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(),
{% endif %}
sorted_linear_indices_num_runs
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
max_segment_length_per_warp,
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
FixedDivisor(B),
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_HIP_KERNEL_LAUNCH_CHECK();
return;
}
{% endfor %}
}));
return {{ "grad_dev_weights" if dense else "" }};
}
| 6277575facfb35f900de6ed6103814c49f8b2104.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
{% set wdesc = "weighted" if weighted else "unweighted" %}
#include "codegen/embedding_backward_template_helpers.cuh"
{% if not dense %}
constexpr int32_t kCacheLocationMissing = -1;
{% endif %}
enum {
DEVICE = 0,
MANAGED = 1,
MANAGED_CACHING = 2,
};
constexpr size_t kBackwardMaxThreads = 512;
using namespace at;
using namespace fbgemm_gpu;
template <
typename emb_t,
typename cache_t,
size_t kMaxVecsPerThread>
__global__ void
__launch_bounds__(kMaxThreads)
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1(
const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
grad_output,
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights,
{% if not dense %}
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights,
PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
weights_placements,
{% endif %}
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
hash_size_cumsum,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
sorted_linear_indices_run,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos,
{% if not dense %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights,
{% endif %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_num_runs,
int32_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights,
{% endif %}
FixedDivisor fd,
{{ args.split_kernel_args | join(", ") }}) {
int32_t T = D_offsets.size(0) - 1;
const int32_t B = grad_output.size(0);
const int32_t num_runs = sorted_linear_indices_num_runs[0];
for (int32_t run_id = blockIdx.x * kWarpSize; run_id < num_runs;
run_id += kWarpSize * gridDim.x) {
const int32_t candidate_run_id = run_id + threadIdx.x;
int candidate_run_active = candidate_run_id < num_runs &&
sorted_linear_indices_run_lengths[candidate_run_id] >=
max_segment_length_per_warp;
uint32_t candidate_mask = __ballot_sync(0xFFFFFFFF, candidate_run_active);
while (candidate_mask != 0) {
int32_t current_thread_id = __ffs(candidate_mask) - 1;
candidate_mask ^= (static_cast<uint32_t>(1) << current_thread_id);
int32_t current_run_id = run_id + current_thread_id;
const int64_t linear_index = sorted_linear_indices_run[current_run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[current_run_id];
const int32_t segment_end =
sorted_linear_indices_cumulative_run_lengths[current_run_id + 1];
const int32_t SL = segment_end - segment_start;
// TODO: should never be hit!
if (SL < max_segment_length_per_warp) {
return;
}
const int32_t warp_id = threadIdx.y;
const int32_t lane_id = threadIdx.x;
// Note that with shared embedding tables we can have multiple tables
// (i.e. different values of `t` sharing the same segment).
//
const auto info_0 = sorted_infos[segment_start];
int32_t t_0 = fd.Div(info_0); //info_0 / B;
int64_t hash_size = hash_size_cumsum[t_0];
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = SL_per_warp * warp_id;
const int32_t sl_end = min(SL_per_warp * (warp_id + 1), SL);
Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) {
int32_t sl_j = sl + threadIdx.x;
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = sl_j < sl_end ? D_offsets[t] : 0;
{% if weighted %}
acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) {
int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j);
int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j);
{% if weighted %}
acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].acc.x += grad_out_vec.acc.x;
grad_sum[i].acc.y += grad_out_vec.acc.y;
grad_sum[i].acc.z += grad_out_vec.acc.z;
grad_sum[i].acc.w += grad_out_vec.acc.w;
{% endif %}
}
}
}
// do shared memory reduction only if we used multiple blocks.
if (SL > SL_per_warp) {
struct SharedMemory<Vec4T<acc_type<cache_t, true>>> smem;
Vec4T<acc_type<cache_t, true>>* shared_grad_sums = smem.getPointer();
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] = grad_sum[i];
}
__syncthreads();
if (blockDim.y >= 32) {
if (warp_id < 16) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 16) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 16) {
if (warp_id < 8) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 8) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 8) {
if (warp_id < 4) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 4) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (blockDim.y >= 4) {
if (warp_id < 2) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0; i < kMaxVecsPerThread &&
4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize] =
vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 2) * kMaxVecsPerThread * kWarpSize]);
}
}
__syncthreads();
}
if (warp_id == 0) {
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
grad_sum[i] = vec4_acc(
shared_grad_sums
[lane_id + i * kWarpSize +
warp_id * kMaxVecsPerThread * kWarpSize],
shared_grad_sums
[lane_id + i * kWarpSize +
(warp_id + 1) * kMaxVecsPerThread * kWarpSize]);
}
}
}
if (warp_id == 0) {
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = weights_placements[t_0];
if (weights_placement == DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0];
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
{{ split_precomputation }}
struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + current_run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
qparams_template = weight_row_template.load_qparams();
}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[lane_id + i * kWarpSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if embedding is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate qparams from updated weight row
qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(shared_weight_update_row, D);
weight_row_template.store_qparams(qparams_new);
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
weight_row_template.store(shared_weight_update_row[lane_id + i * kWarpSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
}
}
}
}
template <
typename emb_t,
typename cache_t,
size_t kMaxVecsPerThread>
__global__
__launch_bounds__(kBackwardMaxThreads)
void
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1(
const PackedTensorAccessor32<acc_type<cache_t,true>, 2, RestrictPtrTraits>
grad_output,
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> dev_weights,
{% if not dense %}
PackedTensorAccessor64<emb_t, 1, RestrictPtrTraits> uvm_weights,
PackedTensorAccessor64<cache_t, 2, RestrictPtrTraits> lxu_cache_weights,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
weights_placements,
{% endif %}
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> weights_offsets,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
hash_size_cumsum,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits>
sorted_linear_indices_run,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_cumulative_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_run_lengths,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> sorted_infos,
{% if not dense %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_lxu_cache_locations,
{% endif %}
{% if weighted %}
const PackedTensorAccessor32<acc_type<cache_t, true>, 1, RestrictPtrTraits> sorted_indice_weights,
{% endif %}
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits>
sorted_linear_indices_num_runs,
int32_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
PhiloxCudaState stochastic_rounding_philox_args,
{% else %}
PackedTensorAccessor64<cache_t, 1, RestrictPtrTraits> grad_dev_weights,
{% endif %}
FixedDivisor fd,
{{ args.split_kernel_args | join(", ") }}) {
const int32_t T = D_offsets.size(0) - 1;
const int32_t B = grad_output.size(0);
const int32_t run_id = blockIdx.x * blockDim.y + threadIdx.y;
if (run_id >= sorted_linear_indices_run.size(0)) {
return;
}
if (run_id >= sorted_linear_indices_num_runs[0]) {
return;
}
const int64_t linear_index = sorted_linear_indices_run[run_id];
const int32_t segment_start =
sorted_linear_indices_cumulative_run_lengths[run_id];
const int32_t segment_end =
sorted_linear_indices_cumulative_run_lengths[run_id + 1];
const int32_t SL = segment_end - segment_start;
if (SL >= max_segment_length_per_warp) {
return;
}
// now, each segment corresponds to exactly one table `t` and row in
// that table (`idx`). Thus, we can hoist out some of the book-keeping.
const auto info_0 = sorted_infos[segment_start];
int32_t t_0 = fd.Div(info_0); // info_0 / B;
int64_t hash_size = hash_size_cumsum[t_0];
int32_t D = D_offsets[t_0 + 1] - D_offsets[t_0];
int64_t idx = linear_index - hash_size;
const int32_t SL_per_warp = div_round_up(SL, blockDim.y);
const int32_t sl_start = 0;
const int32_t sl_end = SL;
Vec4T<acc_type<cache_t, true>> grad_sum[kMaxVecsPerThread];
for (int32_t sl = sl_start; sl < sl_end; sl += kWarpSize) {
int32_t sl_j = sl + threadIdx.x;
int32_t b_t = sl_j < sl_end ? sorted_infos[segment_start + sl_j] : 0;
int32_t b; //= b_t % B;
int32_t t; //= b_t / B;
fd.DivMod(b_t, &t, &b);
int32_t D_start = D_offsets[t];
{% if weighted %}
acc_type<cache_t, true> idx_weight = sl_j < sl_end ? sorted_indice_weights[segment_start + sl_j] : 0.0;
{% endif %}
for (int32_t j = 0; j < kWarpSize && sl + j < sl_end; ++j) {
int32_t b_j = __shfl_sync(0xFFFFFFFF, b, j);
int32_t D_start_j = __shfl_sync(0xFFFFFFFF, D_start, j);
{% if weighted %}
acc_type<cache_t, true> idx_weight_j = __shfl_sync(0xFFFFFFFF, idx_weight, j);
{% endif %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> grad_out_vec(
&grad_output[b_j][0] + D_start_j + d);
{% if weighted %}
grad_sum[i].fma_(grad_out_vec, idx_weight_j);
{% else %}
grad_sum[i].acc.x += grad_out_vec.acc.x;
grad_sum[i].acc.y += grad_out_vec.acc.y;
grad_sum[i].acc.z += grad_out_vec.acc.z;
grad_sum[i].acc.w += grad_out_vec.acc.w;
{% endif %}
}
}
}
int64_t weights_offset = weights_offsets[t_0];
{% if not dense %}
emb_t* __restrict__ weights{nullptr};
cache_t* __restrict__ cache_weights{nullptr};
int32_t D_emb = D;
if (std::is_same<emb_t, uint8_t>::value) {
D_emb += kINT8QparamsBytes;
}
const auto weights_placement = weights_placements[t_0];
if (weights_placement == DEVICE) {
weights = &dev_weights[weights_offset + idx * D_emb];
} else {
weights = &uvm_weights[weights_offset + idx * D_emb];
}
if (weights_placement == MANAGED_CACHING) {
int32_t cache_idx = sorted_lxu_cache_locations[segment_start];
if (cache_idx != kCacheLocationMissing) {
cache_weights = &lxu_cache_weights[cache_idx][0];
}
}
{% for tensor in args.split_tensors %}
acc_type<cache_t, true>* __restrict__ {{ tensor }};
const auto {{ tensor }}_placement = {{ tensor }}_placements[t_0];
int64_t {{ tensor }}_offset = {{ tensor }}_offsets[t_0];
if ({{ tensor }}_placement == DEVICE) {
{{ tensor }} = &{{ tensor }}_dev[{{ tensor }}_offset];
} else {
{{ tensor }} = &{{ tensor }}_uvm[{{ tensor }}_offset];
}
{% endfor %}
{{ split_precomputation }}
struct SharedMemory<Vec4T<acc_type<cache_t, true>>> weight_update_buffer;
Vec4T<acc_type<cache_t, true>>* shared_weight_update_row = weight_update_buffer.getPointer();
auto weight_row_template = WeightRow<emb_t, cache_t, acc_type<cache_t, true>>(weights, cache_weights, D, nullptr);
if (!std::is_same<emb_t, float>::value && stochastic_rounding) {
StochasticRoundingRNGState state;
// different for every *run* and every *thread*.
auto stochastic_rounding_seeds =
at::cuda::philox::unpack(stochastic_rounding_philox_args);
stochastic_rounding_init(
std::get<0>(stochastic_rounding_seeds) ^
std::get<1>(stochastic_rounding_seeds),
threadIdx.x + run_id * blockDim.x,
&state);
weight_row_template.set_stoc_state(&state);
}
float2 qparams_template;
if (std::is_same<emb_t, uint8_t>::value && !cache_weights){
qparams_template = weight_row_template.load_qparams();
}
float2 qparams_new;
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
Vec4T<acc_type<cache_t, true>> weight_new = weight_row_template.load(d, qparams_template);
auto& grad = grad_sum[i];
{{ split_weight_update }}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize] = weight_new;
} else {
weight_row_template.store(weight_new, d, qparams_new); // qparams_new not used if type is not int8
}
}
if (std::is_same<emb_t, uint8_t>::value && !cache_weights) {
// calculate new qparams after row update
qparams_new = thrust_find_qparams<acc_type<cache_t, true>>(&shared_weight_update_row[threadIdx.y * kMaxVecsPerThread * kWarpSize], D);
weight_row_template.store_qparams(qparams_new);
// fetch cached updated row from shared mem and quantize on-the-fly when saving to lowp embedding
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
weight_row_template.store(shared_weight_update_row[threadIdx.x + i * kWarpSize + threadIdx.y * kMaxVecsPerThread * kWarpSize], d, qparams_new);
}
}
{% else %}
#pragma unroll kMaxVecsPerThread
for (int32_t i = 0;
i < kMaxVecsPerThread && 4 * kWarpSize * i + threadIdx.x * 4 < D;
++i) {
int32_t d = 4 * kWarpSize * i + threadIdx.x * 4;
auto& grad = grad_sum[i];
grad.store(&grad_dev_weights[weights_offset + idx * D + d]);
}
{% endif %}
}
template <typename cache_t, typename emb_t>
__global__ void __launch_bounds__(kMaxThreads) grad_mean_kernel(
const PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
grad_output,
const PackedTensorAccessor32<int32_t, 1, RestrictPtrTraits> D_offsets,
const PackedTensorAccessor32<int64_t, 1, RestrictPtrTraits> offsets,
PackedTensorAccessor32<acc_type<cache_t, true>, 2, RestrictPtrTraits>
grad_output_mean) {
int32_t B = grad_output.size(0);
int32_t T = D_offsets.size(0) - 1;
int32_t b_t = blockIdx.x * blockDim.y + threadIdx.y;
int32_t b = b_t % B;
int32_t t = b_t / B;
if (b_t >= B * T) {
return;
}
int32_t D_start = D_offsets[t];
int32_t D_end = D_offsets[t + 1];
int32_t D = D_end - D_start;
int64_t indices_start = offsets[t * B + b];
int64_t indices_end = offsets[t * B + b + 1];
int32_t L = indices_end - indices_start;
if (L != 0) {
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]);
grad_out_vec.acc.x /= L;
grad_out_vec.acc.y /= L;
grad_out_vec.acc.z /= L;
grad_out_vec.acc.w /= L;
grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]);
}
} else {
for (int32_t d = threadIdx.x; d * 4 < D; d += blockDim.x) {
Vec4T<acc_type<cache_t, true>> grad_out_vec(&grad_output[b][D_start + d * 4]);
grad_out_vec.store(&grad_output_mean[b][D_start + d * 4]);
}
}
}
{{ "void" if not dense else "Tensor" }} split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_exact_cuda(
Tensor grad_output,
Tensor dev_weights,
{% if not dense %}
Tensor uvm_weights,
Tensor lxu_cache_weights,
Tensor weights_placements,
{% endif %}
Tensor weights_offsets,
Tensor D_offsets,
int64_t max_D,
Tensor hash_size_cumsum,
int64_t total_hash_size_bits,
Tensor indices,
Tensor offsets,
int64_t pooling_mode,
{% if weighted %}
Tensor indice_weights,
{% endif %}
{% if not dense %}
Tensor lxu_cache_locations,
{% endif %}
int64_t unused_,
int64_t max_segment_length_per_warp,
{% if not dense %}
bool stochastic_rounding,
{% endif %}
{{ args.split_function_args | join(", ") }}) {
at::cuda::OptionalCUDAGuard device_guard;
device_guard.set_index(dev_weights.get_device());
{% if dense %}
auto grad_dev_weights = zeros_like(dev_weights);
{% endif %}
// short-circuit if there are zero indices.
if (indices.numel() == 0) {
return {{ "grad_dev_weights" if dense else "" }};
}
int32_t T = D_offsets.numel() - 1;
TORCH_CHECK(T > 0);
// offsets = [B x T + 1]
const auto B = (offsets.size(0) - 1) / T;
TORCH_CHECK(B > 0);
auto BT_block_size = kMaxThreads / kWarpSize;
TORCH_CHECK(BT_block_size * kWarpSize <= kMaxThreads);
TORCH_CHECK(max_D <= {{ max_embedding_dim }});
auto infos = at::empty_like(indices, indices.options().dtype(kInt));
auto infos_sorted = at::empty_like(infos);
auto linear_indices = at::empty_like(indices);
auto linear_indices_sorted = at::empty_like(indices);
linearize_index_kernel<<<
div_round_up(B * T, kMaxThreads),
kMaxThreads,
0,
at::cuda::getCurrentCUDAStream()>>>(
hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
infos.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
linear_indices.packed_accessor32<int64_t, 1, RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
infos.data_ptr<int32_t>(),
infos_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
infos.data_ptr<int32_t>(),
infos_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
}
{% if not dense %}
auto lxu_cache_locations_sorted = at::empty_like(lxu_cache_locations);
if (lxu_cache_locations.size(0) > 0) {
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
lxu_cache_locations.data_ptr<int32_t>(),
lxu_cache_locations_sorted.data_ptr<int32_t>(),
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
}
{% endif %}
auto sorted_linear_indices_run = at::empty_like(indices);
auto sorted_linear_indices_run_lengths =
at::zeros_like(indices, indices.options().dtype(kInt));
auto sorted_linear_indices_num_runs =
at::zeros({1}, indices.options().dtype(kInt));
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(cub::DeviceRunLengthEncode::Encode(
nullptr,
temp_storage_bytes,
linear_indices_sorted.data_ptr<int64_t>(),
sorted_linear_indices_run.data_ptr<int64_t>(),
sorted_linear_indices_run_lengths.data_ptr<int32_t>(),
sorted_linear_indices_num_runs.data_ptr<int32_t>(),
linear_indices_sorted.numel(),
at::cuda::getCurrentCUDAStream()));
// Allocate temporary storage
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
// Run encoding
AT_CUDA_CHECK(cub::DeviceRunLengthEncode::Encode(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices_sorted.data_ptr<int64_t>(),
sorted_linear_indices_run.data_ptr<int64_t>(),
sorted_linear_indices_run_lengths.data_ptr<int32_t>(),
sorted_linear_indices_num_runs.data_ptr<int32_t>(),
linear_indices_sorted.numel(),
at::cuda::getCurrentCUDAStream()));
}
auto sorted_linear_indices_cumulative_run_lengths =
asynchronous_complete_cumsum(sorted_linear_indices_run_lengths);
{% if not dense %}
DISPATCH_EMB_CACHE_TYPES(
{% else %}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
{% endif %}
dev_weights.type(),
{% if not dense %}
lxu_cache_weights.type(),
{% endif %}
"split_embedding_backward_{{ optimizer }}_exact_kernel",
([&] {
{% if weighted %}
auto indice_weights_sorted = at::empty_like(indice_weights);
{
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
nullptr,
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
{% if not dense %}
indice_weights.data_ptr<acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(),
{% else %}
indice_weights.data_ptr<acc_type<scalar_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(),
{% endif %}
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
auto temp_storage = at::empty(
{static_cast<int64_t>(temp_storage_bytes)},
indices.options().dtype(kByte));
AT_CUDA_CHECK(cub::DeviceRadixSort::SortPairs(
temp_storage.data_ptr(),
temp_storage_bytes,
linear_indices.data_ptr<int64_t>(),
linear_indices_sorted.data_ptr<int64_t>(),
{% if not dense %}
indice_weights.data_ptr<acc_type<cache_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<cache_t, true>>(),
{% else %}
indice_weights.data_ptr<acc_type<scalar_t, true>>(),
indice_weights_sorted.data_ptr<acc_type<scalar_t, true>>(),
{% endif %}
linear_indices.numel(),
0,
total_hash_size_bits,
at::cuda::getCurrentCUDAStream(),
false));
}
{% endif %}
auto grad_output_accessor = grad_output.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>();
Tensor grad_output_mean;
if (pooling_mode == MEAN) {
grad_output_mean = at::empty_like(grad_output);
grad_mean_kernel<{{ "scalar_t, scalar_t" if dense else "cache_t, emb_t" }}>
<<<div_round_up((B * T), kMaxThreads / kWarpSize),
dim3(kWarpSize, kMaxThreads / kWarpSize),
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output_accessor,
D_offsets
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
offsets
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
grad_output_mean.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
grad_output_accessor = grad_output_mean.packed_accessor32<
acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>,
2,
RestrictPtrTraits>();
}
{% if not dense %}
PhiloxCudaState rng_engine_inputs;
if (stochastic_rounding && !std::is_same<emb_t, float>::value) {
auto gen = at::cuda::detail::getDefaultCUDAGenerator();
std::lock_guard<std::mutex> lock(gen.mutex());
rng_engine_inputs =
at::check_generator<at::CUDAGeneratorImpl>(gen)
->philox_cuda_state(4);
}
{% endif %}
{% for kMaxVecsPerThread in range(1, max_embedding_dim // 128 + 1) %}
if (max_D <= {{ 128 * kMaxVecsPerThread }}) {
// Stay under 64K of shared memory (96K in total), BT_block_size must be a power of two.
// B
while(BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize * {{ kMaxVecsPerThread }} >= 64 * 1024) {
BT_block_size /= 2;
}
if (std::is_same<{{ "scalar_t" if dense else "emb_t" }}, double>::value) {
// Otherwise we see CUDA kernel launch failures despite the above checks.
BT_block_size = 1;
}
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_cta_per_row_1<
{% if not dense %}
emb_t,
cache_t,
{% else %}
scalar_t,
scalar_t,
{% endif %}
{{ kMaxVecsPerThread }}>
<<<div_round_up(linear_indices.numel(), 32 * kWarpSize),
dim3(kWarpSize, BT_block_size),
BT_block_size * sizeof(acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>) * 4 * kWarpSize *
{{ kMaxVecsPerThread }},
at::cuda::getCurrentCUDAStream()>>>(
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(),
{% endif %}
sorted_linear_indices_num_runs
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
max_segment_length_per_warp,
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
FixedDivisor(B),
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_CUDA_KERNEL_LAUNCH_CHECK();
split_embedding_backward_codegen_{{ optimizer }}_{{ wdesc }}_kernel_warp_per_row_1<
{% if not dense %}
emb_t,
cache_t,
{% else %}
scalar_t,
scalar_t,
{% endif %}
{{ kMaxVecsPerThread }}>
<<<div_round_up(linear_indices.numel(), kBackwardMaxThreads / kWarpSize),
dim3(kWarpSize, kBackwardMaxThreads / kWarpSize),
BT_block_size * sizeof(
acc_type<
{% if not dense %}
cache_t
{% else %}
scalar_t
{% endif %},
true>) * 4 * kWarpSize *
{{ kMaxVecsPerThread }},
at::cuda::getCurrentCUDAStream()>>>(
grad_output_accessor,
{% if not dense %}
dev_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
uvm_weights.packed_accessor64<emb_t, 1, RestrictPtrTraits>(),
lxu_cache_weights.packed_accessor64<cache_t, 2, RestrictPtrTraits>(),
weights_placements.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% else %}
dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
weights_offsets.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
D_offsets.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
hash_size_cumsum.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run
.packed_accessor32<int64_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_cumulative_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
sorted_linear_indices_run_lengths
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
infos_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% if not dense %}
lxu_cache_locations_sorted.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
{% endif %}
{% if weighted %}
indice_weights_sorted.packed_accessor32<acc_type<{{ "scalar_t" if dense else "cache_t" }}, true>, 1, RestrictPtrTraits>(),
{% endif %}
sorted_linear_indices_num_runs
.packed_accessor32<int32_t, 1, RestrictPtrTraits>(),
max_segment_length_per_warp,
{% if not dense %}
stochastic_rounding,
rng_engine_inputs,
{% else %}
grad_dev_weights.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(),
{% endif %}
FixedDivisor(B),
{{ args.split_kernel_arg_constructors | join(", ") }});
C10_CUDA_KERNEL_LAUNCH_CHECK();
return;
}
{% endfor %}
}));
return {{ "grad_dev_weights" if dense else "" }};
}
|
e29c23336e20a0dc5e094b9cf634811af24e000c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
//24-bit multiplication is faster on G80,
//but we must be sure to multiply integers
//only within [-8M, 8M - 1] range
#define IMUL(a, b) __mul24(a, b)
////////////////////////////////////////////////////////////////////////////////
// Kernel configuration
////////////////////////////////////////////////////////////////////////////////
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel[KERNEL_W];
// Assuming ROW_TILE_W, KERNEL_RADIUS_ALIGNED and dataW
// are multiples of coalescing granularity size,
// all global memory operations are coalesced in convolutionRowGPU()
#define ROW_TILE_W 128
#define KERNEL_RADIUS_ALIGNED 16
// Assuming COLUMN_TILE_W and dataW are multiples
// of coalescing granularity size, all global memory operations
// are coalesced in convolutionColumnGPU()
#define COLUMN_TILE_W 16
#define COLUMN_TILE_H 48
////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////
template<int i> __device__ float convolutionRow(float *data){
return
data[KERNEL_RADIUS - i] * d_Kernel[i]
+ convolutionRow<i - 1>(data);
}
template<> __device__ float convolutionRow<-1>(float *data){
return 0;
}
template<int i> __device__ float convolutionColumn(float *data){
return
data[(KERNEL_RADIUS - i) * COLUMN_TILE_W] * d_Kernel[i]
+ convolutionColumn<i - 1>(data);
}
template<> __device__ float convolutionColumn<-1>(float *data){
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = IMUL(blockIdx.x, ROW_TILE_W);
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = IMUL(blockIdx.y, dataW);
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
}
//Ensure the completness of the loading stage
//because results, emitted by each thread depend on the data,
//loaded by another threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
#ifdef UNROLL_INNER
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
#else
for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
#endif
d_Result[rowStart + writePos] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH,
int smemStride,
int gmemStride
){
//Data cache
__shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS + COLUMN_TILE_H + KERNEL_RADIUS)];
//Current tile and apron limits, in rows
const int tileStart = IMUL(blockIdx.y, COLUMN_TILE_H);
const int tileEnd = tileStart + COLUMN_TILE_H - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataH - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataH - 1);
//Current column index
const int columnStart = IMUL(blockIdx.x, COLUMN_TILE_W) + threadIdx.x;
//Shared and global memory indices for current column
int smemPos = IMUL(threadIdx.y, COLUMN_TILE_W) + threadIdx.x;
int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
//Cycle through the entire data cache
//Load global memory values, if indices are within the image borders,
//or initialize with zero otherwise
for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
data[smemPos] =
((y >= apronStartClamped) && (y <= apronEndClamped)) ?
d_Data[gmemPos] : 0;
smemPos += smemStride;
gmemPos += gmemStride;
}
//Ensure the completness of the loading stage
//because results, emitted by each thread depend on the data,
//loaded by another threads
__syncthreads();
//Shared and global memory indices for current column
smemPos = IMUL(threadIdx.y + KERNEL_RADIUS, COLUMN_TILE_W) + threadIdx.x;
gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
//Cycle through the tile body, clamped by image borders
//Calculate and output the results
for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
float sum = 0;
#ifdef UNROLL_INNER
sum = convolutionColumn<2 * KERNEL_RADIUS>(data + smemPos);
#else
for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum +=
data[smemPos + IMUL(k, COLUMN_TILE_W)] *
d_Kernel[KERNEL_RADIUS - k];
#endif
d_Result[gmemPos] = sum;
smemPos += smemStride;
gmemPos += gmemStride;
}
}
| e29c23336e20a0dc5e094b9cf634811af24e000c.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
//24-bit multiplication is faster on G80,
//but we must be sure to multiply integers
//only within [-8M, 8M - 1] range
#define IMUL(a, b) __mul24(a, b)
////////////////////////////////////////////////////////////////////////////////
// Kernel configuration
////////////////////////////////////////////////////////////////////////////////
#define KERNEL_RADIUS 8
#define KERNEL_W (2 * KERNEL_RADIUS + 1)
__device__ __constant__ float d_Kernel[KERNEL_W];
// Assuming ROW_TILE_W, KERNEL_RADIUS_ALIGNED and dataW
// are multiples of coalescing granularity size,
// all global memory operations are coalesced in convolutionRowGPU()
#define ROW_TILE_W 128
#define KERNEL_RADIUS_ALIGNED 16
// Assuming COLUMN_TILE_W and dataW are multiples
// of coalescing granularity size, all global memory operations
// are coalesced in convolutionColumnGPU()
#define COLUMN_TILE_W 16
#define COLUMN_TILE_H 48
////////////////////////////////////////////////////////////////////////////////
// Loop unrolling templates, needed for best performance
////////////////////////////////////////////////////////////////////////////////
template<int i> __device__ float convolutionRow(float *data){
return
data[KERNEL_RADIUS - i] * d_Kernel[i]
+ convolutionRow<i - 1>(data);
}
template<> __device__ float convolutionRow<-1>(float *data){
return 0;
}
template<int i> __device__ float convolutionColumn(float *data){
return
data[(KERNEL_RADIUS - i) * COLUMN_TILE_W] * d_Kernel[i]
+ convolutionColumn<i - 1>(data);
}
template<> __device__ float convolutionColumn<-1>(float *data){
return 0;
}
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH
){
//Data cache
__shared__ float data[KERNEL_RADIUS + ROW_TILE_W + KERNEL_RADIUS];
//Current tile and apron limits, relative to row start
const int tileStart = IMUL(blockIdx.x, ROW_TILE_W);
const int tileEnd = tileStart + ROW_TILE_W - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataW - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataW - 1);
//Row start index in d_Data[]
const int rowStart = IMUL(blockIdx.y, dataW);
//Aligned apron start. Assuming dataW and ROW_TILE_W are multiples
//of half-warp size, rowStart + apronStartAligned is also a
//multiple of half-warp size, thus having proper alignment
//for coalesced d_Data[] read.
const int apronStartAligned = tileStart - KERNEL_RADIUS_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
}
//Ensure the completness of the loading stage
//because results, emitted by each thread depend on the data,
//loaded by another threads
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming dataW and ROW_TILE_W are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
#ifdef UNROLL_INNER
sum = convolutionRow<2 * KERNEL_RADIUS>(data + smemPos);
#else
for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum += data[smemPos + k] * d_Kernel[KERNEL_RADIUS - k];
#endif
d_Result[rowStart + writePos] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(
float *d_Result,
float *d_Data,
int dataW,
int dataH,
int smemStride,
int gmemStride
){
//Data cache
__shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS + COLUMN_TILE_H + KERNEL_RADIUS)];
//Current tile and apron limits, in rows
const int tileStart = IMUL(blockIdx.y, COLUMN_TILE_H);
const int tileEnd = tileStart + COLUMN_TILE_H - 1;
const int apronStart = tileStart - KERNEL_RADIUS;
const int apronEnd = tileEnd + KERNEL_RADIUS;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, dataH - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, dataH - 1);
//Current column index
const int columnStart = IMUL(blockIdx.x, COLUMN_TILE_W) + threadIdx.x;
//Shared and global memory indices for current column
int smemPos = IMUL(threadIdx.y, COLUMN_TILE_W) + threadIdx.x;
int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart;
//Cycle through the entire data cache
//Load global memory values, if indices are within the image borders,
//or initialize with zero otherwise
for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y){
data[smemPos] =
((y >= apronStartClamped) && (y <= apronEndClamped)) ?
d_Data[gmemPos] : 0;
smemPos += smemStride;
gmemPos += gmemStride;
}
//Ensure the completness of the loading stage
//because results, emitted by each thread depend on the data,
//loaded by another threads
__syncthreads();
//Shared and global memory indices for current column
smemPos = IMUL(threadIdx.y + KERNEL_RADIUS, COLUMN_TILE_W) + threadIdx.x;
gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart;
//Cycle through the tile body, clamped by image borders
//Calculate and output the results
for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y){
float sum = 0;
#ifdef UNROLL_INNER
sum = convolutionColumn<2 * KERNEL_RADIUS>(data + smemPos);
#else
for(int k = -KERNEL_RADIUS; k <= KERNEL_RADIUS; k++)
sum +=
data[smemPos + IMUL(k, COLUMN_TILE_W)] *
d_Kernel[KERNEL_RADIUS - k];
#endif
d_Result[gmemPos] = sum;
smemPos += smemStride;
gmemPos += gmemStride;
}
}
|
b9a509e91ab47ec501df74b2b7fa504bc603f22d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfg-v2.cu normal z -> s, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_s
__global__
void magma_slarfg_gpu_kernel( int n, float* dx0, float* dx,
float *dtau, float *dxnorm, float* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ float scale;
float xnorm;
float dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
float alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_S_REAL(alpha);
float alphai = MAGMA_S_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_S_MAKE(beta, 0.);
alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha));
scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_S_MUL(dxi, scale);
} else {
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = norm( [dx0, dx] ) = dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's slarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_slarfg_gpu( magma_int_t n, float *dx0, float *dx,
float *dtau, float *dxnorm, float *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_snrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_snrm2_cols(n-1, 1, dx0+1, n, dxnorm);
hipLaunchKernelGGL(( magma_slarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
}
| b9a509e91ab47ec501df74b2b7fa504bc603f22d.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarfg-v2.cu normal z -> s, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_s
__global__
void magma_slarfg_gpu_kernel( int n, float* dx0, float* dx,
float *dtau, float *dxnorm, float* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ float scale;
float xnorm;
float dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
float alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_S_REAL(alpha);
float alphai = MAGMA_S_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_S_MAKE(beta, 0.);
alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha));
scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_S_MUL(dxi, scale);
} else {
*dtau = MAGMA_S_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's slarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_slarfg_gpu( magma_int_t n, float *dx0, float *dx,
float *dtau, float *dxnorm, float *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_snrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_snrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_slarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
842e2eeee7969ef6edd4cd22a8ccb1bdade09b18.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 512
// TODO:
// at::numeric_limits<scalar_t>::lowest;
// implement like pytorch-softmax: two kernels: one is for inner size to be 1, and the other is for spatial. Besides, in the spatial kernel method, we should use threadIdx.x and threadIdx.y for dimsize and inner size parallelization
// define spatial kernel block like this:
/*
* inline dim3 SpatialSoftMax_getBlockSize(
* uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
* uint32_t inner_threads = inner_size;
const int max_threads = 1024;
* inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads));
* uint32_t dim_threads = 1;
* if (inner_threads <= 64 && dim_size >= 64) {
* while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
* dim_threads *= 2;
* dim_threads /= 2;
* }
* return dim3(dim_threads, inner_threads);
* }
* */
// consider max_active_blocks when assign grid blocks, the total number of blocks should not be greater than max_active_blocks which is multiProcessCount
namespace large_margin_space {
template<typename scalar_t>
__forceinline__ __device__ void reduce_max(scalar_t* sdata, int tid) {
__syncthreads();
for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) {
if (tid < s) {
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
}
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t* sdata, int tid) {
__syncthreads();
for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
}
template<typename scalar_t>
__forceinline__ __device__ void compute_reduce_values(
const scalar_t* logits, scalar_t* sdata,
const int dimsize, const int m_size,
int n_idx, int m_idx, int64_t lb, int tid) {
// b is max logits without target
// b+1 is max logits with target
// b+2 is sum of exp without target
// b+3 is sum of exp with target
// compute max with and without label index
const scalar_t zero(0.);
__syncthreads();
sdata[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[tid]) sdata[tid] = val;
}
reduce_max(sdata, tid);
if (tid == 0) {
sdata[blockDim.x] = sdata[0];
sdata[blockDim.x + 1] = sdata[0];
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[0]) sdata[blockDim.x + 1] = val;
}
__syncthreads();
// compute sum of exp with and without label index
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sdata[tid] += exp(val - sdata[blockDim.x]);
}
reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) sdata[blockDim.x + 2] = sdata[0];
__syncthreads();
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sdata[tid] += exp(val - sdata[blockDim.x + 1]);
}
reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) sdata[blockDim.x + 3] = sdata[0];
}
template<typename scalar_t>
__forceinline__ __device__ void compute_sum_of_qx(
const scalar_t* logits, scalar_t* sdata,
const int dimsize, const int m_size,
int n_idx, int m_idx, int64_t lb, int tid) {
// compute sum of q * x to sdata[blockDim.x + 5]
const scalar_t zero(0.);
__syncthreads();
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sdata[tid] += val * exp(val - sdata[blockDim.x]);
}
reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) {
sdata[blockDim.x + 5] = sdata[0] / sdata[blockDim.x + 2];
}
}
}
// kernel function for forward and backward
template<typename scalar_t>
__global__ void LMarginLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float lam) {
// shared memory
// b+4 is coeff of 1/(dimsize - 1)
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + (blockDim.x + 8) * threadIdx.y;
scalar_t zero(0.f);
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int sample_offset = gridDim.x * blockDim.y;
if (tid == 0) {
sdata[blockDim.x + 4] = scalar_t(1.) / (dimsize - 1);
}
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) losses[i] = zero;
continue;
}
int n_idx = i / m_size;
int m_idx = i % m_size;
large_margin_space::compute_reduce_values<scalar_t>(logits, sdata,
dimsize, m_size, n_idx, m_idx, lb, tid);
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j+=blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t dval = logits[idx];
scalar_t term(0);
if (j == lb) {
term = -(dval - sdata[blockDim.x + 1]);
term += log(sdata[blockDim.x + 3]);
} else {
dval -= sdata[blockDim.x];
term = exp(dval) / sdata[blockDim.x + 2];
term -= sdata[blockDim.x + 4];
term *= (dval - log(sdata[blockDim.x + 2]));
term *= scalar_t(lam / 2.f);
}
sdata[tid] += term;
}
large_margin_space::reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) losses[i] = sdata[0];
}
}
template<typename scalar_t>
__global__ void LMarginLossBackward(const int n_size,
const int dimsize, const int m_size,
scalar_t *grad_logits,
const scalar_t *logits,
const int64_t *labels,
const int64_t ignore_index,
const float lam) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + (blockDim.x + 8) * threadIdx.y;
scalar_t zero(0.f);
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int sample_offset = gridDim.x * blockDim.y;
if (tid == 0) {
sdata[blockDim.x + 4] = 1. / (dimsize - 1);
}
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
int64_t lb = labels[i];
int n_idx = i / m_size;
int m_idx = i % m_size;
if (lb == ignore_index) {
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
grad_logits[idx] = zero;
}
continue;
}
large_margin_space::compute_reduce_values<scalar_t>(logits, sdata,
dimsize, m_size, n_idx, m_idx, lb, tid);
large_margin_space::compute_sum_of_qx<scalar_t>(logits, sdata,
dimsize, m_size, n_idx, m_idx, lb, tid);
const scalar_t one(1.f);
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
scalar_t pc = exp(val - sdata[blockDim.x + 1]) / sdata[blockDim.x + 3];
scalar_t gval;
if (j == lb) {
gval = pc - one;
} else {
gval = val - sdata[blockDim.x + 5] + one;
gval *= exp(val - sdata[blockDim.x]) / sdata[blockDim.x + 2];
gval = pc + (gval - sdata[blockDim.x + 4]) * scalar_t(lam / 2.);
}
grad_logits[idx] = gval;
}
}
}
template<typename scalar_t>
__global__ void SpatialLMarginLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float lam) {
// shared memory
__shared__ int sdata[BLOCKSIZE];
sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid
sdata[1] = n_size * m_size; // samplesize
sdata[2] = gridDim.x * blockDim.x; // sample_offset
for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) {
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
losses[i] = scalar_t(0.f);
continue;
}
int n_idx = i / m_size;
int m_idx = i % m_size;
// compute max
scalar_t max_with_lb(-10000.f);
scalar_t max_no_lb(-10000.f);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > max_with_lb) max_with_lb = val;
if (j == lb) continue;
if (val > max_no_lb) max_no_lb = val;
}
// compute sum of exp
scalar_t sum_with_lb(0.);
scalar_t sum_no_lb(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sum_with_lb += exp(val - max_with_lb);
if (j == lb) continue;
sum_no_lb += exp(val - max_no_lb);
}
// compute loss
scalar_t loss_val(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (j == lb) {
loss_val += - (val - max_with_lb) + log(sum_with_lb);
} else {
loss_val += scalar_t(lam / 2.) * (exp(val - max_no_lb) / sum_no_lb - (scalar_t(1.) / (dimsize - 1))) * (val - max_no_lb - log(sum_no_lb));
}
}
losses[i] = loss_val;
}
}
// Backward kernel for the "spatial" launch path (small dimsize, many
// positions). One thread handles one (n, spatial) position per grid-stride
// iteration and walks the whole class dimension serially, writing
// d(loss)/d(logits) into grad_logits (same (N, C, spatial) layout as logits).
//
// Fix: the previous version stored tid / samplesize / sample_offset in
// __shared__ int sdata[BLOCKSIZE]. Every thread of a block wrote its own
// global thread id into the SAME shared slot (sdata[0]) with no barrier —
// a data race that left all threads of a block iterating from one arbitrary
// winner's tid, so samples were skipped or processed repeatedly. These are
// plain per-thread scalars and belong in registers.
template<typename scalar_t>
__global__ void SpatialLMarginLossBackward(const int n_size,
                            const int dimsize, const int m_size,
                            scalar_t *grad_logits,
                            const scalar_t *logits,
                            const int64_t *labels,
                            const int64_t ignore_index,
                            const float lam) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int samplesize = n_size * m_size;            // number of (n, spatial) positions
    const int sample_offset = gridDim.x * blockDim.x;  // grid-stride step

    const scalar_t one(1.);
    for (int i{tid}; i < samplesize; i += sample_offset) {
        int lb = static_cast<int>(labels[i]);
        int n_idx = i / m_size;
        int m_idx = i % m_size;

        // Ignored positions contribute zero gradient.
        if (lb == ignore_index) {
            for (int j{0}; j < dimsize; ++j) {
                int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
                grad_logits[idx] = scalar_t(0.f);
            }
            continue;
        }

        // Running maxima for numerically stable exp, with and without the
        // target class.
        scalar_t max_with_lb(-10000.);
        scalar_t max_no_lb(-10000.);
        for (int j{0}; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            scalar_t val = logits[idx];
            if (val > max_with_lb) max_with_lb = val;
            if (j == lb) continue;
            if (val > max_no_lb) max_no_lb = val;
        }
        // Softmax denominators, each shifted by its own maximum.
        scalar_t sum_with_lb(0.);
        scalar_t sum_no_lb(0.);
        for (int j{0}; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            scalar_t val = logits[idx];
            sum_with_lb += exp(val - max_with_lb);
            if (j == lb) continue;
            sum_no_lb += exp(val - max_no_lb);
        }
        // sum_j q_j * x_j over non-target classes, where q is the softmax
        // computed with the target class excluded.
        scalar_t sum_qx(0.);
        for (int j{0}; j < dimsize; ++j) {
            if (j == lb) continue;
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            scalar_t val = logits[idx];
            sum_qx += val * exp(val - max_no_lb) / sum_no_lb;
        }
        // Gradient: standard softmax cross-entropy term, plus the
        // lam-weighted margin term for the non-target classes.
        for (int j{0}; j < dimsize; ++j) {
            int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
            scalar_t val = logits[idx];
            if (lb == j) {
                grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb - one;
            } else {
                grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb + scalar_t(lam / 2.) * ((val + one - sum_qx) * exp(val - max_no_lb) / sum_no_lb - (one / (dimsize - 1)));
            }
        }
    }
}
// cuda forward and backward
// Host wrapper: validates inputs, picks a launch configuration, and runs the
// forward kernel on the current HIP/CUDA stream. Returns per-position losses
// with the shape of `labels` and the dtype of `logits`.
at::Tensor large_margin_forward_cuda(const at::Tensor &logits,
                                  const at::Tensor &labels,
                                  const int64_t ignore_index,
                                  const float lam) {
    // Both tensors must live on the GPU.
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");

    // Layout: logits is (N, C, spatial...), labels is (N, spatial...).
    // m_size is the flattened spatial extent.
    const int n_size = logits.size(0);
    const int dimsize = logits.size(1);
    const int m_size = logits.numel() / (n_size * dimsize);
    const int samplesize = labels.numel();

    // One loss value per labelled position, carrying the logits dtype.
    auto losses = torch::empty_like(labels, logits.options());
    if (losses.numel() == 0) {
        THCudaCheck(hipGetLastError());
        return losses;
    }

    if (dimsize < 32 && samplesize > 4096) {
        // Few classes, many positions: 1D launch, one thread per position.
        const int n_blocks = ::max(::min(4096, samplesize / BLOCKSIZE), 1);
        dim3 block(BLOCKSIZE);
        dim3 grid(n_blocks);
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] {
            int shm_size = BLOCKSIZE * sizeof(scalar_t);
            hipLaunchKernelGGL(( SpatialLMarginLossForward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
                n_size, dimsize, m_size,
                logits.contiguous().data_ptr<scalar_t>(), 
                labels.contiguous().data_ptr<int64_t>(), 
                losses.contiguous().data_ptr<scalar_t>(),
                ignore_index, lam
            );
        });
    } else {
        // Wider class dimension: 2D blocks, x cooperates over classes and
        // y packs several positions per block.
        int threads_x = 32;
        while (threads_x < dimsize) threads_x *= 2;
        threads_x = ::max(32, ::min(BLOCKSIZE, threads_x / 2));
        const int threads_y = ::max(1, ::min(samplesize, BLOCKSIZE / threads_x));
        const int n_blocks = ::max(1, ::min(4096, samplesize / threads_y));
        // (threads_x + 8) slots per row: reduction buffer plus scratch slots.
        const int shm_slots = (threads_x + 8) * threads_y;
        dim3 block(threads_x, threads_y);
        dim3 grid(n_blocks);
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] {
            int shm_size = shm_slots * sizeof(scalar_t);
            hipLaunchKernelGGL(( LMarginLossForward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
                n_size, dimsize, m_size,
                logits.contiguous().data_ptr<scalar_t>(), 
                labels.contiguous().data_ptr<int64_t>(), 
                losses.contiguous().data_ptr<scalar_t>(),
                ignore_index, lam
            );
        });
    }
    THCudaCheck(hipGetLastError());
    return losses;
}
// Host wrapper for the backward pass: same launch-selection logic as the
// forward wrapper. Returns d(loss)/d(logits) with the shape/dtype of logits.
at::Tensor large_margin_backward_cuda(const at::Tensor &logits,
                                  const at::Tensor &labels,
                                  const int64_t ignore_index,
                                  const float lam) {
    // Both tensors must live on the GPU.
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");

    // Layout: logits is (N, C, spatial...), labels is (N, spatial...).
    const int n_size = logits.size(0);
    const int dimsize = logits.size(1);
    const int m_size = logits.numel() / (n_size * dimsize);
    const int samplesize = labels.numel();

    // Gradient buffer mirrors the logits tensor.
    auto grad_logits = torch::empty_like(logits);
    if (grad_logits.numel() == 0) {
        THCudaCheck(hipGetLastError());
        return grad_logits;
    }

    if (dimsize < 32 && samplesize > 4096) {
        // Few classes, many positions: 1D launch, one thread per position.
        const int n_blocks = ::max(::min(4096, samplesize / BLOCKSIZE), 1);
        dim3 block(BLOCKSIZE);
        dim3 grid(n_blocks);
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] {
            int shm_size = BLOCKSIZE * sizeof(scalar_t);
            hipLaunchKernelGGL(( SpatialLMarginLossBackward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
                n_size, dimsize, m_size,
                grad_logits.contiguous().data_ptr<scalar_t>(),
                logits.contiguous().data_ptr<scalar_t>(), 
                labels.contiguous().data_ptr<int64_t>(), 
                ignore_index, lam
            );
        });
    } else {
        // Wider class dimension: 2D blocks, x cooperates over classes and
        // y packs several positions per block.
        int threads_x = 32;
        while (threads_x < dimsize) threads_x *= 2;
        threads_x = ::max(32, ::min(BLOCKSIZE, threads_x / 2));
        const int threads_y = ::max(1, ::min(samplesize, BLOCKSIZE / threads_x));
        const int n_blocks = ::max(1, ::min(4096, samplesize / threads_y));
        // (threads_x + 8) slots per row: reduction buffer plus scratch slots.
        const int shm_slots = (threads_x + 8) * threads_y;
        dim3 block(threads_x, threads_y);
        dim3 grid(n_blocks);
        AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] {
            int shm_size = shm_slots * sizeof(scalar_t);
            hipLaunchKernelGGL(( LMarginLossBackward<scalar_t>), dim3(grid), dim3(block), shm_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
                n_size, dimsize, m_size,
                grad_logits.contiguous().data_ptr<scalar_t>(),
                logits.contiguous().data_ptr<scalar_t>(), 
                labels.contiguous().data_ptr<int64_t>(), 
                ignore_index, lam
            );
        });
    }
    THCudaCheck(hipGetLastError());
    return grad_logits;
}
// python interface
// Python-facing forward entry point: checks devices, pins the device guard,
// and delegates to the CUDA implementation. Note the python-side argument
// order is (logits, labels, lam, ignore_index).
at::Tensor large_margin_forward(const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float lam,
                             const int64_t ignore_index) {
    const bool on_gpu = (logits.device().type() == c10::kCUDA)
                     && (labels.device().type() == c10::kCUDA);
    if (!on_gpu) {
        AT_ERROR("this large margin loss only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return large_margin_forward_cuda(logits, labels, ignore_index, lam);
}
// Python-facing backward entry point: mirrors large_margin_forward.
// TODO: consider AT_ASSERTM for the device check, matching the cuda wrappers.
at::Tensor large_margin_backward(const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float lam,
                             const int64_t ignore_index) {
    const bool on_gpu = (logits.device().type() == c10::kCUDA)
                     && (labels.device().type() == c10::kCUDA);
    if (!on_gpu) {
        AT_ERROR("this large margin loss only supports gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return large_margin_backward_cuda(logits, labels, ignore_index, lam);
}
// Register the forward/backward entry points with the python extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("l_margin_forward", &large_margin_forward, "large margin forward");
    m.def("l_margin_backward", &large_margin_backward, "large margin backward");
}
| 842e2eeee7969ef6edd4cd22a8ccb1bdade09b18.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include <iostream>
using std::cout;
using std::endl;
#define BLOCKSIZE 512
// TODO:
// at::numeric_limits<scalar_t>::lowest;
// implement like pytorch-softmax: two kernels: one is for inner size to be 1, and the other is for spatial. Besides, in the spatial kernel method, we should use threadIdx.x and threadIdx.y for dimsize and inner size parallelization
// define spatial kernel block like this:
/*
* inline dim3 SpatialSoftMax_getBlockSize(
* uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
* uint32_t inner_threads = inner_size;
const int max_threads = 1024;
* inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads));
* uint32_t dim_threads = 1;
* if (inner_threads <= 64 && dim_size >= 64) {
* while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
* dim_threads *= 2;
* dim_threads /= 2;
* }
* return dim3(dim_threads, inner_threads);
* }
* */
// consider max_active_blocks when assign grid blocks, the total number of blocks should not be greater than max_active_blocks which is multiProcessCount
namespace large_margin_space {
template<typename scalar_t>
__forceinline__ __device__ void reduce_max(scalar_t* sdata, int tid) {
__syncthreads();
for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) {
if (tid < s) {
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
}
template<typename scalar_t>
__forceinline__ __device__ void reduce_sum(scalar_t* sdata, int tid) {
__syncthreads();
for (unsigned int s{blockDim.x / 2}; s > 0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
}
template<typename scalar_t>
__forceinline__ __device__ void compute_reduce_values(
const scalar_t* logits, scalar_t* sdata,
const int dimsize, const int m_size,
int n_idx, int m_idx, int64_t lb, int tid) {
// b is max logits without target
// b+1 is max logits with target
// b+2 is sum of exp without target
// b+3 is sum of exp with target
// compute max with and without label index
const scalar_t zero(0.);
__syncthreads();
sdata[tid] = scalar_t(-10000.);
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[tid]) sdata[tid] = val;
}
reduce_max(sdata, tid);
if (tid == 0) {
sdata[blockDim.x] = sdata[0];
sdata[blockDim.x + 1] = sdata[0];
int idx = n_idx * dimsize * m_size + lb * m_size + m_idx;
scalar_t val = logits[idx];
if (val > sdata[0]) sdata[blockDim.x + 1] = val;
}
__syncthreads();
// compute sum of exp with and without label index
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sdata[tid] += exp(val - sdata[blockDim.x]);
}
reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) sdata[blockDim.x + 2] = sdata[0];
__syncthreads();
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sdata[tid] += exp(val - sdata[blockDim.x + 1]);
}
reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) sdata[blockDim.x + 3] = sdata[0];
}
template<typename scalar_t>
__forceinline__ __device__ void compute_sum_of_qx(
const scalar_t* logits, scalar_t* sdata,
const int dimsize, const int m_size,
int n_idx, int m_idx, int64_t lb, int tid) {
// compute sum of q * x to sdata[blockDim.x + 5]
const scalar_t zero(0.);
__syncthreads();
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j += blockDim.x) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sdata[tid] += val * exp(val - sdata[blockDim.x]);
}
reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) {
sdata[blockDim.x + 5] = sdata[0] / sdata[blockDim.x + 2];
}
}
}
// kernel function for forward and backward
template<typename scalar_t>
__global__ void LMarginLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float lam) {
// shared memory
// b+4 is coeff of 1/(dimsize - 1)
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + (blockDim.x + 8) * threadIdx.y;
scalar_t zero(0.f);
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int sample_offset = gridDim.x * blockDim.y;
if (tid == 0) {
sdata[blockDim.x + 4] = scalar_t(1.) / (dimsize - 1);
}
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
int64_t lb = labels[i];
if (lb == ignore_index) {
if (tid == 0) losses[i] = zero;
continue;
}
int n_idx = i / m_size;
int m_idx = i % m_size;
large_margin_space::compute_reduce_values<scalar_t>(logits, sdata,
dimsize, m_size, n_idx, m_idx, lb, tid);
sdata[tid] = zero;
__syncthreads();
for (int j{tid}; j < dimsize; j+=blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t dval = logits[idx];
scalar_t term(0);
if (j == lb) {
term = -(dval - sdata[blockDim.x + 1]);
term += log(sdata[blockDim.x + 3]);
} else {
dval -= sdata[blockDim.x];
term = exp(dval) / sdata[blockDim.x + 2];
term -= sdata[blockDim.x + 4];
term *= (dval - log(sdata[blockDim.x + 2]));
term *= scalar_t(lam / 2.f);
}
sdata[tid] += term;
}
large_margin_space::reduce_sum<scalar_t>(sdata, tid);
if (tid == 0) losses[i] = sdata[0];
}
}
template<typename scalar_t>
__global__ void LMarginLossBackward(const int n_size,
const int dimsize, const int m_size,
scalar_t *grad_logits,
const scalar_t *logits,
const int64_t *labels,
const int64_t ignore_index,
const float lam) {
extern __shared__ __align__(sizeof(scalar_t)) unsigned char sdata_raw[];
scalar_t *sdata = reinterpret_cast<scalar_t*>(sdata_raw);
sdata = sdata + (blockDim.x + 8) * threadIdx.y;
scalar_t zero(0.f);
int tid = threadIdx.x;
int sample_id = blockIdx.x * blockDim.y + threadIdx.y;
int sample_offset = gridDim.x * blockDim.y;
if (tid == 0) {
sdata[blockDim.x + 4] = 1. / (dimsize - 1);
}
int samplesize = n_size * m_size;
for (int i{sample_id}; i < samplesize; i += sample_offset) {
int64_t lb = labels[i];
int n_idx = i / m_size;
int m_idx = i % m_size;
if (lb == ignore_index) {
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
grad_logits[idx] = zero;
}
continue;
}
large_margin_space::compute_reduce_values<scalar_t>(logits, sdata,
dimsize, m_size, n_idx, m_idx, lb, tid);
large_margin_space::compute_sum_of_qx<scalar_t>(logits, sdata,
dimsize, m_size, n_idx, m_idx, lb, tid);
const scalar_t one(1.f);
for (int j{tid}; j < dimsize; j += blockDim.x) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
scalar_t pc = exp(val - sdata[blockDim.x + 1]) / sdata[blockDim.x + 3];
scalar_t gval;
if (j == lb) {
gval = pc - one;
} else {
gval = val - sdata[blockDim.x + 5] + one;
gval *= exp(val - sdata[blockDim.x]) / sdata[blockDim.x + 2];
gval = pc + (gval - sdata[blockDim.x + 4]) * scalar_t(lam / 2.);
}
grad_logits[idx] = gval;
}
}
}
template<typename scalar_t>
__global__ void SpatialLMarginLossForward(const int n_size,
const int dimsize, const int m_size,
const scalar_t *logits,
const int64_t *labels,
scalar_t *losses,
const int64_t ignore_index, const float lam) {
// shared memory
__shared__ int sdata[BLOCKSIZE];
sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid
sdata[1] = n_size * m_size; // samplesize
sdata[2] = gridDim.x * blockDim.x; // sample_offset
for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) {
int lb = static_cast<int>(labels[i]);
if (lb == ignore_index) {
losses[i] = scalar_t(0.f);
continue;
}
int n_idx = i / m_size;
int m_idx = i % m_size;
// compute max
scalar_t max_with_lb(-10000.f);
scalar_t max_no_lb(-10000.f);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > max_with_lb) max_with_lb = val;
if (j == lb) continue;
if (val > max_no_lb) max_no_lb = val;
}
// compute sum of exp
scalar_t sum_with_lb(0.);
scalar_t sum_no_lb(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sum_with_lb += exp(val - max_with_lb);
if (j == lb) continue;
sum_no_lb += exp(val - max_no_lb);
}
// compute loss
scalar_t loss_val(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (j == lb) {
loss_val += - (val - max_with_lb) + log(sum_with_lb);
} else {
loss_val += scalar_t(lam / 2.) * (exp(val - max_no_lb) / sum_no_lb - (scalar_t(1.) / (dimsize - 1))) * (val - max_no_lb - log(sum_no_lb));
}
}
losses[i] = loss_val;
}
}
template<typename scalar_t>
__global__ void SpatialLMarginLossBackward(const int n_size,
const int dimsize, const int m_size,
scalar_t *grad_logits,
const scalar_t *logits,
const int64_t *labels,
const int64_t ignore_index,
const float lam) {
// shared memory
__shared__ int sdata[BLOCKSIZE];
sdata[0] = blockIdx.x * blockDim.x + threadIdx.x; //tid
sdata[1] = n_size * m_size; // samplesize
sdata[2] = gridDim.x * blockDim.x; // sample_offset
const scalar_t one(1.);
for (int i{sdata[0]}; i < sdata[1]; i += sdata[2]) {
int lb = static_cast<int>(labels[i]);
int n_idx = i / m_size;
int m_idx = i % m_size;
if (lb == ignore_index) {
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
grad_logits[idx] = scalar_t(0.f);
}
continue;
}
// compute max
scalar_t max_with_lb(-10000.);
scalar_t max_no_lb(-10000.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (val > max_with_lb) max_with_lb = val;
if (j == lb) continue;
if (val > max_no_lb) max_no_lb = val;
}
// compute sum of exp
scalar_t sum_with_lb(0.);
scalar_t sum_no_lb(0.);
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sum_with_lb += exp(val - max_with_lb);
if (j == lb) continue;
sum_no_lb += exp(val - max_no_lb);
}
// compute sum of qx
scalar_t sum_qx(0.);
for (int j{0}; j < dimsize; ++j) {
if (j == lb) continue;
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
sum_qx += val * exp(val - max_no_lb) / sum_no_lb;
}
// compute grads
for (int j{0}; j < dimsize; ++j) {
int idx = n_idx * dimsize * m_size + j * m_size + m_idx;
scalar_t val = logits[idx];
if (lb == j) {
grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb - one;
} else {
grad_logits[idx] = exp(val - max_with_lb) / sum_with_lb + scalar_t(lam / 2.) * ((val + one - sum_qx) * exp(val - max_no_lb) / sum_no_lb - (one / (dimsize - 1)));
}
}
}
}
// cuda forward and backward
at::Tensor large_margin_forward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float lam) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
// allocate memory and cuda grid/block
auto losses = torch::empty_like(labels, logits.options());
if (losses.numel() == 0) {
THCudaCheck(cudaGetLastError());
return losses;
}
// call kernel
if (dimsize < 32 && samplesize > 4096) {
int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1);
dim3 block(BLOCKSIZE);
dim3 grid(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] {
int shm_size = BLOCKSIZE * sizeof(scalar_t);
SpatialLMarginLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
losses.contiguous().data_ptr<scalar_t>(),
ignore_index, lam
);
});
} else {
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = std::max(std::min(BLOCKSIZE, blockx / 2), 32);
int blocky = std::max(std::min(samplesize, BLOCKSIZE / blockx), 1);
int gridx = std::max(std::min(4096, samplesize / blocky), 1);
int n_shm = (blockx + 8) * blocky;
dim3 block(blockx, blocky);
dim3 grid(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(losses.scalar_type(), "large margin forward", [&] {
int shm_size = n_shm * sizeof(scalar_t);
LMarginLossForward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
losses.contiguous().data_ptr<scalar_t>(),
ignore_index, lam
);
});
}
THCudaCheck(cudaGetLastError());
return losses;
}
at::Tensor large_margin_backward_cuda(const at::Tensor &logits,
const at::Tensor &labels,
const int64_t ignore_index,
const float lam) {
// CHECK type and shape
AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
const int n_size = logits.size(0);
const int dimsize = logits.size(1);
const int m_size = logits.numel() / (n_size * dimsize);
const int samplesize = labels.numel();
// allocate memory and cuda grid/block
auto grad_logits = torch::empty_like(logits);
if (grad_logits.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_logits;
}
if (dimsize < 32 && samplesize > 4096) {
int gridx = std::max(std::min(4096, samplesize / BLOCKSIZE), 1);
dim3 block(BLOCKSIZE);
dim3 grid(gridx);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] {
int shm_size = BLOCKSIZE * sizeof(scalar_t);
SpatialLMarginLossBackward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
grad_logits.contiguous().data_ptr<scalar_t>(),
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
ignore_index, lam
);
});
} else {
int blockx = 32;
while (blockx < dimsize) blockx *= 2;
blockx = std::max(std::min(BLOCKSIZE, blockx / 2), 32);
int blocky = std::max(std::min(samplesize, BLOCKSIZE / blockx), 1);
int gridx = std::max(std::min(4096, samplesize / blocky), 1);
int n_shm = (blockx + 8) * blocky;
dim3 block(blockx, blocky);
dim3 grid(gridx);
// call kernel
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_logits.scalar_type(), "large margin backwrd", [&] {
int shm_size = n_shm * sizeof(scalar_t);
LMarginLossBackward<scalar_t><<<grid, block, shm_size, at::cuda::getCurrentCUDAStream()>>>(
n_size, dimsize, m_size,
grad_logits.contiguous().data_ptr<scalar_t>(),
logits.contiguous().data_ptr<scalar_t>(),
labels.contiguous().data_ptr<int64_t>(),
ignore_index, lam
);
});
}
THCudaCheck(cudaGetLastError());
return grad_logits;
}
// python inferface
at::Tensor large_margin_forward(const at::Tensor &logits,
const at::Tensor &labels,
const float lam,
const int64_t ignore_index) {
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this large margin loss only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return large_margin_forward_cuda(logits, labels, ignore_index, lam);
}
at::Tensor large_margin_backward(const at::Tensor &logits,
const at::Tensor &labels,
const float lam,
const int64_t ignore_index) {
// TODO: try AT_ASSERTM
if ((logits.device().type() != c10::kCUDA) || (labels.device().type() != c10::kCUDA)) {
AT_ERROR("this large margin loss only supports gpu mode\n");
}
at::DeviceGuard guard(logits.device());
return large_margin_backward_cuda(logits, labels, ignore_index, lam);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("l_margin_forward", &large_margin_forward, "large margin forward");
m.def("l_margin_backward", &large_margin_backward, "large margin backward");
}
|
6f13e1aff40820fc3fe9234f165f9d87ffb8c0a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
__global__ void filter(uint8_t* d_image, uint8_t* d_image_result, double *d_filter_kernel, int h, int w)
{
int globalidx = threadIdx.x + blockDim.x * blockIdx.x;
int size = h * w * 3;
int x = globalidx / 3 / w;
int y = globalidx / 3 - x * w;
int s = globalidx - y * 3 - x * w * 3;
int ind = x * w * 3 + y * 3 + s;
uint8_t for_sorting[9] = {0,0,0,0,0,0,0,0,0};
if (globalidx < size) {
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (x + i - 1 >= 0 && x + i - 1 < h && y + j - 1 >= 0 && y + j - 1 < w)
for_sorting[i * 3 + j] = d_filter_kernel[i * 3 + j] * d_image[(x + i - 1) * w * 3 + (y + j - 1) * 3 + s];
}
}
thrust::sort(thrust::device, for_sorting, for_sorting + 9);
d_image_result[ind] = for_sorting[4];
}
}
int main(int argc, char **argv)
{
double *h_filter_kernel = (double *) calloc(sizeof(double), 9);
double *d_filter_kernel;
uint8_t* d_image;
uint8_t* d_image_result;
for (int i = 0; i < 9; ++i) {
h_filter_kernel[i] = 1;
}
h_filter_kernel[4] = 0.;
hipMalloc(&d_filter_kernel, sizeof(double) * 9);
hipMemcpy(d_filter_kernel, h_filter_kernel, sizeof(double) * 9, hipMemcpyHostToDevice);
int width, height, bpp;
uint8_t* h_image = stbi_load("image.png", &width, &height, &bpp, 3);
int size = height * width * 3;
hipMalloc(&d_image, sizeof(uint8_t) * size);
hipMalloc(&d_image_result, sizeof(uint8_t) * size);
uint8_t* h_image_result = (uint8_t *)malloc(sizeof(uint8_t) * size);
hipMemcpy(d_image, h_image, sizeof(uint8_t) * size, hipMemcpyHostToDevice);
dim3 dimBlock(1024);
dim3 dimGrid(size/1024);
hipLaunchKernelGGL(( filter), dim3(dimGrid), dim3(dimBlock), 0, 0, d_image, d_image_result, d_filter_kernel, height, width);
stbi_image_free(h_image);
hipMemcpy(h_image_result, d_image_result, sizeof(uint8_t) * size, hipMemcpyDeviceToHost);
stbi_write_png("image_result.png", width, height, 3, h_image_result, width * 3);
return 0;
}
| 6f13e1aff40820fc3fe9234f165f9d87ffb8c0a5.cu | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
__global__ void filter(uint8_t* d_image, uint8_t* d_image_result, double *d_filter_kernel, int h, int w)
{
int globalidx = threadIdx.x + blockDim.x * blockIdx.x;
int size = h * w * 3;
int x = globalidx / 3 / w;
int y = globalidx / 3 - x * w;
int s = globalidx - y * 3 - x * w * 3;
int ind = x * w * 3 + y * 3 + s;
uint8_t for_sorting[9] = {0,0,0,0,0,0,0,0,0};
if (globalidx < size) {
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (x + i - 1 >= 0 && x + i - 1 < h && y + j - 1 >= 0 && y + j - 1 < w)
for_sorting[i * 3 + j] = d_filter_kernel[i * 3 + j] * d_image[(x + i - 1) * w * 3 + (y + j - 1) * 3 + s];
}
}
thrust::sort(thrust::device, for_sorting, for_sorting + 9);
d_image_result[ind] = for_sorting[4];
}
}
int main(int argc, char **argv)
{
double *h_filter_kernel = (double *) calloc(sizeof(double), 9);
double *d_filter_kernel;
uint8_t* d_image;
uint8_t* d_image_result;
for (int i = 0; i < 9; ++i) {
h_filter_kernel[i] = 1;
}
h_filter_kernel[4] = 0.;
cudaMalloc(&d_filter_kernel, sizeof(double) * 9);
cudaMemcpy(d_filter_kernel, h_filter_kernel, sizeof(double) * 9, cudaMemcpyHostToDevice);
int width, height, bpp;
uint8_t* h_image = stbi_load("image.png", &width, &height, &bpp, 3);
int size = height * width * 3;
cudaMalloc(&d_image, sizeof(uint8_t) * size);
cudaMalloc(&d_image_result, sizeof(uint8_t) * size);
uint8_t* h_image_result = (uint8_t *)malloc(sizeof(uint8_t) * size);
cudaMemcpy(d_image, h_image, sizeof(uint8_t) * size, cudaMemcpyHostToDevice);
dim3 dimBlock(1024);
dim3 dimGrid(size/1024);
filter<<<dimGrid, dimBlock>>>(d_image, d_image_result, d_filter_kernel, height, width);
stbi_image_free(h_image);
cudaMemcpy(h_image_result, d_image_result, sizeof(uint8_t) * size, cudaMemcpyDeviceToHost);
stbi_write_png("image_result.png", width, height, 3, h_image_result, width * 3);
return 0;
}
|
2ebfd6516f56c47d4036cf9662aeaa863531c06a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implementes the math functions for CPU.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/system/hip/detail/par.h>
#include <thrust/version.h>
#include "caffe2/utils/math.h"
#include "caffe2/core/context_gpu.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
// TODO(Yangqing): Yuck again. Maybe change it to templated functors?
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Exp, exp)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Log, log)
__device__ float cuda_sqrf(const float x) { return x * x; }
__device__ double cuda_sqr(const double x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sqr, cuda_sqr)
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(a[i], b[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), \
N, a, b, y); \
}
#define CAFFE_MATH_CUDA_ADD(x, y) (x + y)
#define CAFFE_MATH_CUDA_SUB(x, y) (x - y)
#define CAFFE_MATH_CUDA_MUL(x, y) (x * y)
#define CAFFE_MATH_CUDA_DIV(x, y) (x / y)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, CAFFE_MATH_CUDA_DIV)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, CAFFE_MATH_CUDA_DIV)
/*
#define CAFFE2_SPECIALIZED_ROWWISEMAX(T) \
template <> \
void RowwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
for (int i = 0; i < N; ++i) { \
y[i] = x[i*D]; \
for (int j = 1; j < D; ++j) { \
y[i] = ::max(y[i], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_ROWWISEMAX(float)
#define CAFFE2_SPECIALIZED_COLWISEMAX(T) \
template <> \
void ColwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
memcpy(y, x, sizeof(T) * D); \
for (int i = 1; i < N; ++i) { \
for (int j = 0; j < D; ++j) { \
y[j] = ::max(y[j], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_COLWISEMAX(float)
*/
namespace {
template<typename T>
__global__ void AddToRowKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % N];
}
}
template<typename T>
__global__ void AddToColKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % M];
}
}
} // namespace
template <>
void AddToRow<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
hipLaunchKernelGGL(( AddToRowKernel<float>), dim3(CAFFE_GET_BLOCKS(M * N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), M, N, x, y);
}
template <>
void AddToCol<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
hipLaunchKernelGGL(( AddToColKernel<float>), dim3(CAFFE_GET_BLOCKS(M * N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), M, N, x, y);
}
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K, const float alpha, const float* A,
const float* B, const float beta, float* C, CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(context->cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha,
const float* A, const float* x, const float beta, float* y,
CUDAContext* context) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(context->cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>(const int N, const T alpha, T *Y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( SetKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(int);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void UniformShift(const int N, const T min, const T max,
T* x) {
T scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = x[i] * scale + min;
}
}
__global__ void UniformIntFit(const int N, const int min, const int max,
unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const int n, const float min, const float max, float* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const int n, const double min, const double max, double* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const int n, const int min, const int max, int* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerate(context->curand_generator(),
reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <>
void RandGaussian<float, CUDAContext>(
const int n, const float mean, const float std, float* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateNormal(
context->curand_generator(), r, n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const int n, const double mean, const double std, double* r,
CUDAContext* context) {
CURAND_CHECK(hiprandGenerateNormalDouble(
context->curand_generator(), r, n, mean, std));
}
template<>
void Dot<float, CUDAContext>(
const int n, const float* a, const float* b, float* y,
CUDAContext* context) {
float result;
CUBLAS_CHECK(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template<>
void Dot<double, CUDAContext>(
const int n, const double* a, const double* b, double* y,
CUDAContext* context) {
double result;
CUBLAS_CHECK(hipblasDdot(context->cublas_handle(), n, a, 1, b, 1, y));
context->Copy<double, CPUContext, CUDAContext>(1, &result, y);
}
#ifdef THRUST_SUPPORTS_PER_THREAD
#define CAFFE2_MATH_SUM_FUNC(T) \
template<> \
void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\
thrust::device_ptr<const T> dev_ptr(x); \
T result = thrust::reduce( \
thrust::hip::par.on(context->cuda_stream()), \
dev_ptr, dev_ptr + N, static_cast<T>(0), thrust::plus<T>()); \
context->Copy<T, CPUContext, CUDAContext>(1, &result, y); \
}
#else // THRUST_SUPPORTS_PER_THREAD
// Really, for any real use you should not be invoking this but should use the
// thrust version, so I was not very careful in tuning the performance of the
// sum kernel.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y) {
const int idx = threadIdx.x;
__shared__ T reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A two-level reduction to get the sum.
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += X[i];
}
__syncthreads();
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < SUM_KERNEL_NTHREADS; ++i) {
tmp += reduction_buffer[i];
}
*Y = tmp;
}
}
#define CAFFE2_MATH_SUM_FUNC(T) \
template<> \
void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), N, x, y); \
}
#endif // THRUST_SUPPORTS_PER_THREAD
CAFFE2_MATH_SUM_FUNC(float)
CAFFE2_MATH_SUM_FUNC(double)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(
const int n, const T alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * alpha;
}
}
template <typename T>
__global__ void ScaleKernelDeviceAlpha(
const int n, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
} // namespace
template <>
void Scale<float, CUDAContext>(
const int n, const float alpha, const float *x, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double alpha, const double *x, double* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<double>),
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>),
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double* alpha, const double *x, double* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(const int N, const float alpha, const float* X,
float* Y, CUDAContext* context) {
CUBLAS_CHECK(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N, const double alpha, const double* X,
double* Y, CUDAContext* context) {
CUBLAS_CHECK(hipblasDaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const T* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] += x[index] * (*a);
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, X, Y);
}
template <>
void Axpy<double, CUDAContext>(
const int n, const double* alpha, const double* X,
double* Y, CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, a, x, b, y);
}
template <>
void Axpby<double, CUDAContext>(
const int n, const double a, const double* x, const double b, double* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<double>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < kernel_h; ++i) {
int h = h_in + i;
for (int j = 0; j < kernel_w; ++j) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int offset =
(c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
int coeff_w_col = (1 - stride_w * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int c_col = ((h - h_col * stride_h) * patch_w + w - w_col * stride_w) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
*/
// Equivalent of above
int offset = (h * patch_w + w) * channels + c;
int coeff_h_col = width_col * channels_col - stride_h * patch_w * channels;
int coeff_w_col = channels_col - stride_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
pad_l, stride_h, stride_w, height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
// We are going to launch height_col * width_col * channels kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
pad_l, stride_h, stride_w, width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, height, width, kernel_h, kernel_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, width, channels, kernel_h, kernel_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
namespace {
template <typename T>
__global__ void CopyMatrixKernel(
const int M, const int N, const T* A, const int lda,
T* B, const int ldb) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
int r = i / N;
int c = i % N;
B[r * ldb + c] = A[r * lda + c];
}
}
} // namespace
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize, const int M, const int N, const void* A,
const int lda, void* B, const int ldb, CUDAContext* context) {
hipMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M,
hipMemcpyDeviceToDevice, context->cuda_stream());
}
} // namespace math
} // namespace caffe2
| 2ebfd6516f56c47d4036cf9662aeaa863531c06a.cu | // Implementes the math functions for CPU.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/system/cuda/detail/par.h>
#include <thrust/version.h>
#include "caffe2/utils/math.h"
#include "caffe2/core/context_gpu.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
// TODO(Yangqing): Yuck again. Maybe change it to templated functors?
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
_Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Exp, exp)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Log, log)
__device__ float cuda_sqrf(const float x) { return x * x; }
__device__ double cuda_sqr(const double x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sqr, cuda_sqr)
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(a[i], b[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, \
CUDAContext* context) { \
_Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
N, a, b, y); \
}
#define CAFFE_MATH_CUDA_ADD(x, y) (x + y)
#define CAFFE_MATH_CUDA_SUB(x, y) (x - y)
#define CAFFE_MATH_CUDA_MUL(x, y) (x * y)
#define CAFFE_MATH_CUDA_DIV(x, y) (x / y)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, CAFFE_MATH_CUDA_ADD)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, CAFFE_MATH_CUDA_SUB)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, CAFFE_MATH_CUDA_MUL)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, CAFFE_MATH_CUDA_DIV)
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, CAFFE_MATH_CUDA_DIV)
/*
#define CAFFE2_SPECIALIZED_ROWWISEMAX(T) \
template <> \
void RowwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
for (int i = 0; i < N; ++i) { \
y[i] = x[i*D]; \
for (int j = 1; j < D; ++j) { \
y[i] = std::max(y[i], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_ROWWISEMAX(float)
#define CAFFE2_SPECIALIZED_COLWISEMAX(T) \
template <> \
void ColwiseMax<T, CPUContext>( \
const int N, const int D, const T* x, T* y, CPUContext* context) { \
memcpy(y, x, sizeof(T) * D); \
for (int i = 1; i < N; ++i) { \
for (int j = 0; j < D; ++j) { \
y[j] = std::max(y[j], x[i * D + j]); \
} \
} \
}
CAFFE2_SPECIALIZED_COLWISEMAX(float)
*/
namespace {
template<typename T>
__global__ void AddToRowKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % N];
}
}
template<typename T>
__global__ void AddToColKernel(const int M, const int N, const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(i, M * N) {
y[i] += x[i % M];
}
}
} // namespace
template <>
void AddToRow<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
AddToRowKernel<float><<<CAFFE_GET_BLOCKS(M * N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(M, N, x, y);
}
template <>
void AddToCol<float, CUDAContext>(
const int M, const int N, const float* x, float* y, CUDAContext* context) {
AddToColKernel<float><<<CAFFE_GET_BLOCKS(M * N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(M, N, x, y);
}
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K, const float alpha, const float* A,
const float* B, const float beta, float* C, CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(context->cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha,
const float* A, const float* x, const float beta, float* y,
CUDAContext* context) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(context->cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>(const int N, const T alpha, T *Y, \
CUDAContext* context) { \
SetKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>(N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(int);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void UniformShift(const int N, const T min, const T max,
T* x) {
T scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = x[i] * scale + min;
}
}
__global__ void UniformIntFit(const int N, const int min, const int max,
unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const int n, const float min, const float max, float* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const int n, const double min, const double max, double* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const int n, const int min, const int max, int* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerate(context->curand_generator(),
reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <>
void RandGaussian<float, CUDAContext>(
const int n, const float mean, const float std, float* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateNormal(
context->curand_generator(), r, n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const int n, const double mean, const double std, double* r,
CUDAContext* context) {
CURAND_CHECK(curandGenerateNormalDouble(
context->curand_generator(), r, n, mean, std));
}
template<>
void Dot<float, CUDAContext>(
const int n, const float* a, const float* b, float* y,
CUDAContext* context) {
float result;
CUBLAS_CHECK(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template<>
void Dot<double, CUDAContext>(
const int n, const double* a, const double* b, double* y,
CUDAContext* context) {
double result;
CUBLAS_CHECK(cublasDdot(context->cublas_handle(), n, a, 1, b, 1, y));
context->Copy<double, CPUContext, CUDAContext>(1, &result, y);
}
#ifdef THRUST_SUPPORTS_PER_THREAD
#define CAFFE2_MATH_SUM_FUNC(T) \
template<> \
void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\
thrust::device_ptr<const T> dev_ptr(x); \
T result = thrust::reduce( \
thrust::cuda::par.on(context->cuda_stream()), \
dev_ptr, dev_ptr + N, static_cast<T>(0), thrust::plus<T>()); \
context->Copy<T, CPUContext, CUDAContext>(1, &result, y); \
}
#else // THRUST_SUPPORTS_PER_THREAD
// Really, for any real use you should not be invoking this but should use the
// thrust version, so I was not very careful in tuning the performance of the
// sum kernel.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y) {
const int idx = threadIdx.x;
__shared__ T reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A two-level reduction to get the sum.
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += X[i];
}
__syncthreads();
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < SUM_KERNEL_NTHREADS; ++i) {
tmp += reduction_buffer[i];
}
*Y = tmp;
}
}
#define CAFFE2_MATH_SUM_FUNC(T) \
template<> \
void Sum<T, CUDAContext>(const int N, const T* x, T* y, CUDAContext* context) {\
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(N, x, y); \
}
#endif // THRUST_SUPPORTS_PER_THREAD
CAFFE2_MATH_SUM_FUNC(float)
CAFFE2_MATH_SUM_FUNC(double)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
SelectKernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(
const int n, const T alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * alpha;
}
}
template <typename T>
__global__ void ScaleKernelDeviceAlpha(
const int n, const T* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
} // namespace
template <>
void Scale<float, CUDAContext>(
const int n, const float alpha, const float *x, float* y,
CUDAContext* context) {
ScaleKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double alpha, const double *x, double* y,
CUDAContext* context) {
ScaleKernel<double><<<
CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float><<<
CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, alpha, x, y);
}
template <>
void Scale<double, CUDAContext>(
const int n, const double* alpha, const double *x, double* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(const int N, const float alpha, const float* X,
float* Y, CUDAContext* context) {
CUBLAS_CHECK(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N, const double alpha, const double* X,
double* Y, CUDAContext* context) {
CUBLAS_CHECK(cublasDaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const T* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] += x[index] * (*a);
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
AxpyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, X, Y);
}
template <>
void Axpy<double, CUDAContext>(
const int n, const double* alpha, const double* X,
double* Y, CUDAContext* context) {
AxpyKernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
AxpbyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, a, x, b, y);
}
template <>
void Axpby<double, CUDAContext>(
const int n, const double a, const double* x, const double b, double* y,
CUDAContext* context) {
AxpbyKernel<double><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
// im2col for NHWC layout: one thread per (h_out, w_out, channel)
// element (n = height_col * width_col * channels). channels is the
// innermost dimension of both data_im and data_col here.
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_t, const int pad_l,
    const int stride_h, const int stride_w,
    const int width_col, const int channels,
    T* data_col) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    // Decompose the flat index into (h_out, w_out, channel_in).
    int channel_in = index % channels;
    int w_out = index / channels % width_col;
    int h_out = index / channels / width_col;
    // Top-left corner of the receptive field in the (padded) input.
    int h_in = h_out * stride_h - pad_t;
    int w_in = w_out * stride_w - pad_l;
    T* local_data_col = data_col +
        ((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
        + channel_in;
    for (int i = 0; i < kernel_h; ++i) {
      int h = h_in + i;
      for (int j = 0; j < kernel_w; ++j) {
        int w = w_in + j;
        // Padding positions contribute zero.
        *local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
            data_im[(h * width + w) * channels + channel_in] : 0;
        local_data_col += channels;
      }
    }
  }
}
// col2im for NCHW layout: one thread per *input* image element
// (n = channels * height * width). Each thread sums every data_col
// entry that im2col copied from its location, so no atomics are needed.
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
    const int height, const int width,
    const int patch_h, const int patch_w,
    const int pad_t, const int pad_l,
    const int stride_h, const int stride_w,
    const int height_col, const int width_col,
    T* data_im) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Coordinates of this element in the padded input image.
    int w = index % width + pad_l;
    int h = (index / width) % height + pad_t;
    int c = index / (width * height);
    // compute the start and end of the output
    int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
    int w_col_end = min(w / stride_w + 1, width_col);
    int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
    int h_col_end = min(h / stride_h + 1, height_col);
    // Closed-form offsets: offset + h_col*coeff_h_col + w_col*coeff_w_col
    // indexes data_col at (c, h - h_col*stride_h, w - w_col*stride_w,
    // h_col, w_col) without recomputing the products in the loop.
    int offset =
        (c * patch_h * patch_w + h * patch_w + w) * height_col * width_col;
    int coeff_h_col = (1 - stride_h * patch_w * height_col) * width_col;
    int coeff_w_col = (1 - stride_w * height_col * width_col);
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
      }
    }
    data_im[index] = val;
  }
}
// col2im for NHWC layout: one thread per input element
// (n = height * width * channels); sums the matching data_col entries
// (see the retained reference loop below for the readable form).
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
    const int width, const int channels,
    const int patch_h, const int patch_w,
    const int pad_t, const int pad_l,
    const int stride_h, const int stride_w,
    const int height_col, const int width_col,
    T* data_im) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    T val = 0;
    // Coordinates of this element in the padded input image.
    int c = index % channels;
    int w = index / channels % width + pad_l;
    int h = index / channels / width + pad_t;
    // compute the start and end of the output
    int w_col_start = (w < patch_w) ? 0 : (w - patch_w) / stride_w + 1;
    int w_col_end = min(w / stride_w + 1, width_col);
    int h_col_start = (h < patch_h) ? 0 : (h - patch_h) / stride_h + 1;
    int h_col_end = min(h / stride_h + 1, height_col);
    int channels_col = patch_h * patch_w * channels;
    /*
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        int c_col = ((h - h_col * stride_h) * patch_w + w - w_col * stride_w) * channels + c;
        val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
      }
    }
    */
    // Equivalent of above
    int offset = (h * patch_w + w) * channels + c;
    int coeff_h_col = width_col * channels_col - stride_h * patch_w * channels;
    int coeff_w_col = channels_col - stride_w * channels;
    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
      for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
        val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
      }
    }
    data_im[index] = val;
  }
}
} // namespace
// NCHW im2col host wrapper: expands data_im (channels x height x width)
// into data_col ((channels * kernel_h * kernel_w) x (height_col * width_col))
// on the context's stream. Output spatial size follows the usual
// convolution formula with asymmetric padding.
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
    const float* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_t, const int pad_l, const int pad_b, const int pad_r,
    const int stride_h,
    const int stride_w, float* data_col, CUDAContext* context) {
  // We are going to launch channels * height_col * width_col kernels, each
  // kernel responsible for copying a single-channel grid.
  int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
  int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
                                  CAFFE_CUDA_NUM_THREADS, 0,
                                  context->cuda_stream()>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
      pad_l, stride_h, stride_w, height_col, width_col, data_col);
}
// NHWC im2col host wrapper: one thread per (h_out, w_out, channel)
// output element; see im2col_gpu_kernel_nhwc for the data layout.
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
    const float* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_t, const int pad_l, const int pad_b, const int pad_r,
    const int stride_h,
    const int stride_w, float* data_col, CUDAContext* context) {
  // We are going to launch height_col * width_col * channels kernels, each
  // kernel responsible for copying a single-channel grid.
  int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
  int num_kernels = height_col * width_col * channels;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
                                  CAFFE_CUDA_NUM_THREADS, 0,
                                  context->cuda_stream()>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_t,
      pad_l, stride_h, stride_w, width_col, channels, data_col);
}
// NCHW col2im host wrapper: accumulates data_col back into data_im
// (channels x height x width); each output element is computed by a
// single thread, summing its contributions (inverse of Im2col).
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
    const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_t, const int pad_l, const int pad_b, const int pad_r,
    const int stride_h,
    const int stride_w, float* data_im, CUDAContext* context) {
  int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
  int num_kernels = channels * height * width;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  col2im_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
                                  CAFFE_CUDA_NUM_THREADS, 0,
                                  context->cuda_stream()>>>(
      num_kernels, data_col, height, width, kernel_h, kernel_w,
      pad_t, pad_l, stride_h, stride_w,
      height_col, width_col, data_im);
}
// NHWC col2im host wrapper: accumulates data_col back into data_im
// (height x width x channels); one thread per output element, no atomics.
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
    const float* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_t, const int pad_l, const int pad_b, const int pad_r,
    const int stride_h,
    const int stride_w, float* data_im, CUDAContext* context) {
  int height_col = (height + pad_t + pad_b - kernel_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - kernel_w) / stride_w + 1;
  int num_kernels = height * width * channels;
  // To avoid involving atomic operations, we will launch one kernel per
  // bottom dimension, and then in the kernel add up the top dimensions.
  col2im_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
                                  CAFFE_CUDA_NUM_THREADS, 0,
                                  context->cuda_stream()>>>(
      num_kernels, data_col, width, channels, kernel_h, kernel_w,
      pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
namespace {
// Grid-stride copy of an M x N matrix with leading dimensions lda/ldb
// (in elements): B[r][c] = A[r][c] for every (r, c).
template <typename T>
__global__ void CopyMatrixKernel(
    const int M, const int N, const T* A, const int lda,
    T* B, const int ldb) {
  CUDA_1D_KERNEL_LOOP(i, M * N) {
    const int row = i / N;
    const int col = i - row * N;  // same as i % N for non-negative i
    B[row * ldb + col] = A[row * lda + col];
  }
}
} // namespace
// Strided 2-D device-to-device copy of an M x N matrix of itemsize-byte
// elements; lda/ldb are leading dimensions in elements, converted to
// byte pitches for cudaMemcpy2DAsync. Note: this specialization does not
// use CopyMatrixKernel above; the copy runs on the context's stream.
template <>
void CopyMatrix<CUDAContext>(
    const size_t itemsize, const int M, const int N, const void* A,
    const int lda, void* B, const int ldb, CUDAContext* context) {
  cudaMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M,
                    cudaMemcpyDeviceToDevice, context->cuda_stream());
}
} // namespace math
} // namespace caffe2
|
a9673593431dd5b9c4c2c9482d9666d65ada4c73.hip | // !!! This is a file automatically generated by hipify!!!
/*
skeleton code for assignment3 COMP4901D
Hash Join
xjia@ust.hk 2015/04/15
*/
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cassert>
#include <memory>
#include <limits>
#include <algorithm>
#include <vector>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
using namespace std;
const int numBits = 6;
const int totalBits = 19;
const int numPart = 1 << numBits; // = 2^6
const int numPerPart = 1 << (totalBits - numBits); // = 2^(19-6)
const int mask = (1 << numBits) - 1;
const int numThreads = 128;
const int numBlocks = 512;
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
/*
return the partition ID of the input element
*/
__device__
int getPartID(int element)
{
    // Partition ID = the top numBits bits of the (totalBits)-bit key.
    return (element >> (totalBits - numBits)) & mask;
}
/*
input: d_key[], array size N
output: d_pixArray[]
funciton: for input array d_key[] with size N, return the partition ID array d_pixArray[]
*/
__global__
void mapPart(int d_key[],int d_pidArray[],int N)
{
    // Grid-stride loop: d_pidArray[i] = partition ID of d_key[i].
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int threadNumber = blockDim.x * gridDim.x;
    while(tid < N)
    {
        d_pidArray[tid] = getPartID(d_key[tid]);
        tid += threadNumber;
    }
}
/*
input: d_pidArray[], array size N
output: d_Hist[]
function: calculate the histogram d_Hist[] based on the partition ID array d_pidArray[]
*/
__global__
void count_Hist(int d_Hist[],int d_pidArray[],int N)
{
    // Per-thread private histograms in shared memory: numPart counters
    // for each thread of the block. NOTE(review): indexing assumes
    // blockDim.x == numThreads; a larger block would overflow s_Hist.
    __shared__ int s_Hist[numThreads * numPart];
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    int threadNumber = blockDim.x * gridDim.x;
    int offset = threadIdx.x * numPart;
    // Zero this thread's private counters.
    for(int i = 0; i < numPart; ++i)
        s_Hist[i + offset] = 0;
    // Grid-stride pass over the partition IDs; no atomics needed because
    // each thread only touches its own counters.
    for(int i = threadId; i < N; i += threadNumber)
        s_Hist[offset + d_pidArray[i]]++;
    // Write out as d_Hist[partition * totalThreads + threadId] — the
    // layout the exclusive scan in split() operates on.
    for(int i = 0; i < numPart; ++i)
        d_Hist[i * threadNumber + threadId] = s_Hist[offset + i];
    __syncthreads();
}
/*
input: d_pidArray[] (partition ID array), d_psSum[] (prefix sum of histogram), array size N
output: d_loc[] (location array)
function: for each element, calculate its corresponding location in the result array based on its partition ID and prefix sum of histogram
*/
__global__
void write_Hist(int d_pidArray[],int d_psSum[],int d_loc[],int N)
{
    // Each thread loads its own numPart scan cursors (one per partition)
    // from d_psSum, then revisits its elements in the same order as
    // count_Hist, assigning each a unique destination slot d_loc[i] and
    // advancing the matching cursor. Cursors are thread-private, so no
    // atomics are required.
    __shared__ int s_psSum[numThreads * numPart];
    int threadId = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = gridDim.x * blockDim.x;
    int offset = threadIdx.x * numPart;
    for(int i = 0; i < numPart; ++i)
        s_psSum[i + offset] = d_psSum[threadId + i * threadNumber];
    for(int i = threadId; i < N; i += threadNumber)
    {
        int pid = d_pidArray[i];
        d_loc[i] = s_psSum[pid + offset];
        s_psSum[pid + offset]++;
    }
}
/*
input: d_psSum[] (prefix sum of histogram), array size N
output: start position of each partition
function: for each partition (chunck to be loaded in the join step), calculate its start position in the result array (the first element's position of this partition)
*/
__global__
void getStartPos(int d_psSum[],int d_startPos[],int N)
{
    // d_psSum is the exclusive scan of the [partition][thread] histogram,
    // so entry (p * totalThreads) is the index of partition p's first
    // element. Only the first numPart threads do work; N is unused here.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = gridDim.x * blockDim.x;
    if(tid >= numPart)
        return;
    d_startPos[tid] = d_psSum[tid * threadNumber];
}
/*
input: d_key[],d_value[],d_loc[],array size []
output: out_key[],out_value[]
function: rewrite the (key,value) pair to its corresponding position based on location array d_loc[]
*/
__global__
void scatter(int d_key[],float d_value[],int out_key[],float out_value[],int d_loc[],int N)
{
    // Grid-stride loop: move each (key,value) pair to its precomputed
    // destination slot d_loc[i].
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride)
    {
        const int dst = d_loc[idx];
        out_key[dst] = d_key[idx];
        out_value[dst] = d_value[idx];
    }
}
/*
function: split the (key,value) array with size N, record the start position of each partition at the same time
*/
/*
  Splits the N-element (key,value) arrays in place so elements are grouped
  by partition ID, and records each partition's first index in
  d_startPos[numPart]. Pipeline: map keys -> partition IDs, build a
  per-thread histogram, exclusive-scan it into write cursors, derive the
  partition start positions, scatter into temporaries, copy back, free.
*/
void split(int *d_key,float *d_value,int *d_startPos,int N)
{
    dim3 grid(numBlocks);
    dim3 block(numThreads);
    /*if(N<numThreads){
        grid=1;
        block=N;
    }else{
        grid=(N+numThreads-1)/numThreads;
        block=numThreads;
    }*/
    int num_threads=grid.x * block.x;
    int hist_len = num_threads * numPart;
    int *d_pidArr, *d_Hist, *d_psSum, *d_loc, *d_outkey;
    float *d_outvalue;
    hipMalloc(&d_outkey, sizeof(int)*N);
    cudaCheckError();
    hipMalloc(&d_outvalue, sizeof(float)*N);
    cudaCheckError();
    hipMalloc(&d_loc,sizeof(int)*N);
    cudaCheckError();
    hipMalloc(&d_pidArr, sizeof(int)*N);
    cudaCheckError();
    hipMalloc(&d_Hist, sizeof(int)*hist_len);
    cudaCheckError();
    hipMalloc(&d_psSum, sizeof(int)*hist_len);
    cudaCheckError();
    hipLaunchKernelGGL(( mapPart), dim3(grid),dim3(block), 0, 0, d_key, d_pidArr, N);
    cudaCheckError();
    hipLaunchKernelGGL(( count_Hist), dim3(grid),dim3(block), 0, 0, d_Hist, d_pidArr, N);
    cudaCheckError();
    // d_Hist is laid out [partition][thread], so the scan yields each
    // (partition, thread) pair's write cursor in partition order.
    thrust::device_ptr<int> dev_Hist(d_Hist);
    thrust::device_ptr<int> dev_psSum(d_psSum);
    thrust::exclusive_scan(dev_Hist, dev_Hist + hist_len, dev_psSum);
    cudaCheckError();
    hipLaunchKernelGGL(( getStartPos), dim3(grid),dim3(block), 0, 0, d_psSum, d_startPos, N);
    cudaCheckError();
    hipLaunchKernelGGL(( write_Hist), dim3(grid),dim3(block), 0, 0, d_pidArr, d_psSum, d_loc, N);
    cudaCheckError();
    hipLaunchKernelGGL(( scatter), dim3(grid),dim3(block), 0, 0, d_key, d_value, d_outkey, d_outvalue, d_loc, N);
    cudaCheckError();
    // Copy the partitioned data back so the split is in place.
    hipMemcpy(d_key, d_outkey, sizeof(int)*N, hipMemcpyDeviceToDevice);
    cudaCheckError();
    hipMemcpy(d_value, d_outvalue, sizeof(float)*N, hipMemcpyDeviceToDevice);
    cudaCheckError();
    hipFree(d_psSum);
    cudaCheckError();
    hipFree(d_Hist);
    cudaCheckError();
    hipFree(d_pidArr);
    cudaCheckError();
    hipFree(d_loc);
    cudaCheckError();
    hipFree(d_outvalue);
    cudaCheckError();
    hipFree(d_outkey);
    cudaCheckError();
    /* add your code here */
}
/*
function: perform hash join on two (key,value) arrays
*/
/*
  Hash-join probe: one block per partition. Block b loads partition b of
  relation 2 (build side) into shared memory, then each thread streams
  over partition b of relation 1 (probe side). d_result[i] receives the
  index into relation 2 of a matching key (last match wins), or -1.
  NOTE(review): assumes every partition of relation 2 fits in
  inner[numPerPart]; a larger partition would overflow shared memory.
*/
__global__
void join(int d_key1[],float d_value1[],int d_key2[],float d_value2[],int d_startPos1[],int d_startPos2[],int d_result[],int N1,int N2)
{
    __shared__ int inner[numPerPart];
    int b_offset = threadIdx.x;
    int b_size = blockDim.x;
    //load B to inner shared
    int start1 = d_startPos1[blockIdx.x];
    int start2 = d_startPos2[blockIdx.x];
    int end1, end2;
    // BUG FIX: the last partition must be detected with gridDim.x (the
    // number of blocks/partitions), not blockDim.x (threads per block).
    // The original comparison never matched, so the last block read one
    // element past the end of d_startPos1/d_startPos2.
    if(blockIdx.x == gridDim.x - 1){
        end1 = N1;
        end2 = N2;
    }
    else{
        end1 = d_startPos1[blockIdx.x + 1];
        end2 = d_startPos2[blockIdx.x + 1];
    }
    for(int i=start2+b_offset;i<end2;i+=b_size){
        inner[i-start2] = d_key2[i];
    }
    __syncthreads();
    for(int i=start1+b_offset;i<end1;i+=b_size){
        d_result[i] = -1;
        for(int j=0;j<end2-start2;++j){
            if(d_key1[i] == inner[j]){
                d_result[i] = start2 + j;
            }
        }
    }
}
/*void check_arr(int* arr, int N){
int lower = std::numeric_limits<int>::min();
std::for_each(arr, arr+N, [&](int& val){
if(val < lower){
fprintf(stderr, "array not sorted! @ %td\n", &val - arr);
exit(-1);
}
else{
lower = val;
}
});
}*/
// Debug helper: prints each partition's contents (hex) to stderr using
// the start offsets in loc[numPart], then the offsets themselves.
void print_arr(int* arr, int* loc, int N){
    fprintf(stderr, "arr:\n");
    //check_arr(arr, N);
    for(int i=0;i<numPart;++i){
        // Last partition ends at N; others end where the next one starts.
        int start=loc[i], end;
        if(i==numPart-1){
            end = N;
        }else{
            end = loc[i+1];
        }
        fprintf(stderr, "from %d to %d: ", start, end);
        for(int j=start;j!=end;++j){
            fprintf(stderr, "%08x ", arr[j]);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "loc:\n");
    for(int i=0;i<numPart;++i){
        fprintf(stderr, "%d ", loc[i]);
    }
    fprintf(stderr, "\n");
}
/*
  function: perform hash join on two (key,value) arrays
  Partitions both relations in place with split(), then launches one
  block per partition (1024 threads each) to probe relation 1 against
  relation 2; see join() for the result encoding in d_result[N1].
*/
void hashJoin(int *d_key1,float *d_value1,int *d_key2,float *d_value2,int N1,int N2,int *d_result)
{
    int *d_startPos1,*d_startPos2;
    hipMalloc(&d_startPos1,sizeof(int) * numPart);
    cudaCheckError();
    hipMalloc(&d_startPos2,sizeof(int) * numPart);
    cudaCheckError();
    split(d_key1,d_value1,d_startPos1,N1);
    split(d_key2,d_value2,d_startPos2,N2);
    dim3 grid(numPart);
    dim3 block(1024);
    hipLaunchKernelGGL(( join), dim3(grid),dim3(block), 0, 0, d_key1,d_value1,d_key2,d_value2,d_startPos1,d_startPos2,d_result,N1,N2);
    // BUG FIX: the start-position buffers were leaked. Freeing after the
    // launch is safe because hipFree synchronizes with outstanding work.
    hipFree(d_startPos1);
    cudaCheckError();
    hipFree(d_startPos2);
    cudaCheckError();
}
int main()
{
    // Driver: reads N1, N2 and two (key,value) relations from in.txt,
    // joins them on the GPU, and writes the matching pairs to out.txt.
    freopen("in.txt","r",stdin);
    int *h_key1, *h_key2, *d_key1, *d_key2;
    float *h_value1, *h_value2, *d_value1, *d_value2;
    int *h_result, *d_result;
    int N1,N2;
    {
        int tmp = scanf("%d%d",&N1,&N2);
        (void)tmp;
        assert(tmp==2);
    }
    // h_result[i] = index into relation 2, or -1 for "no match".
    h_key1 = (int*)malloc(N1 * sizeof(int));
    h_key2 = (int*)malloc(N2 * sizeof(int));
    h_value1 = (float*)malloc(N1 * sizeof(float));
    h_value2 = (float*)malloc(N2 * sizeof(float));
    h_result = (int*)malloc(N1 * sizeof(int));
    hipMalloc(&d_key1, N1 * sizeof(int));
    cudaCheckError();
    hipMalloc(&d_key2, N2 * sizeof(int));
    cudaCheckError();
    hipMalloc(&d_value1, N1 * sizeof(float));
    cudaCheckError();
    hipMalloc(&d_value2, N2 * sizeof(float));
    cudaCheckError();
    hipMalloc(&d_result, N1 * sizeof(int));
    cudaCheckError();
    for(int i = 0; i < N1; ++i){
        int tmp = scanf("%d%f",&h_key1[i],&h_value1[i]);
        (void)tmp;
        assert(tmp==2);
    }
    for(int i = 0; i < N2; ++i){
        int tmp = scanf("%d%f",&h_key2[i],&h_value2[i]);
        (void)tmp;
        assert(tmp==2);
    }
    memset(h_result,-1,sizeof(int) * N1);
    hipMemcpy(d_key1,h_key1, sizeof(int) * N1, hipMemcpyHostToDevice);
    cudaCheckError();
    hipMemcpy(d_result,h_result, sizeof(int) * N1, hipMemcpyHostToDevice);
    cudaCheckError();
    hipMemcpy(d_key2,h_key2, sizeof(int) * N2, hipMemcpyHostToDevice);
    cudaCheckError();
    hipMemcpy(d_value1,h_value1, sizeof(float) * N1, hipMemcpyHostToDevice);
    cudaCheckError();
    hipMemcpy(d_value2,h_value2, sizeof(float) * N2, hipMemcpyHostToDevice);
    cudaCheckError();
    hashJoin(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result);
    cudaCheckError();
    // Copy everything back; the device arrays were permuted in place by
    // split(), so h_result indices refer to the partitioned order.
    hipMemcpy(h_result,d_result,sizeof(int) * N1, hipMemcpyDeviceToHost);
    cudaCheckError();
    hipMemcpy(h_key1,d_key1,sizeof(int) * N1, hipMemcpyDeviceToHost);
    cudaCheckError();
    hipMemcpy(h_key2,d_key2,sizeof(int) * N2, hipMemcpyDeviceToHost);
    cudaCheckError();
    hipMemcpy(h_value1,d_value1,sizeof(float) * N1, hipMemcpyDeviceToHost);
    cudaCheckError();
    hipMemcpy(h_value2,d_value2,sizeof(float) * N2, hipMemcpyDeviceToHost);
    cudaCheckError();
    int matched = 0;
    freopen("out.txt","w",stdout);
    for(int i = 0;i < N1; ++i)
    {
        if(h_result[i] == -1)
            continue;
        matched++;
        printf("Key %d\nValue1 %.2f Value2 %.2f\n\n",h_key1[i],h_value1[i],h_value2[h_result[i]]);
    }
    printf("Matched %d\n",matched);
    fclose(stdout);
    freopen("/dev/tty","w",stdout);
    free(h_key1);
    free(h_key2);
    free(h_value1);
    free(h_value2);
    free(h_result);
    hipFree(d_key1);
    cudaCheckError();
    hipFree(d_key2);
    cudaCheckError();
    hipFree(d_value1);
    cudaCheckError();
    hipFree(d_value2);
    cudaCheckError();
    hipFree(d_result);
    cudaCheckError();
    hipDeviceReset();
    cudaCheckError();
    return 0;
}
| a9673593431dd5b9c4c2c9482d9666d65ada4c73.cu | /*
skeleton code for assignment3 COMP4901D
Hash Join
xjia@ust.hk 2015/04/15
*/
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cassert>
#include <memory>
#include <limits>
#include <algorithm>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
using namespace std;
const int numBits = 6;
const int totalBits = 19;
const int numPart = 1 << numBits; // = 2^6
const int numPerPart = 1 << (totalBits - numBits); // = 2^(19-6)
const int mask = (1 << numBits) - 1;
const int numThreads = 128;
const int numBlocks = 512;
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
/*
return the partition ID of the input element
*/
__device__
int getPartID(int element)
{
    // Partition ID = the top numBits bits of the (totalBits)-bit key.
    return (element >> (totalBits - numBits)) & mask;
}
/*
input: d_key[], array size N
output: d_pixArray[]
funciton: for input array d_key[] with size N, return the partition ID array d_pixArray[]
*/
__global__
void mapPart(int d_key[],int d_pidArray[],int N)
{
    // Grid-stride loop: d_pidArray[i] = partition ID of d_key[i].
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int threadNumber = blockDim.x * gridDim.x;
    while(tid < N)
    {
        d_pidArray[tid] = getPartID(d_key[tid]);
        tid += threadNumber;
    }
}
/*
input: d_pidArray[], array size N
output: d_Hist[]
function: calculate the histogram d_Hist[] based on the partition ID array d_pidArray[]
*/
__global__
void count_Hist(int d_Hist[],int d_pidArray[],int N)
{
    // Per-thread private histograms in shared memory: numPart counters
    // for each thread of the block. NOTE(review): indexing assumes
    // blockDim.x == numThreads; a larger block would overflow s_Hist.
    __shared__ int s_Hist[numThreads * numPart];
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    int threadNumber = blockDim.x * gridDim.x;
    int offset = threadIdx.x * numPart;
    // Zero this thread's private counters.
    for(int i = 0; i < numPart; ++i)
        s_Hist[i + offset] = 0;
    // Grid-stride pass over the partition IDs; no atomics needed because
    // each thread only touches its own counters.
    for(int i = threadId; i < N; i += threadNumber)
        s_Hist[offset + d_pidArray[i]]++;
    // Write out as d_Hist[partition * totalThreads + threadId] — the
    // layout the exclusive scan in split() operates on.
    for(int i = 0; i < numPart; ++i)
        d_Hist[i * threadNumber + threadId] = s_Hist[offset + i];
    __syncthreads();
}
/*
input: d_pidArray[] (partition ID array), d_psSum[] (prefix sum of histogram), array size N
output: d_loc[] (location array)
function: for each element, calculate its corresponding location in the result array based on its partition ID and prefix sum of histogram
*/
__global__
void write_Hist(int d_pidArray[],int d_psSum[],int d_loc[],int N)
{
    // Each thread loads its own numPart scan cursors (one per partition)
    // from d_psSum, then revisits its elements in the same order as
    // count_Hist, assigning each a unique destination slot d_loc[i] and
    // advancing the matching cursor. Cursors are thread-private, so no
    // atomics are required.
    __shared__ int s_psSum[numThreads * numPart];
    int threadId = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = gridDim.x * blockDim.x;
    int offset = threadIdx.x * numPart;
    for(int i = 0; i < numPart; ++i)
        s_psSum[i + offset] = d_psSum[threadId + i * threadNumber];
    for(int i = threadId; i < N; i += threadNumber)
    {
        int pid = d_pidArray[i];
        d_loc[i] = s_psSum[pid + offset];
        s_psSum[pid + offset]++;
    }
}
/*
input: d_psSum[] (prefix sum of histogram), array size N
output: start position of each partition
function: for each partition (chunck to be loaded in the join step), calculate its start position in the result array (the first element's position of this partition)
*/
__global__
void getStartPos(int d_psSum[],int d_startPos[],int N)
{
    // d_psSum is the exclusive scan of the [partition][thread] histogram,
    // so entry (p * totalThreads) is the index of partition p's first
    // element. Only the first numPart threads do work; N is unused here.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int threadNumber = gridDim.x * blockDim.x;
    if(tid >= numPart)
        return;
    d_startPos[tid] = d_psSum[tid * threadNumber];
}
/*
input: d_key[],d_value[],d_loc[],array size []
output: out_key[],out_value[]
function: rewrite the (key,value) pair to its corresponding position based on location array d_loc[]
*/
__global__
void scatter(int d_key[],float d_value[],int out_key[],float out_value[],int d_loc[],int N)
{
    // Grid-stride loop: move each (key,value) pair to its precomputed
    // destination slot d_loc[i].
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride)
    {
        const int dst = d_loc[idx];
        out_key[dst] = d_key[idx];
        out_value[dst] = d_value[idx];
    }
}
/*
function: split the (key,value) array with size N, record the start position of each partition at the same time
*/
/*
  Splits the N-element (key,value) arrays in place so elements are grouped
  by partition ID, and records each partition's first index in
  d_startPos[numPart]. Pipeline: map keys -> partition IDs, build a
  per-thread histogram, exclusive-scan it into write cursors, derive the
  partition start positions, scatter into temporaries, copy back, free.
*/
void split(int *d_key,float *d_value,int *d_startPos,int N)
{
    dim3 grid(numBlocks);
    dim3 block(numThreads);
    /*if(N<numThreads){
        grid=1;
        block=N;
    }else{
        grid=(N+numThreads-1)/numThreads;
        block=numThreads;
    }*/
    int num_threads=grid.x * block.x;
    int hist_len = num_threads * numPart;
    int *d_pidArr, *d_Hist, *d_psSum, *d_loc, *d_outkey;
    float *d_outvalue;
    cudaMalloc(&d_outkey, sizeof(int)*N);
    cudaCheckError();
    cudaMalloc(&d_outvalue, sizeof(float)*N);
    cudaCheckError();
    cudaMalloc(&d_loc,sizeof(int)*N);
    cudaCheckError();
    cudaMalloc(&d_pidArr, sizeof(int)*N);
    cudaCheckError();
    cudaMalloc(&d_Hist, sizeof(int)*hist_len);
    cudaCheckError();
    cudaMalloc(&d_psSum, sizeof(int)*hist_len);
    cudaCheckError();
    mapPart<<<grid,block>>>(d_key, d_pidArr, N);
    cudaCheckError();
    count_Hist<<<grid,block>>>(d_Hist, d_pidArr, N);
    cudaCheckError();
    // d_Hist is laid out [partition][thread], so the scan yields each
    // (partition, thread) pair's write cursor in partition order.
    thrust::device_ptr<int> dev_Hist(d_Hist);
    thrust::device_ptr<int> dev_psSum(d_psSum);
    thrust::exclusive_scan(dev_Hist, dev_Hist + hist_len, dev_psSum);
    cudaCheckError();
    getStartPos<<<grid,block>>>(d_psSum, d_startPos, N);
    cudaCheckError();
    write_Hist<<<grid,block>>>(d_pidArr, d_psSum, d_loc, N);
    cudaCheckError();
    scatter<<<grid,block>>>(d_key, d_value, d_outkey, d_outvalue, d_loc, N);
    cudaCheckError();
    // Copy the partitioned data back so the split is in place.
    cudaMemcpy(d_key, d_outkey, sizeof(int)*N, cudaMemcpyDeviceToDevice);
    cudaCheckError();
    cudaMemcpy(d_value, d_outvalue, sizeof(float)*N, cudaMemcpyDeviceToDevice);
    cudaCheckError();
    cudaFree(d_psSum);
    cudaCheckError();
    cudaFree(d_Hist);
    cudaCheckError();
    cudaFree(d_pidArr);
    cudaCheckError();
    cudaFree(d_loc);
    cudaCheckError();
    cudaFree(d_outvalue);
    cudaCheckError();
    cudaFree(d_outkey);
    cudaCheckError();
    /* add your code here */
}
/*
function: perform hash join on two (key,value) arrays
*/
/*
  Hash-join probe: one block per partition. Block b loads partition b of
  relation 2 (build side) into shared memory, then each thread streams
  over partition b of relation 1 (probe side). d_result[i] receives the
  index into relation 2 of a matching key (last match wins), or -1.
  NOTE(review): assumes every partition of relation 2 fits in
  inner[numPerPart]; a larger partition would overflow shared memory.
*/
__global__
void join(int d_key1[],float d_value1[],int d_key2[],float d_value2[],int d_startPos1[],int d_startPos2[],int d_result[],int N1,int N2)
{
    __shared__ int inner[numPerPart];
    int b_offset = threadIdx.x;
    int b_size = blockDim.x;
    //load B to inner shared
    int start1 = d_startPos1[blockIdx.x];
    int start2 = d_startPos2[blockIdx.x];
    int end1, end2;
    // BUG FIX: the last partition must be detected with gridDim.x (the
    // number of blocks/partitions), not blockDim.x (threads per block).
    // The original comparison never matched, so the last block read one
    // element past the end of d_startPos1/d_startPos2.
    if(blockIdx.x == gridDim.x - 1){
        end1 = N1;
        end2 = N2;
    }
    else{
        end1 = d_startPos1[blockIdx.x + 1];
        end2 = d_startPos2[blockIdx.x + 1];
    }
    for(int i=start2+b_offset;i<end2;i+=b_size){
        inner[i-start2] = d_key2[i];
    }
    __syncthreads();
    for(int i=start1+b_offset;i<end1;i+=b_size){
        d_result[i] = -1;
        for(int j=0;j<end2-start2;++j){
            if(d_key1[i] == inner[j]){
                d_result[i] = start2 + j;
            }
        }
    }
}
/*void check_arr(int* arr, int N){
int lower = std::numeric_limits<int>::min();
std::for_each(arr, arr+N, [&](int& val){
if(val < lower){
fprintf(stderr, "array not sorted! @ %td\n", &val - arr);
exit(-1);
}
else{
lower = val;
}
});
}*/
// Debug helper: prints each partition's contents (hex) to stderr using
// the start offsets in loc[numPart], then the offsets themselves.
void print_arr(int* arr, int* loc, int N){
    fprintf(stderr, "arr:\n");
    //check_arr(arr, N);
    for(int i=0;i<numPart;++i){
        // Last partition ends at N; others end where the next one starts.
        int start=loc[i], end;
        if(i==numPart-1){
            end = N;
        }else{
            end = loc[i+1];
        }
        fprintf(stderr, "from %d to %d: ", start, end);
        for(int j=start;j!=end;++j){
            fprintf(stderr, "%08x ", arr[j]);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "loc:\n");
    for(int i=0;i<numPart;++i){
        fprintf(stderr, "%d ", loc[i]);
    }
    fprintf(stderr, "\n");
}
/*
  function: perform hash join on two (key,value) arrays
  Partitions both relations in place with split(), then launches one
  block per partition (1024 threads each) to probe relation 1 against
  relation 2; see join() for the result encoding in d_result[N1].
*/
void hashJoin(int *d_key1,float *d_value1,int *d_key2,float *d_value2,int N1,int N2,int *d_result)
{
    int *d_startPos1,*d_startPos2;
    cudaMalloc(&d_startPos1,sizeof(int) * numPart);
    cudaCheckError();
    cudaMalloc(&d_startPos2,sizeof(int) * numPart);
    cudaCheckError();
    split(d_key1,d_value1,d_startPos1,N1);
    split(d_key2,d_value2,d_startPos2,N2);
    dim3 grid(numPart);
    dim3 block(1024);
    join<<<grid,block>>>(d_key1,d_value1,d_key2,d_value2,d_startPos1,d_startPos2,d_result,N1,N2);
    // BUG FIX: the start-position buffers were leaked. Freeing after the
    // launch is safe because cudaFree synchronizes with outstanding work.
    cudaFree(d_startPos1);
    cudaCheckError();
    cudaFree(d_startPos2);
    cudaCheckError();
}
int main()
{
    // Driver: reads N1, N2 and two (key,value) relations from in.txt,
    // joins them on the GPU, and writes the matching pairs to out.txt.
    freopen("in.txt","r",stdin);
    int *h_key1, *h_key2, *d_key1, *d_key2;
    float *h_value1, *h_value2, *d_value1, *d_value2;
    int *h_result, *d_result;
    int N1,N2;
    {
        int tmp = scanf("%d%d",&N1,&N2);
        (void)tmp;
        assert(tmp==2);
    }
    // h_result[i] = index into relation 2, or -1 for "no match".
    h_key1 = (int*)malloc(N1 * sizeof(int));
    h_key2 = (int*)malloc(N2 * sizeof(int));
    h_value1 = (float*)malloc(N1 * sizeof(float));
    h_value2 = (float*)malloc(N2 * sizeof(float));
    h_result = (int*)malloc(N1 * sizeof(int));
    cudaMalloc(&d_key1, N1 * sizeof(int));
    cudaCheckError();
    cudaMalloc(&d_key2, N2 * sizeof(int));
    cudaCheckError();
    cudaMalloc(&d_value1, N1 * sizeof(float));
    cudaCheckError();
    cudaMalloc(&d_value2, N2 * sizeof(float));
    cudaCheckError();
    cudaMalloc(&d_result, N1 * sizeof(int));
    cudaCheckError();
    for(int i = 0; i < N1; ++i){
        int tmp = scanf("%d%f",&h_key1[i],&h_value1[i]);
        (void)tmp;
        assert(tmp==2);
    }
    for(int i = 0; i < N2; ++i){
        int tmp = scanf("%d%f",&h_key2[i],&h_value2[i]);
        (void)tmp;
        assert(tmp==2);
    }
    memset(h_result,-1,sizeof(int) * N1);
    cudaMemcpy(d_key1,h_key1, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_result,h_result, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_key2,h_key2, sizeof(int) * N2, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_value1,h_value1, sizeof(float) * N1, cudaMemcpyHostToDevice);
    cudaCheckError();
    cudaMemcpy(d_value2,h_value2, sizeof(float) * N2, cudaMemcpyHostToDevice);
    cudaCheckError();
    hashJoin(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result);
    cudaCheckError();
    // Copy everything back; the device arrays were permuted in place by
    // split(), so h_result indices refer to the partitioned order.
    cudaMemcpy(h_result,d_result,sizeof(int) * N1, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_key1,d_key1,sizeof(int) * N1, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_key2,d_key2,sizeof(int) * N2, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_value1,d_value1,sizeof(float) * N1, cudaMemcpyDeviceToHost);
    cudaCheckError();
    cudaMemcpy(h_value2,d_value2,sizeof(float) * N2, cudaMemcpyDeviceToHost);
    cudaCheckError();
    int matched = 0;
    freopen("out.txt","w",stdout);
    for(int i = 0;i < N1; ++i)
    {
        if(h_result[i] == -1)
            continue;
        matched++;
        printf("Key %d\nValue1 %.2f Value2 %.2f\n\n",h_key1[i],h_value1[i],h_value2[h_result[i]]);
    }
    printf("Matched %d\n",matched);
    fclose(stdout);
    freopen("/dev/tty","w",stdout);
    free(h_key1);
    free(h_key2);
    free(h_value1);
    free(h_value2);
    free(h_result);
    cudaFree(d_key1);
    cudaCheckError();
    cudaFree(d_key2);
    cudaCheckError();
    cudaFree(d_value1);
    cudaCheckError();
    cudaFree(d_value2);
    cudaCheckError();
    cudaFree(d_result);
    cudaCheckError();
    cudaDeviceReset();
    cudaCheckError();
    return 0;
}
abce5745c6bd0b0d0fbec56ab834fb2be90e3c80.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define MAX_BINS 4096
//Comparing the multi and single threaded histograms
bool compare(unsigned int *one, unsigned int *two, int size)
{
    // Element-wise equality: the histograms match only if every bin agrees.
    for (int idx = 0; idx < size; ++idx)
    {
        if (one[idx] != two[idx])
            return false;
    }
    return true;
}
//Printing the datas if the required parameter is set to 1
void Data(unsigned int *data, unsigned int dataSize)
{
    // Dump the generated input as: "Data generated : v-v-...-v]".
    printf("Data generated : ");
    for (unsigned int idx = 0; idx < dataSize; ++idx)
    {
        printf("%d", data[idx]);
        // Separator after every value except the last, which closes the list.
        printf(idx + 1 == dataSize ? "]\n" : "-");
    }
}
__global__
static void histogram(unsigned int *input, unsigned int *histo, unsigned int dataSize, unsigned int binSize)
{
    // Shared-memory histogram: each block accumulates a private copy in
    // dynamic shared memory (binSize ints, sized at launch), then merges
    // it into the global histogram. Assumes input values are < binSize.
    int th = blockIdx.x * blockDim.x + threadIdx.x;
    //Using extern shared and setting its size when calling the kernel
    extern __shared__ int local_histogram[];
    //init histo
    for (int y = threadIdx.x; y < binSize; y += blockDim.x)
    {
        local_histogram[y] = 0;
    }
    __syncthreads();
    //Filling the results on the histogram
    for (int i = th; i < dataSize; i += blockDim.x * gridDim.x)
    {
        //Atomic add is used as 2 datas can have the same number, to not have issues if they add 1 at the same time
        atomicAdd(&local_histogram[input[i]], 1);
    }
    __syncthreads();
    //add / blocks
    for (int z = threadIdx.x; z < binSize; z += blockDim.x)
    {
        atomicAdd(&histo[z], local_histogram[z]);
    }
}
//Printing the results
void result(unsigned int *res, int threadNb, unsigned int Size )
{
    // Print the bins as: "Result for T threads: [b|b|...|b"
    // (like the original, no closing bracket is emitted).
    printf("Result for %d threads: [", threadNb);
    for (unsigned int idx = 0; idx < Size; ++idx)
    {
        if (idx > 0)
        {
            printf("|");
        }
        printf("%d", res[idx]);
    }
}
//Cleaning the histogram by putting 0's in it
__global__ static void cleanHisto(unsigned int *histo, unsigned int binSize)
{
    // Zero every bin; the block's threads cover bins in a strided pattern.
    unsigned int bin = threadIdx.x;
    while (bin < binSize)
    {
        histo[bin] = 0;
        bin += blockDim.x;
    }
    // Barrier retained from the original implementation.
    __syncthreads();
}
void wrapper(unsigned int dataSize, unsigned int binSize, int display, int threadNb, int blockCount)
{
unsigned int *histo = NULL;
unsigned int *histo_single = NULL;
unsigned int *d_histo = NULL;
unsigned int *data = NULL;
unsigned int *d_data = NULL;
hipEvent_t start;
hipEvent_t start_single;
hipEvent_t stop;
hipEvent_t stop_single;
// Defining the structures
data = (unsigned int *)malloc(dataSize * sizeof(unsigned int));
histo = (unsigned int *)malloc(binSize * sizeof(unsigned int));
histo_single = (unsigned int *)malloc(binSize * sizeof(unsigned int));
// Generate data set on the host
printf("Generation of data sets randomly.\n");
srand(time(NULL));
srand(time(NULL));
for (int i = 0; i < dataSize; i++){
data[i] = rand() % binSize;
}
printf("Done\n");
// Print the input if it was asked while lauching the program
if (display == 1)
{
Data(data, dataSize);
}
// Allocating memory
checkCudaErrors(hipMalloc((void **)&d_histo, sizeof(unsigned int) * binSize));
checkCudaErrors(hipMalloc((void **)&d_data, sizeof(unsigned int) * dataSize));
// Copy the data to the device
checkCudaErrors(hipMemcpy(d_data, data, sizeof(unsigned int) * dataSize, hipMemcpyHostToDevice));
// Record the start event
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start, NULL));
// Launch the kernel
hipLaunchKernelGGL(( histogram), dim3(blockCount), dim3(threadNb),sizeof(unsigned int) * binSize, 0, d_data, d_histo, dataSize, binSize);
hipDeviceSynchronize();
// Fetch the result from device to host into histo
printf("End of the kernel, fetching the results :\n");
checkCudaErrors(hipMemcpy(histo, d_histo, sizeof(unsigned int) * binSize, hipMemcpyDeviceToHost));
// Record the stop event and wait for the stop event to complete
checkCudaErrors(hipEventRecord(stop, NULL));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventCreate(&start_single));
checkCudaErrors(hipEventCreate(&stop_single));
checkCudaErrors(hipEventRecord(start_single, NULL));
// Clean the first histogram as I re-use it afterwards
hipLaunchKernelGGL(( cleanHisto), dim3(1), dim3(threadNb), 0, 0, d_histo, binSize);
hipDeviceSynchronize();
// Launch the kernel on a single thread
hipLaunchKernelGGL(( histogram), dim3(1), dim3(1),sizeof(unsigned int) * binSize, 0, d_data, d_histo, dataSize, binSize);
hipDeviceSynchronize();
// Fetch the result of the last kernel onto the host
checkCudaErrors(hipMemcpy(histo_single, d_histo, sizeof(unsigned int) * binSize, hipMemcpyDeviceToHost));
checkCudaErrors(hipEventRecord(stop_single, NULL));
checkCudaErrors(hipEventSynchronize(stop_single));
float msecTotal = 0.0f;
float msecTotal_single = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(hipEventElapsedTime(&msecTotal_single, start_single, stop_single));
double gigaFlops = (dataSize * 1.0e-9f) / (msecTotal / 1000.0f);
double gigaFlops_single = (dataSize * 1.0e-9f) / (msecTotal_single / 1000.0f);
// Print the histograms if the parameter
if (display == 1)
{
result(histo, threadNb, binSize);
result(histo_single, 1, binSize);
}
// Compare the results of the two histograms
if (compare(histo, histo_single, binSize))
{
printf("All good ! Histograms match\n");
}
else
{
printf("Wrong ! Histograms don't match\n");
}
// Print performances
printf("%d threads :\nCuda processing time = %.3fms, \n Perf = %.3f Gflops\n",threadNb, msecTotal, gigaFlops);
printf("1 thread :\nCuda processing time = %.3fms, \n Perf = %.3f Gflops\n", msecTotal_single, gigaFlops_single);
checkCudaErrors(hipFree(d_data));
checkCudaErrors(hipFree(d_histo));
free(histo);
free(histo_single);
free(data);
}
int main(int argc, char **argv)
{
int print = 0;
unsigned int binSize = MAX_BINS;
unsigned long long ds = 256;
char *dataSize = NULL;
hipDeviceProp_t cudaprop;
// retrieve device
int dev = findCudaDevice(argc, (const char **)argv);
hipGetDeviceProperties(&cudaprop, dev);
//Retrieving parameters
if (checkCmdLineFlag(argc, (const char **)argv, "size"))
{
getCmdLineArgumentString(argc, (const char **)argv, "size", &dataSize);
ds = atoll(dataSize);
}
if (checkCmdLineFlag(argc, (const char **)argv, "displayData"))
{
print = 1;
}
printf("Data Size is: %d \n", ds);
//Max is 2^32 as asked
if (ds >= 4294967296 || ds == 0) {
printf("Error: Data size > 4,294,967,296");
exit(EXIT_FAILURE);
}
//Defining the number of threads to follow the need (ds) with max value 256 (multiple of 32) and < 1024
int nbThread = min((int)ds, 256);
printf("nb thread: %d \n", nbThread);
//Defining the number of blocks to follow the need (if ds = 500 only 2 blocks) with max value a multiple of 30
int nbBlock = min(((int)ds/256),18000);
//if the data size is below 256 we still have to have atleast 1 block
if (nbBlock == 0) nbBlock = 1;
printf("nbblock: %d \n", nbBlock);
wrapper(ds, binSize, print, nbThread, nbBlock);
return EXIT_SUCCESS;
}
| abce5745c6bd0b0d0fbec56ab834fb2be90e3c80.cu | #include <stdio.h>
#include <stdint.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define MAX_BINS 4096
//Comparing the multi and single threaded histograms
bool compare(unsigned int *one, unsigned int *two, int size)
{
for(int p = 0; p<size; p++)
{
if (one[p] != two[p])
{
return false;
}
}
return true;
}
//Printing the datas if the required parameter is set to 1
void Data(unsigned int *data, unsigned int dataSize)
{
printf("Data generated : ");
for (int a = 0; a < dataSize; a++)
{
printf("%d", data[a]);
if (a == dataSize - 1)
{
printf("]\n");
}
if (a != dataSize - 1)
{
printf("-");
}
}
}
__global__
static void histogram(unsigned int *input, unsigned int *histo, unsigned int dataSize, unsigned int binSize)
{
int th = blockIdx.x * blockDim.x + threadIdx.x;
//Using extern shared and setting its size when calling the kernel
extern __shared__ int local_histogram[];
//init histo
for (int y = threadIdx.x; y < binSize; y += blockDim.x)
{
local_histogram[y] = 0;
}
__syncthreads();
//Filling the results on the histogram
for (int i = th; i < dataSize; i += blockDim.x * gridDim.x)
{
//Atomic add is used as 2 datas can have the same number, to not have issues if they add 1 at the same time
atomicAdd(&local_histogram[input[i]], 1);
}
__syncthreads();
//add / blocks
for (int z = threadIdx.x; z < binSize; z += blockDim.x)
{
atomicAdd(&histo[z], local_histogram[z]);
}
}
//Printing the results
void result(unsigned int *res, int threadNb, unsigned int Size )
{
printf("Result for %d threads: [", threadNb);
for (int i = 0; i < Size; i++)
{
printf("%d", res[i]);
if (i != Size - 1)
{
printf("|");
}
}
}
//Cleaning the histogram by putting 0's in it
__global__ static void cleanHisto(unsigned int *histo, unsigned int binSize)
{
for (int i = threadIdx.x; i < binSize; i += blockDim.x)
{
histo[i] = 0;
}
__syncthreads();
}
void wrapper(unsigned int dataSize, unsigned int binSize, int display, int threadNb, int blockCount)
{
unsigned int *histo = NULL;
unsigned int *histo_single = NULL;
unsigned int *d_histo = NULL;
unsigned int *data = NULL;
unsigned int *d_data = NULL;
cudaEvent_t start;
cudaEvent_t start_single;
cudaEvent_t stop;
cudaEvent_t stop_single;
// Defining the structures
data = (unsigned int *)malloc(dataSize * sizeof(unsigned int));
histo = (unsigned int *)malloc(binSize * sizeof(unsigned int));
histo_single = (unsigned int *)malloc(binSize * sizeof(unsigned int));
// Generate data set on the host
printf("Generation of data sets randomly.\n");
srand(time(NULL));
srand(time(NULL));
for (int i = 0; i < dataSize; i++){
data[i] = rand() % binSize;
}
printf("Done\n");
// Print the input if it was asked while lauching the program
if (display == 1)
{
Data(data, dataSize);
}
// Allocating memory
checkCudaErrors(cudaMalloc((void **)&d_histo, sizeof(unsigned int) * binSize));
checkCudaErrors(cudaMalloc((void **)&d_data, sizeof(unsigned int) * dataSize));
// Copy the data to the device
checkCudaErrors(cudaMemcpy(d_data, data, sizeof(unsigned int) * dataSize, cudaMemcpyHostToDevice));
// Record the start event
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start, NULL));
// Launch the kernel
histogram<<<blockCount, threadNb,sizeof(unsigned int) * binSize>>>(d_data, d_histo, dataSize, binSize);
cudaDeviceSynchronize();
// Fetch the result from device to host into histo
printf("End of the kernel, fetching the results :\n");
checkCudaErrors(cudaMemcpy(histo, d_histo, sizeof(unsigned int) * binSize, cudaMemcpyDeviceToHost));
// Record the stop event and wait for the stop event to complete
checkCudaErrors(cudaEventRecord(stop, NULL));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventCreate(&start_single));
checkCudaErrors(cudaEventCreate(&stop_single));
checkCudaErrors(cudaEventRecord(start_single, NULL));
// Clean the first histogram as I re-use it afterwards
cleanHisto<<<1, threadNb>>>(d_histo, binSize);
cudaDeviceSynchronize();
// Launch the kernel on a single thread
histogram<<<1, 1,sizeof(unsigned int) * binSize>>>(d_data, d_histo, dataSize, binSize);
cudaDeviceSynchronize();
// Fetch the result of the last kernel onto the host
checkCudaErrors(cudaMemcpy(histo_single, d_histo, sizeof(unsigned int) * binSize, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaEventRecord(stop_single, NULL));
checkCudaErrors(cudaEventSynchronize(stop_single));
float msecTotal = 0.0f;
float msecTotal_single = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
checkCudaErrors(cudaEventElapsedTime(&msecTotal_single, start_single, stop_single));
double gigaFlops = (dataSize * 1.0e-9f) / (msecTotal / 1000.0f);
double gigaFlops_single = (dataSize * 1.0e-9f) / (msecTotal_single / 1000.0f);
// Print the histograms if the parameter
if (display == 1)
{
result(histo, threadNb, binSize);
result(histo_single, 1, binSize);
}
// Compare the results of the two histograms
if (compare(histo, histo_single, binSize))
{
printf("All good ! Histograms match\n");
}
else
{
printf("Wrong ! Histograms don't match\n");
}
// Print performances
printf("%d threads :\nCuda processing time = %.3fms, \n Perf = %.3f Gflops\n",threadNb, msecTotal, gigaFlops);
printf("1 thread :\nCuda processing time = %.3fms, \n Perf = %.3f Gflops\n", msecTotal_single, gigaFlops_single);
checkCudaErrors(cudaFree(d_data));
checkCudaErrors(cudaFree(d_histo));
free(histo);
free(histo_single);
free(data);
}
int main(int argc, char **argv)
{
int print = 0;
unsigned int binSize = MAX_BINS;
unsigned long long ds = 256;
char *dataSize = NULL;
cudaDeviceProp cudaprop;
// retrieve device
int dev = findCudaDevice(argc, (const char **)argv);
cudaGetDeviceProperties(&cudaprop, dev);
//Retrieving parameters
if (checkCmdLineFlag(argc, (const char **)argv, "size"))
{
getCmdLineArgumentString(argc, (const char **)argv, "size", &dataSize);
ds = atoll(dataSize);
}
if (checkCmdLineFlag(argc, (const char **)argv, "displayData"))
{
print = 1;
}
printf("Data Size is: %d \n", ds);
//Max is 2^32 as asked
if (ds >= 4294967296 || ds == 0) {
printf("Error: Data size > 4,294,967,296");
exit(EXIT_FAILURE);
}
//Defining the number of threads to follow the need (ds) with max value 256 (multiple of 32) and < 1024
int nbThread = min((int)ds, 256);
printf("nb thread: %d \n", nbThread);
//Defining the number of blocks to follow the need (if ds = 500 only 2 blocks) with max value a multiple of 30
int nbBlock = min(((int)ds/256),18000);
//if the data size is below 256 we still have to have atleast 1 block
if (nbBlock == 0) nbBlock = 1;
printf("nbblock: %d \n", nbBlock);
wrapper(ds, binSize, print, nbThread, nbBlock);
return EXIT_SUCCESS;
}
|
6b2f408d0ca764ea8ed3a743a46a133a7c3b5f78.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2017 Darius Rckert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
#include "saiga/cuda/cusparseHelper.h"
namespace Saiga {
namespace CUDA {
void testCuSparse(){
// 0 3 0 0 0
// 22 0 0 0 17
// 7 5 0 1 0
// 0 0 0 0 0
// 0 0 14 0 8
//in column major
std::vector<double> denseMatrix = {
0, 22, 7 , 0, 0,
3, 0, 5, 0, 0,
0, 0, 0, 0, 14,
0, 0, 1, 0, 0,
0, 17, 0, 0, 8
};
std::vector<double> denseVector = {
1,2,3,4,5
};
//result of the matrix vector product
std::vector<double> ytarget = {
6,107,21,0,82
};
std::vector<double> values = {
22,7,3,5,14,1,17,8
};
std::vector<int> rowIndx = {
1,2,0,2,4,2,1,4
};
std::vector<int> colPtr = {
0,2,4,5,6,8
};
thrust::device_vector<double> d_values = values;
thrust::device_vector<int> d_rowIndx = rowIndx;
thrust::device_vector<int> d_colPtr = colPtr;
thrust::device_vector<double> d_x = denseVector;
thrust::device_vector<double> d_y(denseVector.size(),0);
hipsparseMatDescr_t mat;
hipsparseCreateMatDescr(&mat);
double alpha = 1;
const double beta = 2;
hipsparseDcsrmv(cusparseHandle,HIPSPARSE_OPERATION_TRANSPOSE,5,5,values.size(),&alpha,mat,
thrust::raw_pointer_cast(d_values.data()),
thrust::raw_pointer_cast(d_colPtr.data()),
thrust::raw_pointer_cast(d_rowIndx.data()),
thrust::raw_pointer_cast(d_x.data()),
&beta,
thrust::raw_pointer_cast(d_y.data())
);
thrust::host_vector<double> y = d_y;
// for(double d : y){
// std::cout << d << " ";
// }
// std::cout << std::endl;
SAIGA_ASSERT(y == ytarget);
std::cout << "cuSPARSE test: SUCCESS!" << std::endl;
}
/* Matrix size */
#define N (275)
/* Host implementation of a simple version of sgemm */
static void simple_sgemm(int n, float alpha, const float *A, const float *B,
float beta, float *C)
{
int i;
int j;
int k;
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
float prod = 0;
for (k = 0; k < n; ++k)
{
prod += A[k * n + i] * B[j * n + k];
}
C[j * n + i] = alpha * prod + beta * C[j * n + i];
}
}
}
void testCuBLAS(){
hipblasStatus_t status;
float *h_A;
float *h_B;
float *h_C;
float *h_C_ref;
float *d_A = 0;
float *d_B = 0;
float *d_C = 0;
float alpha = 1.0f;
float beta = 0.0f;
int n2 = N * N;
int i;
float error_norm;
float ref_norm;
float diff;
/* Allocate host memory for the matrices */
h_A = (float *)malloc(n2 * sizeof(h_A[0]));
if (h_A == 0)
{
fprintf(stderr, "!!!! host memory allocation error (A)\n");
SAIGA_ASSERT(0);
}
h_B = (float *)malloc(n2 * sizeof(h_B[0]));
if (h_B == 0)
{
fprintf(stderr, "!!!! host memory allocation error (B)\n");
SAIGA_ASSERT(0);
}
h_C = (float *)malloc(n2 * sizeof(h_C[0]));
if (h_C == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
SAIGA_ASSERT(0);
}
/* Fill the matrices with test data */
for (i = 0; i < n2; i++)
{
h_A[i] = rand() / (float)RAND_MAX;
h_B[i] = rand() / (float)RAND_MAX;
h_C[i] = rand() / (float)RAND_MAX;
}
/* Allocate device memory for the matrices */
if (hipMalloc((void **)&d_A, n2 * sizeof(d_A[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
SAIGA_ASSERT(0);
}
if (hipMalloc((void **)&d_B, n2 * sizeof(d_B[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
SAIGA_ASSERT(0);
}
if (hipMalloc((void **)&d_C, n2 * sizeof(d_C[0])) != hipSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
SAIGA_ASSERT(0);
}
/* Initialize the device matrices with the host matrices */
status = hipblasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write A)\n");
SAIGA_ASSERT(0);
}
status = hipblasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write B)\n");
SAIGA_ASSERT(0);
}
status = hipblasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
SAIGA_ASSERT(0);
}
/* Performs operation using plain C code */
simple_sgemm(N, alpha, h_A, h_B, beta, h_C);
h_C_ref = h_C;
/* Performs operation using cublas */
status = hipblasSgemm(cublashandle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
SAIGA_ASSERT(0);
}
/* Allocate host memory for reading back the result from device memory */
h_C = (float *)malloc(n2 * sizeof(h_C[0]));
if (h_C == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
SAIGA_ASSERT(0);
}
/* Read the result back */
status = hipblasGetVector(n2, sizeof(h_C[0]), d_C, 1, h_C, 1);
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
SAIGA_ASSERT(0);
}
/* Check result against reference */
error_norm = 0;
ref_norm = 0;
for (i = 0; i < n2; ++i)
{
diff = h_C_ref[i] - h_C[i];
error_norm += diff * diff;
ref_norm += h_C_ref[i] * h_C_ref[i];
}
error_norm = (float)sqrt((double)error_norm);
ref_norm = (float)sqrt((double)ref_norm);
if (fabs(ref_norm) < 1e-7)
{
fprintf(stderr, "!!!! reference norm is 0\n");
SAIGA_ASSERT(0);
}
/* Memory clean up */
free(h_A);
free(h_B);
free(h_C);
free(h_C_ref);
if (hipFree(d_A) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
SAIGA_ASSERT(0);
}
if (hipFree(d_B) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
SAIGA_ASSERT(0);
}
if (hipFree(d_C) != hipSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
SAIGA_ASSERT(0);
}
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! shutdown error (A)\n");
SAIGA_ASSERT(0);
}
if (error_norm / ref_norm < 1e-6f)
{
std::cout << "cuBLAS test: SUCCESS!" << std::endl;
}
else
{
printf("simpleCUBLAS test failed.\n");
SAIGA_ASSERT(0);
}
}
}
}
| 6b2f408d0ca764ea8ed3a743a46a133a7c3b5f78.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
#include "saiga/cuda/cusparseHelper.h"
namespace Saiga {
namespace CUDA {
void testCuSparse(){
// 0 3 0 0 0
// 22 0 0 0 17
// 7 5 0 1 0
// 0 0 0 0 0
// 0 0 14 0 8
//in column major
std::vector<double> denseMatrix = {
0, 22, 7 , 0, 0,
3, 0, 5, 0, 0,
0, 0, 0, 0, 14,
0, 0, 1, 0, 0,
0, 17, 0, 0, 8
};
std::vector<double> denseVector = {
1,2,3,4,5
};
//result of the matrix vector product
std::vector<double> ytarget = {
6,107,21,0,82
};
std::vector<double> values = {
22,7,3,5,14,1,17,8
};
std::vector<int> rowIndx = {
1,2,0,2,4,2,1,4
};
std::vector<int> colPtr = {
0,2,4,5,6,8
};
thrust::device_vector<double> d_values = values;
thrust::device_vector<int> d_rowIndx = rowIndx;
thrust::device_vector<int> d_colPtr = colPtr;
thrust::device_vector<double> d_x = denseVector;
thrust::device_vector<double> d_y(denseVector.size(),0);
cusparseMatDescr_t mat;
cusparseCreateMatDescr(&mat);
double alpha = 1;
const double beta = 2;
cusparseDcsrmv(cusparseHandle,CUSPARSE_OPERATION_TRANSPOSE,5,5,values.size(),&alpha,mat,
thrust::raw_pointer_cast(d_values.data()),
thrust::raw_pointer_cast(d_colPtr.data()),
thrust::raw_pointer_cast(d_rowIndx.data()),
thrust::raw_pointer_cast(d_x.data()),
&beta,
thrust::raw_pointer_cast(d_y.data())
);
thrust::host_vector<double> y = d_y;
// for(double d : y){
// std::cout << d << " ";
// }
// std::cout << std::endl;
SAIGA_ASSERT(y == ytarget);
std::cout << "cuSPARSE test: SUCCESS!" << std::endl;
}
/* Matrix size */
#define N (275)
/* Host implementation of a simple version of sgemm */
static void simple_sgemm(int n, float alpha, const float *A, const float *B,
float beta, float *C)
{
int i;
int j;
int k;
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
float prod = 0;
for (k = 0; k < n; ++k)
{
prod += A[k * n + i] * B[j * n + k];
}
C[j * n + i] = alpha * prod + beta * C[j * n + i];
}
}
}
void testCuBLAS(){
cublasStatus_t status;
float *h_A;
float *h_B;
float *h_C;
float *h_C_ref;
float *d_A = 0;
float *d_B = 0;
float *d_C = 0;
float alpha = 1.0f;
float beta = 0.0f;
int n2 = N * N;
int i;
float error_norm;
float ref_norm;
float diff;
/* Allocate host memory for the matrices */
h_A = (float *)malloc(n2 * sizeof(h_A[0]));
if (h_A == 0)
{
fprintf(stderr, "!!!! host memory allocation error (A)\n");
SAIGA_ASSERT(0);
}
h_B = (float *)malloc(n2 * sizeof(h_B[0]));
if (h_B == 0)
{
fprintf(stderr, "!!!! host memory allocation error (B)\n");
SAIGA_ASSERT(0);
}
h_C = (float *)malloc(n2 * sizeof(h_C[0]));
if (h_C == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
SAIGA_ASSERT(0);
}
/* Fill the matrices with test data */
for (i = 0; i < n2; i++)
{
h_A[i] = rand() / (float)RAND_MAX;
h_B[i] = rand() / (float)RAND_MAX;
h_C[i] = rand() / (float)RAND_MAX;
}
/* Allocate device memory for the matrices */
if (cudaMalloc((void **)&d_A, n2 * sizeof(d_A[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
SAIGA_ASSERT(0);
}
if (cudaMalloc((void **)&d_B, n2 * sizeof(d_B[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
SAIGA_ASSERT(0);
}
if (cudaMalloc((void **)&d_C, n2 * sizeof(d_C[0])) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
SAIGA_ASSERT(0);
}
/* Initialize the device matrices with the host matrices */
status = cublasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write A)\n");
SAIGA_ASSERT(0);
}
status = cublasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write B)\n");
SAIGA_ASSERT(0);
}
status = cublasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write C)\n");
SAIGA_ASSERT(0);
}
/* Performs operation using plain C code */
simple_sgemm(N, alpha, h_A, h_B, beta, h_C);
h_C_ref = h_C;
/* Performs operation using cublas */
status = cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! kernel execution error.\n");
SAIGA_ASSERT(0);
}
/* Allocate host memory for reading back the result from device memory */
h_C = (float *)malloc(n2 * sizeof(h_C[0]));
if (h_C == 0)
{
fprintf(stderr, "!!!! host memory allocation error (C)\n");
SAIGA_ASSERT(0);
}
/* Read the result back */
status = cublasGetVector(n2, sizeof(h_C[0]), d_C, 1, h_C, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (read C)\n");
SAIGA_ASSERT(0);
}
/* Check result against reference */
error_norm = 0;
ref_norm = 0;
for (i = 0; i < n2; ++i)
{
diff = h_C_ref[i] - h_C[i];
error_norm += diff * diff;
ref_norm += h_C_ref[i] * h_C_ref[i];
}
error_norm = (float)sqrt((double)error_norm);
ref_norm = (float)sqrt((double)ref_norm);
if (fabs(ref_norm) < 1e-7)
{
fprintf(stderr, "!!!! reference norm is 0\n");
SAIGA_ASSERT(0);
}
/* Memory clean up */
free(h_A);
free(h_B);
free(h_C);
free(h_C_ref);
if (cudaFree(d_A) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (A)\n");
SAIGA_ASSERT(0);
}
if (cudaFree(d_B) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (B)\n");
SAIGA_ASSERT(0);
}
if (cudaFree(d_C) != cudaSuccess)
{
fprintf(stderr, "!!!! memory free error (C)\n");
SAIGA_ASSERT(0);
}
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! shutdown error (A)\n");
SAIGA_ASSERT(0);
}
if (error_norm / ref_norm < 1e-6f)
{
std::cout << "cuBLAS test: SUCCESS!" << std::endl;
}
else
{
printf("simpleCUBLAS test failed.\n");
SAIGA_ASSERT(0);
}
}
}
}
|
d916ce7c42c86bf79e877fda1dee98e0c13896e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void power_spectrum_kernel(int row_length, float *A_in, int32_t ldi, float *A_out, int32_t ldo) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
float2 val = reinterpret_cast<float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
Aw[idx] = ret;
}
// handle special case
if (threadIdx.x == 0) {
float real = Ar[0];
// cufft puts this at the end, this is different than kaldi does with its
// own
// internal implementation
float im = Ar[row_length];
Aw[0] = real * real;
Aw[half_length] = im * im;
}
} | d916ce7c42c86bf79e877fda1dee98e0c13896e2.cu | #include "includes.h"
__global__ void power_spectrum_kernel(int row_length, float *A_in, int32_t ldi, float *A_out, int32_t ldo) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
float *Ar = A_in + block_id * ldi;
float *Aw = A_out + block_id * ldo;
int half_length = row_length / 2;
for (int idx = thread_id; idx < half_length; idx += CU1DBLOCK) {
// ignore special case
if (idx == 0) continue;
float2 val = reinterpret_cast<float2 *>(Ar)[idx];
float ret = val.x * val.x + val.y * val.y;
Aw[idx] = ret;
}
// handle special case
if (threadIdx.x == 0) {
float real = Ar[0];
// cufft puts this at the end, this is different than kaldi does with its
// own
// internal implementation
float im = Ar[row_length];
Aw[0] = real * real;
Aw[half_length] = im * im;
}
} |
dcb03e925ead3306dea06b2e7a0d8097b36f08e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! jacobi.cu
*/
#include "jacobi.cuh"
#include <iostream>
#include <fstream>
__global__
void doJacobiIteration(int dimX, int dimY, float * in, float * out)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x,
j = blockIdx.y * blockDim.y + threadIdx.y;
const int offset = i * dimY + j;
// Remember to do nothing for boundary values.
if( i < 1 || i > dimX - 2 )
return;
if( j < 1 || j > dimY - 2 )
return;
out += offset;
in += offset;
// Jacobi iteration for harmonic means the ouput is average of neighbor points in grid.
*out = *(in - 1) * 0.25 +
*(in + 1) * 0.25 +
*(in - dimY) * 0.25 +
*(in + dimY) * 0.25;
}
__host__
void copyToDevice(float * values, const int dimensions[2], float ** in, float ** out)
{
const int memSize = dimensions[0] * dimensions[1] * sizeof(float);
if (hipMalloc( in, memSize ) != hipSuccess)
throw "Can't allocate in on device.";
if (hipMalloc( out, memSize ) != hipSuccess)
throw "Can't allocate out on device.";
if(hipMemcpy( *in, values, memSize, hipMemcpyHostToDevice ) != hipSuccess)
throw "Can't copy values to in on device.";
if(hipMemcpy( *out, values, memSize, hipMemcpyHostToDevice ) != hipSuccess)
throw "Can't copy values to out on device.";
}
__host__
void setBoundaryValues(float * values, const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f)
{
float stride[2], pos;
int i, last[2] = {dimensions[0] - 1, dimensions[1] - 1};
float * memPos1, * memPos2;
for (i = 0; i < 2; i++)
stride[i] = (upperRight[i] - lowerLeft[i]) / last[i];
// Fill in top and bottom.
memPos1 = values;
memPos2 = values + (dimensions[1]-1);
for (i = 0, pos = lowerLeft[0]; i < dimensions[0]; i++, pos += stride[0], memPos1+=dimensions[1], memPos2+=dimensions[1])
{
*memPos1 = f(pos, lowerLeft[1]);
*memPos2 = f(pos, upperRight[1]);
}
// Fill in sides.
memPos1 = values + 1;
memPos2 = values + (dimensions[0] - 1) * dimensions[1] + 1;
for (i = 0, pos = lowerLeft[1]+stride[1]; i < dimensions[0] - 2; i++, pos += stride[1], memPos1++ , memPos2++ )
{
*memPos1 = f(lowerLeft[0], pos);
*memPos2 = f(upperRight[0], pos);
}
}
__host__
float * makeInitialValues( const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f )
{
float * values = new float[dimensions[0] * dimensions[1]],
* rowPos = values,
* colPos;
// We don't do anything for boundary values yet.
rowPos = values + dimensions[1];
for (int i = 0; i < dimensions[0] - 2; i++, rowPos += dimensions[1])
{
colPos = rowPos + 1;
for (int j = 0; j < dimensions[1] - 2; j++, colPos++)
*colPos = 0;
}
setBoundaryValues( values, dimensions, lowerLeft, upperRight, f );
return values;
}
__host__
float * makeTrueValues(const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f)
{
float *values = new float[dimensions[0] * dimensions[1]],
*rowPosition = values,
*colPosition;
float stride[2] {(upperRight[0] - lowerLeft[0]) / static_cast<float>(dimensions[0] - 1),
(upperRight[1] - lowerLeft[1]) / static_cast<float>(dimensions[1] - 1) };
int i, j;
float x, y;
for (i = 0, x = lowerLeft[0]; i < dimensions[0]; i++, x += stride[0], rowPosition += dimensions[1])
{
colPosition = rowPosition;
for (j = 0, y = lowerLeft[1]; j < dimensions[1] ; j++, y += stride[1], colPosition++)
*colPosition = f(x, y);
}
return values;
}
__host__
float * getErrors(const float * values, const float * trueValues, const int dimensions[2])
{
float * errors = new float[dimensions[0] * dimensions[1]];
unsigned int position = 0;
for ( int i = 0; i < dimensions[0]; i++)
{
for (int j = 0; j < dimensions[1]; j++, position++)
errors[position] = values[position] - trueValues[position];
}
return errors;
}
__host__
float * getRelativeErrors(const float * errors, const float * trueValues, const int dimensions[2], float cutOff)
{
float * relErrors = new float[dimensions[0] * dimensions[1]], * newError;
float absError, absTrue;
const float log10 = ::log(10);
newError = relErrors;
for(int i = 0; i < dimensions[0]; i++)
{
for(int j = 0; j < dimensions[1]; j++, newError++, errors++, trueValues++)
{
absError = abs(*errors);
absTrue = abs(*trueValues);
// Use a cutoff as a work around to dividing by 0.
if (absTrue < cutOff)
absTrue = cutOff;
// Now use cutoff to work around logarithm of 0.
if (absError / absTrue < cutOff)
*newError = ::log(cutOff) / log10;
else
*newError = ::log(absError / absTrue) / log10;
}
}
return relErrors;
}
__host__
float getAverageError(const float * values, const float * trueValues, const int dimensions[2]) //dimX, const int dimY )
{
// Now get the average error.
double error = 0;
int offset;
for (int i = 0; i < dimensions[0]; i++)
{
offset = i * dimensions[1];
for (int j = 0; j < dimensions[1]; j++, offset++)
{
error += abs(values[offset] - trueValues[offset]);
}
}
error /= dimensions[0] * dimensions[1];
return static_cast<float>(error);
}
__host__
void printValues(const int dimensions[2], const float * values)
{
const float * pos = values;
for (int i = 0; i < dimensions[0]; i++)
{
for (int j = 0; j < dimensions[1]; j++, pos++)
std::cout << *pos << ",\t";
std::cout << std::endl;
}
std::cout << std::endl;
}
__host__
void saveToFile(const float * values, const int dimensions[2], const float lowerLeft[2], const float upperRight[2],
const char * filename)
{
std::ofstream myFile(filename, std::ios::binary);
if(!myFile.is_open()) {
throw "Unable to open file.";
}
unsigned int sizeValues = dimensions[0] * dimensions[1] * sizeof(float);
float * tuples = new float[dimensions[0] * dimensions[1] * 3], * coord;
float position[2], skip[2];
for(int i = 0; i < 2; i++)
{
position[i] = lowerLeft[i];
skip[i] = (upperRight[i] - lowerLeft[i]) / (dimensions[i] - 1);
}
coord = tuples;
for( int i = 0; i < dimensions[0]; i++, position[0] += skip[0])
{
position[1] = lowerLeft[1];
for( int j = 0; j < dimensions[1]; j++, position[1] += skip[1], values++)
{
*coord = position[0];
coord++;
*coord = position[1];
coord++;
*coord = *values;
coord++;
}
}
myFile.write((const char *) tuples, 3 * sizeValues);
myFile.close();
delete tuples;
}
| dcb03e925ead3306dea06b2e7a0d8097b36f08e2.cu | /*! jacobi.cu
*/
#include "jacobi.cuh"
#include <iostream>
#include <fstream>
__global__
void doJacobiIteration(int dimX, int dimY, float * in, float * out)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x,
j = blockIdx.y * blockDim.y + threadIdx.y;
const int offset = i * dimY + j;
// Remember to do nothing for boundary values.
if( i < 1 || i > dimX - 2 )
return;
if( j < 1 || j > dimY - 2 )
return;
out += offset;
in += offset;
// Jacobi iteration for harmonic means the ouput is average of neighbor points in grid.
*out = *(in - 1) * 0.25 +
*(in + 1) * 0.25 +
*(in - dimY) * 0.25 +
*(in + dimY) * 0.25;
}
__host__
void copyToDevice(float * values, const int dimensions[2], float ** in, float ** out)
{
const int memSize = dimensions[0] * dimensions[1] * sizeof(float);
if (cudaMalloc( in, memSize ) != cudaSuccess)
throw "Can't allocate in on device.";
if (cudaMalloc( out, memSize ) != cudaSuccess)
throw "Can't allocate out on device.";
if(cudaMemcpy( *in, values, memSize, cudaMemcpyHostToDevice ) != cudaSuccess)
throw "Can't copy values to in on device.";
if(cudaMemcpy( *out, values, memSize, cudaMemcpyHostToDevice ) != cudaSuccess)
throw "Can't copy values to out on device.";
}
__host__
void setBoundaryValues(float * values, const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f)
{
float stride[2], pos;
int i, last[2] = {dimensions[0] - 1, dimensions[1] - 1};
float * memPos1, * memPos2;
for (i = 0; i < 2; i++)
stride[i] = (upperRight[i] - lowerLeft[i]) / last[i];
// Fill in top and bottom.
memPos1 = values;
memPos2 = values + (dimensions[1]-1);
for (i = 0, pos = lowerLeft[0]; i < dimensions[0]; i++, pos += stride[0], memPos1+=dimensions[1], memPos2+=dimensions[1])
{
*memPos1 = f(pos, lowerLeft[1]);
*memPos2 = f(pos, upperRight[1]);
}
// Fill in sides.
memPos1 = values + 1;
memPos2 = values + (dimensions[0] - 1) * dimensions[1] + 1;
for (i = 0, pos = lowerLeft[1]+stride[1]; i < dimensions[0] - 2; i++, pos += stride[1], memPos1++ , memPos2++ )
{
*memPos1 = f(lowerLeft[0], pos);
*memPos2 = f(upperRight[0], pos);
}
}
__host__
float * makeInitialValues( const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f )
{
float * values = new float[dimensions[0] * dimensions[1]],
* rowPos = values,
* colPos;
// We don't do anything for boundary values yet.
rowPos = values + dimensions[1];
for (int i = 0; i < dimensions[0] - 2; i++, rowPos += dimensions[1])
{
colPos = rowPos + 1;
for (int j = 0; j < dimensions[1] - 2; j++, colPos++)
*colPos = 0;
}
setBoundaryValues( values, dimensions, lowerLeft, upperRight, f );
return values;
}
__host__
float * makeTrueValues(const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f)
{
float *values = new float[dimensions[0] * dimensions[1]],
*rowPosition = values,
*colPosition;
float stride[2] {(upperRight[0] - lowerLeft[0]) / static_cast<float>(dimensions[0] - 1),
(upperRight[1] - lowerLeft[1]) / static_cast<float>(dimensions[1] - 1) };
int i, j;
float x, y;
for (i = 0, x = lowerLeft[0]; i < dimensions[0]; i++, x += stride[0], rowPosition += dimensions[1])
{
colPosition = rowPosition;
for (j = 0, y = lowerLeft[1]; j < dimensions[1] ; j++, y += stride[1], colPosition++)
*colPosition = f(x, y);
}
return values;
}
__host__
float * getErrors(const float * values, const float * trueValues, const int dimensions[2])
{
float * errors = new float[dimensions[0] * dimensions[1]];
unsigned int position = 0;
for ( int i = 0; i < dimensions[0]; i++)
{
for (int j = 0; j < dimensions[1]; j++, position++)
errors[position] = values[position] - trueValues[position];
}
return errors;
}
__host__
float * getRelativeErrors(const float * errors, const float * trueValues, const int dimensions[2], float cutOff)
{
float * relErrors = new float[dimensions[0] * dimensions[1]], * newError;
float absError, absTrue;
const float log10 = std::log(10);
newError = relErrors;
for(int i = 0; i < dimensions[0]; i++)
{
for(int j = 0; j < dimensions[1]; j++, newError++, errors++, trueValues++)
{
absError = abs(*errors);
absTrue = abs(*trueValues);
// Use a cutoff as a work around to dividing by 0.
if (absTrue < cutOff)
absTrue = cutOff;
// Now use cutoff to work around logarithm of 0.
if (absError / absTrue < cutOff)
*newError = std::log(cutOff) / log10;
else
*newError = std::log(absError / absTrue) / log10;
}
}
return relErrors;
}
__host__
float getAverageError(const float * values, const float * trueValues, const int dimensions[2]) //dimX, const int dimY )
{
// Now get the average error.
double error = 0;
int offset;
for (int i = 0; i < dimensions[0]; i++)
{
offset = i * dimensions[1];
for (int j = 0; j < dimensions[1]; j++, offset++)
{
error += abs(values[offset] - trueValues[offset]);
}
}
error /= dimensions[0] * dimensions[1];
return static_cast<float>(error);
}
__host__
void printValues(const int dimensions[2], const float * values)
{
const float * pos = values;
for (int i = 0; i < dimensions[0]; i++)
{
for (int j = 0; j < dimensions[1]; j++, pos++)
std::cout << *pos << ",\t";
std::cout << std::endl;
}
std::cout << std::endl;
}
__host__
void saveToFile(const float * values, const int dimensions[2], const float lowerLeft[2], const float upperRight[2],
const char * filename)
{
std::ofstream myFile(filename, std::ios::binary);
if(!myFile.is_open()) {
throw "Unable to open file.";
}
unsigned int sizeValues = dimensions[0] * dimensions[1] * sizeof(float);
float * tuples = new float[dimensions[0] * dimensions[1] * 3], * coord;
float position[2], skip[2];
for(int i = 0; i < 2; i++)
{
position[i] = lowerLeft[i];
skip[i] = (upperRight[i] - lowerLeft[i]) / (dimensions[i] - 1);
}
coord = tuples;
for( int i = 0; i < dimensions[0]; i++, position[0] += skip[0])
{
position[1] = lowerLeft[1];
for( int j = 0; j < dimensions[1]; j++, position[1] += skip[1], values++)
{
*coord = position[0];
coord++;
*coord = position[1];
coord++;
*coord = *values;
coord++;
}
}
myFile.write((const char *) tuples, 3 * sizeValues);
myFile.close();
delete tuples;
}
|
f4fbb11c381cb88a6be43b0e5eeca1cd8b97b35c.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogramStorage
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
private:
using GradientSumT = GradientPairInt64;
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
// Large buffer of zeroed memory, caches histograms
dh::device_vector<typename GradientSumT::ValueT> data_;
// If we run out of storage allocate one histogram at a time
// in overflow. Not cached, overwritten when a new histogram
// is requested
dh::device_vector<typename GradientSumT::ValueT> overflow_;
std::map<int, size_t> overflow_nidx_map_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");
public:
// Start with about 16mb
DeviceHistogramStorage() { data_.reserve(1 << 22); }
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
overflow_nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend() ||
overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
}
int Bins() const { return n_bins_; }
size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }
void AllocateHistograms(const std::vector<int>& new_nidxs) {
for (int nidx : new_nidxs) {
CHECK(!HistogramExists(nidx));
}
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
if (used_size >= kStopGrowingSize) {
// Use overflow
// Delete previous entries
overflow_nidx_map_.clear();
overflow_.resize(HistogramSize() * new_nidxs.size());
// Zero memory
auto d_data = overflow_.data().get();
dh::LaunchN(overflow_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0; });
// Append new histograms
for (int nidx : new_nidxs) {
overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
}
} else {
CHECK_GE(data_.size(), used_size);
// Expand if necessary
if (data_.size() < new_used_size) {
data_.resize(::max(data_.size() * 2, new_used_size));
}
// Append new histograms
for (int nidx : new_nidxs) {
nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogramStorage<> hist{};
dh::device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::device_vector<int> monotone_constraints;
dh::device_vector<float> update_predictions;
/*! \brief Sum gradient for each node. */
std::vector<GradientPairPrecise> node_sum_gradients;
TrainParam param;
std::unique_ptr<GradientQuantizer> histogram_rounding;
dh::PinnedMemory pinned;
dh::PinnedMemory pinned2;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(256);
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPairPrecise{});
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(hipMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), hipMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding.reset(new GradientQuantizer(this->gpair));
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
EvaluateSplitSharedInputs shared_inputs{
gpu_param,
*histogram_rounding,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, *histogram_rounding, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1, candidate.split.left_sum,
left_feature_set, hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1, candidate.split.right_sum,
right_feature_set, hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t number_active_features = h_node_inputs[0].feature_set.size();
for (auto input : h_node_inputs) {
CHECK_EQ(input.feature_set.size(), number_active_features)
<< "Current implementation assumes that the number of active features "
"(after sampling) in any node is the same";
}
dh::safe_cuda(hipMemcpyAsync(d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs),
hipMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, number_active_features, dh::ToSpan(d_node_inputs),
shared_inputs, dh::ToSpan(entries));
dh::safe_cuda(hipMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
hipMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
void BuildHist(int nidx) {
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair,
d_ridx, d_node_hist, *histogram_rounding);
}
// Attempt to do subtraction trick
// return true if succeeded
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
return false;
}
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
return true;
}
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
RegTree::Node split_node;
FeatureType split_type;
common::CatBitField node_cats;
};
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
if (candidates.empty()) return;
std::vector<int> nidx(candidates.size());
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
nidx.at(i) = e.nid;
left_nidx.at(i) = split_node.LeftChild();
right_nidx.at(i) = split_node.RightChild();
split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
}
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
row_partitioner->UpdatePositionBatch(
nidx, left_nidx, right_nidx, split_data,
[=] __device__(bst_uint ridx, const NodeSplitData& data) {
// given a row index, returns the node id it belongs to
bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
// Missing value
bool go_left = true;
if (isnan(cut_value)) {
go_left = data.split_node.DefaultLeft();
} else {
if (data.split_type == FeatureType::kCategorical) {
go_left = common::Decision<false>(data.node_cats.Bits(), cut_value,
data.split_node.DefaultLeft());
} else {
go_left = cut_value <= data.split_node.SplitCond();
}
}
return go_left;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
// Prediction cache will not be used with external memory
if (!p_fmat->SingleColBlock()) {
if (task.UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective function can not be used with external memory.";
}
p_out_position->Resize(0);
update_predictions.clear();
return;
}
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
p_out_position);
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
update_predictions.resize(row_partitioner->GetRows().size());
auto d_update_predictions = dh::ToSpan(update_predictions);
p_out_position->SetDevice(ctx_->gpu_id);
p_out_position->Resize(row_partitioner->GetRows().size());
auto new_position_op = [=] __device__(size_t row_id, int position) {
// What happens if user prune the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats = categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision<false>(node_cats, element, node.DefaultLeft());
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
d_update_predictions[row_id] = node.LeafValue();
return position;
}; // NOLINT
auto d_out_position = p_out_position->DeviceSpan();
row_partitioner->FinalisePosition(d_out_position, new_position_op);
dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
bst_node_t position = d_out_position[idx];
d_update_predictions[idx] = d_nodes[position].LeafValue();
bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
d_out_position[idx] = is_row_sampled ? ~position : position;
});
}
bool UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
if (update_predictions.empty()) {
return false;
}
CHECK(p_tree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_update_predictions = dh::ToSpan(update_predictions);
CHECK_EQ(out_preds_d.Size(), d_update_predictions.size());
dh::LaunchN(out_preds_d.Size(), [=] XGBOOST_DEVICE(size_t idx) mutable {
out_preds_d(idx) += d_update_predictions[idx];
});
return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
void AllReduceHist(int nidx, dh::AllReducer* reducer, int num_histograms) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
using ReduceT = typename std::remove_pointer<decltype(d_node_hist)>::type::ValueT;
reducer->AllReduceSum(reinterpret_cast<ReduceT*>(d_node_hist),
reinterpret_cast<ReduceT*>(d_node_hist),
page->Cuts().TotalBins() * 2 * num_histograms);
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates, dh::AllReducer* reducer,
const RegTree& tree) {
if (candidates.empty()) return;
// Some nodes we will manually compute histograms
// others we will do by subtraction
std::vector<int> hist_nidx;
std::vector<int> subtraction_nidx;
for (auto& e : candidates) {
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = e.split.right_sum.GetHess() < e.split.left_sum.GetHess();
if (fewer_right) {
hist_nidx.emplace_back(tree[e.nid].RightChild());
subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
} else {
hist_nidx.emplace_back(tree[e.nid].LeftChild());
subtraction_nidx.emplace_back(tree[e.nid].RightChild());
}
}
std::vector<int> all_new = hist_nidx;
all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
// Allocate the histograms
// Guaranteed contiguous memory
hist.AllocateHistograms(all_new);
for (auto nidx : hist_nidx) {
this->BuildHist(nidx);
}
// Reduce all in one go
// This gives much better latency in a distributed setting
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());
for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer, 1);
}
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!rabit::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
// should be set to nan in evaluation split.
CHECK(common::CheckNAN(candidate.split.fvalue));
std::vector<common::CatBitField::value_type> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto n_bins_feature = page->Cuts().FeatureBins(candidate.split.findex);
split_cats.resize(common::CatBitField::ComputeStorageSize(n_bins_feature), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
} else {
CHECK(!common::CheckNAN(candidate.split.fvalue));
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
const auto& parent = tree[candidate.nid];
std::size_t max_nidx = ::max(parent.LeftChild(), parent.RightChild());
// Grow as needed
if (node_sum_gradients.size() <= max_nidx) {
node_sum_gradients.resize(max_nidx * 2 + 1);
}
node_sum_gradients[parent.LeftChild()] = candidate.split.left_sum;
node_sum_gradients[parent.RightChild()] = candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
parent.RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
auto gpair_it = dh::MakeTransformIterator<GradientPairPrecise>(
dh::tbegin(gpair), [] __device__(auto const& gpair) { return GradientPairPrecise{gpair}; });
GradientPairPrecise root_sum =
dh::Reduce(thrust::hip::par(alloc), gpair_it, gpair_it + gpair.size(),
GradientPairPrecise{}, thrust::plus<GradientPairPrecise>{});
rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double*>(&root_sum), 2);
hist.AllocateHistograms({kRootNIdx});
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer, 1);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum);
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo task,
RegTree* p_tree, dh::AllReducer* reducer,
HostDeviceVector<bst_node_t>* p_out_position) {
auto& tree = *p_tree;
// Process maximum 32 nodes at a time
Driver<GPUExpandEntry> driver(param, 32);
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
for (auto& candidate : expand_set) {
this->ApplySplit(candidate, p_tree);
}
// Get the candidates we are allowed to expand further
// e.g. We do not bother further processing nodes whose children are beyond max depth
std::vector<GPUExpandEntry> filtered_expand_set;
std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
[&](const auto& e) { return driver.IsChildValid(e); });
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());
monitor.Start("UpdatePosition");
// Update position is only run when child is valid, instead of right after apply
// split (as in approx tree method). Hense we have the finalise position call
// in GPU Hist.
this->UpdatePosition(filtered_expand_set, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(filtered_expand_set, reducer, tree);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
monitor.Stop("EvaluateSplits");
dh::DefaultStream().Sync();
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat, task, p_out_position);
monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
param_.UpdateAllowUnknown(args);
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
FromJson(config.at("train_param"), ¶m_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
out["train_param"] = ToJson(param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree, &out_position[t_idx]);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
++t_idx;
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({ctx_->gpu_id}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
ctx_->gpu_id,
param_.max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker.reset(new GPUHistMakerDevice<GradientSumT>(
ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_,
column_sampling_seed, info_->num_col_, batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree{}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, &reducer_, p_out_position);
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::VectorView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return result;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
char const* Name() const override { return "grow_gpu_hist"; }
private:
bool initialised_{false};
GPUHistMakerTrainParam hist_maker_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_{nullptr};
RegTree const* p_last_tree_{nullptr};
ObjInfo task_;
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](GenericParameter const* tparam, ObjInfo task) {
return new GPUHistMaker(tparam, task);
});
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| f4fbb11c381cb88a6be43b0e5eeca1cd8b97b35c.cu | /*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogramStorage
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
private:
using GradientSumT = GradientPairInt64;
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
// Large buffer of zeroed memory, caches histograms
dh::device_vector<typename GradientSumT::ValueT> data_;
// If we run out of storage allocate one histogram at a time
// in overflow. Not cached, overwritten when a new histogram
// is requested
dh::device_vector<typename GradientSumT::ValueT> overflow_;
std::map<int, size_t> overflow_nidx_map_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");
public:
// Start with about 16mb
DeviceHistogramStorage() { data_.reserve(1 << 22); }
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
overflow_nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend() ||
overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
}
int Bins() const { return n_bins_; }
size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }
void AllocateHistograms(const std::vector<int>& new_nidxs) {
for (int nidx : new_nidxs) {
CHECK(!HistogramExists(nidx));
}
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
if (used_size >= kStopGrowingSize) {
// Use overflow
// Delete previous entries
overflow_nidx_map_.clear();
overflow_.resize(HistogramSize() * new_nidxs.size());
// Zero memory
auto d_data = overflow_.data().get();
dh::LaunchN(overflow_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0; });
// Append new histograms
for (int nidx : new_nidxs) {
overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
}
} else {
CHECK_GE(data_.size(), used_size);
// Expand if necessary
if (data_.size() < new_used_size) {
data_.resize(std::max(data_.size() * 2, new_used_size));
}
// Append new histograms
for (int nidx : new_nidxs) {
nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogramStorage<> hist{};
dh::device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::device_vector<int> monotone_constraints;
dh::device_vector<float> update_predictions;
/*! \brief Sum gradient for each node. */
std::vector<GradientPairPrecise> node_sum_gradients;
TrainParam param;
std::unique_ptr<GradientQuantizer> histogram_rounding;
dh::PinnedMemory pinned;
dh::PinnedMemory pinned2;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(256);
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPairPrecise{});
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(cudaMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), cudaMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding.reset(new GradientQuantizer(this->gpair));
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
EvaluateSplitSharedInputs shared_inputs{
gpu_param,
*histogram_rounding,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, *histogram_rounding, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1, candidate.split.left_sum,
left_feature_set, hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1, candidate.split.right_sum,
right_feature_set, hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t number_active_features = h_node_inputs[0].feature_set.size();
for (auto input : h_node_inputs) {
CHECK_EQ(input.feature_set.size(), number_active_features)
<< "Current implementation assumes that the number of active features "
"(after sampling) in any node is the same";
}
dh::safe_cuda(cudaMemcpyAsync(d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs),
cudaMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, number_active_features, dh::ToSpan(d_node_inputs),
shared_inputs, dh::ToSpan(entries));
dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
cudaMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
void BuildHist(int nidx) {
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair,
d_ridx, d_node_hist, *histogram_rounding);
}
// Attempt to do subtraction trick
// return true if succeeded
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
return false;
}
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
return true;
}
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
RegTree::Node split_node;
FeatureType split_type;
common::CatBitField node_cats;
};
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
if (candidates.empty()) return;
std::vector<int> nidx(candidates.size());
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
nidx.at(i) = e.nid;
left_nidx.at(i) = split_node.LeftChild();
right_nidx.at(i) = split_node.RightChild();
split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
}
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
row_partitioner->UpdatePositionBatch(
nidx, left_nidx, right_nidx, split_data,
[=] __device__(bst_uint ridx, const NodeSplitData& data) {
// given a row index, returns the node id it belongs to
bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
// Missing value
bool go_left = true;
if (isnan(cut_value)) {
go_left = data.split_node.DefaultLeft();
} else {
if (data.split_type == FeatureType::kCategorical) {
go_left = common::Decision<false>(data.node_cats.Bits(), cut_value,
data.split_node.DefaultLeft());
} else {
go_left = cut_value <= data.split_node.SplitCond();
}
}
return go_left;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
// Prediction cache will not be used with external memory
if (!p_fmat->SingleColBlock()) {
if (task.UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective function can not be used with external memory.";
}
p_out_position->Resize(0);
update_predictions.clear();
return;
}
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
p_out_position);
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
update_predictions.resize(row_partitioner->GetRows().size());
auto d_update_predictions = dh::ToSpan(update_predictions);
p_out_position->SetDevice(ctx_->gpu_id);
p_out_position->Resize(row_partitioner->GetRows().size());
auto new_position_op = [=] __device__(size_t row_id, int position) {
// What happens if user prune the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats = categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision<false>(node_cats, element, node.DefaultLeft());
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
d_update_predictions[row_id] = node.LeafValue();
return position;
}; // NOLINT
auto d_out_position = p_out_position->DeviceSpan();
row_partitioner->FinalisePosition(d_out_position, new_position_op);
dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
bst_node_t position = d_out_position[idx];
d_update_predictions[idx] = d_nodes[position].LeafValue();
bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
d_out_position[idx] = is_row_sampled ? ~position : position;
});
}
bool UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
if (update_predictions.empty()) {
return false;
}
CHECK(p_tree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_update_predictions = dh::ToSpan(update_predictions);
CHECK_EQ(out_preds_d.Size(), d_update_predictions.size());
dh::LaunchN(out_preds_d.Size(), [=] XGBOOST_DEVICE(size_t idx) mutable {
out_preds_d(idx) += d_update_predictions[idx];
});
return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
void AllReduceHist(int nidx, dh::AllReducer* reducer, int num_histograms) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
using ReduceT = typename std::remove_pointer<decltype(d_node_hist)>::type::ValueT;
reducer->AllReduceSum(reinterpret_cast<ReduceT*>(d_node_hist),
reinterpret_cast<ReduceT*>(d_node_hist),
page->Cuts().TotalBins() * 2 * num_histograms);
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates, dh::AllReducer* reducer,
const RegTree& tree) {
if (candidates.empty()) return;
// Some nodes we will manually compute histograms
// others we will do by subtraction
std::vector<int> hist_nidx;
std::vector<int> subtraction_nidx;
for (auto& e : candidates) {
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = e.split.right_sum.GetHess() < e.split.left_sum.GetHess();
if (fewer_right) {
hist_nidx.emplace_back(tree[e.nid].RightChild());
subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
} else {
hist_nidx.emplace_back(tree[e.nid].LeftChild());
subtraction_nidx.emplace_back(tree[e.nid].RightChild());
}
}
std::vector<int> all_new = hist_nidx;
all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
// Allocate the histograms
// Guaranteed contiguous memory
hist.AllocateHistograms(all_new);
for (auto nidx : hist_nidx) {
this->BuildHist(nidx);
}
// Reduce all in one go
// This gives much better latency in a distributed setting
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());
for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer, 1);
}
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!rabit::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto is_cat = candidate.split.is_cat;
if (is_cat) {
// should be set to nan in evaluation split.
CHECK(common::CheckNAN(candidate.split.fvalue));
std::vector<common::CatBitField::value_type> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto n_bins_feature = page->Cuts().FeatureBins(candidate.split.findex);
split_cats.resize(common::CatBitField::ComputeStorageSize(n_bins_feature), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
} else {
CHECK(!common::CheckNAN(candidate.split.fvalue));
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
const auto& parent = tree[candidate.nid];
std::size_t max_nidx = std::max(parent.LeftChild(), parent.RightChild());
// Grow as needed
if (node_sum_gradients.size() <= max_nidx) {
node_sum_gradients.resize(max_nidx * 2 + 1);
}
node_sum_gradients[parent.LeftChild()] = candidate.split.left_sum;
node_sum_gradients[parent.RightChild()] = candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
parent.RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
auto gpair_it = dh::MakeTransformIterator<GradientPairPrecise>(
dh::tbegin(gpair), [] __device__(auto const& gpair) { return GradientPairPrecise{gpair}; });
GradientPairPrecise root_sum =
dh::Reduce(thrust::cuda::par(alloc), gpair_it, gpair_it + gpair.size(),
GradientPairPrecise{}, thrust::plus<GradientPairPrecise>{});
rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double*>(&root_sum), 2);
hist.AllocateHistograms({kRootNIdx});
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer, 1);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum);
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo task,
RegTree* p_tree, dh::AllReducer* reducer,
HostDeviceVector<bst_node_t>* p_out_position) {
auto& tree = *p_tree;
// Process maximum 32 nodes at a time
Driver<GPUExpandEntry> driver(param, 32);
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, reducer) });
monitor.Stop("InitRoot");
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
for (auto& candidate : expand_set) {
this->ApplySplit(candidate, p_tree);
}
// Get the candidates we are allowed to expand further
// e.g. We do not bother further processing nodes whose children are beyond max depth
std::vector<GPUExpandEntry> filtered_expand_set;
std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
[&](const auto& e) { return driver.IsChildValid(e); });
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());
monitor.Start("UpdatePosition");
// Update position is only run when child is valid, instead of right after apply
// split (as in approx tree method). Hense we have the finalise position call
// in GPU Hist.
this->UpdatePosition(filtered_expand_set, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(filtered_expand_set, reducer, tree);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
monitor.Stop("EvaluateSplits");
dh::DefaultStream().Sync();
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat, task, p_out_position);
monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
param_.UpdateAllowUnknown(args);
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
FromJson(config.at("train_param"), ¶m_);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
out["train_param"] = ToJson(param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
// build tree
try {
size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree, &out_position[t_idx]);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
++t_idx;
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.Stop("Update");
}
void InitDataOnce(DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({ctx_->gpu_id}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
ctx_->gpu_id,
param_.max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker.reset(new GPUHistMakerDevice<GradientSumT>(
ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_,
column_sampling_seed, info_->num_col_, batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree{}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, &reducer_, p_out_position);
}
// Fast prediction path: reuse the leaf values computed during the last
// UpdateTree() call instead of re-running full prediction.  Only valid
// when `data` is exactly the matrix we last trained on.
bool UpdatePredictionCache(const DMatrix* data,
                           linalg::VectorView<bst_float> p_out_preds) override {
  const bool cache_usable = maker != nullptr && p_last_fmat_ != nullptr && p_last_fmat_ == data;
  if (!cache_usable) {
    return false;
  }
  monitor_.Start("UpdatePredictionCache");
  const bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
  monitor_.Stop("UpdatePredictionCache");
  return result;
}
TrainParam param_;  // NOLINT
MetaInfo* info_{};  // NOLINT
// Per-device tree builder; owns histograms and the ELLPACK page view.
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker;  // NOLINT

char const* Name() const override { return "grow_gpu_hist"; }

private:
bool initialised_{false};  // set once InitDataOnce() has run
GPUHistMakerTrainParam hist_maker_param_;
dh::AllReducer reducer_;  // device all-reduce helper for distributed training
DMatrix* p_last_fmat_{nullptr};  // matrix used by the last UpdateTree()
RegTree const* p_last_tree_{nullptr};  // tree grown by the last UpdateTree()
ObjInfo task_;
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
// Register this updater under the name used by `tree_method=gpu_hist`.
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
    .describe("Grow tree with GPU.")
    .set_body([](GenericParameter const* tparam, ObjInfo task) {
      return new GPUHistMaker(tparam, task);
    });
#endif  // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
66697acf3ae2e2cc2e5ac2d6b61b3a2c98ddcaec.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file batch_norm.cu
* \brief CUDA Batch Normalization code
* \author Chris Olivier, Bing Xu, Da Zheng
* Adapted from Torch
*/
#include <hip/hip_runtime_api.h>
#include <algorithm>
#include "batch_norm-inl.h"
#include "../../common/cuda/utils.h"
#define WRITE_DATA_FLAG 1
#define WRITE_GAMMA_FLAG 2
#define WRITE_BETA_FLAG 4
#define FIX_GAMMA_FLAG 8
#define IS_TRAINING_FLAG 16
#define USE_GLOBAL_STATS_FLAG 32
#define ADDTO_DATA_FLAG (1 << 6)
#define ADDTO_GAMMA_FLAG (1 << 7)
#define ADDTO_BETA_FLAG (1 << 8)
#if MXNET_USE_CUDNN == 1
#include "./cudnn/cudnn_batch_norm.h"
#endif
#include "../../../include/mxnet/tensor_blob.h"
using namespace mxnet;
namespace {
/*! \brief inverse standard deviation <-> variance */
// invstd = 1 / sqrt(var + eps); the generic version accumulates in
// AccReal and uses the single-precision intrinsic.
template <typename DType, typename AccReal>
MSHADOW_XINLINE AccReal variance_to_invstd(DType var, AccReal eps) {
  return rsqrtf(static_cast<AccReal>(var) + eps);
}
// Double-precision specialisation keeps full precision via rsqrt().
template <>
MSHADOW_XINLINE double variance_to_invstd(double var, double eps) {
  return rsqrt(var + eps);
}
// Inverse mapping: var = 1 / invstd^2 - eps.
template <typename AccReal>
MSHADOW_XINLINE AccReal invstd_to_variance(AccReal invstd, AccReal eps) {
  return static_cast<AccReal>(1.0) / (invstd * invstd) - eps;
}
template <>
MSHADOW_XINLINE double invstd_to_variance(double invstd, double eps) {
  return 1.0 / (invstd * invstd) - eps;
}
}  // namespace
namespace mxnet {
namespace op {
namespace batchnorm {
namespace cuda {
// Threads per warp on all supported architectures.
static const unsigned WARP_SIZE = 32;
// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;
// Host/device cast helper: converts a value of type In to type Out via a
// plain C-style cast (used to move between storage and accumulation types).
template <typename In, typename Out>
struct ScalarConvert {
  static __host__ __device__ __forceinline__ Out to(const In value) {
    return (Out)value;
  }
};
// Picks the smallest power-of-two block size in {32, 64, 128, 256} that
// covers nElem; anything larger falls back to MAX_BLOCK_SIZE.
static unsigned getNumThreads(int nElem) {
  const unsigned candidates[4] = {32, 64, 128, 256};
  const unsigned needed = static_cast<unsigned>(nElem);
  for (unsigned candidate : candidates) {
    if (needed <= candidate) {
      return candidate;
    }
  }
  return MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
// Implemented as 31 minus the count of leading zeros; yields -1 for
// val == 0 (callers here only pass positive values such as WARP_SIZE).
__device__ __forceinline__ int getMSB(int val) {
  return 31 - __clz(val);
}
// Pair of accumulator values that are reduced together — used to carry
// the gradient sum and the gradient/input dot-product in a single pass.
template <typename DType, typename AccReal>
struct Float2 {
  AccReal v1, v2;
  __device__ Float2() {}
  __device__ Float2(DType first, DType second)
      : v1(ScalarConvert<DType, AccReal>::to(first)),
        v2(ScalarConvert<DType, AccReal>::to(second)) {}
  __device__ Float2(DType scalar)
      : v1(ScalarConvert<DType, AccReal>::to(scalar)),
        v2(ScalarConvert<DType, AccReal>::to(scalar)) {}
  __device__ Float2(int scalar)
      : v1(ScalarConvert<int, AccReal>::to(scalar)),
        v2(ScalarConvert<int, AccReal>::to(scalar)) {}
  __device__ Float2& operator+=(const Float2& other) {
    v1 += other.v1;
    v2 += other.v2;
    return *this;
  }
};
// Pointwise functor for reduce(): returns element (batch, plane, n) of the
// tensor converted to the accumulation type, used to compute channel sums.
template <typename DType, typename AccReal, typename DeviceTensor>
struct SumOp {
  __device__ SumOp(const DeviceTensor t) : tensor(t) {}
  __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
    return ScalarConvert<DType, AccReal>::to(tensor.get_ref(batch, plane, n));
  }
  const DeviceTensor tensor;
};
// Pointwise functor for reduce(): squared deviation of element
// (batch, plane, n) from `mean`; its reduction gives variance * N.
template <typename DType, typename AccReal, typename DeviceTensor>
struct VarOp {
  __device__ VarOp(AccReal m, const DeviceTensor t) : mean(m), tensor(t) {}
  __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
    DType val = tensor.get_ref(batch, plane, n);
    // val is promoted to AccReal by the subtraction against mean.
    return (val - mean) * (val - mean);
  }
  const AccReal mean;
  const DeviceTensor tensor;
};
// Pointwise functor for reduce(): produces the pair
//   (gradOutput, gradOutput * (input - mean))
// so one reduction yields both the gradient sum and the dot-product
// needed by the batch-norm backward pass.
template <typename DType, typename AccReal, typename DeviceTensor>
struct GradOp {
  __device__ GradOp(AccReal m, const DeviceTensor i, const DeviceTensor g)
      : mean(m), input(i), gradOutput(g) {}
  __device__ __forceinline__ Float2<DType, AccReal> operator()(int batch, int plane, int n) {
    const DType g = gradOutput.get_ref(batch, plane, n);
    const DType c = ScalarConvert<AccReal, DType>::to(input.get_ref(batch, plane, n) - mean);
    return Float2<DType, AccReal>(g, g * c);
  }
  const AccReal mean;
  const DeviceTensor input;
  const DeviceTensor gradOutput;
};
#if TORCH_HIP_VERSION >= 9000
// CUDA 9+ removed the implicitly-synchronised shuffles; map the legacy
// name onto the *_sync variant with a full-warp participation mask.
#define FULLMASK 0xFFFFFFFF
#define __shfl_xor(...) __shfl_xor_sync(FULLMASK, __VA_ARGS__)
#endif
// Sum across all threads within a warp.
// On SM30+ this is a butterfly (XOR) shuffle reduction: after
// log2(WARP_SIZE) steps every lane holds the warp-wide sum.  The
// pre-SM30 fallback stages values in shared memory instead.
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
  for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
    val += __shfl_xor(val, 1 << i, WARP_SIZE);
  }
#else
  __shared__ T values[MAX_BLOCK_SIZE];
  values[threadIdx.x] = val;
  __threadfence_block();
  const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
  for (int i = 1; i < WARP_SIZE; i++) {
    val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
  }
#endif
  return val;
}
// Component-wise warp sum for the (v1, v2) accumulator pair.
template <typename DType, typename AccReal>
static __device__ __forceinline__ Float2<DType, AccReal> warpSum(Float2<DType, AccReal> value) {
  value.v1 = warpSum(value.v1);
  value.v2 = warpSum(value.v2);
  return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise.
// Block-wide reduction over one channel (`plane`): each thread
// accumulates a strided slice, partials are reduced within each warp,
// warp leaders stage their results in shared memory, and warp 0 reduces
// those.  Every thread of the block returns the same total.
// NOTE(review): assumes blockDim.x <= 32 * WARP_SIZE so the 32-entry
// shared staging array suffices — holds for MAX_BLOCK_SIZE = 512.
template <typename T, typename Op, typename DeviceTensor>
static __device__ T reduce(Op op, DeviceTensor tensor, int plane) {
  T sum = (T)0;
  for (int batch = 0; batch < tensor.OuterSize(); ++batch) {
    for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) {
      sum += op(batch, plane, x);
    }
  }
  // sum over NumThreads within a warp
  sum = warpSum(sum);
  // 'transpose', and reduce within warp again
  __shared__ T shared[32];
  __syncthreads();
  if (threadIdx.x % WARP_SIZE == 0) {
    shared[threadIdx.x / WARP_SIZE] = sum;
  }
  if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
    // zero out the other entries in shared
    shared[threadIdx.x] = (T)0;
  }
  __syncthreads();
  if (threadIdx.x / WARP_SIZE == 0) {
    sum = warpSum(shared[threadIdx.x]);
    if (threadIdx.x == 0) {
      shared[0] = sum;
    }
  }
  __syncthreads();
  // Everyone picks it up, should be broadcast into the whole gradInput
  return shared[0];
}
namespace {
// Block size used for the inference-mode forward kernel.
constexpr int inference_forward_threads = 512;
// Maximum channel count for which per-channel stats are staged in shared
// memory (four AType arrays of this length per block).
constexpr int shmem_elements = 1536;
}  // namespace
// Inference-mode (frozen statistics) forward pass:
//   out = gamma * (x - runningMean) * rsqrt(runningVar + eps) + beta
// computed element-wise with vectorised LType loads/stores (nvec DType
// elements per transaction) over a grid-stride loop.  When
// `small_num_channels` is true, per-channel stats are first staged into
// shared memory (requires num_channels <= shmem_elements).  The kernel
// also mirrors running stats into saveMean/saveInvStd for the first
// num_channels indices and, when fix_gamma is set, resets gamma to 1.
// NOTE(review): `size` is padded up to a multiple of nvec by the caller;
// `outer_size` is currently unused inside the kernel body.
template <typename DType, typename AType, typename LType, bool small_num_channels>
__launch_bounds__(inference_forward_threads) __global__
    void BatchNormalizationUpdateOutputInferenceKernel(const DType* input,
                                                       DType* output,
                                                       const index_t size,
                                                       const index_t outer_size,
                                                       const index_t num_channels,
                                                       const index_t inner_size,
                                                       const AType* runningMean,
                                                       const AType* runningVar,
                                                       AType* saveMean,
                                                       AType* saveInvStd,
                                                       AType* weight,
                                                       AType* bias,
                                                       const AType epsilon,
                                                       const uint32_t flags) {
  // Number of DType elements carried per vectorised load/store.
  constexpr int nvec = sizeof(LType) / sizeof(DType);
  __shared__ AType saved_invstd[shmem_elements];
  __shared__ AType saved_mean[shmem_elements];
  __shared__ AType saved_weight[shmem_elements];
  __shared__ AType saved_bias[shmem_elements];
  // Union view letting us load LType-wide and operate DType-wise.
  union vectorized_loader {
    LType aligned;
    DType separate[nvec];  // NOLINT(*)
    __device__ inline vectorized_loader() {}
    __device__ inline ~vectorized_loader() {}
  } scratch;
  if (small_num_channels) {
    // Stage per-channel statistics once per block.
    for (int i = threadIdx.x; i < num_channels; i += blockDim.x) {
      saved_invstd[i] = variance_to_invstd(runningVar[i], epsilon);
      saved_mean[i] = runningMean[i];
      saved_weight[i] = (weight != nullptr && (flags & FIX_GAMMA_FLAG) == 0) ? weight[i] : 1;
      saved_bias[i] = (bias != nullptr) ? bias[i] : 0;
    }
    __syncthreads();
  }
  const index_t tid = threadIdx.x + blockIdx.x * blockDim.x;
  const index_t stride = blockDim.x * gridDim.x;
  const LType* input_aligned = reinterpret_cast<const LType*>(input);
  LType* output_aligned = reinterpret_cast<LType*>(output);
  // Grid-stride loop over vectorised elements.
  for (index_t i = tid; i < size / nvec; i += stride) {
    scratch.aligned = input_aligned[i];
    // Channel of the first DType element inside this vector.
    const index_t my_channel_base = (nvec * i) % (inner_size * num_channels);
#pragma unroll
    for (int j = 0; j < nvec; ++j) {
      index_t my_channel = (my_channel_base + j) / inner_size;
      if (my_channel >= num_channels)
        my_channel = my_channel % num_channels;
      AType current_input = static_cast<AType>(scratch.separate[j]);
      AType invstd = small_num_channels ? saved_invstd[my_channel]
                                        : variance_to_invstd(runningVar[my_channel], epsilon);
      AType mean = small_num_channels ? saved_mean[my_channel] : runningMean[my_channel];
      AType gamma =
          small_num_channels
              ? saved_weight[my_channel]
              : ((weight != nullptr && (flags & FIX_GAMMA_FLAG) == 0) ? weight[my_channel] : 1);
      AType beta =
          small_num_channels ? saved_bias[my_channel] : ((bias != nullptr) ? bias[my_channel] : 0);
      current_input = gamma * (current_input - mean) * invstd + beta;
      scratch.separate[j] = current_input;
    }
    output_aligned[i] = scratch.aligned;
    // First num_channels threads copy running stats out for the outputs.
    if (i < num_channels) {
      saveMean[i] = runningMean[i];
      saveInvStd[i] = variance_to_invstd(runningVar[i], epsilon);
      if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0 && weight != nullptr) {
        weight[i] = 1;
      }
    }
  }
}
// Training-mode forward pass, one block per channel (plane = blockIdx.x):
// computes batch mean/invstd via block-wide reductions, stores them in
// saveMean/saveInvStd, then writes the normalised output.  Running
// averages are NOT updated here (that happens in the backward kernel).
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormalizationUpdateOutputKernel(DeviceTensor input,
                                                     DeviceTensor output,
                                                     DeviceTensor1 weight,
                                                     DeviceTensor1 bias,
                                                     const AccReal epsilon,
                                                     const AccReal momentum,
                                                     DeviceTensor1 runningMean,
                                                     DeviceTensor1 runningVar,
                                                     DeviceTensor1 saveMean,
                                                     DeviceTensor1 saveInvStd,
                                                     const uint32_t flags) {
  const int plane = blockIdx.x;
  const int N = input.OuterSize() * input.InnerSize();
  const AccReal norm = AccReal(1) / N;
  // Compute the mean and variance across (batch, x/y/z)
  const AccReal mean =
      reduce<AccReal>(SumOp<DType, AccReal, DeviceTensor>(input), input, plane) * norm;
  __syncthreads();
  const AccReal varN =
      reduce<AccReal>(VarOp<DType, AccReal, DeviceTensor>(mean, input), input, plane);
  AccReal invStd = 0;
  if (varN != AccReal(0) || epsilon != AccReal(0)) {
    invStd = AccReal(1.0) / sqrt(varN * norm + epsilon);
  }
  // Save the mean, variance, and moving averages
  if (threadIdx.x == 0) {
    // For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc)
    // Momentum based writeback
    saveMean[plane] = ScalarConvert<AccReal, DType>::to(mean);
    saveInvStd[plane] = invStd;
    // fix_gamma: gamma is clamped to 1 so its gradient output stays sane.
    if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0 &&
        weight.numElements() > 0) {
      weight[plane] = AccReal(1);
    }
  }
  // Write normalized and update the output
  const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
                            ? ScalarConvert<DType, AccReal>::to(weight[plane])
                            : ScalarConvert<int, AccReal>::to(1);
  const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
                                              : ScalarConvert<int, AccReal>::to(0);
  for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
    for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
      const DType inp = input.get_ref(batch, plane, x);
      output.get_ref(batch, plane, x) =
          ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invStd + beta);
    }
  }
}
// Bundle of per-channel 1-D tensors passed to the backward kernel so the
// launch signature stays manageable.
template <typename DeviceTensor1>
struct CUDATensors {
  DeviceTensor1 gradWeight;   // output: d(loss)/d(gamma)
  DeviceTensor1 gradBias;     // output: d(loss)/d(beta)
  DeviceTensor1 weight;       // input: gamma
  DeviceTensor1 runningMean;  // in/out: moving mean
  DeviceTensor1 runningVar;   // in/out: moving variance
  DeviceTensor1 saveMean;     // input: batch mean from the forward pass
  DeviceTensor1 saveInvStd;   // input: batch invstd from the forward pass
};
namespace {
// Integer ceiling division: smallest int >= numerator / denominator
// (callers only pass positive denominators).
inline int ceil_div(int numerator, int denominator) {
  return (numerator + denominator - 1) / denominator;
}
}  // namespace
// Frozen (use_global_stats) backward pass for channels-last layouts,
// phase 1 of 2.  Each block covers a warp-wide stripe of channels
// (blockIdx.x) over a slab of rows (blockIdx.y).  Per block it:
//   * accumulates sum(gradOutput) and sum(gradOutput * (x - mean))
//     per channel into temp_space (gamma partials first, then beta
//     partials offset by gridDim.y * num_channels), to be combined by
//     phase 2;
//   * optionally writes/accumulates gradInput = gradOutput * invstd * gamma.
// Loads are vectorised: nvec DType elements per LType transaction.
// NOTE(review): assumes num_channels is divisible by nvec so rows stay
// vector-aligned — confirm against the host-side load-type selection.
template <int NTHREADS, typename DType, typename AType, typename LType>
__global__ void FrozenBatchNormalizationBackwardKernelCLastPhase1(const DType* input,
                                                                  const DType* gradOutput,
                                                                  AType* temp_space,
                                                                  DType* gradInput,
                                                                  const AType* weight,
                                                                  const AType* runningMean,
                                                                  const AType* runningVar,
                                                                  const index_t outer,
                                                                  const index_t num_channels,
                                                                  const AType eps,
                                                                  const uint32_t flags) {
  using mxnet::common::cuda::warp_size;
  constexpr int num_warps = NTHREADS / warp_size;
  constexpr int nvec = sizeof(LType) >= sizeof(DType) ? sizeof(LType) / sizeof(DType) : 1;
  // Row length in LType units.
  const size_t stride = num_channels / nvec;
  union vectorized_loader {
    LType aligned;
    DType separate[nvec];  // NOLINT(*)
    __device__ inline vectorized_loader() {}
    __device__ inline ~vectorized_loader() {}
  };
  vectorized_loader vec_input, vec_gradOutput;
  // First half of scratch holds gamma partials, second half beta partials.
  __shared__ AType scratch[NTHREADS * 2 * nvec];
  AType* my_values_gamma = &(scratch[threadIdx.x * nvec]);
  AType* my_values_beta = &(scratch[(NTHREADS + threadIdx.x) * nvec]);
  AType sum_gamma[nvec];  // NOLINT(*)
  AType sum_beta[nvec];   // NOLINT(*)
#pragma unroll
  for (int i = 0; i < nvec; ++i) {
    sum_gamma[i] = 0;
    sum_beta[i] = 0;
  }
  const size_t offset = blockIdx.x * warp_size;
  const int my_warp = threadIdx.x / warp_size;
  const int thread_idx_in_warp = threadIdx.x % warp_size;
  AType invstd[nvec];  // NOLINT(*)
  AType mean[nvec];    // NOLINT(*)
  AType gamma[nvec];   // NOLINT(*)
  // Load the frozen per-channel stats for the channels this lane owns.
  size_t channel_offset = (offset + thread_idx_in_warp) * nvec;
  if (channel_offset < num_channels) {
#pragma unroll
    for (int i = 0; i < nvec; ++i) {
      invstd[i] = variance_to_invstd(runningVar[channel_offset + i], eps);
      mean[i] = runningMean[channel_offset + i];
      gamma[i] = weight != nullptr ? weight[channel_offset + i] : 1;
    }
  }
  const LType* aligned_gradOutput = reinterpret_cast<const LType*>(gradOutput);
  const LType* aligned_input = reinterpret_cast<const LType*>(input);
  LType* gradInput_aligned = reinterpret_cast<LType*>(gradInput);
  // Each blockIdx.y handles its own slab of rows; warps stride within it.
  const int rows_per_block = (outer + gridDim.y - 1) / gridDim.y;
  const size_t start_row = my_warp + rows_per_block * blockIdx.y;
  const size_t end_row = min(outer, static_cast<index_t>(rows_per_block * (blockIdx.y + 1)));
  if (offset + thread_idx_in_warp < stride) {
    for (size_t i = start_row; i < end_row; i += num_warps) {
      const index_t idx = i * stride + offset + thread_idx_in_warp;
      vec_gradOutput.aligned = aligned_gradOutput[idx];
      vec_input.aligned = aligned_input[idx];
#pragma unroll
      for (int j = 0; j < nvec; ++j) {
        sum_beta[j] += static_cast<AType>(vec_gradOutput.separate[j]);
        sum_gamma[j] += static_cast<AType>(vec_gradOutput.separate[j]) *
                        (static_cast<AType>(vec_input.separate[j]) - mean[j]);
      }
      if (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) {
        // Gradient to input
#pragma unroll
        for (int j = 0; j < nvec; ++j) {
          vec_gradOutput.separate[j] *= invstd[j] * gamma[j];
        }
        if (flags & ADDTO_DATA_FLAG) {
          // Reuse vec_input as scratch to accumulate into gradInput.
          vec_input.aligned = gradInput_aligned[idx];
#pragma unroll
          for (int j = 0; j < nvec; ++j) {
            vec_gradOutput.separate[j] += vec_input.separate[j];
          }
        }
        gradInput_aligned[idx] = vec_gradOutput.aligned;
      }
    }
  }
  __syncthreads();
  // Stage per-thread partials, then tree-reduce across warps.
#pragma unroll
  for (int i = 0; i < nvec; ++i) {
    my_values_gamma[i] = sum_gamma[i];
    my_values_beta[i] = sum_beta[i];
  }
  __syncthreads();
  for (int i = num_warps / 2; i > 0; i /= 2) {
    if (my_warp < i) {
      const int shared_offset = nvec * i * warp_size;
#pragma unroll
      for (int j = 0; j < nvec; ++j) {
        my_values_gamma[j] += my_values_gamma[j + shared_offset];
        my_values_beta[j] += my_values_beta[j + shared_offset];
      }
    }
    __syncthreads();
  }
  // Warp 0 writes this block's partials into the global scratch buffer.
  if (threadIdx.x < min(warp_size * nvec, static_cast<int>(num_channels - nvec * offset))) {
    const size_t offset_out = nvec * offset + blockIdx.y * num_channels;
    const size_t offset_beta = gridDim.y * num_channels;
    temp_space[offset_out + threadIdx.x] = scratch[threadIdx.x];
    temp_space[offset_beta + offset_out + threadIdx.x] = scratch[NTHREADS * nvec + threadIdx.x];
  }
}
// Frozen channels-last backward, phase 2: one thread per channel sums the
// per-block partials produced by phase 1 (gamma partials first, beta
// partials in the second half of temp_space) and writes/accumulates the
// final gradients.  dGamma = sum * invstd; with fix_gamma it is zeroed
// on write requests.
template <typename AType>
__global__ void FrozenBatchNormalizationBackwardKernelCLastPhase2(const AType* temp_space,
                                                                  const AType* runningVar,
                                                                  AType* out_gamma,
                                                                  AType* out_beta,
                                                                  int lead_dim,
                                                                  int n_blocks,
                                                                  AType epsilon,
                                                                  uint32_t flags) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < lead_dim) {
    AType sum_gamma = 0;
    AType sum_beta = 0;
    // Stride over the n_blocks partials for this channel.
    for (int i = tid; i < lead_dim * n_blocks; i += lead_dim) {
      sum_gamma += temp_space[i];
      sum_beta += temp_space[i + lead_dim * n_blocks];
    }
    if (flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) {
      if ((flags & FIX_GAMMA_FLAG) == 0) {
        const AType invstd = variance_to_invstd(runningVar[tid], epsilon);
        if (flags & WRITE_GAMMA_FLAG) {
          out_gamma[tid] = sum_gamma * invstd;
        } else {
          out_gamma[tid] += sum_gamma * invstd;
        }
      } else {
        if (flags & WRITE_GAMMA_FLAG) {
          out_gamma[tid] = 0;
        }
      }
    }
    if (flags & WRITE_BETA_FLAG) {
      out_beta[tid] = sum_beta;
    } else if (flags & ADDTO_BETA_FLAG) {
      out_beta[tid] += sum_beta;
    }
  }
}
// Frozen (use_global_stats) backward pass for channels-first layouts:
// one block per channel (blockIdx.x).  Computes, with vectorised loads,
//   dBeta  = sum(gradOutput)
//   dGamma = sum(gradOutput * (x - runningMean)) * invstd
// and optionally gradInput = gradOutput * invstd * gamma, then reduces
// the partials block-wide with common::cuda::reduce.
// NOTE(review): assumes `inner` (and thus NHW) is divisible by nvec —
// guaranteed by the host-side get_load_type() choice.
template <int NTHREADS, typename DType, typename AType, typename LType>
__global__ void FrozenBatchNormalizationBackwardKernel(const DType* input,
                                                       const DType* gradOutput,
                                                       DType* gradInput,
                                                       AType* gradWeight,
                                                       AType* gradBias,
                                                       const AType* weight,
                                                       const AType* runningMean,
                                                       const AType* runningVar,
                                                       const index_t outer,
                                                       const index_t inner,
                                                       const index_t num_channels,
                                                       const index_t NHW_div_nvec,
                                                       const AType eps,
                                                       const uint32_t flags) {
  const index_t my_channel = blockIdx.x;
  const AType invstd = variance_to_invstd(runningVar[my_channel], eps);
  const AType mean = runningMean[my_channel];
  const AType gamma = weight != nullptr ? weight[my_channel] : 1;
  constexpr int nvec = sizeof(LType) > sizeof(DType) ? sizeof(LType) / sizeof(DType) : 1;
  union vectorized_loader {
    LType aligned;
    DType separate[nvec];  // NOLINT(*)
    __device__ inline vectorized_loader() {}
    __device__ inline ~vectorized_loader() {}
  };
  vectorized_loader vec_input, vec_gradOutput;
  const LType* input_aligned = reinterpret_cast<const LType*>(input);
  const LType* gradOutput_aligned = reinterpret_cast<const LType*>(gradOutput);
  LType* gradInput_aligned = reinterpret_cast<LType*>(gradInput);
  const index_t inner_div_nvec = inner / nvec;
  AType sum_gamma = 0;
  AType sum_beta = 0;
  // Strided walk over all (outer, inner) elements of this channel.
  for (index_t i = threadIdx.x; i < NHW_div_nvec; i += blockDim.x) {
    const index_t inner_idx = i % inner_div_nvec;
    const index_t outer_idx = i / inner_div_nvec;
    const index_t idx = inner_idx + (my_channel + outer_idx * num_channels) * inner_div_nvec;
    vec_gradOutput.aligned = gradOutput_aligned[idx];
    vec_input.aligned = input_aligned[idx];
#pragma unroll
    for (int j = 0; j < nvec; ++j) {
      sum_beta += static_cast<AType>(vec_gradOutput.separate[j]);
      sum_gamma += static_cast<AType>(vec_gradOutput.separate[j]) *
                   (static_cast<AType>(vec_input.separate[j]) - mean);
    }
    if (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) {
      // Gradient to input
#pragma unroll
      for (int j = 0; j < nvec; ++j) {
        vec_gradOutput.separate[j] *= invstd * gamma;
      }
      if (flags & ADDTO_DATA_FLAG) {
        // Reuse vec_input as scratch to accumulate into gradInput.
        vec_input.aligned = gradInput_aligned[idx];
#pragma unroll
        for (int j = 0; j < nvec; ++j) {
          vec_gradOutput.separate[j] += vec_input.separate[j];
        }
      }
      gradInput_aligned[idx] = vec_gradOutput.aligned;
    }
  }
  // Block-wide reduction of the per-thread partials.
  sum_gamma =
      common::cuda::reduce<NTHREADS, false>(sum_gamma, [](AType a, AType b) { return a + b; });
  sum_beta =
      common::cuda::reduce<NTHREADS, false>(sum_beta, [](AType a, AType b) { return a + b; });
  // Thread 0 owns the final per-channel writes.
  if (threadIdx.x == 0) {
    if (flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) {
      if ((flags & FIX_GAMMA_FLAG) == 0) {
        if (flags & WRITE_GAMMA_FLAG) {
          gradWeight[my_channel] = sum_gamma * invstd;
        } else {
          gradWeight[my_channel] += sum_gamma * invstd;
        }
      } else {
        if (flags & WRITE_GAMMA_FLAG) {
          gradWeight[my_channel] = 0;
        }
      }
    }
    if (flags & WRITE_BETA_FLAG) {
      gradBias[my_channel] = sum_beta;
    } else if (flags & ADDTO_BETA_FLAG) {
      gradBias[my_channel] += sum_beta;
    }
  }
}
// Training-mode backward pass, one block per channel: reduces
// sum(gradOutput) and dot(x - mean, gradOutput) in a single pass, then
//   gradInput = (gradOutput - proj - gradMean) * invstd * gamma
// with proj = (x - mean) * dotP * norm * invstd^2.  Thread 0 also folds
// the batch statistics into the running averages using `momentum`
// (running = running * momentum + batch * (1 - momentum)) — the running
// stats are updated here rather than in the forward kernel.
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
static __global__ void BatchNormalizationBackwardKernel(const DeviceTensor input,
                                                        const DeviceTensor gradOutput,
                                                        DeviceTensor gradInput,
                                                        CUDATensors<DeviceTensor1> tensors,
                                                        const uint32_t flags,
                                                        const AccReal momentum,
                                                        const AccReal eps) {
  int plane = blockIdx.x;
  int N = gradOutput.OuterSize() * gradOutput.InnerSize();
  AccReal mean, invstd;
  mean = ScalarConvert<DType, AccReal>::to(tensors.saveMean[plane]);
  invstd = tensors.saveInvStd[plane];
  const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0)
                                ? ScalarConvert<DType, AccReal>::to(tensors.weight[plane])
                                : AccReal(1);
  const AccReal norm = AccReal(1) / N;
  // Compute two values across (batch, x/y/z) in one pass:
  // 1. Sum(gradOutput)
  // 2. DotProduct(input - mean, gradOutput)
  GradOp<DType, AccReal, DeviceTensor> g(mean, input, gradOutput);
  Float2<DType, AccReal> res =
      reduce<Float2<DType, AccReal>, GradOp<DType, AccReal, DeviceTensor>, DeviceTensor>(
          g, gradOutput, plane);
  const AccReal gradOutputSum = res.v1;
  const AccReal dotP = res.v2;
  const AccReal gradMean = gradOutputSum * norm;
  const AccReal projScale = dotP * norm * invstd * invstd;
  const AccReal gradScale = invstd * weightVal;
  if (threadIdx.x == 0) {
    const AccReal localVariance = invstd_to_variance(tensors.saveInvStd[plane], eps);
    const AccReal localMean = tensors.saveMean[plane];
    // update running averages
    tensors.runningMean[plane] =
        tensors.runningMean[plane] * momentum + localMean * (AccReal(1) - momentum);
    tensors.runningVar[plane] =
        tensors.runningVar[plane] * momentum + localVariance * (AccReal(1) - momentum);
  }
  if (gradInput.Size() > 0 && (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) != 0) {
    const bool grad_write = flags & WRITE_DATA_FLAG;
    if (grad_write) {
      for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
        for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
          const DType gradOut = gradOutput.get_ref(batch, plane, x);
          const DType inp = input.get_ref(batch, plane, x);
          const AccReal proj = (inp - mean) * projScale;
          gradInput.get_ref(batch, plane, x) =
              ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
        }
      }
    } else {
      // grad addto
      for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
        for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
          const DType gradOut = gradOutput.get_ref(batch, plane, x);
          const DType inp = input.get_ref(batch, plane, x);
          const AccReal proj = (inp - mean) * projScale;
          gradInput.get_ref(batch, plane, x) +=
              ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
        }
      }
    }
  }
  // Per-channel gamma/beta gradients are written by thread 0 only.
  if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 &&
      (flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) != 0) {
    if ((flags & FIX_GAMMA_FLAG) == 0) {
      if (flags & WRITE_GAMMA_FLAG)
        tensors.gradWeight[plane] = ScalarConvert<AccReal, DType>::to(dotP * invstd);
      else
        tensors.gradWeight[plane] += ScalarConvert<AccReal, DType>::to(dotP * invstd);
    } else {
      // fix_gamma: gamma receives no gradient.
      tensors.gradWeight[plane] = DType(0);
    }
  }
  if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 &&
      (flags & (WRITE_BETA_FLAG | ADDTO_BETA_FLAG)) != 0) {
    if (flags & WRITE_BETA_FLAG)
      tensors.gradBias[plane] = ScalarConvert<AccReal, DType>::to(gradOutputSum);
    else
      tensors.gradBias[plane] += ScalarConvert<AccReal, DType>::to(gradOutputSum);
  }
}
// Lightweight non-owning view of a Dim-dimensional tensor usable from
// both host and device code.  Holds a raw pointer and the extent of each
// dimension; indexing is row-major.
template <typename DType, int Dim>
struct DeviceTensor {
 public:
  inline DeviceTensor() {}
  inline DeviceTensor(DType* p, const int* size) : dptr_(p) {
    for (int i = 0; i < Dim; ++i) {
      size_[i] = size ? size[i] : 0;
    }
  }
  // Extent of dimension i.
  MSHADOW_XINLINE unsigned getSize(const int i) const {
    return size_[i];
  }
  // Total number of elements (product of all extents).
  MSHADOW_XINLINE int numElements() const {
    int n = 1;
    for (int i = 0; i < Dim; ++i) {
      n *= size_[i];
    }
    return n;
  }
  // 3-D (batch, plane, x) accessor — only meaningful for Dim == 3;
  // 1-D views use operator[] instead.
  MSHADOW_XINLINE DType& operator()(const size_t batch, const size_t plane, const size_t x) const {
    int offset = 0;
    offset *= size_[0];
    offset += batch;
    offset *= size_[1];
    offset += plane;
    offset *= size_[2];
    offset += x;
    return *(const_cast<DType*>(dptr_ + offset));
  }
  // Flat element access.
  MSHADOW_XINLINE DType& operator[](const size_t x) const {
    return *(dptr_ + x);
  }
  // Product of all dims after the channel dim (spatial size).
  MSHADOW_XINLINE size_t InnerSize() const {
    size_t sz = 1;
    for (size_t i = 2; i < Dim; ++i) {
      sz *= size_[i];
    }
    return sz;
  }
  // Dimension 1 is the channel dimension by convention.
  MSHADOW_XINLINE size_t ChannelCount() const {
    return size_[1];
  }
  DType* dptr_;     // unowned data pointer
  int size_[Dim];   // extent of each dimension
};
// Builds a DeviceTensor<DType, Dim> view over a TBlob.  If the blob's
// rank differs from Dim, trailing dimensions are either padded with 1
// (blob rank < Dim) or collapsed into the last view dimension
// (blob rank > Dim) so the element count is preserved.
template <typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const TBlob& blob) {
  CHECK_EQ(blob.type_flag_, mshadow::DataType<DType>::kFlag);
  DType* data = blob.dptr<DType>();
  const int inDim = blob.shape_.ndim();
  if (inDim == Dim) {
    DeviceTensor<DType, Dim> tensor(data, nullptr);
    for (int i = 0; i < Dim; ++i) {
      tensor.size_[i] = blob.size(i);
    }
    return tensor;
  }
  // View in which the last dimensions are collapsed or expanded as needed
  int size[Dim];
  for (int i = 0; i < Dim || i < inDim; ++i) {
    if (i < Dim && i < inDim) {
      size[i] = blob.size(i);
    } else if (i < Dim) {
      size[i] = 1;  // pad missing dimensions with extent 1
    } else {
      size[Dim - 1] *= blob.size(i);  // fold extra dimensions into the last
    }
  }
  return DeviceTensor<DType, Dim>(data, &size[0]);
}
#define DeviceTensor1 DeviceTensor<AccReal, 1>
using namespace mxnet::op;
template <typename DType, typename AccReal>
static void BatchNormalizationUpdateOutput(mshadow::Stream<gpu>* s,
const OpContext& ctx,
const BatchNormParam& param,
const std::vector<TBlob>& in_data,
const std::vector<TBlob>& out_data,
const std::vector<TBlob>& aux_states,
const uint32_t flags,
double momentum,
double eps) {
batchnorm::BNTensor3<DType> input =
batchnorm::BNTensor3<DType>(in_data[batchnorm::kData], param.axis);
batchnorm::BNTensor3<DType> output =
batchnorm::BNTensor3<DType>(out_data[batchnorm::kOut], param.axis);
DeviceTensor1 weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
DeviceTensor1 bias = devicetensor<AccReal, 1>(in_data[batchnorm::kBeta]);
DeviceTensor1 runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
DeviceTensor1 runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
DeviceTensor1 saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
DeviceTensor1 saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
DCHECK_GT(weight.numElements(), 0);
if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) {
AccReal* bias_ptr = bias.numElements() > 0 ? bias.dptr_ : nullptr;
AccReal* gamma_ptr = weight.numElements() > 0 ? weight.dptr_ : nullptr;
int nvec = sizeof(double) / sizeof(DType);
index_t size = input.InnerSize() * input.OuterSize() * input.ChannelCount();
index_t aligned_size = ((size + nvec - 1) / nvec) * nvec;
index_t blocks =
::min((size + nvec * inference_forward_threads - 1) / (nvec * inference_forward_threads),
static_cast<index_t>(512));
if (input.ChannelCount() < shmem_elements) {
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInferenceKernel<DType, AccReal, double, true>)
, dim3(blocks), dim3(inference_forward_threads), 0, mshadow::Stream<gpu>::GetStream(s),
input.dptr_,
output.dptr_,
aligned_size,
input.OuterSize(),
input.ChannelCount(),
input.InnerSize(),
runningMean.dptr_,
runningVar.dptr_,
saveMean.dptr_,
saveInvStd.dptr_,
gamma_ptr,
bias_ptr,
eps,
flags);
} else {
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputInferenceKernel<DType, AccReal, double, false>)
, dim3(blocks), dim3(inference_forward_threads), 0, mshadow::Stream<gpu>::GetStream(s),
input.dptr_,
output.dptr_,
aligned_size,
input.OuterSize(),
input.ChannelCount(),
input.InnerSize(),
runningMean.dptr_,
runningVar.dptr_,
saveMean.dptr_,
saveInvStd.dptr_,
gamma_ptr,
bias_ptr,
eps,
flags);
}
} else {
dim3 blocks(input.ChannelCount());
dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize()));
hipLaunchKernelGGL(( BatchNormalizationUpdateOutputKernel<DType, AccReal, DeviceTensor1, batchnorm::BNTensor3<DType>>)
, dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s), input,
output,
weight,
bias,
eps,
momentum,
runningMean,
runningVar,
saveMean,
saveInvStd,
flags);
}
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationUpdateOutput);
}
// Host-side backward dispatcher.  Training path (training and not
// use_global_stats): one block per channel running the full backward
// kernel (which also updates the running statistics).  Frozen path:
// picks a channels-last two-phase kernel (axis is the last dimension)
// or the channels-first kernel, after masking out write flags whose
// output tensors are absent.
template <typename DType, typename AccReal>
static void BatchNormalizationBackward(mshadow::Stream<gpu>* s,
                                       const OpContext& ctx,
                                       const BatchNormParam& param,
                                       const std::vector<TBlob>& out_grad,
                                       const std::vector<TBlob>& in_data,
                                       const std::vector<TBlob>& out_data,
                                       const std::vector<TBlob>& in_grad,
                                       const std::vector<TBlob>& aux_states,
                                       const uint32_t flags,
                                       double momentum,
                                       double eps) {
  batchnorm::BNTensor3<DType> input =
      batchnorm::BNTensor3<DType>(in_data[batchnorm::kData], param.axis);
  batchnorm::BNTensor3<DType> gradOutput =
      batchnorm::BNTensor3<DType>(out_grad[batchnorm::kOut], param.axis);
  batchnorm::BNTensor3<DType> gradInput =
      batchnorm::BNTensor3<DType>(in_grad[batchnorm::kData], param.axis);
  CHECK_EQ(gradOutput.Size(), gradInput.Size());
  CUDATensors<DeviceTensor1> tensors;
  tensors.gradWeight = devicetensor<AccReal, 1>(in_grad[batchnorm::kGamma]);
  tensors.gradBias = devicetensor<AccReal, 1>(in_grad[batchnorm::kBeta]);
  tensors.weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
  tensors.runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
  tensors.runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
  tensors.saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
  tensors.saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
  DCHECK_GT(tensors.weight.numElements(), 0);
  const bool is_train_and_not_global_stats =
      (flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0;
  if (is_train_and_not_global_stats) {
    dim3 blocks(gradOutput.ChannelCount());
    dim3 threads(batchnorm::cuda::getNumThreads(gradOutput.InnerSize()));
    hipLaunchKernelGGL(( BatchNormalizationBackwardKernel<DType, AccReal, DeviceTensor1, batchnorm::BNTensor3<DType>>)
        , dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s),
           input, gradOutput, gradInput, tensors, flags, momentum, eps);
  } else {
    // Drop write requests for outputs that have no storage.
    uint32_t flags_copy = flags;
    if (gradInput.Size() <= 0) {
      flags_copy = (flags_copy & ~WRITE_DATA_FLAG);
    }
    if (tensors.gradWeight.numElements() <= 0) {
      flags_copy = (flags_copy & ~WRITE_GAMMA_FLAG);
    }
    if (tensors.gradBias.numElements() <= 0) {
      flags_copy = (flags_copy & ~WRITE_BETA_FLAG);
    }
    AccReal* gamma = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0)
                         ? tensors.weight.dptr_
                         : nullptr;
    if (param.axis == -1 || param.axis == in_data[batchnorm::kData].shape_.ndim() - 1) {
      // Channels-last: two-phase reduction via a temp workspace.
      const int C = gradOutput.ChannelCount();
      int ltype = mxnet::common::cuda::get_load_type(C * sizeof(DType));
      const int M = gradOutput.OuterSize();
      MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
        const unsigned int blocks_x =
            ceil_div(C * sizeof(DType), mxnet::common::cuda::warp_size * sizeof(LType));
        const unsigned int preferred_number_of_blocks =
            2 * MultiprocessorCount(ctx.run_ctx.ctx.dev_id);
        const unsigned int blocks_y = ::max(preferred_number_of_blocks / blocks_x, 1u);
        const dim3 n_blocks = {blocks_x, blocks_y, 1};
        // Workspace holds blocks_y partials per channel for gamma and beta.
        auto scratch_space = ctx.requested[batchnorm::kTempSpace].get_space_typed<gpu, 1, AccReal>(
            mshadow::Shape1(C * blocks_y * 2), s);
        auto stream = mshadow::Stream<gpu>::GetStream(s);
        constexpr int nthreads_phase1 = 512;
        constexpr int nthreads_phase2 = 128;
        hipLaunchKernelGGL(( FrozenBatchNormalizationBackwardKernelCLastPhase1<nthreads_phase1, DType, AccReal, LType>)
            , dim3(n_blocks), dim3(nthreads_phase1), 0, stream, input.dptr_,
               gradOutput.dptr_,
               scratch_space.dptr_,
               gradInput.dptr_,
               gamma,
               tensors.runningMean.dptr_,
               tensors.runningVar.dptr_,
               M,
               C,
               eps,
               flags_copy);
        const int nblocks_phase2 = ceil_div(C, nthreads_phase2);
        hipLaunchKernelGGL(( FrozenBatchNormalizationBackwardKernelCLastPhase2<AccReal>)
            , dim3(nblocks_phase2), dim3(nthreads_phase2), 0, stream, scratch_space.dptr_,
               tensors.runningVar.dptr_,
               tensors.gradWeight.dptr_,
               tensors.gradBias.dptr_,
               C,
               blocks_y,
               eps,
               flags_copy);
      });
    } else {
      // Channels-first: one block per channel.
      dim3 blocks(gradOutput.ChannelCount());
      int ltype = mxnet::common::cuda::get_load_type(gradOutput.InnerSize() * sizeof(DType));
      MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
        constexpr int nvec = sizeof(LType) > sizeof(DType) ? sizeof(LType) / sizeof(DType) : 1;
        const index_t NHW_div_nvec = gradOutput.OuterSize() * gradOutput.InnerSize() / nvec;
        constexpr int threads = 512;
        hipLaunchKernelGGL(( FrozenBatchNormalizationBackwardKernel<threads, DType, AccReal, LType>)
            , dim3(blocks), dim3(threads), 0, mshadow::Stream<gpu>::GetStream(s), input.dptr_,
               gradOutput.dptr_,
               gradInput.dptr_,
               tensors.gradWeight.dptr_,
               tensors.gradBias.dptr_,
               gamma,
               tensors.runningMean.dptr_,
               tensors.runningVar.dptr_,
               gradOutput.OuterSize(),
               gradOutput.InnerSize(),
               gradOutput.ChannelCount(),
               NHW_div_nvec,
               eps,
               flags_copy);
      });
    }
  }
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationBackward);
}
} // namespace cuda
} // namespace batchnorm
template <typename xpu, typename DType, typename AccReal>
static inline uint32_t SetupFlags(const OpContext& ctx,
const BatchNormParam& params,
const std::vector<OpReqType>& req) {
uint32_t flags = 0;
flags |= ctx.is_train ? IS_TRAINING_FLAG : 0;
flags |= params.fix_gamma ? FIX_GAMMA_FLAG : 0;
flags |= params.use_global_stats ? USE_GLOBAL_STATS_FLAG : 0;
if (IsBNWriting(req[batchnorm::kData])) {
flags |= WRITE_DATA_FLAG;
} else if (req[batchnorm::kData] == kAddTo) {
flags |= ADDTO_DATA_FLAG;
}
if (IsBNWriting(req[batchnorm::kGamma])) {
flags |= WRITE_GAMMA_FLAG;
} else if (req[batchnorm::kGamma] == kAddTo) {
flags |= ADDTO_GAMMA_FLAG;
}
if (IsBNWriting(req[batchnorm::kBeta])) {
flags |= WRITE_BETA_FLAG;
} else if (req[batchnorm::kBeta] == kAddTo) {
flags |= ADDTO_BETA_FLAG;
}
return flags;
}
/*! \brief Forward batch-norm pass on GPU */
template <typename xpu, typename DType, typename AccReal>
void BatchNormForwardImpl(mshadow::Stream<gpu>* stream,
const OpContext& ctx,
const BatchNormParam& param_,
const std::vector<TBlob>& in_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& out_data,
const std::vector<TBlob>& aux_states) {
batchnorm::cuda::BatchNormalizationUpdateOutput<DType, AccReal>(
stream,
ctx,
param_,
in_data,
out_data,
aux_states,
SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
param_.momentum,
param_.eps);
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoForward_gpu);
}
/*! \brief Backward batch-norm pass on GPU */
template <typename xpu, typename DType, typename AccReal>
void BatchNormBackwardImpl(mshadow::Stream<gpu>* stream,
const OpContext& ctx,
const BatchNormParam& param_,
const std::vector<TBlob>& out_grad,
const std::vector<TBlob>& in_data,
const std::vector<TBlob>& out_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& in_grad,
const std::vector<TBlob>& aux_states) {
batchnorm::cuda::BatchNormalizationBackward<DType, AccReal>(
stream,
ctx,
param_,
out_grad,
in_data,
out_data,
in_grad,
aux_states,
SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
param_.momentum,
param_.eps);
MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoBackward_gpu);
}
template <>
void BatchNormCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
BatchNormParam param = nnvm::get<BatchNormParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 5U);
std::vector<TBlob> in_data(inputs.begin(), inputs.begin() + 3);
std::vector<TBlob> aux_states(inputs.begin() + 3, inputs.end());
int dtype = inputs[0].type_flag_;
mxnet::TShape shape = inputs[0].shape_;
param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
if (!param.use_global_stats && !param.cudnn_off &&
CudnnBatchNormSupports(param, inputs[batchnorm::kData])) {
CudnnBatchNormForward(param, ctx, inputs, req, outputs);
} else {
MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
BatchNormForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
})
}
#else
MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, {
BatchNormForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
});
#endif
}
template <>
void BatchNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 8U);
BatchNormParam param = nnvm::get<BatchNormParam>(attrs.parsed);
int dtype = inputs[0].type_flag_;
mxnet::TShape shape = inputs[0].shape_;
param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
if (!param.use_global_stats && !param.cudnn_off &&
CudnnBatchNormSupports(param, inputs[3 + batchnorm::kData])) {
CudnnBatchNormBackward(param, ctx, inputs, req, outputs);
} else {
MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
BatchNormBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
})
}
#else
MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
BatchNormBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
});
#endif
}
NNVM_REGISTER_OP(BatchNorm).set_attr<FCompute>("FCompute<gpu>", BatchNormCompute<gpu>);
NNVM_REGISTER_OP(_backward_BatchNorm)
.set_attr<FCompute>("FCompute<gpu>", BatchNormGradCompute<gpu>);
} // namespace op
} // namespace mxnet
| 66697acf3ae2e2cc2e5ac2d6b61b3a2c98ddcaec.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file batch_norm.cu
* \brief CUDA Batch Normalization code
* \author Chris Olivier, Bing Xu, Da Zheng
* Adapted from Torch
*/
#include <cuda_runtime_api.h>
#include <algorithm>
#include "batch_norm-inl.h"
#include "../../common/cuda/utils.h"
#define WRITE_DATA_FLAG 1
#define WRITE_GAMMA_FLAG 2
#define WRITE_BETA_FLAG 4
#define FIX_GAMMA_FLAG 8
#define IS_TRAINING_FLAG 16
#define USE_GLOBAL_STATS_FLAG 32
#define ADDTO_DATA_FLAG (1 << 6)
#define ADDTO_GAMMA_FLAG (1 << 7)
#define ADDTO_BETA_FLAG (1 << 8)
#if MXNET_USE_CUDNN == 1
#include "./cudnn/cudnn_batch_norm.h"
#endif
#include "../../../include/mxnet/tensor_blob.h"
using namespace mxnet;
namespace {
/*! \brief inverse standard deviation <-> variance */
template <typename DType, typename AccReal>
MSHADOW_XINLINE AccReal variance_to_invstd(DType var, AccReal eps) {
return rsqrtf(static_cast<AccReal>(var) + eps);
}
template <>
MSHADOW_XINLINE double variance_to_invstd(double var, double eps) {
return rsqrt(var + eps);
}
template <typename AccReal>
MSHADOW_XINLINE AccReal invstd_to_variance(AccReal invstd, AccReal eps) {
return static_cast<AccReal>(1.0) / (invstd * invstd) - eps;
}
template <>
MSHADOW_XINLINE double invstd_to_variance(double invstd, double eps) {
return 1.0 / (invstd * invstd) - eps;
}
} // namespace
namespace mxnet {
namespace op {
namespace batchnorm {
namespace cuda {
static const unsigned WARP_SIZE = 32;
// The maximum number of threads in a block
static const unsigned MAX_BLOCK_SIZE = 512U;
template <typename In, typename Out>
struct ScalarConvert {
static __host__ __device__ __forceinline__ Out to(const In v) {
return (Out)v;
}
};
// Number of threads in a block given an input size up to MAX_BLOCK_SIZE
static unsigned getNumThreads(int nElem) {
unsigned threadSizes[4] = {32, 64, 128, 256};
for (int i = 0; i != 4; ++i) {
if (static_cast<unsigned>(nElem) <= threadSizes[i]) {
return threadSizes[i];
}
}
return MAX_BLOCK_SIZE;
}
// Returns the index of the most significant 1 bit in `val`.
__device__ __forceinline__ int getMSB(int val) {
return 31 - __clz(val);
}
template <typename DType, typename AccReal>
struct Float2 {
AccReal v1, v2;
__device__ Float2() {}
__device__ Float2(DType v1, DType v2)
: v1(ScalarConvert<DType, AccReal>::to(v1)), v2(ScalarConvert<DType, AccReal>::to(v2)) {}
__device__ Float2(DType v)
: v1(ScalarConvert<DType, AccReal>::to(v)), v2(ScalarConvert<DType, AccReal>::to(v)) {}
__device__ Float2(int v)
: v1(ScalarConvert<int, AccReal>::to(v)), v2(ScalarConvert<int, AccReal>::to(v)) {}
__device__ Float2& operator+=(const Float2& a) {
v1 += a.v1;
v2 += a.v2;
return *this;
}
};
template <typename DType, typename AccReal, typename DeviceTensor>
struct SumOp {
__device__ SumOp(const DeviceTensor t) : tensor(t) {}
__device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
return ScalarConvert<DType, AccReal>::to(tensor.get_ref(batch, plane, n));
}
const DeviceTensor tensor;
};
template <typename DType, typename AccReal, typename DeviceTensor>
struct VarOp {
__device__ VarOp(AccReal m, const DeviceTensor t) : mean(m), tensor(t) {}
__device__ __forceinline__ AccReal operator()(int batch, int plane, int n) {
DType val = tensor.get_ref(batch, plane, n);
return (val - mean) * (val - mean);
}
const AccReal mean;
const DeviceTensor tensor;
};
template <typename DType, typename AccReal, typename DeviceTensor>
struct GradOp {
__device__ GradOp(AccReal m, const DeviceTensor i, const DeviceTensor g)
: mean(m), input(i), gradOutput(g) {}
__device__ __forceinline__ Float2<DType, AccReal> operator()(int batch, int plane, int n) {
const DType g = gradOutput.get_ref(batch, plane, n);
const DType c = ScalarConvert<AccReal, DType>::to(input.get_ref(batch, plane, n) - mean);
return Float2<DType, AccReal>(g, g * c);
}
const AccReal mean;
const DeviceTensor input;
const DeviceTensor gradOutput;
};
#if CUDA_VERSION >= 9000
#define FULLMASK 0xFFFFFFFF
#define __shfl_xor(...) __shfl_xor_sync(FULLMASK, __VA_ARGS__)
#endif
// Sum across all threads within a warp
template <typename T>
static __device__ __forceinline__ T warpSum(T val) {
#if __CUDA_ARCH__ >= 300
for (int i = 0; i < getMSB(WARP_SIZE); ++i) {
val += __shfl_xor(val, 1 << i, WARP_SIZE);
}
#else
__shared__ T values[MAX_BLOCK_SIZE];
values[threadIdx.x] = val;
__threadfence_block();
const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE;
for (int i = 1; i < WARP_SIZE; i++) {
val += values[base + ((i + threadIdx.x) % WARP_SIZE)];
}
#endif
return val;
}
template <typename DType, typename AccReal>
static __device__ __forceinline__ Float2<DType, AccReal> warpSum(Float2<DType, AccReal> value) {
value.v1 = warpSum(value.v1);
value.v2 = warpSum(value.v2);
return value;
}
// Sum across (batch, x/y/z) applying Op() pointwise
template <typename T, typename Op, typename DeviceTensor>
static __device__ T reduce(Op op, DeviceTensor tensor, int plane) {
T sum = (T)0;
for (int batch = 0; batch < tensor.OuterSize(); ++batch) {
for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) {
sum += op(batch, plane, x);
}
}
// sum over NumThreads within a warp
sum = warpSum(sum);
// 'transpose', and reduce within warp again
__shared__ T shared[32];
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
shared[threadIdx.x / WARP_SIZE] = sum;
}
if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) {
// zero out the other entries in shared
shared[threadIdx.x] = (T)0;
}
__syncthreads();
if (threadIdx.x / WARP_SIZE == 0) {
sum = warpSum(shared[threadIdx.x]);
if (threadIdx.x == 0) {
shared[0] = sum;
}
}
__syncthreads();
// Everyone picks it up, should be broadcast into the whole gradInput
return shared[0];
}
namespace {
constexpr int inference_forward_threads = 512;
constexpr int shmem_elements = 1536;
} // namespace
template <typename DType, typename AType, typename LType, bool small_num_channels>
__launch_bounds__(inference_forward_threads) __global__
void BatchNormalizationUpdateOutputInferenceKernel(const DType* input,
DType* output,
const index_t size,
const index_t outer_size,
const index_t num_channels,
const index_t inner_size,
const AType* runningMean,
const AType* runningVar,
AType* saveMean,
AType* saveInvStd,
AType* weight,
AType* bias,
const AType epsilon,
const uint32_t flags) {
constexpr int nvec = sizeof(LType) / sizeof(DType);
__shared__ AType saved_invstd[shmem_elements];
__shared__ AType saved_mean[shmem_elements];
__shared__ AType saved_weight[shmem_elements];
__shared__ AType saved_bias[shmem_elements];
union vectorized_loader {
LType aligned;
DType separate[nvec]; // NOLINT(*)
__device__ inline vectorized_loader() {}
__device__ inline ~vectorized_loader() {}
} scratch;
if (small_num_channels) {
for (int i = threadIdx.x; i < num_channels; i += blockDim.x) {
saved_invstd[i] = variance_to_invstd(runningVar[i], epsilon);
saved_mean[i] = runningMean[i];
saved_weight[i] = (weight != nullptr && (flags & FIX_GAMMA_FLAG) == 0) ? weight[i] : 1;
saved_bias[i] = (bias != nullptr) ? bias[i] : 0;
}
__syncthreads();
}
const index_t tid = threadIdx.x + blockIdx.x * blockDim.x;
const index_t stride = blockDim.x * gridDim.x;
const LType* input_aligned = reinterpret_cast<const LType*>(input);
LType* output_aligned = reinterpret_cast<LType*>(output);
for (index_t i = tid; i < size / nvec; i += stride) {
scratch.aligned = input_aligned[i];
const index_t my_channel_base = (nvec * i) % (inner_size * num_channels);
#pragma unroll
for (int j = 0; j < nvec; ++j) {
index_t my_channel = (my_channel_base + j) / inner_size;
if (my_channel >= num_channels)
my_channel = my_channel % num_channels;
AType current_input = static_cast<AType>(scratch.separate[j]);
AType invstd = small_num_channels ? saved_invstd[my_channel]
: variance_to_invstd(runningVar[my_channel], epsilon);
AType mean = small_num_channels ? saved_mean[my_channel] : runningMean[my_channel];
AType gamma =
small_num_channels
? saved_weight[my_channel]
: ((weight != nullptr && (flags & FIX_GAMMA_FLAG) == 0) ? weight[my_channel] : 1);
AType beta =
small_num_channels ? saved_bias[my_channel] : ((bias != nullptr) ? bias[my_channel] : 0);
current_input = gamma * (current_input - mean) * invstd + beta;
scratch.separate[j] = current_input;
}
output_aligned[i] = scratch.aligned;
if (i < num_channels) {
saveMean[i] = runningMean[i];
saveInvStd[i] = variance_to_invstd(runningVar[i], epsilon);
if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0 && weight != nullptr) {
weight[i] = 1;
}
}
}
}
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
__global__ void BatchNormalizationUpdateOutputKernel(DeviceTensor input,
DeviceTensor output,
DeviceTensor1 weight,
DeviceTensor1 bias,
const AccReal epsilon,
const AccReal momentum,
DeviceTensor1 runningMean,
DeviceTensor1 runningVar,
DeviceTensor1 saveMean,
DeviceTensor1 saveInvStd,
const uint32_t flags) {
const int plane = blockIdx.x;
const int N = input.OuterSize() * input.InnerSize();
const AccReal norm = AccReal(1) / N;
// Compute the mean and variance across (batch, x/y/z)
const AccReal mean =
reduce<AccReal>(SumOp<DType, AccReal, DeviceTensor>(input), input, plane) * norm;
__syncthreads();
const AccReal varN =
reduce<AccReal>(VarOp<DType, AccReal, DeviceTensor>(mean, input), input, plane);
AccReal invStd = 0;
if (varN != AccReal(0) || epsilon != AccReal(0)) {
invStd = AccReal(1.0) / sqrt(varN * norm + epsilon);
}
// Save the mean, variance, and moving averages
if (threadIdx.x == 0) {
// For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc)
// Momentum based writeback
saveMean[plane] = ScalarConvert<AccReal, DType>::to(mean);
saveInvStd[plane] = invStd;
if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0 &&
weight.numElements() > 0) {
weight[plane] = AccReal(1);
}
}
// Write normalized and update the output
const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0)
? ScalarConvert<DType, AccReal>::to(weight[plane])
: ScalarConvert<int, AccReal>::to(1);
const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane])
: ScalarConvert<int, AccReal>::to(0);
for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
const DType inp = input.get_ref(batch, plane, x);
output.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to(gamma * (inp - mean) * invStd + beta);
}
}
}
template <typename DeviceTensor1>
struct CUDATensors {
DeviceTensor1 gradWeight;
DeviceTensor1 gradBias;
DeviceTensor1 weight;
DeviceTensor1 runningMean;
DeviceTensor1 runningVar;
DeviceTensor1 saveMean;
DeviceTensor1 saveInvStd;
};
namespace {
inline int ceil_div(int x, int y) {
return (x + y - 1) / y;
}
} // namespace
template <int NTHREADS, typename DType, typename AType, typename LType>
__global__ void FrozenBatchNormalizationBackwardKernelCLastPhase1(const DType* input,
const DType* gradOutput,
AType* temp_space,
DType* gradInput,
const AType* weight,
const AType* runningMean,
const AType* runningVar,
const index_t outer,
const index_t num_channels,
const AType eps,
const uint32_t flags) {
using mxnet::common::cuda::warp_size;
constexpr int num_warps = NTHREADS / warp_size;
constexpr int nvec = sizeof(LType) >= sizeof(DType) ? sizeof(LType) / sizeof(DType) : 1;
const size_t stride = num_channels / nvec;
union vectorized_loader {
LType aligned;
DType separate[nvec]; // NOLINT(*)
__device__ inline vectorized_loader() {}
__device__ inline ~vectorized_loader() {}
};
vectorized_loader vec_input, vec_gradOutput;
__shared__ AType scratch[NTHREADS * 2 * nvec];
AType* my_values_gamma = &(scratch[threadIdx.x * nvec]);
AType* my_values_beta = &(scratch[(NTHREADS + threadIdx.x) * nvec]);
AType sum_gamma[nvec]; // NOLINT(*)
AType sum_beta[nvec]; // NOLINT(*)
#pragma unroll
for (int i = 0; i < nvec; ++i) {
sum_gamma[i] = 0;
sum_beta[i] = 0;
}
const size_t offset = blockIdx.x * warp_size;
const int my_warp = threadIdx.x / warp_size;
const int thread_idx_in_warp = threadIdx.x % warp_size;
AType invstd[nvec]; // NOLINT(*)
AType mean[nvec]; // NOLINT(*)
AType gamma[nvec]; // NOLINT(*)
size_t channel_offset = (offset + thread_idx_in_warp) * nvec;
if (channel_offset < num_channels) {
#pragma unroll
for (int i = 0; i < nvec; ++i) {
invstd[i] = variance_to_invstd(runningVar[channel_offset + i], eps);
mean[i] = runningMean[channel_offset + i];
gamma[i] = weight != nullptr ? weight[channel_offset + i] : 1;
}
}
const LType* aligned_gradOutput = reinterpret_cast<const LType*>(gradOutput);
const LType* aligned_input = reinterpret_cast<const LType*>(input);
LType* gradInput_aligned = reinterpret_cast<LType*>(gradInput);
const int rows_per_block = (outer + gridDim.y - 1) / gridDim.y;
const size_t start_row = my_warp + rows_per_block * blockIdx.y;
const size_t end_row = min(outer, static_cast<index_t>(rows_per_block * (blockIdx.y + 1)));
if (offset + thread_idx_in_warp < stride) {
for (size_t i = start_row; i < end_row; i += num_warps) {
const index_t idx = i * stride + offset + thread_idx_in_warp;
vec_gradOutput.aligned = aligned_gradOutput[idx];
vec_input.aligned = aligned_input[idx];
#pragma unroll
for (int j = 0; j < nvec; ++j) {
sum_beta[j] += static_cast<AType>(vec_gradOutput.separate[j]);
sum_gamma[j] += static_cast<AType>(vec_gradOutput.separate[j]) *
(static_cast<AType>(vec_input.separate[j]) - mean[j]);
}
if (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) {
// Gradient to input
#pragma unroll
for (int j = 0; j < nvec; ++j) {
vec_gradOutput.separate[j] *= invstd[j] * gamma[j];
}
if (flags & ADDTO_DATA_FLAG) {
vec_input.aligned = gradInput_aligned[idx];
#pragma unroll
for (int j = 0; j < nvec; ++j) {
vec_gradOutput.separate[j] += vec_input.separate[j];
}
}
gradInput_aligned[idx] = vec_gradOutput.aligned;
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < nvec; ++i) {
my_values_gamma[i] = sum_gamma[i];
my_values_beta[i] = sum_beta[i];
}
__syncthreads();
for (int i = num_warps / 2; i > 0; i /= 2) {
if (my_warp < i) {
const int shared_offset = nvec * i * warp_size;
#pragma unroll
for (int j = 0; j < nvec; ++j) {
my_values_gamma[j] += my_values_gamma[j + shared_offset];
my_values_beta[j] += my_values_beta[j + shared_offset];
}
}
__syncthreads();
}
if (threadIdx.x < min(warp_size * nvec, static_cast<int>(num_channels - nvec * offset))) {
const size_t offset_out = nvec * offset + blockIdx.y * num_channels;
const size_t offset_beta = gridDim.y * num_channels;
temp_space[offset_out + threadIdx.x] = scratch[threadIdx.x];
temp_space[offset_beta + offset_out + threadIdx.x] = scratch[NTHREADS * nvec + threadIdx.x];
}
}
template <typename AType>
__global__ void FrozenBatchNormalizationBackwardKernelCLastPhase2(const AType* temp_space,
const AType* runningVar,
AType* out_gamma,
AType* out_beta,
int lead_dim,
int n_blocks,
AType epsilon,
uint32_t flags) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < lead_dim) {
AType sum_gamma = 0;
AType sum_beta = 0;
for (int i = tid; i < lead_dim * n_blocks; i += lead_dim) {
sum_gamma += temp_space[i];
sum_beta += temp_space[i + lead_dim * n_blocks];
}
if (flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) {
if ((flags & FIX_GAMMA_FLAG) == 0) {
const AType invstd = variance_to_invstd(runningVar[tid], epsilon);
if (flags & WRITE_GAMMA_FLAG) {
out_gamma[tid] = sum_gamma * invstd;
} else {
out_gamma[tid] += sum_gamma * invstd;
}
} else {
if (flags & WRITE_GAMMA_FLAG) {
out_gamma[tid] = 0;
}
}
}
if (flags & WRITE_BETA_FLAG) {
out_beta[tid] = sum_beta;
} else if (flags & ADDTO_BETA_FLAG) {
out_beta[tid] += sum_beta;
}
}
}
template <int NTHREADS, typename DType, typename AType, typename LType>
__global__ void FrozenBatchNormalizationBackwardKernel(const DType* input,
const DType* gradOutput,
DType* gradInput,
AType* gradWeight,
AType* gradBias,
const AType* weight,
const AType* runningMean,
const AType* runningVar,
const index_t outer,
const index_t inner,
const index_t num_channels,
const index_t NHW_div_nvec,
const AType eps,
const uint32_t flags) {
const index_t my_channel = blockIdx.x;
const AType invstd = variance_to_invstd(runningVar[my_channel], eps);
const AType mean = runningMean[my_channel];
const AType gamma = weight != nullptr ? weight[my_channel] : 1;
constexpr int nvec = sizeof(LType) > sizeof(DType) ? sizeof(LType) / sizeof(DType) : 1;
union vectorized_loader {
LType aligned;
DType separate[nvec]; // NOLINT(*)
__device__ inline vectorized_loader() {}
__device__ inline ~vectorized_loader() {}
};
vectorized_loader vec_input, vec_gradOutput;
const LType* input_aligned = reinterpret_cast<const LType*>(input);
const LType* gradOutput_aligned = reinterpret_cast<const LType*>(gradOutput);
LType* gradInput_aligned = reinterpret_cast<LType*>(gradInput);
const index_t inner_div_nvec = inner / nvec;
AType sum_gamma = 0;
AType sum_beta = 0;
for (index_t i = threadIdx.x; i < NHW_div_nvec; i += blockDim.x) {
const index_t inner_idx = i % inner_div_nvec;
const index_t outer_idx = i / inner_div_nvec;
const index_t idx = inner_idx + (my_channel + outer_idx * num_channels) * inner_div_nvec;
vec_gradOutput.aligned = gradOutput_aligned[idx];
vec_input.aligned = input_aligned[idx];
#pragma unroll
for (int j = 0; j < nvec; ++j) {
sum_beta += static_cast<AType>(vec_gradOutput.separate[j]);
sum_gamma += static_cast<AType>(vec_gradOutput.separate[j]) *
(static_cast<AType>(vec_input.separate[j]) - mean);
}
if (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) {
// Gradient to input
#pragma unroll
for (int j = 0; j < nvec; ++j) {
vec_gradOutput.separate[j] *= invstd * gamma;
}
if (flags & ADDTO_DATA_FLAG) {
vec_input.aligned = gradInput_aligned[idx];
#pragma unroll
for (int j = 0; j < nvec; ++j) {
vec_gradOutput.separate[j] += vec_input.separate[j];
}
}
gradInput_aligned[idx] = vec_gradOutput.aligned;
}
}
sum_gamma =
common::cuda::reduce<NTHREADS, false>(sum_gamma, [](AType a, AType b) { return a + b; });
sum_beta =
common::cuda::reduce<NTHREADS, false>(sum_beta, [](AType a, AType b) { return a + b; });
if (threadIdx.x == 0) {
if (flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) {
if ((flags & FIX_GAMMA_FLAG) == 0) {
if (flags & WRITE_GAMMA_FLAG) {
gradWeight[my_channel] = sum_gamma * invstd;
} else {
gradWeight[my_channel] += sum_gamma * invstd;
}
} else {
if (flags & WRITE_GAMMA_FLAG) {
gradWeight[my_channel] = 0;
}
}
}
if (flags & WRITE_BETA_FLAG) {
gradBias[my_channel] = sum_beta;
} else if (flags & ADDTO_BETA_FLAG) {
gradBias[my_channel] += sum_beta;
}
}
}
template <typename DType, typename AccReal, typename DeviceTensor1, typename DeviceTensor>
static __global__ void BatchNormalizationBackwardKernel(const DeviceTensor input,
const DeviceTensor gradOutput,
DeviceTensor gradInput,
CUDATensors<DeviceTensor1> tensors,
const uint32_t flags,
const AccReal momentum,
const AccReal eps) {
int plane = blockIdx.x;
int N = gradOutput.OuterSize() * gradOutput.InnerSize();
AccReal mean, invstd;
mean = ScalarConvert<DType, AccReal>::to(tensors.saveMean[plane]);
invstd = tensors.saveInvStd[plane];
const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0)
? ScalarConvert<DType, AccReal>::to(tensors.weight[plane])
: AccReal(1);
const AccReal norm = AccReal(1) / N;
// Compute two values across (batch, x/y/z) in one pass:
// 1. Sum(gradOutput)
// 2. DotProduct(input - mean, gradOutput)
GradOp<DType, AccReal, DeviceTensor> g(mean, input, gradOutput);
Float2<DType, AccReal> res =
reduce<Float2<DType, AccReal>, GradOp<DType, AccReal, DeviceTensor>, DeviceTensor>(
g, gradOutput, plane);
const AccReal gradOutputSum = res.v1;
const AccReal dotP = res.v2;
const AccReal gradMean = gradOutputSum * norm;
const AccReal projScale = dotP * norm * invstd * invstd;
const AccReal gradScale = invstd * weightVal;
if (threadIdx.x == 0) {
const AccReal localVariance = invstd_to_variance(tensors.saveInvStd[plane], eps);
const AccReal localMean = tensors.saveMean[plane];
// update running averages
tensors.runningMean[plane] =
tensors.runningMean[plane] * momentum + localMean * (AccReal(1) - momentum);
tensors.runningVar[plane] =
tensors.runningVar[plane] * momentum + localVariance * (AccReal(1) - momentum);
}
if (gradInput.Size() > 0 && (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) != 0) {
const bool grad_write = flags & WRITE_DATA_FLAG;
if (grad_write) {
for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
const DType gradOut = gradOutput.get_ref(batch, plane, x);
const DType inp = input.get_ref(batch, plane, x);
const AccReal proj = (inp - mean) * projScale;
gradInput.get_ref(batch, plane, x) =
ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
}
}
} else {
// grad addto
for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) {
for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) {
const DType gradOut = gradOutput.get_ref(batch, plane, x);
const DType inp = input.get_ref(batch, plane, x);
const AccReal proj = (inp - mean) * projScale;
gradInput.get_ref(batch, plane, x) +=
ScalarConvert<AccReal, DType>::to((gradOut - proj - gradMean) * gradScale);
}
}
}
}
if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 &&
(flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) != 0) {
if ((flags & FIX_GAMMA_FLAG) == 0) {
if (flags & WRITE_GAMMA_FLAG)
tensors.gradWeight[plane] = ScalarConvert<AccReal, DType>::to(dotP * invstd);
else
tensors.gradWeight[plane] += ScalarConvert<AccReal, DType>::to(dotP * invstd);
} else {
tensors.gradWeight[plane] = DType(0);
}
}
if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 &&
(flags & (WRITE_BETA_FLAG | ADDTO_BETA_FLAG)) != 0) {
if (flags & WRITE_BETA_FLAG)
tensors.gradBias[plane] = ScalarConvert<AccReal, DType>::to(gradOutputSum);
else
tensors.gradBias[plane] += ScalarConvert<AccReal, DType>::to(gradOutputSum);
}
}
template <typename DType, int Dim>
struct DeviceTensor {
public:
inline DeviceTensor() {}
inline DeviceTensor(DType* p, const int* size) : dptr_(p) {
for (int i = 0; i < Dim; ++i) {
size_[i] = size ? size[i] : 0;
}
}
MSHADOW_XINLINE unsigned getSize(const int i) const {
return size_[i];
}
MSHADOW_XINLINE int numElements() const {
int n = 1;
for (int i = 0; i < Dim; ++i) {
n *= size_[i];
}
return n;
}
MSHADOW_XINLINE DType& operator()(const size_t batch, const size_t plane, const size_t x) const {
int offset = 0;
offset *= size_[0];
offset += batch;
offset *= size_[1];
offset += plane;
offset *= size_[2];
offset += x;
return *(const_cast<DType*>(dptr_ + offset));
}
MSHADOW_XINLINE DType& operator[](const size_t x) const {
return *(dptr_ + x);
}
MSHADOW_XINLINE size_t InnerSize() const {
size_t sz = 1;
for (size_t i = 2; i < Dim; ++i) {
sz *= size_[i];
}
return sz;
}
MSHADOW_XINLINE size_t ChannelCount() const {
return size_[1];
}
DType* dptr_;
int size_[Dim];
};
template <typename DType, int Dim>
static DeviceTensor<DType, Dim> devicetensor(const TBlob& blob) {
CHECK_EQ(blob.type_flag_, mshadow::DataType<DType>::kFlag);
DType* data = blob.dptr<DType>();
const int inDim = blob.shape_.ndim();
if (inDim == Dim) {
DeviceTensor<DType, Dim> tensor(data, nullptr);
for (int i = 0; i < Dim; ++i) {
tensor.size_[i] = blob.size(i);
}
return tensor;
}
// View in which the last dimensions are collapsed or expanded as needed
int size[Dim];
for (int i = 0; i < Dim || i < inDim; ++i) {
if (i < Dim && i < inDim) {
size[i] = blob.size(i);
} else if (i < Dim) {
size[i] = 1;
} else {
size[Dim - 1] *= blob.size(i);
}
}
return DeviceTensor<DType, Dim>(data, &size[0]);
}
#define DeviceTensor1 DeviceTensor<AccReal, 1>
using namespace mxnet::op;
// Forward batch normalization on GPU.
//
// Two execution paths:
//  * Inference / frozen statistics ((flags & IS_TRAINING_FLAG) == 0 or
//    USE_GLOBAL_STATS_FLAG set): an elementwise kernel normalizes with the
//    running mean/var, vectorized so each thread moves
//    sizeof(double)/sizeof(DType) elements per access.
//  * Training: one CUDA block per channel computes the batch statistics and
//    writes the output plus the running / saved statistics.
template <typename DType, typename AccReal>
static void BatchNormalizationUpdateOutput(mshadow::Stream<gpu>* s,
                                           const OpContext& ctx,
                                           const BatchNormParam& param,
                                           const std::vector<TBlob>& in_data,
                                           const std::vector<TBlob>& out_data,
                                           const std::vector<TBlob>& aux_states,
                                           const uint32_t flags,
                                           double momentum,
                                           double eps) {
  // (outer, channel, inner) 3-D views of data/output built around param.axis.
  batchnorm::BNTensor3<DType> input =
      batchnorm::BNTensor3<DType>(in_data[batchnorm::kData], param.axis);
  batchnorm::BNTensor3<DType> output =
      batchnorm::BNTensor3<DType>(out_data[batchnorm::kOut], param.axis);
  // 1-D views of the affine parameters and the running / per-batch statistics.
  DeviceTensor1 weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
  DeviceTensor1 bias = devicetensor<AccReal, 1>(in_data[batchnorm::kBeta]);
  DeviceTensor1 runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
  DeviceTensor1 runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
  DeviceTensor1 saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
  DeviceTensor1 saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
  DCHECK_GT(weight.numElements(), 0);
  if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) {
    // Gamma/beta may be absent; the kernel accepts nullptr for either.
    AccReal* bias_ptr = bias.numElements() > 0 ? bias.dptr_ : nullptr;
    AccReal* gamma_ptr = weight.numElements() > 0 ? weight.dptr_ : nullptr;
    // nvec DType elements fit in one double-wide vector access.
    int nvec = sizeof(double) / sizeof(DType);
    index_t size = input.InnerSize() * input.OuterSize() * input.ChannelCount();
    // Round the element count up to a multiple of the vector width.
    index_t aligned_size = ((size + nvec - 1) / nvec) * nvec;
    // Enough blocks to cover the data at nvec elements/thread, capped at 512.
    index_t blocks =
        std::min((size + nvec * inference_forward_threads - 1) / (nvec * inference_forward_threads),
                 static_cast<index_t>(512));
    if (input.ChannelCount() < shmem_elements) {
      // Channel count small enough for the kernel variant with the
      // shared-memory parameter cache (last template flag = true).
      BatchNormalizationUpdateOutputInferenceKernel<DType, AccReal, double, true>
          <<<blocks, inference_forward_threads, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
              input.dptr_,
              output.dptr_,
              aligned_size,
              input.OuterSize(),
              input.ChannelCount(),
              input.InnerSize(),
              runningMean.dptr_,
              runningVar.dptr_,
              saveMean.dptr_,
              saveInvStd.dptr_,
              gamma_ptr,
              bias_ptr,
              eps,
              flags);
    } else {
      // Too many channels for the shared-memory cache; same kernel, flag = false.
      BatchNormalizationUpdateOutputInferenceKernel<DType, AccReal, double, false>
          <<<blocks, inference_forward_threads, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
              input.dptr_,
              output.dptr_,
              aligned_size,
              input.OuterSize(),
              input.ChannelCount(),
              input.InnerSize(),
              runningMean.dptr_,
              runningVar.dptr_,
              saveMean.dptr_,
              saveInvStd.dptr_,
              gamma_ptr,
              bias_ptr,
              eps,
              flags);
    }
  } else {
    // Training: one block per channel; thread count scaled to the inner size.
    dim3 blocks(input.ChannelCount());
    dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize()));
    BatchNormalizationUpdateOutputKernel<DType, AccReal, DeviceTensor1, batchnorm::BNTensor3<DType>>
        <<<blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s)>>>(input,
                                                                     output,
                                                                     weight,
                                                                     bias,
                                                                     eps,
                                                                     momentum,
                                                                     runningMean,
                                                                     runningVar,
                                                                     saveMean,
                                                                     saveInvStd,
                                                                     flags);
  }
  // Surface launch/execution errors from the kernels above.
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationUpdateOutput);
}
// Backward batch normalization on GPU.
//
// Training path: a single per-channel kernel computes all gradients from the
// saved batch statistics.  Frozen path (inference stats): gradients are taken
// w.r.t. the running mean/var, with a specialized channels-last two-phase
// reduction when param.axis is the last dimension.
template <typename DType, typename AccReal>
static void BatchNormalizationBackward(mshadow::Stream<gpu>* s,
                                       const OpContext& ctx,
                                       const BatchNormParam& param,
                                       const std::vector<TBlob>& out_grad,
                                       const std::vector<TBlob>& in_data,
                                       const std::vector<TBlob>& out_data,
                                       const std::vector<TBlob>& in_grad,
                                       const std::vector<TBlob>& aux_states,
                                       const uint32_t flags,
                                       double momentum,
                                       double eps) {
  // (outer, channel, inner) views around param.axis.
  batchnorm::BNTensor3<DType> input =
      batchnorm::BNTensor3<DType>(in_data[batchnorm::kData], param.axis);
  batchnorm::BNTensor3<DType> gradOutput =
      batchnorm::BNTensor3<DType>(out_grad[batchnorm::kOut], param.axis);
  batchnorm::BNTensor3<DType> gradInput =
      batchnorm::BNTensor3<DType>(in_grad[batchnorm::kData], param.axis);
  CHECK_EQ(gradOutput.Size(), gradInput.Size());
  // Bundle the 1-D parameter/statistic views for the kernels.
  CUDATensors<DeviceTensor1> tensors;
  tensors.gradWeight = devicetensor<AccReal, 1>(in_grad[batchnorm::kGamma]);
  tensors.gradBias = devicetensor<AccReal, 1>(in_grad[batchnorm::kBeta]);
  tensors.weight = devicetensor<AccReal, 1>(in_data[batchnorm::kGamma]);
  tensors.runningMean = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingMean]);
  tensors.runningVar = devicetensor<AccReal, 1>(aux_states[batchnorm::kMovingVar]);
  tensors.saveMean = devicetensor<AccReal, 1>(out_data[batchnorm::kMean]);
  tensors.saveInvStd = devicetensor<AccReal, 1>(out_data[batchnorm::kVar]);
  DCHECK_GT(tensors.weight.numElements(), 0);
  const bool is_train_and_not_global_stats =
      (flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0;
  if (is_train_and_not_global_stats) {
    // One block per channel handles the whole backward pass.
    dim3 blocks(gradOutput.ChannelCount());
    dim3 threads(batchnorm::cuda::getNumThreads(gradOutput.InnerSize()));
    BatchNormalizationBackwardKernel<DType, AccReal, DeviceTensor1, batchnorm::BNTensor3<DType>>
        <<<blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            input, gradOutput, gradInput, tensors, flags, momentum, eps);
  } else {
    // Drop write flags for outputs that have no storage behind them.
    uint32_t flags_copy = flags;
    if (gradInput.Size() <= 0) {
      flags_copy = (flags_copy & ~WRITE_DATA_FLAG);
    }
    if (tensors.gradWeight.numElements() <= 0) {
      flags_copy = (flags_copy & ~WRITE_GAMMA_FLAG);
    }
    if (tensors.gradBias.numElements() <= 0) {
      flags_copy = (flags_copy & ~WRITE_BETA_FLAG);
    }
    // With fix_gamma (or no gamma at all) the kernels treat scale as 1.
    AccReal* gamma = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0)
                         ? tensors.weight.dptr_
                         : nullptr;
    if (param.axis == -1 || param.axis == in_data[batchnorm::kData].shape_.ndim() - 1) {
      // Channels-last layout: two-phase reduction.  Phase 1 produces partial
      // per-channel sums (2 values per channel per blocks_y) into scratch
      // space; phase 2 reduces them into gradWeight/gradBias.
      const int C = gradOutput.ChannelCount();
      // Pick the widest load type that divides one row of channels.
      int ltype = mxnet::common::cuda::get_load_type(C * sizeof(DType));
      const int M = gradOutput.OuterSize();
      MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
        const unsigned int blocks_x =
            ceil_div(C * sizeof(DType), mxnet::common::cuda::warp_size * sizeof(LType));
        const unsigned int preferred_number_of_blocks =
            2 * MultiprocessorCount(ctx.run_ctx.ctx.dev_id);
        const unsigned int blocks_y = std::max(preferred_number_of_blocks / blocks_x, 1u);
        const dim3 n_blocks = {blocks_x, blocks_y, 1};
        auto scratch_space = ctx.requested[batchnorm::kTempSpace].get_space_typed<gpu, 1, AccReal>(
            mshadow::Shape1(C * blocks_y * 2), s);
        auto stream = mshadow::Stream<gpu>::GetStream(s);
        constexpr int nthreads_phase1 = 512;
        constexpr int nthreads_phase2 = 128;
        FrozenBatchNormalizationBackwardKernelCLastPhase1<nthreads_phase1, DType, AccReal, LType>
            <<<n_blocks, nthreads_phase1, 0, stream>>>(input.dptr_,
                                                       gradOutput.dptr_,
                                                       scratch_space.dptr_,
                                                       gradInput.dptr_,
                                                       gamma,
                                                       tensors.runningMean.dptr_,
                                                       tensors.runningVar.dptr_,
                                                       M,
                                                       C,
                                                       eps,
                                                       flags_copy);
        const int nblocks_phase2 = ceil_div(C, nthreads_phase2);
        FrozenBatchNormalizationBackwardKernelCLastPhase2<AccReal>
            <<<nblocks_phase2, nthreads_phase2, 0, stream>>>(scratch_space.dptr_,
                                                             tensors.runningVar.dptr_,
                                                             tensors.gradWeight.dptr_,
                                                             tensors.gradBias.dptr_,
                                                             C,
                                                             blocks_y,
                                                             eps,
                                                             flags_copy);
      });
    } else {
      // Channels-first (or interior axis): one block per channel, vectorized
      // over OuterSize * InnerSize.
      dim3 blocks(gradOutput.ChannelCount());
      int ltype = mxnet::common::cuda::get_load_type(gradOutput.InnerSize() * sizeof(DType));
      MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
        constexpr int nvec = sizeof(LType) > sizeof(DType) ? sizeof(LType) / sizeof(DType) : 1;
        const index_t NHW_div_nvec = gradOutput.OuterSize() * gradOutput.InnerSize() / nvec;
        constexpr int threads = 512;
        FrozenBatchNormalizationBackwardKernel<threads, DType, AccReal, LType>
            <<<blocks, threads, 0, mshadow::Stream<gpu>::GetStream(s)>>>(input.dptr_,
                                                                         gradOutput.dptr_,
                                                                         gradInput.dptr_,
                                                                         tensors.gradWeight.dptr_,
                                                                         tensors.gradBias.dptr_,
                                                                         gamma,
                                                                         tensors.runningMean.dptr_,
                                                                         tensors.runningVar.dptr_,
                                                                         gradOutput.OuterSize(),
                                                                         gradOutput.InnerSize(),
                                                                         gradOutput.ChannelCount(),
                                                                         NHW_div_nvec,
                                                                         eps,
                                                                         flags_copy);
      });
    }
  }
  // Surface launch/execution errors from the kernels above.
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationBackward);
}
} // namespace cuda
} // namespace batchnorm
// Pack the execution context, batch-norm parameters, and per-output write
// requests into the single bit-flag word consumed by the batch-norm kernels.
// Bit meanings: training / fix-gamma / use-global-stats, plus one
// write-or-accumulate bit per output (data, gamma, beta).
template <typename xpu, typename DType, typename AccReal>
static inline uint32_t SetupFlags(const OpContext& ctx,
                                  const BatchNormParam& params,
                                  const std::vector<OpReqType>& req) {
  uint32_t flags = 0;
  if (ctx.is_train)
    flags |= IS_TRAINING_FLAG;
  if (params.fix_gamma)
    flags |= FIX_GAMMA_FLAG;
  if (params.use_global_stats)
    flags |= USE_GLOBAL_STATS_FLAG;
  // For each output: a writing request sets the write bit, kAddTo sets the
  // accumulate bit, anything else leaves both clear.
  const auto mark_req = [&flags, &req](int idx, uint32_t write_bit, uint32_t addto_bit) {
    if (IsBNWriting(req[idx])) {
      flags |= write_bit;
    } else if (req[idx] == kAddTo) {
      flags |= addto_bit;
    }
  };
  mark_req(batchnorm::kData, WRITE_DATA_FLAG, ADDTO_DATA_FLAG);
  mark_req(batchnorm::kGamma, WRITE_GAMMA_FLAG, ADDTO_GAMMA_FLAG);
  mark_req(batchnorm::kBeta, WRITE_BETA_FLAG, ADDTO_BETA_FLAG);
  return flags;
}
/*! \brief Forward batch-norm pass on GPU */
template <typename xpu, typename DType, typename AccReal>
void BatchNormForwardImpl(mshadow::Stream<gpu>* stream,
                          const OpContext& ctx,
                          const BatchNormParam& param_,
                          const std::vector<TBlob>& in_data,
                          const std::vector<OpReqType>& req,
                          const std::vector<TBlob>& out_data,
                          const std::vector<TBlob>& aux_states) {
  // Thin dispatcher: pack context/param/req into the kernel flag word and
  // forward everything to the CUDA implementation.
  batchnorm::cuda::BatchNormalizationUpdateOutput<DType, AccReal>(
      stream,
      ctx,
      param_,
      in_data,
      out_data,
      aux_states,
      SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
      param_.momentum,
      param_.eps);
  // Surface any pending asynchronous launch error immediately.
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoForward_gpu);
}
/*! \brief Backward batch-norm pass on GPU */
template <typename xpu, typename DType, typename AccReal>
void BatchNormBackwardImpl(mshadow::Stream<gpu>* stream,
                           const OpContext& ctx,
                           const BatchNormParam& param_,
                           const std::vector<TBlob>& out_grad,
                           const std::vector<TBlob>& in_data,
                           const std::vector<TBlob>& out_data,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& in_grad,
                           const std::vector<TBlob>& aux_states) {
  // Thin dispatcher: pack context/param/req into the kernel flag word and
  // forward everything to the CUDA implementation.
  batchnorm::cuda::BatchNormalizationBackward<DType, AccReal>(
      stream,
      ctx,
      param_,
      out_grad,
      in_data,
      out_data,
      in_grad,
      aux_states,
      SetupFlags<xpu, DType, AccReal>(ctx, param_, req),
      param_.momentum,
      param_.eps);
  // Surface any pending asynchronous launch error immediately.
  MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoBackward_gpu);
}
// GPU FCompute for the BatchNorm operator.
// inputs = {data, gamma, beta, moving_mean, moving_var}; the first three are
// regular inputs, the last two are auxiliary state.  Dispatches to CuDNN when
// supported, otherwise to the native CUDA implementation.
template <>
void BatchNormCompute<gpu>(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const std::vector<TBlob>& inputs,
                           const std::vector<OpReqType>& req,
                           const std::vector<TBlob>& outputs) {
  BatchNormParam param = nnvm::get<BatchNormParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 5U);
  std::vector<TBlob> in_data(inputs.begin(), inputs.begin() + 3);
  std::vector<TBlob> aux_states(inputs.begin() + 3, inputs.end());
  int dtype = inputs[0].type_flag_;
  mxnet::TShape shape = inputs[0].shape_;
  // Resolve a possibly-negative axis against the actual input rank.
  param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
  if (!param.use_global_stats && !param.cudnn_off &&
      CudnnBatchNormSupports(param, inputs[batchnorm::kData])) {
    CudnnBatchNormForward(param, ctx, inputs, req, outputs);
  } else {
    MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
      BatchNormForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
    })
  }
#else
  // Use the cached dtype for consistency with the CUDNN branch above and with
  // BatchNormGradCompute<gpu> (also avoids an unused-variable warning when
  // CUDNN is disabled).
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    BatchNormForward<gpu, DType, AccReal>(ctx, param, in_data, req, outputs, aux_states);
  });
#endif
}
// GPU FCompute for the BatchNorm backward operator.
// Takes 8 inputs: three gradient/output blobs followed by the five forward
// inputs -- the data blob therefore sits at index 3 + batchnorm::kData (see
// the CuDNN support check below).  Dispatches to CuDNN when supported,
// otherwise to the native CUDA implementation.
template <>
void BatchNormGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<TBlob>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 8U);
  BatchNormParam param = nnvm::get<BatchNormParam>(attrs.parsed);
  int dtype = inputs[0].type_flag_;
  mxnet::TShape shape = inputs[0].shape_;
  // Resolve a possibly-negative axis against the actual input rank.
  param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis);
#if MXNET_USE_CUDNN == 1
  if (!param.use_global_stats && !param.cudnn_off &&
      CudnnBatchNormSupports(param, inputs[3 + batchnorm::kData])) {
    CudnnBatchNormBackward(param, ctx, inputs, req, outputs);
  } else {
    MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
      BatchNormBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
    })
  }
#else
  MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, {
    BatchNormBackward<gpu, DType, AccReal>(ctx, param, inputs, req, outputs);
  });
#endif
}
// Register the GPU forward/backward implementations with NNVM.
NNVM_REGISTER_OP(BatchNorm).set_attr<FCompute>("FCompute<gpu>", BatchNormCompute<gpu>);
NNVM_REGISTER_OP(_backward_BatchNorm)
    .set_attr<FCompute>("FCompute<gpu>", BatchNormGradCompute<gpu>);
} // namespace op
} // namespace mxnet
|
2a30107bc758c71f9add02d40c3e9faf38260acf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Copy the per-pixel push/reserve buffer g_push_reser into the scratch copy
// s_push_reser, and have the first thread reset the global finish flag when
// the block counter has reached zero.  g_block_num is unused here.
__global__ void kernel_push_stochastic1( int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int *g_block_num, int width1)
{
// 2-D pixel coordinates of this thread; width1 is the row pitch in elements.
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
// NOTE(review): no bounds check on thid -- presumably the grid exactly covers
// the image; confirm at the launch site.
s_push_reser[thid] = g_push_reser[thid] ;
if( thid == 0 )
{
// Single-thread check: when no active blocks remain, clear the finish flag.
if((*g_count_blocks) == 0 )
(*g_finish) = false ;
}
} | 2a30107bc758c71f9add02d40c3e9faf38260acf.cu | #include "includes.h"
// Snapshot g_push_reser into the scratch buffer s_push_reser (one element per
// thread); thread 0 additionally clears the global finish flag once the block
// counter is zero.  g_block_num is not read by this kernel.
__global__ void kernel_push_stochastic1( int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int *g_block_num, int width1)
{
// Flatten the 2-D thread coordinate into a linear index (width1 = row pitch).
int x = __umul24( blockIdx.x, blockDim.x ) + threadIdx.x ;
int y = __umul24( blockIdx.y , blockDim.y ) + threadIdx.y ;
int thid = __umul24( y , width1 ) + x ;
// NOTE(review): thid is not range-checked -- assumes the launch grid matches
// the buffer extent exactly; verify at the call site.
s_push_reser[thid] = g_push_reser[thid] ;
if( thid == 0 )
{
if((*g_count_blocks) == 0 )
(*g_finish) = false ;
}
} |
a99b9456b826f71947d169016e1705674cef4513.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pairwise_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Per-pair pairwise (logistic) loss:
//   loss = log(1 + exp(p)) - s * p, where p = product[index] (feature dot
//   product) and s = 1 when similarity[index] > 0, else 0.
// For large positive p (>= threshold, when threshold >= 0) the exact value is
// replaced by its asymptote p * (1 - s), avoiding overflow of exp(p).
// count[] is set to 1 for every pair and summed later for averaging.
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const int num, const Dtype* similarity,
    const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    count[index] = Dtype(1.0);
    if((threshold >= 0) && (product[index] >= threshold)){
      // Saturated branch: log(1+exp(p)) ~= p for large p.
      loss_data[index] = product[index] * (1 - (similarity[index] > 0));
    }
    else{
      // exp_product holds exp(p), computed on the host side before launch.
      loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index];
    }
  }
}
// Forward pass: builds the pairwise similarity matrix from the labels, the
// pairwise feature dot products, and reduces the per-pair losses to a single
// averaged scalar in top[0].
// bottom[0]: features (outer_num_ x inner_num_); bottom[1]: labels
// (outer_num_ x label_dim_).  Scratch comes from pre-allocated layer blobs.
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  Dtype* similarity = pairwise_sim_.mutable_gpu_data();
  Dtype* dot_product = pairwise_sim_.mutable_gpu_diff();
  Dtype* exp_product = loss_.mutable_gpu_diff();   // reused by Backward_gpu
  Dtype* loss_data = loss_.mutable_gpu_data();
  Dtype* count = temp_.mutable_gpu_data();
  Dtype* label = bottom[1]->mutable_gpu_data();    // read-only here
  const Dtype* bottom_data = bottom[0]->gpu_data();
  int nthreads = outer_num_ * outer_num_;          // one thread per sample pair
  //calculate similarity matrix according to label
  caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
      Dtype(1.0), label, label, Dtype(0.0), similarity);
  //calculate dot_product and exp_product
  caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, inner_num_,
      Dtype(1.0), bottom_data, bottom_data, Dtype(0.0), dot_product);
  caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product);
  //calculate pairwise loss
  hipLaunchKernelGGL(( ForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity, exp_product,
      dot_product, threshold_, count, loss_data);
  // Average the per-pair losses (count sums to the number of pairs).
  Dtype loss, count_num;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  caffe_gpu_asum(nthreads, count, &count_num);
  loss /= (count_num > 0 ? count_num : Dtype(1));
  top[0]->mutable_cpu_data()[0] = loss;
}
// Per-pair gradient of the pairwise loss w.r.t. the dot product p:
//   d/dp [log(1+exp(p)) - s*p] = sigmoid(p) - s,
// written here as 1/(1 + 1/exp(p)) using the exp(p) cached by the forward
// pass, and scaled by 2 (each unordered pair appears twice in the matrix).
template <typename Dtype>
__global__ void BackwardGPU(const int nthreads, const int num,
    const Dtype* similarity, const Dtype* exp_product, Dtype* count, Dtype* diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    diff[index] = 2 * (
        1 / (1 + 1 / exp_product[index]) -
        (similarity[index] > 0)
        );
    count[index] = Dtype(1.0);   // every pair contributes to the average
  }
}
// Backward pass: propagates the pairwise-loss gradient to the feature input
// bottom[0] via bottom_diff = (loss_weight_/count) * diff * bottom_data,
// where diff holds the per-pair derivatives.  Labels (bottom[1]) are not
// differentiable.
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    Dtype* diff = temp_.mutable_gpu_data();
    Dtype* count = temp_.mutable_gpu_diff();
    // similarity and exp_product were produced by Forward_gpu.
    const Dtype* similarity = pairwise_sim_.gpu_data();
    const Dtype* exp_product = loss_.gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    int nthreads = outer_num_ * outer_num_;
    //calculate diff
    hipLaunchKernelGGL(( BackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity,
        exp_product, count, diff);
    //copy to bottom_diff
    Dtype count_num;
    caffe_gpu_asum(nthreads, count, &count_num);
    // Chain rule through the dot products: grad = diff * features, averaged.
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_,
        loss_weight_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data,
        Dtype(0.0), bottom_diff);
  }
}
// Explicit GPU instantiations of Forward_gpu/Backward_gpu for the supported Dtypes.
INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer);
} // namespace caffe
| a99b9456b826f71947d169016e1705674cef4513.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pairwise_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Elementwise pairwise loss over all sample pairs:
//   loss = log(1 + exp(p)) - s * p  with p = product[index] and
//   s = (similarity[index] > 0).  When threshold >= 0 and p >= threshold,
// the asymptote p * (1 - s) is used instead to keep exp(p) from overflowing.
// count[] is filled with 1 so a later asum yields the pair count.
template <typename Dtype>
__global__ void ForwardGPU(const int nthreads, const int num, const Dtype* similarity,
    const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    count[index] = Dtype(1.0);
    if((threshold >= 0) && (product[index] >= threshold)){
      loss_data[index] = product[index] * (1 - (similarity[index] > 0));
    }
    else{
      // exp_product caches exp(p) computed before the kernel launch.
      loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index];
    }
  }
}
// Forward pass: computes the label-similarity matrix, all pairwise feature
// dot products (and their exponentials), runs the per-pair loss kernel, and
// writes the averaged scalar loss into top[0].
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Scratch buffers borrowed from the layer's pre-allocated blobs; exp_product
  // (loss_'s diff) is reused again in Backward_gpu.
  Dtype* similarity = pairwise_sim_.mutable_gpu_data();
  Dtype* dot_product = pairwise_sim_.mutable_gpu_diff();
  Dtype* exp_product = loss_.mutable_gpu_diff();
  Dtype* loss_data = loss_.mutable_gpu_data();
  Dtype* count = temp_.mutable_gpu_data();
  Dtype* label = bottom[1]->mutable_gpu_data();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  int nthreads = outer_num_ * outer_num_;    // one thread per (i, j) pair
  //calculate similarity matrix according to label
  caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
      Dtype(1.0), label, label, Dtype(0.0), similarity);
  //calculate dot_product and exp_product
  caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, inner_num_,
      Dtype(1.0), bottom_data, bottom_data, Dtype(0.0), dot_product);
  caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product);
  //calculate pairwise loss
  ForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity, exp_product,
      dot_product, threshold_, count, loss_data);
  // Reduce: total loss over total pair count.
  Dtype loss, count_num;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  caffe_gpu_asum(nthreads, count, &count_num);
  loss /= (count_num > 0 ? count_num : Dtype(1));
  top[0]->mutable_cpu_data()[0] = loss;
}
// Gradient of the pairwise loss w.r.t. each dot product p:
//   sigmoid(p) - s, expressed as 1/(1 + 1/exp(p)) with the cached exp(p),
// doubled because each unordered pair occurs twice in the full matrix.
template <typename Dtype>
__global__ void BackwardGPU(const int nthreads, const int num,
    const Dtype* similarity, const Dtype* exp_product, Dtype* count, Dtype* diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    diff[index] = 2 * (
        1 / (1 + 1 / exp_product[index]) -
        (similarity[index] > 0)
        );
    count[index] = Dtype(1.0);   // summed later to normalize the gradient
  }
}
// Backward pass: feature gradient only (labels are not differentiable).
// bottom_diff = (loss_weight_ / count) * diff * bottom_data, where diff holds
// the per-pair derivatives produced by BackwardGPU.
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    Dtype* diff = temp_.mutable_gpu_data();
    Dtype* count = temp_.mutable_gpu_diff();
    // Produced by the preceding Forward_gpu call.
    const Dtype* similarity = pairwise_sim_.gpu_data();
    const Dtype* exp_product = loss_.gpu_diff();
    const Dtype* bottom_data = bottom[0]->gpu_data();
    int nthreads = outer_num_ * outer_num_;
    //calculate diff
    BackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity,
        exp_product, count, diff);
    //copy to bottom_diff
    Dtype count_num;
    caffe_gpu_asum(nthreads, count, &count_num);
    // Chain rule through the pairwise dot products, normalized by pair count.
    caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_,
        loss_weight_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data,
        Dtype(0.0), bottom_diff);
  }
}
// Explicit GPU instantiations of Forward_gpu/Backward_gpu for the supported Dtypes.
INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer);
} // namespace caffe
|
20a534641878903effc1edf8c454e7a40bacbee4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "real.h"
#include "constant.cuh"
#include "macros.h"
/*
* ======================= advection TB ====================
* Integrate forward (advection only) by one time step.
* ATMS 502 / CSE 566, Spring 2016
*
* Arguments:
*
* t1 real array values at current step
* t2 real array values at next step
* c real true speed of wave
* dx real grid spacing
* dt real time step
* iW,iE integers indices bounding array data
* nx integer number of grid points
* advection_type
* char if 'L', linear advection;
* otherwise, nonlinear
*/
//u = u[idx3D(level,row,col,nydim,nxdim+1)]
//v = v[idx3D(level,row,col,nydim+1,nxdim)]
//w = w[idx3D(level,row,col,nydim,nxdim)]
//t = t[idx3D(level,row,col,nydim,nxdim)]
//#include <stdio.h>
/*
 * advectionTB: one time step of z-direction advection of t using vertical
 * velocity w, writing the result to tTemp (all indexed via idx3D).
 *
 * Fix: the original placed __syncthreads() inside a loop whose condition
 * included the per-thread predicate (col <= iE && level <= kT), so threads
 * outside the physical domain skipped the barriers -- undefined behavior per
 * the CUDA spec (all threads of a block must reach every __syncthreads()).
 * The predicate is now hoisted: every thread runs the row loop and hits both
 * barriers, but only in-domain threads touch memory.  Results for in-domain
 * cells are unchanged.
 *
 * NOTE(review): assumes blockDim.x <= TILESIZEX and blockDim.y <= TILESIZEY
 * so the shared flux tile covers the block -- confirm at the launch site.
 */
__global__
void advectionTB(real *__restrict__ tTemp, const real *__restrict__ t, const real *w)
{
    const int col   = blockIdx.x*blockDim.x + threadIdx.x + iW;
    const int level = blockIdx.y*blockDim.y + threadIdx.y + kB;
    if (advection_type == 'p') {
        // flux[x][y]   : flux through the bottom face of this thread's cell;
        // flux[x][y+1] : top face, written by the neighbor thread above, or by
        //                this thread at the block/domain top boundary.
        __shared__ real flux[TILESIZEX][TILESIZEY+1];
        const bool active = (col <= iE && level <= kT);
        for (int row = jS; row <= jN; ++row) {
            real tR = 0, tRm1 = 0, wR = 0, wRp1 = 0;
            if (active) {
                tR   = t[idx3D(level,  row,col,nydim,nxdim)];
                tRm1 = t[idx3D(level-1,row,col,nydim,nxdim)];
                wR   = w[idx3D(level,  row,col,nydim,nxdim)];
                wRp1 = w[idx3D(level+1,row,col,nydim,nxdim)];
                // Upwind-biased flux through the bottom face (Courant number r).
                real r = fabs(wR*dt/dz);
                if (wR >= 0.0) {
                    flux[threadIdx.x][threadIdx.y] = r*(tRm1 + 0.25*(1-r)*( tR - t[idx3D(level-2,row,col,nydim,nxdim)]) );
                } else {
                    flux[threadIdx.x][threadIdx.y] = r*(-tR + 0.25*(1-r)*( t[idx3D(level+1,row,col,nydim,nxdim)] - tRm1 ));
                }
                // Top face: only at the block's top row or the domain top.
                if (threadIdx.y == blockDim.y-1 || level == kT) {
                    r = fabs(wRp1*dt/dz);
                    if (wRp1 >= 0.0) {
                        flux[threadIdx.x][threadIdx.y+1] = r*(tR + 0.25*(1-r)*( t[idx3D(level+1,row,col,nydim,nxdim)] - tRm1) );
                    } else {
                        flux[threadIdx.x][threadIdx.y+1] = r*(-t[idx3D(level+1,row,col,nydim,nxdim)] + 0.25*(1-r)*( t[idx3D(level+2,row,col,nydim,nxdim)] - tR ));
                    }
                }
            }
            __syncthreads();    // all fluxes written before any are read
            if (active) {
                // Flux divergence plus the compressibility correction term.
                tTemp[idx3D(level,row,col,nydim,nxdim)] = tR-(flux[threadIdx.x][threadIdx.y+1]-flux[threadIdx.x][threadIdx.y]) + dt/dz*tR*(wRp1-wR);
            }
            __syncthreads();    // protect flux from next iteration's writes
        } // end for //
    } // end if //
} // end of advectionTB() //
| 20a534641878903effc1edf8c454e7a40bacbee4.cu | #include "real.h"
#include "constant.cuh"
#include "macros.h"
/*
* ======================= advection TB ====================
* Integrate forward (advection only) by one time step.
* ATMS 502 / CSE 566, Spring 2016
*
* Arguments:
*
* t1 real array values at current step
* t2 real array values at next step
* c real true speed of wave
* dx real grid spacing
* dt real time step
* iW,iE integers indices bounding array data
* nx integer number of grid points
* advection_type
* char if 'L', linear advection;
* otherwise, nonlinear
*/
//u = u[idx3D(level,row,col,nydim,nxdim+1)]
//v = v[idx3D(level,row,col,nydim+1,nxdim)]
//w = w[idx3D(level,row,col,nydim,nxdim)]
//t = t[idx3D(level,row,col,nydim,nxdim)]
//#include <stdio.h>
/*
 * advectionTB: one time step of z-direction advection of t using vertical
 * velocity w, writing the result into tTemp (idx3D addressing throughout).
 *
 * Fix: __syncthreads() previously lived inside a loop whose condition carried
 * the per-thread predicate (col <= iE && level <= kT); threads outside the
 * domain never reached the barriers, which is undefined behavior (every
 * thread of the block must execute each __syncthreads()).  The predicate is
 * hoisted so all threads iterate and synchronize, while only in-domain
 * threads perform loads/stores.  In-domain results are unchanged.
 *
 * NOTE(review): assumes blockDim.x <= TILESIZEX and blockDim.y <= TILESIZEY
 * so the shared flux tile covers the block -- confirm at the launch site.
 */
__global__
void advectionTB(real *__restrict__ tTemp, const real *__restrict__ t, const real *w)
{
    const int col   = blockIdx.x*blockDim.x + threadIdx.x + iW;
    const int level = blockIdx.y*blockDim.y + threadIdx.y + kB;
    if (advection_type == 'p') {
        // flux[x][y]   : bottom-face flux of this thread's cell;
        // flux[x][y+1] : top face -- written by the thread above, or by this
        //                thread at the block's top row / domain top.
        __shared__ real flux[TILESIZEX][TILESIZEY+1];
        const bool active = (col <= iE && level <= kT);
        for (int row = jS; row <= jN; ++row) {
            real tR = 0, tRm1 = 0, wR = 0, wRp1 = 0;
            if (active) {
                tR   = t[idx3D(level,  row,col,nydim,nxdim)];
                tRm1 = t[idx3D(level-1,row,col,nydim,nxdim)];
                wR   = w[idx3D(level,  row,col,nydim,nxdim)];
                wRp1 = w[idx3D(level+1,row,col,nydim,nxdim)];
                // Upwind-biased bottom-face flux (r = local Courant number).
                real r = fabs(wR*dt/dz);
                if (wR >= 0.0) {
                    flux[threadIdx.x][threadIdx.y] = r*(tRm1 + 0.25*(1-r)*( tR - t[idx3D(level-2,row,col,nydim,nxdim)]) );
                } else {
                    flux[threadIdx.x][threadIdx.y] = r*(-tR + 0.25*(1-r)*( t[idx3D(level+1,row,col,nydim,nxdim)] - tRm1 ));
                }
                if (threadIdx.y == blockDim.y-1 || level == kT) {
                    r = fabs(wRp1*dt/dz);
                    if (wRp1 >= 0.0) {
                        flux[threadIdx.x][threadIdx.y+1] = r*(tR + 0.25*(1-r)*( t[idx3D(level+1,row,col,nydim,nxdim)] - tRm1) );
                    } else {
                        flux[threadIdx.x][threadIdx.y+1] = r*(-t[idx3D(level+1,row,col,nydim,nxdim)] + 0.25*(1-r)*( t[idx3D(level+2,row,col,nydim,nxdim)] - tR ));
                    }
                }
            }
            __syncthreads();    // all fluxes visible before any reads
            if (active) {
                // Flux divergence plus the compressibility correction term.
                tTemp[idx3D(level,row,col,nydim,nxdim)] = tR-(flux[threadIdx.x][threadIdx.y+1]-flux[threadIdx.x][threadIdx.y]) + dt/dz*tR*(wRp1-wR);
            }
            __syncthreads();    // keep next iteration's writes off current reads
        } // end for //
    } // end if //
} // end of advectionTB() //
|
cf8bd9a85c9964ea3005b81442c7fe470fc1148a.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| cf8bd9a85c9964ea3005b81442c7fe470fc1148a.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
f956738902e397e38f603c740f597bcddb9feeb6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : RTVD scheme does NOT support passive scalars !!
#endif
#include "CUFLU_Shared_FluUtility.cu"
#include "CUDA_ConstMemory.h"
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_RTVD
// Description : GPU fluid solver based on the relaxing TVD (RTVD) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in RTVD)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_FluidSolver_RTVD(
    real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
    real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
    real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
    const double g_Corner[][3],
    const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
    const real dt, const real _dh, const bool StoreFlux,
    const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
    const EoS_t EoS )
{
// Per-block scratch: one FLU_NXT-long row of the 5 conserved variables per
// thread-row, for the normal flux, auxiliary flux, final flux, and the
// left/right-moving flux used by the relaxing TVD sweep.
   __shared__ real s_cu [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
   __shared__ real s_cw [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
   __shared__ real s_flux [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
   __shared__ real s_RLflux[FLU_BLOCK_SIZE_Y][5][FLU_NXT];
   if ( XYZ )
   {
//    forward sweep x -> y -> z (XYZ argument 0/3/6 selects the direction);
//    the j/k ghost gaps grow after each sweep because only the interior stays
//    valid, and only the last sweep (FinalOut == true) writes g_Fluid_Out
      CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
                     s_cu, s_cw, s_flux, s_RLflux, false, 0, MinDens, MinPres, MinEint, &EoS );
      CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, 0,
                     s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
      CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
                     s_cu, s_cw, s_flux, s_RLflux, true, 6, MinDens, MinPres, MinEint, &EoS );
   }
   else
   {
//    backward sweep z -> y -> x (note the reversed XYZ arguments 6/3/0)
      CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
                     s_cu, s_cw, s_flux, s_RLflux, false, 6, MinDens, MinPres, MinEint, &EoS );
      CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, FLU_GHOST_SIZE,
                     s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
      CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
                     s_cu, s_cw, s_flux, s_RLflux, true, 0, MinDens, MinPres, MinEint, &EoS );
   }
} // FUNCTION : CUFLU_FluidSolver_RTVD
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the TVD scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k mya not be equal to z)
// s_cu : Shared memory array storing the normal flux
// s_cw : Shared memory array storing the auxiliary flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_RLflux : Shared memory array storing the left/right-moving flux
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output fluxes
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS )
{
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
const uint ip = i+1;
const uint im = i-1;
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real dt_half = (real)0.5*dt;
const real *Passive = NULL; // RTVD does not support passive scalars
bool RuleOut = false;
const bool CheckMinPres_Yes = true;
real _rho, vx, p, c, Temp, Fluid[5], Fluid_half[5];
int ID1, ID2, ID3, Comp[5], delta_k;
// set the order of component for update in different directions
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the TVD scheme
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// a. Evaluate the half-step values of fluid variables
//-----------------------------------------------------------------------------
// (a1). set variables defined in the center of cell
_rho = (real)1.0 / Fluid[0];
vx = _rho * Fluid[1];
p = Hydro_Con2Pres( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr, NULL, NULL,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid[1];
s_cw[ty][1][i] = Fluid[1]*vx + p;
s_cw[ty][2][i] = Fluid[2]*vx;
s_cw[ty][3][i] = Fluid[3]*vx;
s_cw[ty][4][i] = ( Fluid[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid[0];
s_cu[ty][1][i] = c*Fluid[1];
s_cu[ty][2][i] = c*Fluid[2];
s_cu[ty][3][i] = c*Fluid[3];
s_cu[ty][4][i] = c*Fluid[4];
__syncthreads();
// (a2). set flux defined in the right-hand surface of cell by the upwind scheme
if ( i < FLU_NXT-1 )
{
for (int v=0; v<5; v++)
s_flux[ty][v][i] = (real)0.5*( ( s_cu[ty][v][i ]+s_cw[ty][v][i ] ) -
( s_cu[ty][v][ip]-s_cw[ty][v][ip] ) );
}
__syncthreads();
// (a3). evaluate the intermidiate values (u_half)
// if ( i > 0 )
if ( i > 0 && i < FLU_NXT-1 )
{
for (int v=0; v<5; v++) Fluid_half[v] = Fluid[v] - _dh*dt_half*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid_half[0] = FMAX( Fluid_half[0], MinDens );
Fluid_half[4] = Hydro_CheckMinEintInEngy( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4],
MinEint, NULL_REAL );
}
// Evaluate the full-step values of fluid variables
//-----------------------------------------------------------------------------
// (b1). reset variables defined in the center of cell at the intermidate state
if ( i > 0 && i < FLU_NXT-1 )
{
_rho = (real)1.0 / Fluid_half[0];
vx = _rho * Fluid_half[1];
p = Hydro_Con2Pres( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr, NULL, NULL,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid_half[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid_half[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid_half[1];
s_cw[ty][1][i] = Fluid_half[1]*vx + p;
s_cw[ty][2][i] = Fluid_half[2]*vx;
s_cw[ty][3][i] = Fluid_half[3]*vx;
s_cw[ty][4][i] = ( Fluid_half[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid_half[0];
s_cu[ty][1][i] = c*Fluid_half[1];
s_cu[ty][2][i] = c*Fluid_half[2];
s_cu[ty][3][i] = c*Fluid_half[3];
s_cu[ty][4][i] = c*Fluid_half[4];
} // if ( i > 0 && i < FLU_NXT-1 )
// (b2). set the right-moving flux defined in the right-hand surface by the TVD scheme
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][i] + s_cw[ty][v][i] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] = s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][im] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] += Temp / ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][im] );
}
}
__syncthreads();
// (b3). set the left-moving flux defined in the left-hand surface by the TVD scheme, get the total flux
// if ( i < FLU_NXT-2 )
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][ip] - s_cw[ty][v][ip] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] -= s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][im]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][ip] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] -= Temp / ( s_RLflux[ty][v][im]-s_RLflux[ty][v][ip] );
}
}
__syncthreads();
// (b4). advance fluid by one full time-step
// if ( i > 2 )
// if ( i > 2 && i < FLU_NXT-3 )
if ( i > 2 && i < FLU_NXT-3 && RuleOut == false )
{
for (int v=0; v<5; v++) Fluid[v] -= _dh*dt*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid[0] = FMAX( Fluid[0], MinDens );
Fluid[4] = Hydro_CheckMinEintInEngy( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4],
MinEint, NULL_REAL );
// check negative density and energy
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[4], "energy" , ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density", ERROR_INFO, UNPHY_VERBOSE );
# endif
// store the updated data back to the global memory
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// (b5). save the flux required by the flux-correction operation
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ Comp[v] ][ 2];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT/2-1];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT - 4];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exist the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // FUNCTION : CUFLU_Advance
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
| f956738902e397e38f603c740f597bcddb9feeb6.cu | #include "Macro.h"
#include "CUFLU.h"
#if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
// check before compiling anything else
#if ( NCOMP_PASSIVE != 0 )
# error : RTVD scheme does NOT support passive scalars !!
#endif
#include "CUFLU_Shared_FluUtility.cu"
#include "CUDA_ConstMemory.h"
#define to1D1(z,y,x) ( __umul24(z, FLU_NXT*FLU_NXT) + __umul24(y, FLU_NXT) + x )
#define to1D2(z,y,x) ( __umul24(z-FLU_GHOST_SIZE, PS2*PS2) + __umul24(y-FLU_GHOST_SIZE, PS2) + x-FLU_GHOST_SIZE )
static __device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS );
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_FluidSolver_RTVD
// Description : GPU fluid solver based on the relaxing TVD (RTVD) scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The three-dimensional evolution is achieved by using the dimensional-split method
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// g_Corner : Global memory array storing the physical corner coordinates of each patch group (USELESS CURRENTLY)
// g_Pot_USG : Global memory array storing the input potential for UNSPLIT_GRAVITY (NOT SUPPORTED in RTVD)
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// XYZ : true : x->y->z ( forward sweep)
// false : z->y->x (backward sweep)
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__global__ void CUFLU_FluidSolver_RTVD(
real g_Fluid_In [][NCOMP_TOTAL][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][NCOMP_TOTAL][ CUBE(PS2) ],
real g_Flux [][9][NCOMP_TOTAL][ SQR(PS2) ],
const double g_Corner[][3],
const real g_Pot_USG[][ CUBE(USG_NXT_F) ],
const real dt, const real _dh, const bool StoreFlux,
const bool XYZ, const real MinDens, const real MinPres, const real MinEint,
const EoS_t EoS )
{
__shared__ real s_cu [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_cw [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_flux [FLU_BLOCK_SIZE_Y][5][FLU_NXT];
__shared__ real s_RLflux[FLU_BLOCK_SIZE_Y][5][FLU_NXT];
if ( XYZ )
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 0, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 6, MinDens, MinPres, MinEint, &EoS );
}
else
{
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, 0,
s_cu, s_cw, s_flux, s_RLflux, false, 6, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, 0, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, false, 3, MinDens, MinPres, MinEint, &EoS );
CUFLU_Advance( g_Fluid_In, g_Fluid_Out, g_Flux, dt, _dh, StoreFlux, FLU_GHOST_SIZE, FLU_GHOST_SIZE,
s_cu, s_cw, s_flux, s_RLflux, true, 0, MinDens, MinPres, MinEint, &EoS );
}
} // FUNCTION : CUFLU_FluidSolver_RTVD
//-------------------------------------------------------------------------------------------------------
// Function : CUFLU_Advance
// Description : GPU device function, which performs a one-dimensional sweep based on the TVD scheme
//
// Note : a. Prefix "g" for pointers pointing to the "Global" memory space
// Prefix "s" for pointers pointing to the "Shared" memory space
// b. The direction of the one dimensional sweep is determined by the input parameter "XYZ"
//
// Parameter : g_Fluid_In : Global memory array to store the input fluid variables
// g_Fluid_Out : Global memory array to store the output fluid variables
// g_Flux : Global memory array to store the output fluxes
// dt : Time interval to advance solution
// _dh : 1 / grid size
// StoreFlux : true --> store the coarse-fine fluxes
// j_gap : Number of useless grids in each side in the j direction (j may not be equal to y)
// k_gap : Number of useless grids in each side in the k direction (k mya not be equal to z)
// s_cu : Shared memory array storing the normal flux
// s_cw : Shared memory array storing the auxiliary flux
// s_flux : Shared memory array storing the final flux used to update the fluid variables
// s_RLflux : Shared memory array storing the left/right-moving flux
// XYZ : 0 : Update the solution in the x direction
// 3 : Update the solution in the y direction
// 6 : Update the solution in the z direction
// --> This parameter is also used to determine the place to store the output fluxes
// MinDens : Density floor
// MinPres : Pressure floor
// MinEint : Internal energy floor
// EoS : EoS object
//-------------------------------------------------------------------------------------------------------
__device__ void CUFLU_Advance( real g_Fluid_In [][5][ CUBE(FLU_NXT) ],
real g_Fluid_Out[][5][ CUBE(PS2) ],
real g_Flux[][9][5][ SQR(PS2) ],
const real dt, const real _dh, const bool StoreFlux,
const int j_gap, const int k_gap, real s_cu[][5][FLU_NXT],
real s_cw[][5][FLU_NXT], real s_flux[][5][FLU_NXT], real s_RLflux[][5][FLU_NXT],
const bool FinalOut, const int XYZ,
const real MinDens, const real MinPres, const real MinEint,
const EoS_t *EoS )
{
const uint bx = blockIdx.x;
const uint tx = threadIdx.x;
const uint ty = threadIdx.y;
const uint dj = blockDim.y;
const uint size_j = FLU_NXT - (j_gap<<1);
const uint size_k = FLU_NXT - (k_gap<<1);
const uint NColumn = __umul24( size_j, size_k );
const uint i = tx; // (i,j) the element in shared memory under evaluation
const uint ip = i+1;
const uint im = i-1;
uint j = j_gap + ty%size_j;
uint k = k_gap + ty/size_j;
uint Column0 = 0; // the total number of columns that have been updated
const uint j_end = FLU_NXT - j_gap;
const uint k_end = FLU_NXT - k_gap;
const real dt_half = (real)0.5*dt;
const real *Passive = NULL; // RTVD does not support passive scalars
bool RuleOut = false;
const bool CheckMinPres_Yes = true;
real _rho, vx, p, c, Temp, Fluid[5], Fluid_half[5];
int ID1, ID2, ID3, Comp[5], delta_k;
// set the order of component for update in different directions
switch ( XYZ )
{
case 0: Comp[0] = 0; Comp[1] = 1; Comp[2] = 2; Comp[3] = 3; Comp[4] = 4; break;
case 3: Comp[0] = 0; Comp[1] = 2; Comp[2] = 1; Comp[3] = 3; Comp[4] = 4; break;
case 6: Comp[0] = 0; Comp[1] = 3; Comp[2] = 2; Comp[3] = 1; Comp[4] = 4; break;
}
// start the TVD scheme
do
{
// determine the array indices for updating in different directions
switch ( XYZ )
{
case 0: ID1 = to1D1( k, j, i ); break;
case 3: ID1 = to1D1( k, i, j ); break;
case 6: ID1 = to1D1( i, k, j ); break;
}
// load data into per-thread registers
for (int v=0; v<5; v++) Fluid[v] = g_Fluid_In[bx][ Comp[v] ][ID1];
// a. Evaluate the half-step values of fluid variables
//-----------------------------------------------------------------------------
// (a1). set variables defined in the center of cell
_rho = (real)1.0 / Fluid[0];
vx = _rho * Fluid[1];
p = Hydro_Con2Pres( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr, NULL, NULL,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid[1];
s_cw[ty][1][i] = Fluid[1]*vx + p;
s_cw[ty][2][i] = Fluid[2]*vx;
s_cw[ty][3][i] = Fluid[3]*vx;
s_cw[ty][4][i] = ( Fluid[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid[0];
s_cu[ty][1][i] = c*Fluid[1];
s_cu[ty][2][i] = c*Fluid[2];
s_cu[ty][3][i] = c*Fluid[3];
s_cu[ty][4][i] = c*Fluid[4];
__syncthreads();
// (a2). set flux defined in the right-hand surface of cell by the upwind scheme
if ( i < FLU_NXT-1 )
{
for (int v=0; v<5; v++)
s_flux[ty][v][i] = (real)0.5*( ( s_cu[ty][v][i ]+s_cw[ty][v][i ] ) -
( s_cu[ty][v][ip]-s_cw[ty][v][ip] ) );
}
__syncthreads();
// (a3). evaluate the intermidiate values (u_half)
// if ( i > 0 )
if ( i > 0 && i < FLU_NXT-1 )
{
for (int v=0; v<5; v++) Fluid_half[v] = Fluid[v] - _dh*dt_half*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid_half[0] = FMAX( Fluid_half[0], MinDens );
Fluid_half[4] = Hydro_CheckMinEintInEngy( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4],
MinEint, NULL_REAL );
}
// Evaluate the full-step values of fluid variables
//-----------------------------------------------------------------------------
// (b1). reset variables defined in the center of cell at the intermidate state
if ( i > 0 && i < FLU_NXT-1 )
{
_rho = (real)1.0 / Fluid_half[0];
vx = _rho * Fluid_half[1];
p = Hydro_Con2Pres( Fluid_half[0], Fluid_half[1], Fluid_half[2], Fluid_half[3], Fluid_half[4], Passive,
CheckMinPres_Yes, MinPres, NULL_REAL, EoS->DensEint2Pres_FuncPtr, NULL, NULL,
EoS->AuxArrayDevPtr_Flt, EoS->AuxArrayDevPtr_Int, EoS->Table, NULL );
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &p , "pressure", ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid_half[0], "density" , ERROR_INFO, UNPHY_VERBOSE );
# endif
c = FABS( vx ) + SQRT( EoS->DensPres2CSqr_FuncPtr( Fluid_half[0], p, Passive, EoS->AuxArrayDevPtr_Flt,
EoS->AuxArrayDevPtr_Int, EoS->Table ) );
s_cw[ty][0][i] = Fluid_half[1];
s_cw[ty][1][i] = Fluid_half[1]*vx + p;
s_cw[ty][2][i] = Fluid_half[2]*vx;
s_cw[ty][3][i] = Fluid_half[3]*vx;
s_cw[ty][4][i] = ( Fluid_half[4]+p )*vx;
s_cu[ty][0][i] = c*Fluid_half[0];
s_cu[ty][1][i] = c*Fluid_half[1];
s_cu[ty][2][i] = c*Fluid_half[2];
s_cu[ty][3][i] = c*Fluid_half[3];
s_cu[ty][4][i] = c*Fluid_half[4];
} // if ( i > 0 && i < FLU_NXT-1 )
// (b2). set the right-moving flux defined in the right-hand surface by the TVD scheme
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][i] + s_cw[ty][v][i] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] = s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][im] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] += Temp / ( s_RLflux[ty][v][ip]-s_RLflux[ty][v][im] );
}
}
__syncthreads();
// (b3). set the left-moving flux defined in the left-hand surface by the TVD scheme, get the total flux
// if ( i < FLU_NXT-2 )
if ( i > 0 && i < FLU_NXT-2 )
{
for (int v=0; v<5; v++) s_RLflux[ty][v][i] = (real)0.5*( s_cu[ty][v][ip] - s_cw[ty][v][ip] );
}
__syncthreads();
if ( i > 1 && i < FLU_NXT-3 )
{
for (int v=0; v<5; v++)
{
s_flux[ty][v][i] -= s_RLflux[ty][v][i];
Temp = ( s_RLflux[ty][v][im]-s_RLflux[ty][v][i ] )
* ( s_RLflux[ty][v][i ]-s_RLflux[ty][v][ip] );
if ( Temp > (real)0.0 )
s_flux[ty][v][i] -= Temp / ( s_RLflux[ty][v][im]-s_RLflux[ty][v][ip] );
}
}
__syncthreads();
// (b4). advance fluid by one full time-step
// if ( i > 2 )
// if ( i > 2 && i < FLU_NXT-3 )
if ( i > 2 && i < FLU_NXT-3 && RuleOut == false )
{
for (int v=0; v<5; v++) Fluid[v] -= _dh*dt*( s_flux[ty][v][i] - s_flux[ty][v][im] ) ;
// apply density and internal energy floors
Fluid[0] = FMAX( Fluid[0], MinDens );
Fluid[4] = Hydro_CheckMinEintInEngy( Fluid[0], Fluid[1], Fluid[2], Fluid[3], Fluid[4],
MinEint, NULL_REAL );
// check negative density and energy
# ifdef CHECK_UNPHYSICAL_IN_FLUID
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[4], "energy" , ERROR_INFO, UNPHY_VERBOSE );
Hydro_CheckUnphysical( UNPHY_MODE_SING, &Fluid[0], "density", ERROR_INFO, UNPHY_VERBOSE );
# endif
// store the updated data back to the global memory
if ( FinalOut )
{
switch ( XYZ )
{
case 0: ID2 = to1D2( k, j, i ); break;
case 3: ID2 = to1D2( k, i, j ); break;
case 6: ID2 = to1D2( i, k, j ); break;
}
for (int v=0; v<5; v++) g_Fluid_Out[bx][ Comp[v] ][ID2] = Fluid[v];
}
else
for (int v=0; v<5; v++) g_Fluid_In [bx][ Comp[v] ][ID1] = Fluid[v];
}
// (b5). save the flux required by the flux-correction operation
if ( StoreFlux )
if ( k >= FLU_GHOST_SIZE && k < FLU_NXT-FLU_GHOST_SIZE )
if ( j >= FLU_GHOST_SIZE && j < FLU_NXT-FLU_GHOST_SIZE )
if ( i == 0 )
{
ID3 = __umul24( k-FLU_GHOST_SIZE, PS2 ) + (j-FLU_GHOST_SIZE);
for (int v=0; v<5; v++)
{
g_Flux[bx][XYZ+0][v][ID3] = s_flux[ty][ Comp[v] ][ 2];
g_Flux[bx][XYZ+1][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT/2-1];
g_Flux[bx][XYZ+2][v][ID3] = s_flux[ty][ Comp[v] ][FLU_NXT - 4];
}
}
// reset the target array indices
j += dj;
if ( j >= j_end )
{
delta_k = ( j - j_end )/size_j + 1;
k += delta_k;
j -= __umul24( size_j, delta_k );
}
Column0 += dj;
// if the index k exceeds the maximum allowed value --> reset (j,k) to harmless values and wait for other
// threads (all threads must exist the while loop "at the same time", otherwise __syncthreads will fail !!)
if ( k >= k_end )
{
j = 0;
k = 0;
RuleOut = true;
}
__syncthreads();
}
while ( Column0 < NColumn );
} // FUNCTION : CUFLU_Advance
#endif // #if ( defined GPU && MODEL == HYDRO && FLU_SCHEME == RTVD )
|
6a946b05c6cf24161d52a34a9262aca04134cd16.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
// We manually overload abs because std::abs does not work with thrust::complex types and ROCm.
template<typename scalar_t>
__host__ __device__ static inline scalar_t abs_wrapper(scalar_t v) {
return std::abs(v);
}
__host__ __device__ static inline uint8_t abs_wrapper(uint8_t v) {
return v;
}
__host__ __device__ static inline bool abs_wrapper(bool v) {
return v;
}
void abs_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, iter.dtype(), "abs_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "abs_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return abs_wrapper(a);
});
});
});
}
void logical_not_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_not_cuda", [&]() {
using self_t = scalar_t;
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(self_t a) -> scalar_t { return static_cast<scalar_t>(!a); });
});
});
}
void neg_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "neg_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "neg_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
});
}
void sign_kernel_cuda(TensorIterator& iter){
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a){
return a;
});
} else {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "sign_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t zero = scalar_t(0);
return (zero < a) - (a < zero);
});
});
}
}
REGISTER_DISPATCH(abs_stub, &abs_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
}} // namespace at::native
| 6a946b05c6cf24161d52a34a9262aca04134cd16.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
// We manually overload abs because std::abs does not work with thrust::complex types and ROCm.
template<typename scalar_t>
__host__ __device__ static inline scalar_t abs_wrapper(scalar_t v) {
return std::abs(v);
}
__host__ __device__ static inline uint8_t abs_wrapper(uint8_t v) {
return v;
}
__host__ __device__ static inline bool abs_wrapper(bool v) {
return v;
}
void abs_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool, iter.dtype(), "abs_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "abs_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return abs_wrapper(a);
});
});
});
}
void logical_not_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(1), "logical_not_cuda", [&]() {
using self_t = scalar_t;
AT_DISPATCH_ALL_TYPES_AND2(kBool, kHalf, iter.dtype(0), "logical_not_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(self_t a) -> scalar_t { return static_cast<scalar_t>(!a); });
});
});
}
void neg_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "neg_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "neg_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return -a;
});
});
});
}
void sign_kernel_cuda(TensorIterator& iter){
if (iter.dtype() == ScalarType::Bool) {
gpu_kernel(iter, []GPU_LAMBDA(bool a){
return a;
});
} else {
AT_DISPATCH_ALL_TYPES_AND(ScalarType::Half, iter.dtype(), "sign_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
scalar_t zero = scalar_t(0);
return (zero < a) - (a < zero);
});
});
}
}
REGISTER_DISPATCH(abs_stub, &abs_kernel_cuda);
REGISTER_DISPATCH(logical_not_stub, &logical_not_kernel_cuda);
REGISTER_DISPATCH(neg_stub, &neg_kernel_cuda);
REGISTER_DISPATCH(sign_stub, &sign_kernel_cuda);
}} // namespace at::native
|
b6499f159eb3d0dd2e6a15f04caf2404372aa864.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2018, Fabian Prada
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the Johns Hopkins University nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "gpu-solver.h"
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "cutil.h"
#include "cutil_inline_runtime.h"
#include <stdio.h>
#include "file-io.h"
#define BLOCK_SIZE 16
#define PI2 6.2831853
//typedef float2 fcomplex;
//float * rhs_h = 0;
//float * rhs_d = 0;
//float * symmetric_extended_rhs_d = 0;
//float * symmetric_extended_rhs_h = 0;
//fcomplex * symmetric_extended_rhs_fft_h = 0;
//fcomplex * symmetric_extended_rhs_fft_d = 0;
//unsigned int array_size = 0;
__global__ void ExtendedSymmetricRHS(float * rhs_d,float * extended_rhs_d,const unsigned int imgWidth,const unsigned int imgHeight)
{
unsigned int tx =blockIdx.x*BLOCK_SIZE + threadIdx.x;
unsigned int ty =blockIdx.y*BLOCK_SIZE + threadIdx.y;
if( tx < imgWidth && ty < imgHeight ){
float value = rhs_d[tx + imgWidth*ty];
extended_rhs_d[tx + 2*imgWidth*ty] = value;
extended_rhs_d[ 2*imgWidth - 1 - tx + 2*imgWidth*ty] = value;
extended_rhs_d[tx + 2*imgWidth*(2*imgHeight - 1 - ty)] = value;
extended_rhs_d[2*imgWidth - 1 - tx + 2*imgWidth*(2*imgHeight - 1 - ty)] = value;
}
}
__global__ void ExtractRHS(float * rhs_d,float * extended_rhs_d,const unsigned int imgWidth,const unsigned int imgHeight)
{
unsigned int tx =blockIdx.x*BLOCK_SIZE + threadIdx.x;
unsigned int ty =blockIdx.y*BLOCK_SIZE + threadIdx.y;
if( tx < imgWidth && ty < imgHeight ){
rhs_d[tx + imgWidth*ty]=extended_rhs_d[tx + 2*imgWidth*ty];
}
}
__global__ void SpectralModulation(float2 * extended_rhs_fft_d,const unsigned int extended_imgWidth,const unsigned int extended_imgHeight, const float finv_imgWidth,const float finv_imgHeight, const float dc)
{
unsigned int tx =blockIdx.x*BLOCK_SIZE + threadIdx.x;
unsigned int ty =blockIdx.y*BLOCK_SIZE + threadIdx.y;
if( tx < (extended_imgWidth/2 + 1) && ty < extended_imgHeight ){
if(tx + ty > 0)
{
float attenuation_factor = (finv_imgWidth*finv_imgHeight)/ (4.f -2.f*( cos(PI2*(float)tx*finv_imgWidth) + cos(PI2*((float)ty)*finv_imgHeight)));
extended_rhs_fft_d[tx + ty*(extended_imgWidth/2 + 1) ].x *= attenuation_factor;
extended_rhs_fft_d[tx + ty*(extended_imgWidth/2 + 1) ].y *= attenuation_factor;
}
else
{
extended_rhs_fft_d[0].x = 4.f*dc*(finv_imgWidth*finv_imgHeight);
extended_rhs_fft_d[0].y = 0.f;
}
}
}
void GPUSolvers::FFTLaplaceSolver(float * rhs_d, float * symmetric_extended_rhs_d, float2 * symmetric_extended_rhs_fft_d, hipfftHandle & fftPlanFwd, hipfftHandle & fftPlanInv, const unsigned int array_width,const unsigned int array_height ,const float dc)
{
unsigned int extended_array_width = 2*array_width;
unsigned int extended_array_height = 2*array_height;
//if(array_size!=array_width*array_height){
// array_size = array_width*array_height;
//
// if(symmetric_extended_rhs_d!=0){
// hipFree(symmetric_extended_rhs_d);
// }
// hipMalloc((void**)&symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float));
// if(symmetric_extended_rhs_fft_d!=0){
// hipFree(symmetric_extended_rhs_fft_d);
// }
// hipMalloc((void**)&symmetric_extended_rhs_fft_d,(extended_array_height )*(extended_array_width/ 2 + 1) *sizeof(fcomplex));
//}
unsigned int blocksW = (unsigned int) ceilf( (float) array_width / (float) BLOCK_SIZE );
unsigned int blocksH = (unsigned int) ceilf( (float) array_height /(float) BLOCK_SIZE );
dim3 gridDim( blocksW, blocksH, 1 );
dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
//hipEvent_t start;
//hipEventCreate(&start);
//hipEvent_t stop;
//hipEventCreate(&stop);
//hipEventRecord(start, NULL);
hipLaunchKernelGGL(( ExtendedSymmetricRHS), dim3(gridDim), dim3(blockDim) , 0, 0, rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//hipfftHandle fftPlanFwd, fftPlanInv;
// //printf("...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
// cufftSafeCall( hipfftPlan2d(&fftPlanFwd, extended_array_height, extended_array_width, HIPFFT_R2C) );
// cufftSafeCall( hipfftPlan2d(&fftPlanInv, extended_array_height, extended_array_width, HIPFFT_C2R) );
cufftSafeCall( hipfftExecR2C(fftPlanFwd, (hipfftReal *)symmetric_extended_rhs_d, (hipfftComplex *)symmetric_extended_rhs_fft_d) );
unsigned int ext_blocksW = (unsigned int) ceilf( (float) (extended_array_width / 2 + 1) / (float) BLOCK_SIZE );
unsigned int ext_blocksH = (unsigned int) ceilf( (float) extended_array_height /(float) BLOCK_SIZE );
dim3 ext_gridDim( ext_blocksW, ext_blocksH, 1 );
hipLaunchKernelGGL(( SpectralModulation), dim3(ext_gridDim), dim3(blockDim) , 0, 0, symmetric_extended_rhs_fft_d,extended_array_width,extended_array_height, 1.f/(float)extended_array_width,1.f/(float)extended_array_height,dc);
cufftSafeCall( hipfftExecC2R(fftPlanInv, (hipfftComplex *)symmetric_extended_rhs_fft_d, (hipfftReal *)symmetric_extended_rhs_d) );
hipLaunchKernelGGL(( ExtractRHS), dim3(gridDim), dim3(blockDim) , 0, 0, rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//hipEventRecord(stop, NULL);
//hipEventSynchronize(stop);
// float msecTotal = 0.0f;
// hipEventElapsedTime(&msecTotal, start, stop);
//printf("Time= %.5f msec \n",msecTotal);
}
//void GPUSolvers::FFTLaplaceSolver() // PASSED TEST
//{
// int array_height = 359;
// int array_width = 400;
// const float dc = 57694.f;
//
// unsigned int extended_array_width = 2*array_width;
// unsigned int extended_array_height = 2*array_height;
//
// if(symmetric_extended_rhs_d_size!=array_width*array_height){
// if(rhs_d!=0){
// hipFree(rhs_d);
// }
// else{
// hipMalloc((void**)&rhs_d,array_width*array_height*sizeof(float));
// }
//
// if(symmetric_extended_rhs_d!=0){
// hipFree(symmetric_extended_rhs_d);
// }
// else{
// hipMalloc((void**)&symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float));
// }
// if(symmetric_extended_rhs_h!=0){
// delete symmetric_extended_rhs_h;
// }
// else{
// symmetric_extended_rhs_h = new float[extended_array_width*extended_array_height*sizeof(float)];
// }
// if(symmetric_extended_rhs_fft_d!=0){
// hipFree(symmetric_extended_rhs_fft_d);
// }
// else{
// hipMalloc((void**)&symmetric_extended_rhs_fft_d,(extended_array_height )*(extended_array_width/ 2 + 1) *sizeof(fcomplex));
// }
// if(symmetric_extended_rhs_fft_h!=0){
// delete symmetric_extended_rhs_fft_h;
// }
// else{
// symmetric_extended_rhs_fft_h = new fcomplex[(extended_array_height)*(extended_array_width/ 2 + 1) *sizeof(fcomplex)];
// }
// if(rhs_h!=0){
// delete rhs_h;
// }
// else{
// rhs_h = new float[array_width*array_height*sizeof(float)];
// }
// }
//
// FileIO::readInputdf("input.txt",rhs_h,array_height*array_width);
//
// printf("input [0] = %g \n",rhs_h[0]);
// printf("input [1] = %g \n",rhs_h[1]);
// printf("input [width - 1] = %g \n",rhs_h[array_width-1]);
// printf("input [width] = %g \n",rhs_h[array_width]);
//
// hipMemcpy(rhs_d,rhs_h,array_width*array_height*sizeof(float),hipMemcpyHostToDevice);
//
// unsigned int blocksW = (unsigned int) ceilf( (float) array_width / (float) BLOCK_SIZE );
// unsigned int blocksH = (unsigned int) ceilf( (float) array_height /(float) BLOCK_SIZE );
// dim3 gridDim( blocksW, blocksH, 1 );
// dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
//
//hipLaunchKernelGGL(( ExtendedSymmetricRHS), dim3(gridDim), dim3(blockDim) , 0, 0, rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//
// hipMemcpy(symmetric_extended_rhs_h,symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float),hipMemcpyDeviceToHost);
//
// printf("extended [0] = %g \n",symmetric_extended_rhs_h[0]);
// printf("extended [1] = %g \n",symmetric_extended_rhs_h[1]);
// printf("extended [array_width-2] = %g \n",symmetric_extended_rhs_h[array_width-2]);
// printf("extended [array_width-1] = %g \n",symmetric_extended_rhs_h[array_width-1]);
// printf("extended [array_width] = %g \n",symmetric_extended_rhs_h[array_width]);
// printf("extended [array_width+1] = %g \n",symmetric_extended_rhs_h[array_width+1]);
// printf("extended [array_width+2] = %g \n",symmetric_extended_rhs_h[array_width+2]);
// printf("extended [extended_array_width-1] = %g \n",symmetric_extended_rhs_h[extended_array_width-1]);
// printf("extended [extended_array_width] = %g \n",symmetric_extended_rhs_h[extended_array_width]);
//
// hipfftHandle fftPlanFwd, fftPlanInv;
// //printf("...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
// cufftSafeCall( hipfftPlan2d(&fftPlanFwd, extended_array_height, extended_array_width, HIPFFT_R2C) );
// cufftSafeCall( hipfftPlan2d(&fftPlanInv, extended_array_height, extended_array_width, HIPFFT_C2R) );
// cufftSafeCall( hipfftExecR2C(fftPlanFwd, (hipfftReal *)symmetric_extended_rhs_d, (hipfftComplex *)symmetric_extended_rhs_fft_d) );
//
// hipMemcpy(symmetric_extended_rhs_fft_h,symmetric_extended_rhs_fft_d,(extended_array_height)*(extended_array_width / 2 + 1) *sizeof(fcomplex),hipMemcpyDeviceToHost);
//
// printf("extended fft [0] = (%g,%g) \n",symmetric_extended_rhs_fft_h[0].x,symmetric_extended_rhs_fft_h[0].y);
// printf("extended fft [1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[1].x,symmetric_extended_rhs_fft_h[1].y);
// printf("extended fft [array_width-2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-2].x,symmetric_extended_rhs_fft_h[array_width-2].y);
// printf("extended fft [array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-1].x,symmetric_extended_rhs_fft_h[array_width-1].y);
// printf("extended fft [array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width].x,symmetric_extended_rhs_fft_h[array_width].y);
// printf("extended fft [array_width+1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+1].x,symmetric_extended_rhs_fft_h[array_width+1].y);
// printf("extended fft [array_width+2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+2].x,symmetric_extended_rhs_fft_h[array_width+2].y);
// printf("extended fft [extended_array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width-1].x,symmetric_extended_rhs_fft_h[extended_array_width-1].y);
// printf("extended fft [extended_array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width].x,symmetric_extended_rhs_fft_h[extended_array_width].y);
//
//
// unsigned int ext_blocksW = (unsigned int) ceilf( (float) (extended_array_width / 2 + 1) / (float) BLOCK_SIZE );
// unsigned int ext_blocksH = (unsigned int) ceilf( (float) extended_array_height /(float) BLOCK_SIZE );
// dim3 ext_gridDim( ext_blocksW, ext_blocksH, 1 );
//
// //hipEvent_t start;
// //hipEventCreate(&start);
// //hipEvent_t stop;
// //hipEventCreate(&stop);
// //hipEventRecord(start, NULL);
//
//hipLaunchKernelGGL(( SpectralModulation), dim3(ext_gridDim), dim3(blockDim) , 0, 0, symmetric_extended_rhs_fft_d,extended_array_width,extended_array_height, 1.f/(float)extended_array_width,1.f/(float)extended_array_height,dc);
//
// hipMemcpy(symmetric_extended_rhs_fft_h,symmetric_extended_rhs_fft_d,(extended_array_height)*(extended_array_width / 2 + 1) *sizeof(fcomplex),hipMemcpyDeviceToHost);
//
// printf("extended modulated fft [0] = (%g,%g) \n",symmetric_extended_rhs_fft_h[0].x,symmetric_extended_rhs_fft_h[0].y);
// printf("extended modulated fft [1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[1].x,symmetric_extended_rhs_fft_h[1].y);
// printf("extended modulated fft [array_width-2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-2].x,symmetric_extended_rhs_fft_h[array_width-2].y);
// printf("extended modulated fft [array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-1].x,symmetric_extended_rhs_fft_h[array_width-1].y);
// printf("extended modulated fft [array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width].x,symmetric_extended_rhs_fft_h[array_width].y);
// printf("extended modulated fft [array_width+1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+1].x,symmetric_extended_rhs_fft_h[array_width+1].y);
// printf("extended modulated fft [array_width+2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+2].x,symmetric_extended_rhs_fft_h[array_width+2].y);
// printf("extended modulated fft [extended_array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width-1].x,symmetric_extended_rhs_fft_h[extended_array_width-1].y);
// printf("extended modulated fft [extended_array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width].x,symmetric_extended_rhs_fft_h[extended_array_width].y);
//
//
// //hipEventRecord(stop, NULL);
// //hipEventSynchronize(stop);
// // float msecTotal = 0.0f;
// // hipEventElapsedTime(&msecTotal, start, stop);
// //printf("Time= %.5f msec \n",msecTotal);
//
//
// cufftSafeCall( hipfftExecC2R(fftPlanInv, (hipfftComplex *)symmetric_extended_rhs_fft_d, (hipfftReal *)symmetric_extended_rhs_d) );
// hipMemcpy(symmetric_extended_rhs_h,symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float),hipMemcpyDeviceToHost);
//
// printf("extended [0] = %g \n",symmetric_extended_rhs_h[0]);
// printf("extended [1] = %g \n",symmetric_extended_rhs_h[1]);
// printf("extended [array_width-2] = %g \n",symmetric_extended_rhs_h[array_width-2]);
// printf("extended [array_width-1] = %g \n",symmetric_extended_rhs_h[array_width-1]);
// printf("extended [array_width] = %g \n",symmetric_extended_rhs_h[array_width]);
// printf("extended [array_width+1] = %g \n",symmetric_extended_rhs_h[array_width+1]);
// printf("extended [array_width+2] = %g \n",symmetric_extended_rhs_h[array_width+2]);
// printf("extended [extended_array_width-1] = %g \n",symmetric_extended_rhs_h[extended_array_width-1]);
// printf("extended [extended_array_width] = %g \n",symmetric_extended_rhs_h[extended_array_width]);
//
//hipLaunchKernelGGL(( ExtractRHS), dim3(gridDim), dim3(blockDim) , 0, 0, rhs_d, symmetric_extended_rhs_d, array_width, array_height);
// hipMemcpy(rhs_h,rhs_d,array_width*array_height*sizeof(float),hipMemcpyDeviceToHost);
//
// printf("res [0] = %g \n",rhs_h[0]);
// printf("res [1] = %g \n",rhs_h[1]);
// printf("res [array_width-2] = %g \n",rhs_h[array_width-2]);
// printf("res [array_width-1] = %g \n",rhs_h[array_width-1]);
// printf("res [array_width] = %g \n",rhs_h[array_width]);
// printf("res [array_width+1] = %g \n",rhs_h[array_width+1]);
// printf("res [array_width+2] = %g \n",rhs_h[array_width+2]);
//
// FileIO::writeOutputff("output.txt",rhs_h,array_height*array_width);
//}
//void GPUSolvers::FFTLaplaceSolver(float * rhs_d, const unsigned int array_width,const unsigned int array_height)
//{
// unsigned int extended_array_width = 2*array_width;
// unsigned int extended_array_height = 2*array_height;
//
// if(symmetric_extended_rhs_d_size!=array_width*array_height){
// if(symmetric_extended_rhs_d!=0){
// hipFree(symmetric_extended_rhs_d);
// }
// else{
// hipMalloc((void**)&symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float));
// }
// if(symmetric_extended_rhs_fft_d!=0){
// hipFree(symmetric_extended_rhs_fft_d);
// }
// else{
// hipMalloc((void**)&symmetric_extended_rhs_fft_d,extended_array_height*(extended_array_width / 2 + 1) *sizeof(fcomplex));
// }
// }
//
//
//
// hipMemcpy(rhs_h,rhs_d,array_width*array_height*sizeof(float),hipMemcpyDeviceToHost);
//
// printf("Entry 0 = %f", rhs_h[0]);
//
// unsigned int blocksW = (unsigned int) ceilf( (float) array_width / (float) BLOCK_SIZE );
// unsigned int blocksH = (unsigned int) ceilf( (float) array_height /(float) BLOCK_SIZE );
// dim3 gridDim( blocksW, blocksH, 1 );
// dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
//
//hipLaunchKernelGGL(( ExtendedSymmetricRHS), dim3(gridDim), dim3(blockDim) , 0, 0, rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//
// hipfftHandle fftPlanFwd, fftPlanInv;
// //printf("...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
// cufftSafeCall( hipfftPlan2d(&fftPlanFwd, extended_array_height, extended_array_width, HIPFFT_R2C) );
// cufftSafeCall( hipfftPlan2d(&fftPlanInv, extended_array_height, extended_array_width, HIPFFT_C2R) );
// cufftSafeCall( hipfftExecR2C(fftPlanFwd, (hipfftReal *)symmetric_extended_rhs_d, (hipfftComplex *)symmetric_extended_rhs_fft_d) );
//
// unsigned int ext_blocksW = (unsigned int) ceilf( (float) (extended_array_width / 2 + 1) / (float) BLOCK_SIZE );
// unsigned int ext_blocksH = (unsigned int) ceilf( (float) extended_array_height /(float) BLOCK_SIZE );
// dim3 ext_gridDim( ext_blocksW, ext_blocksH, 1 );
//
// //hipEvent_t start;
// //hipEventCreate(&start);
// //hipEvent_t stop;
// //hipEventCreate(&stop);
// //hipEventRecord(start, NULL);
//
// SpectralInversion<<< ext_gridDim, blockDim >>>(symmetric_extended_rhs_fft_d,extended_array_width,extended_array_height, 1.f/(float)extended_array_width,1.f/(float)extended_array_height);
//
// //hipEventRecord(stop, NULL);
// //hipEventSynchronize(stop);
// // float msecTotal = 0.0f;
// // hipEventElapsedTime(&msecTotal, start, stop);
// //printf("Time= %.5f msec \n",msecTotal);
//
//
// cufftSafeCall( hipfftExecC2R(fftPlanInv, (hipfftComplex *)symmetric_extended_rhs_fft_d, (hipfftReal *)symmetric_extended_rhs_d) );
//
// hipMemcpy(symmetric_extended_rhs_fft_h,symmetric_extended_rhs_fft_d,extended_array_height*(extended_array_width / 2 + 1) *sizeof(fcomplex),hipMemcpyDeviceToHost);
//
//hipLaunchKernelGGL(( ExtractRHS), dim3(gridDim), dim3(blockDim) , 0, 0, rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//}
| b6499f159eb3d0dd2e6a15f04caf2404372aa864.cu | /*
Copyright (c) 2018, Fabian Prada
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the Johns Hopkins University nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include "gpu-solver.h"
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include "cutil.h"
#include "cutil_inline_runtime.h"
#include <stdio.h>
#include "file-io.h"
#define BLOCK_SIZE 16
#define PI2 6.2831853
//typedef float2 fcomplex;
//float * rhs_h = 0;
//float * rhs_d = 0;
//float * symmetric_extended_rhs_d = 0;
//float * symmetric_extended_rhs_h = 0;
//fcomplex * symmetric_extended_rhs_fft_h = 0;
//fcomplex * symmetric_extended_rhs_fft_d = 0;
//unsigned int array_size = 0;
__global__ void ExtendedSymmetricRHS(float * rhs_d,float * extended_rhs_d,const unsigned int imgWidth,const unsigned int imgHeight)
{
unsigned int tx =blockIdx.x*BLOCK_SIZE + threadIdx.x;
unsigned int ty =blockIdx.y*BLOCK_SIZE + threadIdx.y;
if( tx < imgWidth && ty < imgHeight ){
float value = rhs_d[tx + imgWidth*ty];
extended_rhs_d[tx + 2*imgWidth*ty] = value;
extended_rhs_d[ 2*imgWidth - 1 - tx + 2*imgWidth*ty] = value;
extended_rhs_d[tx + 2*imgWidth*(2*imgHeight - 1 - ty)] = value;
extended_rhs_d[2*imgWidth - 1 - tx + 2*imgWidth*(2*imgHeight - 1 - ty)] = value;
}
}
__global__ void ExtractRHS(float * rhs_d,float * extended_rhs_d,const unsigned int imgWidth,const unsigned int imgHeight)
{
unsigned int tx =blockIdx.x*BLOCK_SIZE + threadIdx.x;
unsigned int ty =blockIdx.y*BLOCK_SIZE + threadIdx.y;
if( tx < imgWidth && ty < imgHeight ){
rhs_d[tx + imgWidth*ty]=extended_rhs_d[tx + 2*imgWidth*ty];
}
}
__global__ void SpectralModulation(float2 * extended_rhs_fft_d,const unsigned int extended_imgWidth,const unsigned int extended_imgHeight, const float finv_imgWidth,const float finv_imgHeight, const float dc)
{
unsigned int tx =blockIdx.x*BLOCK_SIZE + threadIdx.x;
unsigned int ty =blockIdx.y*BLOCK_SIZE + threadIdx.y;
if( tx < (extended_imgWidth/2 + 1) && ty < extended_imgHeight ){
if(tx + ty > 0)
{
float attenuation_factor = (finv_imgWidth*finv_imgHeight)/ (4.f -2.f*( cos(PI2*(float)tx*finv_imgWidth) + cos(PI2*((float)ty)*finv_imgHeight)));
extended_rhs_fft_d[tx + ty*(extended_imgWidth/2 + 1) ].x *= attenuation_factor;
extended_rhs_fft_d[tx + ty*(extended_imgWidth/2 + 1) ].y *= attenuation_factor;
}
else
{
extended_rhs_fft_d[0].x = 4.f*dc*(finv_imgWidth*finv_imgHeight);
extended_rhs_fft_d[0].y = 0.f;
}
}
}
void GPUSolvers::FFTLaplaceSolver(float * rhs_d, float * symmetric_extended_rhs_d, float2 * symmetric_extended_rhs_fft_d, cufftHandle & fftPlanFwd, cufftHandle & fftPlanInv, const unsigned int array_width,const unsigned int array_height ,const float dc)
{
unsigned int extended_array_width = 2*array_width;
unsigned int extended_array_height = 2*array_height;
//if(array_size!=array_width*array_height){
// array_size = array_width*array_height;
//
// if(symmetric_extended_rhs_d!=0){
// cudaFree(symmetric_extended_rhs_d);
// }
// cudaMalloc((void**)&symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float));
// if(symmetric_extended_rhs_fft_d!=0){
// cudaFree(symmetric_extended_rhs_fft_d);
// }
// cudaMalloc((void**)&symmetric_extended_rhs_fft_d,(extended_array_height )*(extended_array_width/ 2 + 1) *sizeof(fcomplex));
//}
unsigned int blocksW = (unsigned int) ceilf( (float) array_width / (float) BLOCK_SIZE );
unsigned int blocksH = (unsigned int) ceilf( (float) array_height /(float) BLOCK_SIZE );
dim3 gridDim( blocksW, blocksH, 1 );
dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
//cudaEvent_t start;
//cudaEventCreate(&start);
//cudaEvent_t stop;
//cudaEventCreate(&stop);
//cudaEventRecord(start, NULL);
ExtendedSymmetricRHS<<< gridDim, blockDim >>>( rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//cufftHandle fftPlanFwd, fftPlanInv;
// //printf("...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
// cufftSafeCall( cufftPlan2d(&fftPlanFwd, extended_array_height, extended_array_width, CUFFT_R2C) );
// cufftSafeCall( cufftPlan2d(&fftPlanInv, extended_array_height, extended_array_width, CUFFT_C2R) );
cufftSafeCall( cufftExecR2C(fftPlanFwd, (cufftReal *)symmetric_extended_rhs_d, (cufftComplex *)symmetric_extended_rhs_fft_d) );
unsigned int ext_blocksW = (unsigned int) ceilf( (float) (extended_array_width / 2 + 1) / (float) BLOCK_SIZE );
unsigned int ext_blocksH = (unsigned int) ceilf( (float) extended_array_height /(float) BLOCK_SIZE );
dim3 ext_gridDim( ext_blocksW, ext_blocksH, 1 );
SpectralModulation<<< ext_gridDim, blockDim >>>(symmetric_extended_rhs_fft_d,extended_array_width,extended_array_height, 1.f/(float)extended_array_width,1.f/(float)extended_array_height,dc);
cufftSafeCall( cufftExecC2R(fftPlanInv, (cufftComplex *)symmetric_extended_rhs_fft_d, (cufftReal *)symmetric_extended_rhs_d) );
ExtractRHS<<< gridDim, blockDim >>>( rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//cudaEventRecord(stop, NULL);
//cudaEventSynchronize(stop);
// float msecTotal = 0.0f;
// cudaEventElapsedTime(&msecTotal, start, stop);
//printf("Time= %.5f msec \n",msecTotal);
}
//void GPUSolvers::FFTLaplaceSolver() // PASSED TEST
//{
// int array_height = 359;
// int array_width = 400;
// const float dc = 57694.f;
//
// unsigned int extended_array_width = 2*array_width;
// unsigned int extended_array_height = 2*array_height;
//
// if(symmetric_extended_rhs_d_size!=array_width*array_height){
// if(rhs_d!=0){
// cudaFree(rhs_d);
// }
// else{
// cudaMalloc((void**)&rhs_d,array_width*array_height*sizeof(float));
// }
//
// if(symmetric_extended_rhs_d!=0){
// cudaFree(symmetric_extended_rhs_d);
// }
// else{
// cudaMalloc((void**)&symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float));
// }
// if(symmetric_extended_rhs_h!=0){
// delete symmetric_extended_rhs_h;
// }
// else{
// symmetric_extended_rhs_h = new float[extended_array_width*extended_array_height*sizeof(float)];
// }
// if(symmetric_extended_rhs_fft_d!=0){
// cudaFree(symmetric_extended_rhs_fft_d);
// }
// else{
// cudaMalloc((void**)&symmetric_extended_rhs_fft_d,(extended_array_height )*(extended_array_width/ 2 + 1) *sizeof(fcomplex));
// }
// if(symmetric_extended_rhs_fft_h!=0){
// delete symmetric_extended_rhs_fft_h;
// }
// else{
// symmetric_extended_rhs_fft_h = new fcomplex[(extended_array_height)*(extended_array_width/ 2 + 1) *sizeof(fcomplex)];
// }
// if(rhs_h!=0){
// delete rhs_h;
// }
// else{
// rhs_h = new float[array_width*array_height*sizeof(float)];
// }
// }
//
// FileIO::readInputdf("input.txt",rhs_h,array_height*array_width);
//
// printf("input [0] = %g \n",rhs_h[0]);
// printf("input [1] = %g \n",rhs_h[1]);
// printf("input [width - 1] = %g \n",rhs_h[array_width-1]);
// printf("input [width] = %g \n",rhs_h[array_width]);
//
// cudaMemcpy(rhs_d,rhs_h,array_width*array_height*sizeof(float),cudaMemcpyHostToDevice);
//
// unsigned int blocksW = (unsigned int) ceilf( (float) array_width / (float) BLOCK_SIZE );
// unsigned int blocksH = (unsigned int) ceilf( (float) array_height /(float) BLOCK_SIZE );
// dim3 gridDim( blocksW, blocksH, 1 );
// dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
//
// ExtendedSymmetricRHS<<< gridDim, blockDim >>>( rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//
// cudaMemcpy(symmetric_extended_rhs_h,symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float),cudaMemcpyDeviceToHost);
//
// printf("extended [0] = %g \n",symmetric_extended_rhs_h[0]);
// printf("extended [1] = %g \n",symmetric_extended_rhs_h[1]);
// printf("extended [array_width-2] = %g \n",symmetric_extended_rhs_h[array_width-2]);
// printf("extended [array_width-1] = %g \n",symmetric_extended_rhs_h[array_width-1]);
// printf("extended [array_width] = %g \n",symmetric_extended_rhs_h[array_width]);
// printf("extended [array_width+1] = %g \n",symmetric_extended_rhs_h[array_width+1]);
// printf("extended [array_width+2] = %g \n",symmetric_extended_rhs_h[array_width+2]);
// printf("extended [extended_array_width-1] = %g \n",symmetric_extended_rhs_h[extended_array_width-1]);
// printf("extended [extended_array_width] = %g \n",symmetric_extended_rhs_h[extended_array_width]);
//
// cufftHandle fftPlanFwd, fftPlanInv;
// //printf("...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
// cufftSafeCall( cufftPlan2d(&fftPlanFwd, extended_array_height, extended_array_width, CUFFT_R2C) );
// cufftSafeCall( cufftPlan2d(&fftPlanInv, extended_array_height, extended_array_width, CUFFT_C2R) );
// cufftSafeCall( cufftExecR2C(fftPlanFwd, (cufftReal *)symmetric_extended_rhs_d, (cufftComplex *)symmetric_extended_rhs_fft_d) );
//
// cudaMemcpy(symmetric_extended_rhs_fft_h,symmetric_extended_rhs_fft_d,(extended_array_height)*(extended_array_width / 2 + 1) *sizeof(fcomplex),cudaMemcpyDeviceToHost);
//
// printf("extended fft [0] = (%g,%g) \n",symmetric_extended_rhs_fft_h[0].x,symmetric_extended_rhs_fft_h[0].y);
// printf("extended fft [1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[1].x,symmetric_extended_rhs_fft_h[1].y);
// printf("extended fft [array_width-2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-2].x,symmetric_extended_rhs_fft_h[array_width-2].y);
// printf("extended fft [array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-1].x,symmetric_extended_rhs_fft_h[array_width-1].y);
// printf("extended fft [array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width].x,symmetric_extended_rhs_fft_h[array_width].y);
// printf("extended fft [array_width+1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+1].x,symmetric_extended_rhs_fft_h[array_width+1].y);
// printf("extended fft [array_width+2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+2].x,symmetric_extended_rhs_fft_h[array_width+2].y);
// printf("extended fft [extended_array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width-1].x,symmetric_extended_rhs_fft_h[extended_array_width-1].y);
// printf("extended fft [extended_array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width].x,symmetric_extended_rhs_fft_h[extended_array_width].y);
//
//
// unsigned int ext_blocksW = (unsigned int) ceilf( (float) (extended_array_width / 2 + 1) / (float) BLOCK_SIZE );
// unsigned int ext_blocksH = (unsigned int) ceilf( (float) extended_array_height /(float) BLOCK_SIZE );
// dim3 ext_gridDim( ext_blocksW, ext_blocksH, 1 );
//
// //cudaEvent_t start;
// //cudaEventCreate(&start);
// //cudaEvent_t stop;
// //cudaEventCreate(&stop);
// //cudaEventRecord(start, NULL);
//
// SpectralModulation<<< ext_gridDim, blockDim >>>(symmetric_extended_rhs_fft_d,extended_array_width,extended_array_height, 1.f/(float)extended_array_width,1.f/(float)extended_array_height,dc);
//
// cudaMemcpy(symmetric_extended_rhs_fft_h,symmetric_extended_rhs_fft_d,(extended_array_height)*(extended_array_width / 2 + 1) *sizeof(fcomplex),cudaMemcpyDeviceToHost);
//
// printf("extended modulated fft [0] = (%g,%g) \n",symmetric_extended_rhs_fft_h[0].x,symmetric_extended_rhs_fft_h[0].y);
// printf("extended modulated fft [1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[1].x,symmetric_extended_rhs_fft_h[1].y);
// printf("extended modulated fft [array_width-2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-2].x,symmetric_extended_rhs_fft_h[array_width-2].y);
// printf("extended modulated fft [array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width-1].x,symmetric_extended_rhs_fft_h[array_width-1].y);
// printf("extended modulated fft [array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width].x,symmetric_extended_rhs_fft_h[array_width].y);
// printf("extended modulated fft [array_width+1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+1].x,symmetric_extended_rhs_fft_h[array_width+1].y);
// printf("extended modulated fft [array_width+2] = (%g,%g) \n",symmetric_extended_rhs_fft_h[array_width+2].x,symmetric_extended_rhs_fft_h[array_width+2].y);
// printf("extended modulated fft [extended_array_width-1] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width-1].x,symmetric_extended_rhs_fft_h[extended_array_width-1].y);
// printf("extended modulated fft [extended_array_width] = (%g,%g) \n",symmetric_extended_rhs_fft_h[extended_array_width].x,symmetric_extended_rhs_fft_h[extended_array_width].y);
//
//
// //cudaEventRecord(stop, NULL);
// //cudaEventSynchronize(stop);
// // float msecTotal = 0.0f;
// // cudaEventElapsedTime(&msecTotal, start, stop);
// //printf("Time= %.5f msec \n",msecTotal);
//
//
// cufftSafeCall( cufftExecC2R(fftPlanInv, (cufftComplex *)symmetric_extended_rhs_fft_d, (cufftReal *)symmetric_extended_rhs_d) );
// cudaMemcpy(symmetric_extended_rhs_h,symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float),cudaMemcpyDeviceToHost);
//
// printf("extended [0] = %g \n",symmetric_extended_rhs_h[0]);
// printf("extended [1] = %g \n",symmetric_extended_rhs_h[1]);
// printf("extended [array_width-2] = %g \n",symmetric_extended_rhs_h[array_width-2]);
// printf("extended [array_width-1] = %g \n",symmetric_extended_rhs_h[array_width-1]);
// printf("extended [array_width] = %g \n",symmetric_extended_rhs_h[array_width]);
// printf("extended [array_width+1] = %g \n",symmetric_extended_rhs_h[array_width+1]);
// printf("extended [array_width+2] = %g \n",symmetric_extended_rhs_h[array_width+2]);
// printf("extended [extended_array_width-1] = %g \n",symmetric_extended_rhs_h[extended_array_width-1]);
// printf("extended [extended_array_width] = %g \n",symmetric_extended_rhs_h[extended_array_width]);
//
// ExtractRHS<<< gridDim, blockDim >>>( rhs_d, symmetric_extended_rhs_d, array_width, array_height);
// cudaMemcpy(rhs_h,rhs_d,array_width*array_height*sizeof(float),cudaMemcpyDeviceToHost);
//
// printf("res [0] = %g \n",rhs_h[0]);
// printf("res [1] = %g \n",rhs_h[1]);
// printf("res [array_width-2] = %g \n",rhs_h[array_width-2]);
// printf("res [array_width-1] = %g \n",rhs_h[array_width-1]);
// printf("res [array_width] = %g \n",rhs_h[array_width]);
// printf("res [array_width+1] = %g \n",rhs_h[array_width+1]);
// printf("res [array_width+2] = %g \n",rhs_h[array_width+2]);
//
// FileIO::writeOutputff("output.txt",rhs_h,array_height*array_width);
//}
//void GPUSolvers::FFTLaplaceSolver(float * rhs_d, const unsigned int array_width,const unsigned int array_height)
//{
// unsigned int extended_array_width = 2*array_width;
// unsigned int extended_array_height = 2*array_height;
//
// if(symmetric_extended_rhs_d_size!=array_width*array_height){
// if(symmetric_extended_rhs_d!=0){
// cudaFree(symmetric_extended_rhs_d);
// }
// else{
// cudaMalloc((void**)&symmetric_extended_rhs_d,extended_array_width*extended_array_height*sizeof(float));
// }
// if(symmetric_extended_rhs_fft_d!=0){
// cudaFree(symmetric_extended_rhs_fft_d);
// }
// else{
// cudaMalloc((void**)&symmetric_extended_rhs_fft_d,extended_array_height*(extended_array_width / 2 + 1) *sizeof(fcomplex));
// }
// }
//
//
//
// cudaMemcpy(rhs_h,rhs_d,array_width*array_height*sizeof(float),cudaMemcpyDeviceToHost);
//
// printf("Entry 0 = %f", rhs_h[0]);
//
// unsigned int blocksW = (unsigned int) ceilf( (float) array_width / (float) BLOCK_SIZE );
// unsigned int blocksH = (unsigned int) ceilf( (float) array_height /(float) BLOCK_SIZE );
// dim3 gridDim( blocksW, blocksH, 1 );
// dim3 blockDim( BLOCK_SIZE, BLOCK_SIZE, 1 );
//
// ExtendedSymmetricRHS<<< gridDim, blockDim >>>( rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//
// cufftHandle fftPlanFwd, fftPlanInv;
// //printf("...creating R2C & C2R FFT plans for %i x %i\n", fftH, fftW);
// cufftSafeCall( cufftPlan2d(&fftPlanFwd, extended_array_height, extended_array_width, CUFFT_R2C) );
// cufftSafeCall( cufftPlan2d(&fftPlanInv, extended_array_height, extended_array_width, CUFFT_C2R) );
// cufftSafeCall( cufftExecR2C(fftPlanFwd, (cufftReal *)symmetric_extended_rhs_d, (cufftComplex *)symmetric_extended_rhs_fft_d) );
//
// unsigned int ext_blocksW = (unsigned int) ceilf( (float) (extended_array_width / 2 + 1) / (float) BLOCK_SIZE );
// unsigned int ext_blocksH = (unsigned int) ceilf( (float) extended_array_height /(float) BLOCK_SIZE );
// dim3 ext_gridDim( ext_blocksW, ext_blocksH, 1 );
//
// //cudaEvent_t start;
// //cudaEventCreate(&start);
// //cudaEvent_t stop;
// //cudaEventCreate(&stop);
// //cudaEventRecord(start, NULL);
//
// SpectralInversion<<< ext_gridDim, blockDim >>>(symmetric_extended_rhs_fft_d,extended_array_width,extended_array_height, 1.f/(float)extended_array_width,1.f/(float)extended_array_height);
//
// //cudaEventRecord(stop, NULL);
// //cudaEventSynchronize(stop);
// // float msecTotal = 0.0f;
// // cudaEventElapsedTime(&msecTotal, start, stop);
// //printf("Time= %.5f msec \n",msecTotal);
//
//
// cufftSafeCall( cufftExecC2R(fftPlanInv, (cufftComplex *)symmetric_extended_rhs_fft_d, (cufftReal *)symmetric_extended_rhs_d) );
//
// cudaMemcpy(symmetric_extended_rhs_fft_h,symmetric_extended_rhs_fft_d,extended_array_height*(extended_array_width / 2 + 1) *sizeof(fcomplex),cudaMemcpyDeviceToHost);
//
// ExtractRHS<<< gridDim, blockDim >>>( rhs_d, symmetric_extended_rhs_d, array_width, array_height);
//}
|
420cd73d7385cc5c4b9345a3582318f0af31499e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sstream>
#include <iostream>
#include <string>
#include <cub/util_ptx.cuh>
#include <cub/warp/warp_reduce.cuh>
#include "../common/utilities.hpp"
#include "../common/utilities_cuda.cuh"
#include "../common/configuration.hpp"
#include "dDNKG.hpp"
extern const std::string process_ident;
namespace dDNKG {
// Integrator and model parameters, uploaded once from the host:
// dt_c[8]/dt_d[8] are the drift/kick coefficients of the 8-stage 6th-order
// Yoshida splitting (see the loops below: 7 kicks, 8th drift applied last),
// mp2[2048] holds the per-site linear coefficient (constant-memory copy,
// usable only while chain_length <= 2048), and beta is the quartic coupling
// used by rhs_DNKG.
__constant__ double dt_c[8], dt_d[8], mp2[2048], beta;
// Force acting on one lattice site of the disordered nonlinear Klein-Gordon
// chain: negated on-site term (linear coefficient mp2 plus quartic beta term)
// with the two nearest neighbours coupled back in.
__device__ double rhs_DNKG(double left, double center, double right, double mp2) {
	const double onsite = center * (mp2 + beta * center * center);
	return -(onsite - left - right);
}
/*
* Move entire chain in a warp. Compare with DNKG_FPUT_Toda.cu . Only difference is that the mp2 vector is held
* in shared memory in order to have a comparable register usage to the non-disorder version.
*/
// One warp integrates one full copy of the chain: each lane keeps
// elements_in_thread consecutive sites in registers (phi has two extra halo
// slots), and boundary sites travel between adjacent lanes via warp shuffles.
// The disorder vector mp2 is shared by all copies, so only the first warp of
// the block stages it into shared memory.
__global__ void
move_chain_in_warp(double2 *planar, const double *mp2_gmem, uint32_t steps_grouping, uint16_t copies) {
//compile-time
constexpr int elements_in_thread = optimized_chain_length / 32 + !!(optimized_chain_length % 32),
full_lanes = optimized_chain_length % 32 ?: 32;
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x,
my_copy = idx / 32, lane = idx % 32, lane_left = (lane + 31) % 32, lane_right = (lane + 1) % 32;
// lanes past full_lanes hold one element fewer when 32 does not divide the length
bool full_lane = lane < full_lanes;
if (my_copy >= copies) return;
//offset copy
planar += my_copy * optimized_chain_length;
double phi[elements_in_thread + 2], pi[elements_in_thread];
__shared__ double mp2_shmem[elements_in_thread][32];
// zero the possibly-unused trailing slot of partial lanes
phi[elements_in_thread] = 0, pi[elements_in_thread - 1] = 0;
if (threadIdx.x < 32) mp2_shmem[elements_in_thread - 1][lane] = 0;
//offset previous threads
uint32_t thread_offset = full_lane ? lane * elements_in_thread : full_lanes * elements_in_thread +
(lane - full_lanes) * (elements_in_thread - 1);
planar += thread_offset;
mp2_gmem += thread_offset;
// load this lane's slice of the chain (and of mp2, first warp only)
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread - 1; i_0++, i++) {
auto pair = planar[i_0];
phi[i] = pair.x, pi[i_0] = pair.y;
if (threadIdx.x < 32) mp2_shmem[i_0][lane] = mp2_gmem[i_0];
}
if (full_lane) {
auto pair = planar[elements_in_thread - 1];
phi[elements_in_thread] = pair.x, pi[elements_in_thread - 1] = pair.y;
if (threadIdx.x < 32) mp2_shmem[elements_in_thread - 1][lane] = mp2_gmem[elements_in_thread - 1];
}
__syncthreads();
for (uint32_t i = 0; i < steps_grouping; i++) {
//XXX this unroll may need to be tweaked
#pragma unroll (elements_in_thread > 4 ? 1 : 7)
for (int k = 0; k < 7; k++) {
double dt_c_k = dt_c[k];
if (i && !k)
dt_c_k *= 2; //merge last and first evolution of phi variable, since pi is not update in 8th steps of 6th order Yoshida
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread; i_0++, i++)
phi[i] += dt_c_k * pi[i_0];
//communicate nearest neighbours to adjacent threads
double communicate = full_lane ? phi[elements_in_thread] : phi[elements_in_thread - 1];
phi[0] = cub::ShuffleIndex < 32 > (communicate, lane_left, 0xFFFFFFFF);
phi[elements_in_thread + 1] = cub::ShuffleIndex < 32 > (phi[1], lane_right, 0xFFFFFFFF);
if (!full_lane) phi[elements_in_thread] = phi[elements_in_thread + 1];
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread; i_0++, i++)
pi[i_0] += dt_d[k] * rhs_DNKG(phi[i - 1], phi[i], phi[i + 1], mp2_shmem[i_0][lane]);
}
}
// trailing 8th drift stage, then store the slice back to global memory
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread - 1; i_0++, i++) {
phi[i] += dt_c[7] * pi[i_0];
planar[i_0] = double2{phi[i], pi[i_0]};
}
if (full_lane) {
phi[elements_in_thread] += dt_c[7] * pi[elements_in_thread - 1];
planar[elements_in_thread - 1] = double2{phi[elements_in_thread], pi[elements_in_thread - 1]};
}
}
/*
* Move entire chain in a thread. Compare with DNKG_FPUT_Toda.cu .
* Since this is used for chain_length < 32, it is safe to use the constant memory version of mp2.
*/
// One thread integrates one full copy of a short chain held entirely in
// registers; chain_length is a template parameter so the loops fully unroll.
// Periodic boundary conditions: site 0 couples to site chain_length-1.
template<uint16_t chain_length>
__global__ void move_chain_in_thread(double2 *planar, uint32_t steps_grouping, uint16_t copies) {
uint32_t my_copy = blockIdx.x * blockDim.x + threadIdx.x;
if (my_copy >= copies) return;
//offset copy
planar += my_copy * chain_length;
double2 pairs[chain_length];
#pragma unroll
for (uint16_t i = 0; i < chain_length; i++) pairs[i] = planar[i];
for (uint32_t i = 0; i < steps_grouping; i++) {
//XXX this unroll may need to be tweaked
#pragma unroll (chain_length > 4 ? 1 : chain_length)
for (int k = 0; k < 7; k++) {
double dt_c_k = dt_c[k];
if (i && !k)
dt_c_k *= 2; //merge last and first evolution of phi variable, since pi is not update in 8th steps of 6th order Yoshida
// drift: advance all coordinates (.x) with the momenta (.y)
#pragma unroll
for (uint16_t i = 0; i < chain_length; i++)
pairs[i].x += dt_c_k * pairs[i].y;
// kick: boundary sites wrap around, bulk sites use both neighbours
pairs[0].y += dt_d[k] * rhs_DNKG(pairs[chain_length - 1].x, pairs[0].x, pairs[1].x, mp2[0]);
#pragma unroll
for (uint16_t i = 1; i < chain_length - 1; i++)
pairs[i].y += dt_d[k] * rhs_DNKG(pairs[i - 1].x, pairs[i].x, pairs[i + 1].x, mp2[i]);
pairs[chain_length - 1].y += dt_d[k] *
rhs_DNKG(pairs[chain_length - 2].x, pairs[chain_length - 1].x, pairs[0].x,
mp2[chain_length - 1]);
}
}
// trailing 8th drift stage, then write the chain back
#pragma unroll
for (uint16_t i = 0; i < chain_length; i++) {
pairs[i].x += dt_c[7] * pairs[i].y;
planar[i] = pairs[i];
}
}
//some machinery to get the compiler to create all the versions of move_planar, and to get the right one at runtime
namespace {
using thread_kinfo = kernel_info<decltype(&move_chain_in_thread<0>)>;
// Compile-time recursion instantiates move_chain_in_thread for every length
// in [2, 32); get() walks the chain at runtime until the template parameter
// matches the requested length and returns that instantiation's kernel_info.
template<int chain_length = 2>
struct thread_kernel_resolver {
static const thread_kinfo &get(int chain_length_required) {
if (chain_length != chain_length_required)
return thread_kernel_resolver<chain_length + 1>::get(chain_length_required);
static auto kinfo = make_kernel_info(move_chain_in_thread<chain_length>);
return kinfo;
}
};
// Recursion terminator: lengths >= 32 must never be dispatched here.
template<>
struct thread_kernel_resolver<32> {
static const thread_kinfo &get(int) { throw std::logic_error("Shouldn't be here"); }
};
}
/*
* Move in split format. Compare with DNKG_FPUT_Toda.cu . The mp2 argument may alias the constant
* memory array if the constant buffer is large enough to hold all the linear parameter values,
* otherwise it resides in global memory.
*/
// Split (structure-of-arrays) layout: site i of copy c lives at
// phi/pi[i * shard_copies + c], so one thread sweeps one copy with coalesced
// accesses. The sweep updates phi[i] in place and writes pi[i-1] only once
// both of its neighbours are final; sites 0 and 1 and pi[0] stay in registers
// until the end.
__global__ void move_split(double *phi, double *pi, const double *mp2, uint32_t chainlen, uint32_t shard_copies,
uint32_t steps_grouping) {
uint32_t chain_idx_0 = blockIdx.x * blockDim.x + threadIdx.x, chain_idx_last =
chain_idx_0 + (chainlen - 1) * shard_copies;
if (chain_idx_0 >= shard_copies) return;
//these values are handled outside of the inner loop, can be saved in registers to avoid memory access
double phi0 = phi[chain_idx_0], phi1 = phi[chain_idx_0 + shard_copies], pi0 = pi[chain_idx_0];
for (uint32_t i = 0; i < steps_grouping; i++) {
for (int k = 0; k < 7; k++) {
double dt_c_k = dt_c[k];
if (i && !k)
dt_c_k *= 2; //merge last and first evolution of phi variable, since pi is not update in 8th steps of 6th order Yoshida
double previous_pi = pi[chain_idx_0 + shard_copies]; //pi[1]
phi0 += dt_c_k * pi0;
phi1 += dt_c_k * previous_pi;
// rolling window of the last two updated coordinates (stencil inputs)
double last_updated_phi[]{phi0, phi1};
//this appears to be already unrolled by the compiler
auto mp2_i = mp2 + 1;
for (uint32_t i = chain_idx_0 + 2 * shard_copies; i <= chain_idx_last; i += shard_copies, mp2_i++) {
double current_pi = pi[i];
double current_updated_phi = (phi[i] += dt_c_k * current_pi);
pi[i - shard_copies] = previous_pi + dt_d[k] * rhs_DNKG(last_updated_phi[0],
last_updated_phi[1],
current_updated_phi,
*mp2_i);
previous_pi = current_pi;
last_updated_phi[0] = last_updated_phi[1];
last_updated_phi[1] = current_updated_phi;
}
// wrap-around sites: last site couples to phi0, site 0 to the last site
pi[chain_idx_last] =
previous_pi + dt_d[k] * rhs_DNKG(last_updated_phi[0], last_updated_phi[1], phi0, *mp2_i);
pi0 += dt_d[k] * rhs_DNKG(last_updated_phi[1], phi0, phi1, *mp2);
}
}
// trailing 8th drift stage, then flush the register-resident sites
phi0 += dt_c[7] * pi0;
phi1 += dt_c[7] * pi[chain_idx_0 + shard_copies];
for (uint32_t i = chain_idx_0 + 2 * shard_copies; i <= chain_idx_last; i += shard_copies)
phi[i] += dt_c[7] * pi[i];
phi[chain_idx_0] = phi0;
phi[chain_idx_0 + shard_copies] = phi1;
pi[chain_idx_0] = pi0;
}
/*
 * Dispatch one batch of integration steps to the best kernel for the current
 * chain length: split layout for the generic case, one-chain-per-thread for
 * lengths < 32, one-chain-per-warp for the compile-time optimized length.
 * When no specialized version exists, flips use_split_kernel (sticky for
 * subsequent calls) and retries with the split kernel.
 * Fix: the machine-translated launches had been mangled into the invalid
 * "kinfohipLaunchKernelGGL((.k) , ...)" form; restored to proper
 * hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...) calls.
 */
completion move(plane2split &splitter, bool &use_split_kernel, const double *mp2_gmem, hipStream_t stream) {
	if (use_split_kernel) {
		static auto kinfo = make_kernel_info(move_split);
		auto launch = kinfo.linear_configuration(gconf.shard_copies);
		// use the constant-memory copy of mp2 when the whole chain fits in it
		static auto mp2_const_ptr = (const double *) get_device_address(mp2);
		auto mp2_ptr = gconf.chain_length <= sizeof(mp2) / sizeof(double) ? mp2_const_ptr : mp2_gmem;
		hipLaunchKernelGGL(kinfo.k, dim3(launch.blocks), dim3(launch.threads), 0, stream,
		                   splitter.coords_transposed, splitter.momenta_transposed, mp2_ptr,
		                   gconf.chain_length, gconf.shard_copies, gconf.kernel_batching);
	} else {
		if (gconf.chain_length < 32) {
			// template-specialized kernel, one copy per thread
			auto &kinfo = thread_kernel_resolver<>::get(gconf.chain_length);
			auto launch = kinfo.linear_configuration(gconf.shard_copies);
			hipLaunchKernelGGL(kinfo.k, dim3(launch.blocks), dim3(launch.threads), 0, stream,
			                   gres.shard_gpu, gconf.kernel_batching, gconf.shard_copies);
		} else if (gconf.chain_length == optimized_chain_length) {
			// one copy per warp: 32 threads per copy
			static auto kinfo = make_kernel_info(move_chain_in_warp);
			auto launch = kinfo.linear_configuration(uint32_t(gconf.shard_copies) * 32);
			hipLaunchKernelGGL(kinfo.k, dim3(launch.blocks), dim3(launch.threads), 0, stream,
			                   gres.shard_gpu, mp2_gmem, gconf.kernel_batching, gconf.shard_copies);
		} else {
			static bool warned = false;
			if (!warned && gconf.chain_length < 2048) {
				collect_ostream(std::cerr) << process_ident << ": could not find optimized version for chain_length "
				                           << gconf.chain_length
				                           << ", try to reconfigure with -Doptimized_chain_length="
				                           << gconf.chain_length << " and recompile." << std::endl;
				warned = true;
			}
			use_split_kernel = true;
			return move(splitter, use_split_kernel, mp2_gmem, stream);
		}
	}
	hipGetLastError() && assertcu;
	return completion(stream);
}
// One warp per chain index: lanes stride over the copies accumulating
// p_phi^2 + p_pi^2, the partial sums are warp-reduced, and lane 0 writes the
// per-index total to linenergies.
__global__ void
make_linenergies_kernel(const double *projection_phi, const double *projection_pi, uint16_t chain_length,
uint16_t shard_copies, double *linenergies) {
uint32_t idx = (blockDim.x * blockIdx.x + threadIdx.x) / 32;
if (idx >= chain_length) return;
projection_phi += idx * shard_copies;
projection_pi += idx * shard_copies;
double sum = 0;
for (uint32_t c = threadIdx.x % 32; c < shard_copies; c += 32) {
auto p_phi = projection_phi[c], p_pi = projection_pi[c];
sum += p_phi * p_phi + p_pi * p_pi;
}
// TempStorage is a zero-byte dummy when the reduction is shuffle-based
hipcub::WarpReduce<double>::TempStorage dummy;
#ifdef __CUDA_ARCH__
static_assert(sizeof(dummy) <= 1, "make_linenergies_kernel assumes you are on CC >= 3.5 to use warp shuffles");
#endif
sum = hipcub::WarpReduce<double>(dummy).Sum(sum);
if (threadIdx.x % 32) return;
linenergies[idx] = sum;
}
/*
 * Launch the linear-energy reduction (one warp per chain index, hence
 * chain_length * 32 threads), writing per-index sums into
 * gres.linenergies_gpu.
 * Fix: the machine-translated launch had been mangled into the invalid
 * "kinfohipLaunchKernelGGL((.k) , ...)" form; restored to a proper
 * hipLaunchKernelGGL call.
 */
completion make_linenergies(const double *projection_phi, const double *projection_pi, hipStream_t stream) {
	static auto kinfo = make_kernel_info(make_linenergies_kernel);
	auto launch = kinfo.linear_configuration(uint32_t(gconf.chain_length) * 32);
	hipLaunchKernelGGL(kinfo.k, dim3(launch.blocks), dim3(launch.threads), 0, stream,
	                   projection_phi, projection_pi, gconf.chain_length, gconf.shard_copies,
	                   gres.linenergies_gpu);
	hipGetLastError() && assertcu;
	return completion(stream);
}
}
| 420cd73d7385cc5c4b9345a3582318f0af31499e.cu | #include <sstream>
#include <iostream>
#include <string>
#include <cub/util_ptx.cuh>
#include <cub/warp/warp_reduce.cuh>
#include "../common/utilities.hpp"
#include "../common/utilities_cuda.cuh"
#include "../common/configuration.hpp"
#include "dDNKG.hpp"
extern const std::string process_ident;
namespace dDNKG {
// Integrator and model parameters, uploaded once from the host:
// dt_c[8]/dt_d[8] are the drift/kick coefficients of the 8-stage 6th-order
// Yoshida splitting (see the loops below: 7 kicks, 8th drift applied last),
// mp2[2048] holds the per-site linear coefficient (constant-memory copy,
// usable only while chain_length <= 2048), and beta is the quartic coupling
// used by rhs_DNKG.
__constant__ double dt_c[8], dt_d[8], mp2[2048], beta;
// Force acting on one lattice site of the disordered nonlinear Klein-Gordon
// chain: negated on-site term (linear coefficient mp2 plus quartic beta term)
// with the two nearest neighbours coupled back in.
__device__ double rhs_DNKG(double left, double center, double right, double mp2) {
	const double onsite = center * (mp2 + beta * center * center);
	return -(onsite - left - right);
}
/*
* Move entire chain in a warp. Compare with DNKG_FPUT_Toda.cu . Only difference is that the mp2 vector is held
* in shared memory in order to have a comparable register usage to the non-disorder version.
*/
// One warp integrates one full copy of the chain: each lane keeps
// elements_in_thread consecutive sites in registers (phi has two extra halo
// slots), and boundary sites travel between adjacent lanes via warp shuffles.
// The disorder vector mp2 is shared by all copies, so only the first warp of
// the block stages it into shared memory.
__global__ void
move_chain_in_warp(double2 *planar, const double *mp2_gmem, uint32_t steps_grouping, uint16_t copies) {
//compile-time
constexpr int elements_in_thread = optimized_chain_length / 32 + !!(optimized_chain_length % 32),
full_lanes = optimized_chain_length % 32 ?: 32;
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x,
my_copy = idx / 32, lane = idx % 32, lane_left = (lane + 31) % 32, lane_right = (lane + 1) % 32;
// lanes past full_lanes hold one element fewer when 32 does not divide the length
bool full_lane = lane < full_lanes;
if (my_copy >= copies) return;
//offset copy
planar += my_copy * optimized_chain_length;
double phi[elements_in_thread + 2], pi[elements_in_thread];
__shared__ double mp2_shmem[elements_in_thread][32];
// zero the possibly-unused trailing slot of partial lanes
phi[elements_in_thread] = 0, pi[elements_in_thread - 1] = 0;
if (threadIdx.x < 32) mp2_shmem[elements_in_thread - 1][lane] = 0;
//offset previous threads
uint32_t thread_offset = full_lane ? lane * elements_in_thread : full_lanes * elements_in_thread +
(lane - full_lanes) * (elements_in_thread - 1);
planar += thread_offset;
mp2_gmem += thread_offset;
// load this lane's slice of the chain (and of mp2, first warp only)
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread - 1; i_0++, i++) {
auto pair = planar[i_0];
phi[i] = pair.x, pi[i_0] = pair.y;
if (threadIdx.x < 32) mp2_shmem[i_0][lane] = mp2_gmem[i_0];
}
if (full_lane) {
auto pair = planar[elements_in_thread - 1];
phi[elements_in_thread] = pair.x, pi[elements_in_thread - 1] = pair.y;
if (threadIdx.x < 32) mp2_shmem[elements_in_thread - 1][lane] = mp2_gmem[elements_in_thread - 1];
}
__syncthreads();
for (uint32_t i = 0; i < steps_grouping; i++) {
//XXX this unroll may need to be tweaked
#pragma unroll (elements_in_thread > 4 ? 1 : 7)
for (int k = 0; k < 7; k++) {
double dt_c_k = dt_c[k];
if (i && !k)
dt_c_k *= 2; //merge last and first evolution of phi variable, since pi is not update in 8th steps of 6th order Yoshida
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread; i_0++, i++)
phi[i] += dt_c_k * pi[i_0];
//communicate nearest neighbours to adjacent threads
double communicate = full_lane ? phi[elements_in_thread] : phi[elements_in_thread - 1];
phi[0] = cub::ShuffleIndex < 32 > (communicate, lane_left, 0xFFFFFFFF);
phi[elements_in_thread + 1] = cub::ShuffleIndex < 32 > (phi[1], lane_right, 0xFFFFFFFF);
if (!full_lane) phi[elements_in_thread] = phi[elements_in_thread + 1];
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread; i_0++, i++)
pi[i_0] += dt_d[k] * rhs_DNKG(phi[i - 1], phi[i], phi[i + 1], mp2_shmem[i_0][lane]);
}
}
// trailing 8th drift stage, then store the slice back to global memory
#pragma unroll
for (int i_0 = 0, i = 1; i_0 < elements_in_thread - 1; i_0++, i++) {
phi[i] += dt_c[7] * pi[i_0];
planar[i_0] = double2{phi[i], pi[i_0]};
}
if (full_lane) {
phi[elements_in_thread] += dt_c[7] * pi[elements_in_thread - 1];
planar[elements_in_thread - 1] = double2{phi[elements_in_thread], pi[elements_in_thread - 1]};
}
}
/*
* Move entire chain in a thread. Compare with DNKG_FPUT_Toda.cu .
* Since this is used for chain_length < 32, it is safe to use the constant memory version of mp2.
*/
// One thread integrates one full copy of a short chain held entirely in
// registers; chain_length is a template parameter so the loops fully unroll.
// Periodic boundary conditions: site 0 couples to site chain_length-1.
template<uint16_t chain_length>
__global__ void move_chain_in_thread(double2 *planar, uint32_t steps_grouping, uint16_t copies) {
uint32_t my_copy = blockIdx.x * blockDim.x + threadIdx.x;
if (my_copy >= copies) return;
//offset copy
planar += my_copy * chain_length;
double2 pairs[chain_length];
#pragma unroll
for (uint16_t i = 0; i < chain_length; i++) pairs[i] = planar[i];
for (uint32_t i = 0; i < steps_grouping; i++) {
//XXX this unroll may need to be tweaked
#pragma unroll (chain_length > 4 ? 1 : chain_length)
for (int k = 0; k < 7; k++) {
double dt_c_k = dt_c[k];
if (i && !k)
dt_c_k *= 2; //merge last and first evolution of phi variable, since pi is not update in 8th steps of 6th order Yoshida
// drift: advance all coordinates (.x) with the momenta (.y)
#pragma unroll
for (uint16_t i = 0; i < chain_length; i++)
pairs[i].x += dt_c_k * pairs[i].y;
// kick: boundary sites wrap around, bulk sites use both neighbours
pairs[0].y += dt_d[k] * rhs_DNKG(pairs[chain_length - 1].x, pairs[0].x, pairs[1].x, mp2[0]);
#pragma unroll
for (uint16_t i = 1; i < chain_length - 1; i++)
pairs[i].y += dt_d[k] * rhs_DNKG(pairs[i - 1].x, pairs[i].x, pairs[i + 1].x, mp2[i]);
pairs[chain_length - 1].y += dt_d[k] *
rhs_DNKG(pairs[chain_length - 2].x, pairs[chain_length - 1].x, pairs[0].x,
mp2[chain_length - 1]);
}
}
// trailing 8th drift stage, then write the chain back
#pragma unroll
for (uint16_t i = 0; i < chain_length; i++) {
pairs[i].x += dt_c[7] * pairs[i].y;
planar[i] = pairs[i];
}
}
//some machinery to get the compiler to create all the versions of move_planar, and to get the right one at runtime
namespace {
using thread_kinfo = kernel_info<decltype(&move_chain_in_thread<0>)>;
// Compile-time recursion instantiates move_chain_in_thread for every length
// in [2, 32); get() walks the chain at runtime until the template parameter
// matches the requested length and returns that instantiation's kernel_info.
template<int chain_length = 2>
struct thread_kernel_resolver {
static const thread_kinfo &get(int chain_length_required) {
if (chain_length != chain_length_required)
return thread_kernel_resolver<chain_length + 1>::get(chain_length_required);
static auto kinfo = make_kernel_info(move_chain_in_thread<chain_length>);
return kinfo;
}
};
// Recursion terminator: lengths >= 32 must never be dispatched here.
template<>
struct thread_kernel_resolver<32> {
static const thread_kinfo &get(int) { throw std::logic_error("Shouldn't be here"); }
};
}
/*
* Move in split format. Compare with DNKG_FPUT_Toda.cu . The mp2 argument may alias the constant
* memory array if the constant buffer is large enough to hold all the linear parameter values,
* otherwise it resides in global memory.
*/
// Split (structure-of-arrays) layout: site i of copy c lives at
// phi/pi[i * shard_copies + c], so one thread sweeps one copy with coalesced
// accesses. The sweep updates phi[i] in place and writes pi[i-1] only once
// both of its neighbours are final; sites 0 and 1 and pi[0] stay in registers
// until the end.
__global__ void move_split(double *phi, double *pi, const double *mp2, uint32_t chainlen, uint32_t shard_copies,
uint32_t steps_grouping) {
uint32_t chain_idx_0 = blockIdx.x * blockDim.x + threadIdx.x, chain_idx_last =
chain_idx_0 + (chainlen - 1) * shard_copies;
if (chain_idx_0 >= shard_copies) return;
//these values are handled outside of the inner loop, can be saved in registers to avoid memory access
double phi0 = phi[chain_idx_0], phi1 = phi[chain_idx_0 + shard_copies], pi0 = pi[chain_idx_0];
for (uint32_t i = 0; i < steps_grouping; i++) {
for (int k = 0; k < 7; k++) {
double dt_c_k = dt_c[k];
if (i && !k)
dt_c_k *= 2; //merge last and first evolution of phi variable, since pi is not update in 8th steps of 6th order Yoshida
double previous_pi = pi[chain_idx_0 + shard_copies]; //pi[1]
phi0 += dt_c_k * pi0;
phi1 += dt_c_k * previous_pi;
// rolling window of the last two updated coordinates (stencil inputs)
double last_updated_phi[]{phi0, phi1};
//this appears to be already unrolled by the compiler
auto mp2_i = mp2 + 1;
for (uint32_t i = chain_idx_0 + 2 * shard_copies; i <= chain_idx_last; i += shard_copies, mp2_i++) {
double current_pi = pi[i];
double current_updated_phi = (phi[i] += dt_c_k * current_pi);
pi[i - shard_copies] = previous_pi + dt_d[k] * rhs_DNKG(last_updated_phi[0],
last_updated_phi[1],
current_updated_phi,
*mp2_i);
previous_pi = current_pi;
last_updated_phi[0] = last_updated_phi[1];
last_updated_phi[1] = current_updated_phi;
}
// wrap-around sites: last site couples to phi0, site 0 to the last site
pi[chain_idx_last] =
previous_pi + dt_d[k] * rhs_DNKG(last_updated_phi[0], last_updated_phi[1], phi0, *mp2_i);
pi0 += dt_d[k] * rhs_DNKG(last_updated_phi[1], phi0, phi1, *mp2);
}
}
// trailing 8th drift stage, then flush the register-resident sites
phi0 += dt_c[7] * pi0;
phi1 += dt_c[7] * pi[chain_idx_0 + shard_copies];
for (uint32_t i = chain_idx_0 + 2 * shard_copies; i <= chain_idx_last; i += shard_copies)
phi[i] += dt_c[7] * pi[i];
phi[chain_idx_0] = phi0;
phi[chain_idx_0 + shard_copies] = phi1;
pi[chain_idx_0] = pi0;
}
// Dispatch one batch of integration steps to the best kernel for the current
// chain length: split layout for the generic case, one-chain-per-thread for
// lengths < 32, one-chain-per-warp for the compile-time optimized length.
// When no specialized version exists, flips use_split_kernel (sticky for
// subsequent calls) and retries with the split kernel.
completion move(plane2split &splitter, bool &use_split_kernel, const double *mp2_gmem, cudaStream_t stream) {
if (use_split_kernel) {
static auto kinfo = make_kernel_info(move_split);
auto launch = kinfo.linear_configuration(gconf.shard_copies);
// use the constant-memory copy of mp2 when the whole chain fits in it
static auto mp2_const_ptr = (const double *) get_device_address(mp2);
auto mp2_ptr = gconf.chain_length <= sizeof(mp2) / sizeof(double) ? mp2_const_ptr : mp2_gmem;
kinfo.k <<< launch.blocks, launch.threads, 0, stream >>>
(splitter.coords_transposed, splitter.momenta_transposed, mp2_ptr, gconf.chain_length, gconf.shard_copies,
gconf.kernel_batching);
} else {
if (gconf.chain_length < 32) {
// template-specialized kernel, one copy per thread
auto &kinfo = thread_kernel_resolver<>::get(gconf.chain_length);
auto launch = kinfo.linear_configuration(gconf.shard_copies);
kinfo.k <<< launch.blocks, launch.threads, 0, stream >>>
(gres.shard_gpu, gconf.kernel_batching, gconf.shard_copies);
} else if (gconf.chain_length == optimized_chain_length) {
// one copy per warp: 32 threads per copy
static auto kinfo = make_kernel_info(move_chain_in_warp);
auto launch = kinfo.linear_configuration(uint32_t(gconf.shard_copies) * 32);
kinfo.k <<< launch.blocks, launch.threads, 0, stream >>>
(gres.shard_gpu, mp2_gmem, gconf.kernel_batching, gconf.shard_copies);
} else {
static bool warned = false;
if (!warned && gconf.chain_length < 2048) {
collect_ostream(std::cerr) << process_ident << ": could not find optimized version for chain_length "
<< gconf.chain_length
<< ", try to reconfigure with -Doptimized_chain_length="
<< gconf.chain_length << " and recompile." << std::endl;
warned = true;
}
use_split_kernel = true;
return move(splitter, use_split_kernel, mp2_gmem, stream);
}
}
cudaGetLastError() && assertcu;
return completion(stream);
}
// One warp per chain index: lanes stride over the copies accumulating
// p_phi^2 + p_pi^2, the partial sums are warp-reduced, and lane 0 writes the
// per-index total to linenergies.
__global__ void
make_linenergies_kernel(const double *projection_phi, const double *projection_pi, uint16_t chain_length,
uint16_t shard_copies, double *linenergies) {
uint32_t idx = (blockDim.x * blockIdx.x + threadIdx.x) / 32;
if (idx >= chain_length) return;
projection_phi += idx * shard_copies;
projection_pi += idx * shard_copies;
double sum = 0;
for (uint32_t c = threadIdx.x % 32; c < shard_copies; c += 32) {
auto p_phi = projection_phi[c], p_pi = projection_pi[c];
sum += p_phi * p_phi + p_pi * p_pi;
}
// TempStorage is a zero-byte dummy when the reduction is shuffle-based
cub::WarpReduce<double>::TempStorage dummy;
#ifdef __CUDA_ARCH__
static_assert(sizeof(dummy) <= 1, "make_linenergies_kernel assumes you are on CC >= 3.5 to use warp shuffles");
#endif
sum = cub::WarpReduce<double>(dummy).Sum(sum);
if (threadIdx.x % 32) return;
linenergies[idx] = sum;
}
// Launch the linear-energy reduction (one warp per chain index, hence
// chain_length * 32 threads), writing per-index sums into
// gres.linenergies_gpu.
completion make_linenergies(const double *projection_phi, const double *projection_pi, cudaStream_t stream) {
static auto kinfo = make_kernel_info(make_linenergies_kernel);
auto launch = kinfo.linear_configuration(uint32_t(gconf.chain_length) * 32);
kinfo.k <<< launch.blocks, launch.threads, 0, stream >>>
(projection_phi, projection_pi, gconf.chain_length, gconf.shard_copies, gres.linenergies_gpu);
cudaGetLastError() && assertcu;
return completion(stream);
}
}
|
7dac6788e936a943e424a32952fba4f2ab751a83.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Print file/line plus the runtime's error string and abort the process when
// a runtime API call did not return hipSuccess (used via the checkCudaErrors
// macro above).
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
	if (hipSuccess == err)
		return;
	fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err));
	exit(-1);
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Fetch the most recent pending runtime error; if one is set, report it along
// with the caller-supplied message and terminate (used via the
// getLastCudaError macro above).
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
	hipError_t err = hipGetLastError();
	if (hipSuccess == err)
		return;
	fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, hipGetErrorString(err));
	exit(-1);
}
// end of CUDA Helper Functions
// Device code
// Compute-stress kernel for power measurement: every in-range thread
// repeatedly multiplies values loaded from A and B into a running product and
// writes it back, keeping ALUs and memory ports busy. ITERATIONS is a macro
// substituted in by the benchmark generator (REPLACE_ITERATIONS placeholder);
// the numeric results are not meaningful, only the power draw is.
__global__ void PowerKernal1(unsigned *A, unsigned *B, int N)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
unsigned sum=0;
// guard: the grid may cover more threads than N elements
if(id < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = A[id] * B[id] * id;
for(unsigned j=0; j<ITERATIONS/4; ++j){
sum*=id*A[id];
}
A[id] = sum;
sum = A[id] * B[id] * id;
for(unsigned j=0; j<ITERATIONS/4; ++j){
sum*=id*B[id];
}
B[id] = sum;
}
}
}
// Idle-power probe: spins in a delay loop whose body is entirely commented
// out (the retained branch-chain asm below documents an earlier variant of
// the benchmark), then every thread stores its global id into C.
// NOTE(review): with an empty body the loop may be elided by the compiler —
// verify against the generated SASS if the delay matters.
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
// Global host/device buffer pointers, shared with CleanupResources().
unsigned *h_A1, *h_A2, *h_A3;
unsigned *d_A1, *d_A2, *d_A3;
// Benchmark driver: fill two host vectors with random data, copy them to the
// device, and run PowerKernal1 while the NI-DAQ power acquisition and a cutil
// timer are active. Returns 0 on success, -1 on host allocation failure.
int main()
{
	printf("Power Microbenchmarks\n");
	int N = THREADS_PER_BLOCK * NUM_OF_BLOCKS * 2;
	// Allocate input vectors h_A1 and h_A2 in host memory.
	size_t size1 = N * sizeof(unsigned);
	h_A1 = (unsigned*)malloc(size1);
	// fix: bail out on allocation failure instead of continuing with a null buffer
	if (h_A1 == 0) { CleanupResources(); return -1; }
	h_A2 = (unsigned*)malloc(size1);
	if (h_A2 == 0) { CleanupResources(); return -1; }
	dim3 dimGrid2(1,1);
	dim3 dimBlock2(1,1);
	// Initialize input vectors.
	RandomInit_int(h_A1, N);
	RandomInit_int(h_A2, N);
	// Allocate vectors in device memory.
	checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
	checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
	// Copy vectors from host memory to device memory.
	checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
	checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	CUT_SAFE_CALL(cutCreateTimer(&my_timer));
	// Start the external power acquisition, then time the kernel region.
	TaskHandle taskhandle = LaunchDAQ();
	CUT_SAFE_CALL(cutStartTimer(my_timer));
	CUDA_SAFE_CALL( hipDeviceSynchronize() );
	//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
	CUDA_SAFE_CALL( hipDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	hipLaunchKernelGGL(PowerKernal1, dim3(dimGrid), dim3(dimBlock), 0, 0, d_A1, d_A2, N);
	CUDA_SAFE_CALL( hipDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
	CUDA_SAFE_CALL( hipDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	getLastCudaError("kernel launch failure");
	CUDA_SAFE_CALL( hipDeviceSynchronize() );
	CUT_SAFE_CALL(cutStopTimer(my_timer));
	TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
	checkCudaErrors( hipDeviceSynchronize() );
#endif
	// Results are not copied back: only the power trace matters.
	CleanupResources();
	return 0;
}
// Free all device and host buffers allocated by main(). Pointers are reset
// to null after freeing so the function is idempotent — it can be invoked
// both on an allocation-failure path and again at normal exit without
// double-freeing.
void CleanupResources(void)
{
	// Device buffers.
	if (d_A1) { hipFree(d_A1); d_A1 = 0; }
	if (d_A2) { hipFree(d_A2); d_A2 = 0; }
	if (d_A3) { hipFree(d_A3); d_A3 = 0; }
	// Host buffers.
	if (h_A1) { free(h_A1); h_A1 = 0; }
	if (h_A2) { free(h_A2); h_A2 = 0; }
	if (h_A3) { free(h_A3); h_A3 = 0; }
}
// Fills data[0..n-1] with pseudo-random unsigned values in [0, RAND_MAX].
// Fixes two defects of the original: the RNG was re-seeded with time(0) on
// every loop iteration (all entries generated within the same second came out
// identical), and the integer division rand() / RAND_MAX truncated every
// value to 0 or 1.
void RandomInit_int(unsigned* data, int n)
{
	srand((unsigned)time(0));  // seed once, not per element
	for (int i = 0; i < n; ++i) {
		data[i] = (unsigned)rand();
	}
}
// Fills data[0..n-1] with pseudo-random floats in [0.0f, 1.0f].
// Fix: the original computed rand() / RAND_MAX in integer arithmetic, which
// truncates every entry to 0 (or 1 when rand() happens to equal RAND_MAX);
// dividing by (float)RAND_MAX restores the intended uniform values.
void RandomInit_fp(float* data, int n)
{
	for (int i = 0; i < n; ++i) {
		data[i] = rand() / (float)RAND_MAX;
	}
}
| 7dac6788e936a943e424a32952fba4f2ab751a83.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Print file/line plus the runtime's error string and abort the process when
// a CUDA runtime call did not return cudaSuccess (used via the
// checkCudaErrors macro above).
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
	if (cudaSuccess == err)
		return;
	fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err));
	exit(-1);
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Fetch the most recent pending CUDA error; if one is set, report it along
// with the caller-supplied message and terminate (used via the
// getLastCudaError macro above).
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
	cudaError_t err = cudaGetLastError();
	if (cudaSuccess == err)
		return;
	fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString(err));
	exit(-1);
}
// end of CUDA Helper Functions
// Device code
// Compute-stress kernel for power measurement: every in-range thread
// repeatedly multiplies values loaded from A and B into a running product and
// writes it back, keeping ALUs and memory ports busy. ITERATIONS is a macro
// substituted in by the benchmark generator (REPLACE_ITERATIONS placeholder);
// the numeric results are not meaningful, only the power draw is.
__global__ void PowerKernal1(unsigned *A, unsigned *B, int N)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
unsigned sum=0;
// guard: the grid may cover more threads than N elements
if(id < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = A[id] * B[id] * id;
for(unsigned j=0; j<ITERATIONS/4; ++j){
sum*=id*A[id];
}
A[id] = sum;
sum = A[id] * B[id] * id;
for(unsigned j=0; j<ITERATIONS/4; ++j){
sum*=id*B[id];
}
B[id] = sum;
}
}
}
// Idle-power probe: spins in a delay loop whose body is entirely commented
// out (the retained branch-chain asm below documents an earlier variant of
// the benchmark), then every thread stores its global id into C.
// NOTE(review): with an empty body the loop may be elided by the compiler —
// verify against the generated SASS if the delay matters.
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
// Global host/device buffer pointers, shared with CleanupResources().
unsigned *h_A1, *h_A2, *h_A3;
unsigned *d_A1, *d_A2, *d_A3;
// Benchmark driver: fill two host vectors with random data, copy them to the
// device, and run PowerKernal1 while the NI-DAQ power acquisition and a cutil
// timer are active. Returns 0 on success, -1 on host allocation failure.
int main()
{
	printf("Power Microbenchmarks\n");
	int N = THREADS_PER_BLOCK * NUM_OF_BLOCKS * 2;
	// Allocate input vectors h_A1 and h_A2 in host memory.
	size_t size1 = N * sizeof(unsigned);
	h_A1 = (unsigned*)malloc(size1);
	// fix: bail out on allocation failure instead of continuing with a null buffer
	if (h_A1 == 0) { CleanupResources(); return -1; }
	h_A2 = (unsigned*)malloc(size1);
	if (h_A2 == 0) { CleanupResources(); return -1; }
	dim3 dimGrid2(1,1);
	dim3 dimBlock2(1,1);
	// Initialize input vectors.
	RandomInit_int(h_A1, N);
	RandomInit_int(h_A2, N);
	// Allocate vectors in device memory.
	checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
	checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
	// Copy vectors from host memory to device memory.
	checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
	checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
	dim3 dimGrid(NUM_OF_BLOCKS,1);
	dim3 dimBlock(THREADS_PER_BLOCK,1);
	CUT_SAFE_CALL(cutCreateTimer(&my_timer));
	// Start the external power acquisition, then time the kernel region.
	TaskHandle taskhandle = LaunchDAQ();
	CUT_SAFE_CALL(cutStartTimer(my_timer));
	// cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
	// equivalent modern call (already used in the _DEBUG block below).
	CUDA_SAFE_CALL( cudaDeviceSynchronize() );
	//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
	CUDA_SAFE_CALL( cudaDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
	CUDA_SAFE_CALL( cudaDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
	CUDA_SAFE_CALL( cudaDeviceSynchronize() );
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	getLastCudaError("kernel launch failure");
	CUDA_SAFE_CALL( cudaDeviceSynchronize() );
	CUT_SAFE_CALL(cutStopTimer(my_timer));
	TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
	printf("execution time = %f\n", cutGetTimerValue(my_timer));
	CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
	checkCudaErrors( cudaDeviceSynchronize() );
#endif
	// Results are not copied back: only the power trace matters.
	CleanupResources();
	return 0;
}
// Release every device and host buffer owned by this benchmark.
// NULL handles are skipped, so the function is safe to call after a
// partially-completed setup (e.g. when a malloc failed part-way through).
void CleanupResources(void)
{
// Device-side buffers first.
if (d_A1 != 0) {
cudaFree(d_A1);
}
if (d_A2 != 0) {
cudaFree(d_A2);
}
if (d_A3 != 0) {
cudaFree(d_A3);
}
// Then the matching host-side buffers.
if (h_A1 != 0) {
free(h_A1);
}
if (h_A2 != 0) {
free(h_A2);
}
if (h_A3 != 0) {
free(h_A3);
}
}
// Allocates an array with random float entries.
// Fill `data` with n pseudo-random unsigned values in [0, RAND_MAX].
//
// Fixes two defects in the original:
//  - srand() was re-seeded with time(0) on every iteration, so every element
//    drawn within the same second received the identical value;
//  - rand() / RAND_MAX used integer division, which collapses every element
//    to 0 (or, once in RAND_MAX+1 draws, 1) instead of a random value.
void RandomInit_int(unsigned* data, int n)
{
srand((unsigned)time(0));  // seed once, not per element
for (int i = 0; i < n; ++i){
data[i] = (unsigned)rand();
}
}
// Fill `data` with n pseudo-random floats in [0.0f, 1.0f].
//
// The original computed rand() / RAND_MAX with INTEGER division, which yields
// 0 for almost every draw; dividing by RAND_MAX as a float restores the
// intended uniform fraction. The caller is responsible for seeding rand().
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
9a27749ac7208a3e3e475e6764d06de5e39775dc.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Copyright 2015 Hung-Yi Pu, Kiyun Yun, Ziri Younsi, Sunk-Jin Yoon
Odyssey version 1.0 (released 2015)
This file is part of Odyssey source code. Odyssey is a public, GPU-based code
for General Relativistic Radiative Transfer (GRRT), following the
ray-tracing algorithm presented in
Fuerst, S. V., & Wu, K. 2007, A&A, 474, 55,
and the radiative transfer formulation described in
Younsi, Z., Wu, K., & Fuerst, S. V. 2012, A&A, 545, A13
Odyssey is distributed freely under the GNU general public license.
You can redistribute it and/or modify it under the terms of the License
http://www.gnu.org/licenses/gpl.txt
The current distribution website is:
https://github.com/hungyipu/Odyssey/
***********************************************************************************/
#include <fstream>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
#include <hip/hip_runtime.h>
#include "constants.h"
#include "kernels.hip"
// Entry point for the Odyssey GRRT demo (HIP build): renders two images of
// the flow around a black hole - task1 produces a redshift map, task2 a
// luminosity map at a fixed observing frequency - and writes each result to
// a text file (Output_task1.txt / Output_task2.txt).
int main()
{
// a set of variables defined in constants.h
// NOTE(review): A / INCLINATION / SIZE / freq_obs below are presumably
// macros from constants.h that alias slots of VariablesIn - confirm there.
double VariablesIn[VarINNUM];
A = 0.; // black hole spin
INCLINATION = acos(0.25)/PI*180.; // inclination angle in unit of degree
SIZE = IMAGE_SIZE;
printf("task1: image size = %d x %d pixels\n",IMAGE_SIZE,IMAGE_SIZE);
// number of grids; the coordinate of each grid is given by (GridIdxX,GridIdY)
int ImaDimX, ImaDimY;
// number of blocks; the coordinate of each block is given by (blockIdx.x ,blockIdx.y )
int GridDimX, GridDimY;
// number of threads; the coordinate of each thread is given by (threadIdx.x,threadIdx.y)
int BlockDimX, BlockDimY;
// device buffers
double *d_ResultsPixel, *d_VariablesIn;
// save output results in files; 3 doubles per pixel.
double* Results;
FILE *fp;
Results = new double[IMAGE_SIZE * IMAGE_SIZE * 3];
// Fixed launch geometry: 100x1 threads per block, 1x50 blocks per launch.
BlockDimX = 100;
BlockDimY = 1;
GridDimX = 1;
GridDimY = 50;
dim3 BlockDim (BlockDimX, BlockDimY);
dim3 GridDim(GridDimX, GridDimY);
//compute number of grides, to cover the whole image plane
ImaDimX = (int)ceil((double)IMAGE_SIZE / (BlockDimX * GridDimX));
ImaDimY = (int)ceil((double)IMAGE_SIZE / (BlockDimY * GridDimY));
// NOTE(review): device API return codes are not checked anywhere below.
hipMalloc(&d_ResultsPixel, sizeof(double) * IMAGE_SIZE * IMAGE_SIZE * 3);
hipMalloc(&d_VariablesIn, sizeof(double) * VarINNUM);
hipMemcpy(d_VariablesIn, VariablesIn, sizeof(double) * VarINNUM, hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// Tile the kernel over the image plane; each launch covers one
// (GridIdxX, GridIdxY) tile.
for(int GridIdxY = 0; GridIdxY < ImaDimY; GridIdxY++){
for(int GridIdxX = 0; GridIdxX < ImaDimX; GridIdxX++){
hipLaunchKernelGGL(( task1), dim3(GridDim), dim3(BlockDim), 0, 0, d_ResultsPixel, d_VariablesIn, GridIdxX, GridIdxY);
}
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Total kernel execution time (task1) %f (s)\n", time * 1e-9f);
hipMemcpy(Results, d_ResultsPixel, sizeof(double) * IMAGE_SIZE * IMAGE_SIZE * 3, hipMemcpyDeviceToHost);
//save result to output: one (alpha, beta, redshift) triple per pixel.
fp=fopen("Output_task1.txt","w");
if (fp != NULL) {
fprintf(fp,"###output data:(alpha, beta, redshift)\n");
for(int j = 0; j < IMAGE_SIZE; j++)
for(int i = 0; i < IMAGE_SIZE; i++)
{
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 0]);
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 1]);
fprintf(fp, "%f\n", (float)Results[3 * (IMAGE_SIZE * j + i) + 2]);
}
fclose(fp);
}
// Reconfigure for task2: same spin, 45 degree inclination, observed at 340 GHz.
A = 0.; // black hole spin
INCLINATION = 45.; // inclination angle in unit of degree
SIZE = IMAGE_SIZE;
freq_obs = 340e9; // observed frequency
printf("task2: image size = %d x %d pixels\n",IMAGE_SIZE,IMAGE_SIZE);
hipMemcpy(d_VariablesIn, VariablesIn, sizeof(double) * VarINNUM, hipMemcpyHostToDevice);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
//compute number of grides, to cover the whole image plane
for(int GridIdxY = 0; GridIdxY < ImaDimY; GridIdxY++){
for(int GridIdxX = 0; GridIdxX < ImaDimX; GridIdxX++){
hipLaunchKernelGGL(( task2), dim3(GridDim), dim3(BlockDim), 0, 0, d_ResultsPixel, d_VariablesIn, GridIdxX, GridIdxY);
}
}
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Total kernel execution time (task2) %f (s)\n", time * 1e-9f);
hipMemcpy(Results, d_ResultsPixel, sizeof(double) * IMAGE_SIZE * IMAGE_SIZE * 3, hipMemcpyDeviceToHost);
hipFree(d_ResultsPixel);
hipFree(d_VariablesIn);
fp=fopen("Output_task2.txt","w");
if (fp != NULL) {
fprintf(fp,"###output data:(alpha, beta, Luminosity (erg/sec))\n");
for(int j = 0; j < IMAGE_SIZE; j++)
for(int i = 0; i < IMAGE_SIZE; i++)
{
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 0]);
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 1]);
fprintf(fp, "%f\n", (float)Results[3 * (IMAGE_SIZE * j + i) + 2]);
}
fclose(fp);
}
delete [] Results;
return 0;
}
| 9a27749ac7208a3e3e475e6764d06de5e39775dc.cu | /***********************************************************************************
Copyright 2015 Hung-Yi Pu, Kiyun Yun, Ziri Younsi, Sunk-Jin Yoon
Odyssey version 1.0 (released 2015)
This file is part of Odyssey source code. Odyssey is a public, GPU-based code
for General Relativistic Radiative Transfer (GRRT), following the
ray-tracing algorithm presented in
Fuerst, S. V., & Wu, K. 2007, A&A, 474, 55,
and the radiative transfer formulation described in
Younsi, Z., Wu, K., & Fuerst, S. V. 2012, A&A, 545, A13
Odyssey is distributed freely under the GNU general public license.
You can redistribute it and/or modify it under the terms of the License
http://www.gnu.org/licenses/gpl.txt
The current distribution website is:
https://github.com/hungyipu/Odyssey/
***********************************************************************************/
#include <fstream>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
#include <cuda.h>
#include "constants.h"
#include "kernels.cu"
// Entry point for the Odyssey GRRT demo (CUDA build): renders two images of
// the flow around a black hole - task1 produces a redshift map, task2 a
// luminosity map at a fixed observing frequency - and writes each result to
// a text file (Output_task1.txt / Output_task2.txt).
int main()
{
// a set of variables defined in constants.h
// NOTE(review): A / INCLINATION / SIZE / freq_obs below are presumably
// macros from constants.h that alias slots of VariablesIn - confirm there.
double VariablesIn[VarINNUM];
A = 0.; // black hole spin
INCLINATION = acos(0.25)/PI*180.; // inclination angle in unit of degree
SIZE = IMAGE_SIZE;
printf("task1: image size = %d x %d pixels\n",IMAGE_SIZE,IMAGE_SIZE);
// number of grids; the coordinate of each grid is given by (GridIdxX,GridIdY)
int ImaDimX, ImaDimY;
// number of blocks; the coordinate of each block is given by (blockIdx.x ,blockIdx.y )
int GridDimX, GridDimY;
// number of threads; the coordinate of each thread is given by (threadIdx.x,threadIdx.y)
int BlockDimX, BlockDimY;
// device buffers
double *d_ResultsPixel, *d_VariablesIn;
// save output results in files; 3 doubles per pixel.
double* Results;
FILE *fp;
Results = new double[IMAGE_SIZE * IMAGE_SIZE * 3];
// Fixed launch geometry: 100x1 threads per block, 1x50 blocks per launch.
BlockDimX = 100;
BlockDimY = 1;
GridDimX = 1;
GridDimY = 50;
dim3 BlockDim (BlockDimX, BlockDimY);
dim3 GridDim(GridDimX, GridDimY);
//compute number of grides, to cover the whole image plane
ImaDimX = (int)ceil((double)IMAGE_SIZE / (BlockDimX * GridDimX));
ImaDimY = (int)ceil((double)IMAGE_SIZE / (BlockDimY * GridDimY));
// NOTE(review): device API return codes are not checked anywhere below.
cudaMalloc(&d_ResultsPixel, sizeof(double) * IMAGE_SIZE * IMAGE_SIZE * 3);
cudaMalloc(&d_VariablesIn, sizeof(double) * VarINNUM);
cudaMemcpy(d_VariablesIn, VariablesIn, sizeof(double) * VarINNUM, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// Tile the kernel over the image plane; each launch covers one
// (GridIdxX, GridIdxY) tile.
for(int GridIdxY = 0; GridIdxY < ImaDimY; GridIdxY++){
for(int GridIdxX = 0; GridIdxX < ImaDimX; GridIdxX++){
task1<<<GridDim, BlockDim>>>(d_ResultsPixel, d_VariablesIn, GridIdxX, GridIdxY);
}
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Total kernel execution time (task1) %f (s)\n", time * 1e-9f);
cudaMemcpy(Results, d_ResultsPixel, sizeof(double) * IMAGE_SIZE * IMAGE_SIZE * 3, cudaMemcpyDeviceToHost);
//save result to output: one (alpha, beta, redshift) triple per pixel.
fp=fopen("Output_task1.txt","w");
if (fp != NULL) {
fprintf(fp,"###output data:(alpha, beta, redshift)\n");
for(int j = 0; j < IMAGE_SIZE; j++)
for(int i = 0; i < IMAGE_SIZE; i++)
{
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 0]);
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 1]);
fprintf(fp, "%f\n", (float)Results[3 * (IMAGE_SIZE * j + i) + 2]);
}
fclose(fp);
}
// Reconfigure for task2: same spin, 45 degree inclination, observed at 340 GHz.
A = 0.; // black hole spin
INCLINATION = 45.; // inclination angle in unit of degree
SIZE = IMAGE_SIZE;
freq_obs = 340e9; // observed frequency
printf("task2: image size = %d x %d pixels\n",IMAGE_SIZE,IMAGE_SIZE);
cudaMemcpy(d_VariablesIn, VariablesIn, sizeof(double) * VarINNUM, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
//compute number of grides, to cover the whole image plane
for(int GridIdxY = 0; GridIdxY < ImaDimY; GridIdxY++){
for(int GridIdxX = 0; GridIdxX < ImaDimX; GridIdxX++){
task2<<<GridDim, BlockDim>>>(d_ResultsPixel, d_VariablesIn, GridIdxX, GridIdxY);
}
}
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Total kernel execution time (task2) %f (s)\n", time * 1e-9f);
cudaMemcpy(Results, d_ResultsPixel, sizeof(double) * IMAGE_SIZE * IMAGE_SIZE * 3, cudaMemcpyDeviceToHost);
cudaFree(d_ResultsPixel);
cudaFree(d_VariablesIn);
fp=fopen("Output_task2.txt","w");
if (fp != NULL) {
fprintf(fp,"###output data:(alpha, beta, Luminosity (erg/sec))\n");
for(int j = 0; j < IMAGE_SIZE; j++)
for(int i = 0; i < IMAGE_SIZE; i++)
{
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 0]);
fprintf(fp, "%f\t", (float)Results[3 * (IMAGE_SIZE * j + i) + 1]);
fprintf(fp, "%f\n", (float)Results[3 * (IMAGE_SIZE * j + i) + 2]);
}
fclose(fp);
}
delete [] Results;
return 0;
}
|
386fd9bedcf02f509d0c26994d7c47be3c5b6ed3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/momentum_impl.cuh"
#include <iostream>
#include "include/hip/hip_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/elementwise/elementswise_pub_impl.cuh"
// Plain (non-Nesterov) momentum update, applied per element by the
// element-wise framework:
//   accum = momentum * accum + grad
//   var  -= lr * accum
// T: parameter/accumulator type, S: hyper-parameter type, G: gradient type.
// lr and momentum are scalars read through device pointers.
template <typename T, typename S, typename G>
struct MomentumUpdateVariableFunctor {
const S *learning_rate_;
const S *momentum_;
MomentumUpdateVariableFunctor(const S *learning_rate, const S *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + gradient[0];
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
// Nesterov momentum update, applied per element:
//   accum = momentum * accum + grad
//   var  -= grad * lr + accum * momentum * lr
// Same type parameters as MomentumUpdateVariableFunctor.
template <typename T, typename S, typename G>
struct MomentumUpdateVariableWithNesterovFunctor {
const S *learning_rate_;
const S *momentum_;
MomentumUpdateVariableWithNesterovFunctor(const S *learning_rate, const S *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + gradient[0];
variable[0] -= gradient[0] * learning_rate_[0] + accumulation[0] * momentum_[0] * learning_rate_[0];
}
};
// Specialization for fp16 parameters/gradients with fp32 hyper-parameters:
// lr and momentum are converted to half per element with __float2half.
template <>
struct MomentumUpdateVariableFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + gradient[0];
variable[0] -= __float2half(learning_rate_[0]) * accumulation[0];
}
};
// Nesterov variant of the fp16-parameter / fp32-hyper-parameter
// specialization; arithmetic is performed in half precision.
template <>
struct MomentumUpdateVariableWithNesterovFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableWithNesterovFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + gradient[0];
variable[0] -= gradient[0] * __float2half(learning_rate_[0]) +
accumulation[0] * __float2half(momentum_[0]) * __float2half(learning_rate_[0]);
}
};
// Specialization for fp32 parameters with fp16 gradients: each gradient is
// widened to float with __half2float and the update runs in fp32.
template <>
struct MomentumUpdateVariableFunctor<float, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(float *variable, float *accumulation, const half *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + __half2float(gradient[0]);
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
// Nesterov variant of the fp32-parameter / fp16-gradient specialization;
// gradients are widened to float and the update runs in fp32.
template <>
struct MomentumUpdateVariableWithNesterovFunctor<float, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableWithNesterovFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(float *variable, float *accumulation, const half *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + __half2float(gradient[0]);
variable[0] -= __half2float(gradient[0]) * learning_rate_[0] + accumulation[0] * momentum_[0] * learning_rate_[0];
}
};
// Fused weight-decay + loss-scale + momentum update, per element:
//   grad  = (var * weight_decay + grad_in) * scale
//   accum = momentum * accum + grad
//   var  -= lr * accum
// All four hyper-parameters are scalars read through device pointers.
template <typename T, typename S, typename G>
struct FusedMomentumWeightDecayScaleFunctor {
const S *learning_rate_;
const S *momentum_;
const S *weight_decay_;
const S *scale_;
FusedMomentumWeightDecayScaleFunctor(const S *learning_rate, const S *momentum, const S *weight_decay,
const S *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->weight_decay_ = weight_decay;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
T grad = (variable[0] * weight_decay_[0] + static_cast<T>(gradient[0])) * scale_[0];
accumulation[0] = momentum_[0] * accumulation[0] + grad;
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
// fp16-parameter / fp32-hyper-parameter specialization of the fused
// weight-decay + scale + momentum update; hyper-parameters are converted to
// half per element and the arithmetic runs in half precision.
template <>
struct FusedMomentumWeightDecayScaleFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
const float *weight_decay_;
const float *scale_;
FusedMomentumWeightDecayScaleFunctor(const float *learning_rate, const float *momentum, const float *weight_decay,
const float *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->weight_decay_ = weight_decay;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
half grad = (variable[0] * __float2half(weight_decay_[0]) + gradient[0]) * __float2half(scale_[0]);
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + grad;
variable[0] -= __float2half(learning_rate_[0]) * accumulation[0];
}
};
// Fused loss-scale + momentum update, per element:
//   accum = momentum * accum + grad * scale
//   var  -= lr * accum
template <typename T, typename S, typename G>
struct FusedMomentumScaleFunctor {
const S *learning_rate_;
const S *momentum_;
const S *scale_;
FusedMomentumScaleFunctor(const S *learning_rate, const S *momentum, const S *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + static_cast<T>(gradient[0]) * scale_[0];
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
// fp16-parameter / fp32-hyper-parameter specialization of the fused
// loss-scale + momentum update; lr / momentum / scale are converted to half
// per element.
template <>
struct FusedMomentumScaleFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
const float *scale_;
FusedMomentumScaleFunctor(const float *learning_rate, const float *momentum, const float *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->scale_ = scale;
}
// Fix: `gradient` is now const, matching the primary template and every
// other specialization in this file (the functor never writes through it).
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + gradient[0] * __float2half(scale_[0]);
variable[0] -= __float2half(learning_rate_[0]) * accumulation[0];
}
};
// Fused weight-decay + momentum update (no loss scale), per element:
//   grad  = var * weight_decay + grad_in
//   accum = momentum * accum + grad
//   var  -= lr * accum
template <typename T, typename S, typename G>
struct FusedWeightDecayMomentumFunctor {
const S *learning_rate_;
const S *momentum_;
const S *weight_decay_;
FusedWeightDecayMomentumFunctor(const S *learning_rate, const S *momentum, const S *weight_decay) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->weight_decay_ = weight_decay;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
T grad = variable[0] * weight_decay_[0] + static_cast<T>(gradient[0]);
accumulation[0] = momentum_[0] * accumulation[0] + grad;
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
// Host launcher: applies the (optionally Nesterov) momentum update to `size`
// elements on `cuda_stream` via the element-wise framework.
template <typename T, typename S, typename G>
void MomentumUpdateVariable(const size_t size, T *variable, T *accumulation, const S *learning_rate, const G *gradient,
const S *momentum, bool use_nesterov, hipStream_t cuda_stream) {
if (use_nesterov) {
MomentumUpdateVariableWithNesterovFunctor<T, S, G> functor{learning_rate, momentum};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
} else {
MomentumUpdateVariableFunctor<T, S, G> functor{learning_rate, momentum};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
}
// Host launcher for the fused weight-decay + loss-scale + momentum update.
template <typename T, typename S, typename G>
void FusedWeightDecayScaleMomentum(const size_t size, S *weight_decay, S *scale, T *variable, T *accumulation,
const S *learning_rate, const G *gradient, const S *momentum,
hipStream_t cuda_stream) {
FusedMomentumWeightDecayScaleFunctor<T, S, G> functor{learning_rate, momentum, weight_decay, scale};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
// Host launcher for the fused loss-scale + momentum update.
template <typename T, typename S, typename G>
void FusedScaleMomentum(const size_t size, S *scale, T *variable, T *accumulation, const S *learning_rate,
const G *gradient, const S *momentum, hipStream_t cuda_stream) {
FusedMomentumScaleFunctor<T, S, G> functor{learning_rate, momentum, scale};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
// Host launcher for the fused weight-decay + momentum update (no loss scale).
template <typename T, typename S, typename G>
void FusedWeightDecayMomentum(const size_t size, S *weight_decay, T *variable, T *accumulation, const S *learning_rate,
const G *gradient, const S *momentum, hipStream_t cuda_stream) {
FusedWeightDecayMomentumFunctor<T, S, G> functor{learning_rate, momentum, weight_decay};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
// CombineFusedScaleMomentum
// Kernel applying the fused scale + momentum update to `num` parameter
// tensors in one launch. Each tensor idx has element_num[idx] elements;
// all pointer arrays are device arrays of per-tensor pointers. The inner
// loop is grid-strided (i += blockDim.x * gridDim.x), so any grid size is
// correct.
template <typename T, typename S, typename G>
__global__ void CombineFusedMomentumScaleKernel(const size_t num, const size_t *element_num, S **scale, T **variable,
T **accumulation, S **learning_rate, G **gradient, S **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + static_cast<T>(gradient[idx][i]) * scale[idx][0];
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
// Host launcher for CombineFusedMomentumScaleKernel. `max` is the largest
// per-tensor element count and sizes the grid; the grid-stride loop in the
// kernel handles smaller tensors.
template <typename T, typename S, typename G>
void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, S **scale, T **variable,
T **accumulation, S **learning_rate, G **gradient, S **momentum,
hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;  // ceil-div
hipLaunchKernelGGL(( CombineFusedMomentumScaleKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
num, elements, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedScaleMomentum
// CombineFusedWeightDecayScaleMomentum
// Kernel applying the fused weight-decay + scale + momentum update to `num`
// parameter tensors in one launch; same layout conventions as
// CombineFusedMomentumScaleKernel, with a grid-strided inner loop.
template <typename T, typename S, typename G>
__global__ void CombineFusedMomentumWeightDecayScaleKernel(const size_t num, const size_t *element_num,
S **weight_decay, S **scale, T **variable, T **accumulation,
S **learning_rate, G **gradient, S **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
T grad = (variable[idx][i] * weight_decay[idx][0] + static_cast<T>(gradient[idx][i])) * scale[idx][0];
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + grad;
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
// Host launcher for CombineFusedMomentumWeightDecayScaleKernel; `max` (the
// largest per-tensor element count) sizes the grid.
template <typename T, typename S, typename G>
void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *element_num,
S **weight_decay, S **scale, T **variable, T **accumulation,
S **learning_rate, G **gradient, S **momentum, hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;  // ceil-div
hipLaunchKernelGGL(( CombineFusedMomentumWeightDecayScaleKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
num, element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedWeightDecayScaleMomentum
template CUDA_LIB_EXPORT void MomentumUpdateVariable<float, float, float>(const size_t size, float *variable,
float *accumulation,
const float *learning_rate,
const float *gradient, const float *momentum,
bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<half, half, half>(const size_t size, half *variable,
half *accumulation, const half *learning_rate,
const half *gradient, const half *momentum,
bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<half, float, half>(const size_t size, half *variable,
half *accumulation, const float *learning_rate,
const half *gradient, const float *momentum,
bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<float, float, half>(const size_t size, float *variable,
float *accumulation,
const float *learning_rate,
const half *gradient, const float *momentum,
bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int8_t, int8_t, int8_t>(
const size_t size, int8_t *variable, int8_t *accumulation, const int8_t *learning_rate, const int8_t *gradient,
const int8_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint8_t, uint8_t, uint8_t>(
const size_t size, uint8_t *variable, uint8_t *accumulation, const uint8_t *learning_rate, const uint8_t *gradient,
const uint8_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int16_t, int16_t, int16_t>(
const size_t size, int16_t *variable, int16_t *accumulation, const int16_t *learning_rate, const int16_t *gradient,
const int16_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint16_t, uint16_t, uint16_t>(
const size_t size, uint16_t *variable, uint16_t *accumulation, const uint16_t *learning_rate,
const uint16_t *gradient, const uint16_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint32_t, uint32_t, uint32_t>(
const size_t size, uint32_t *variable, uint32_t *accumulation, const uint32_t *learning_rate,
const uint32_t *gradient, const uint32_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int32_t, int32_t, int32_t>(
const size_t size, int32_t *variable, int32_t *accumulation, const int32_t *learning_rate, const int32_t *gradient,
const int32_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int64_t, int64_t, int64_t>(
const size_t size, int64_t *variable, int64_t *accumulation, const int64_t *learning_rate, const int64_t *gradient,
const int64_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint64_t, uint64_t, uint64_t>(
const size_t size, uint64_t *variable, uint64_t *accumulation, const uint64_t *learning_rate,
const uint64_t *gradient, const uint64_t *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<double, double, double>(
const size_t size, double *variable, double *accumulation, const double *learning_rate, const double *gradient,
const double *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<Complex<float>, Complex<float>, Complex<float>>(
const size_t size, Complex<float> *variable, Complex<float> *accumulation, const Complex<float> *learning_rate,
const Complex<float> *gradient, const Complex<float> *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<Complex<double>, Complex<double>, Complex<double>>(
const size_t size, Complex<double> *variable, Complex<double> *accumulation, const Complex<double> *learning_rate,
const Complex<double> *gradient, const Complex<double> *momentum, bool use_nesterov, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation,
const float *learning_rate, const float *gradient,
const float *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, half *weight_decay, half *scale,
half *variable, half *accumulation,
const half *learning_rate, const half *gradient,
const half *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
half *variable, half *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate,
const float *gradient, const float *momentum,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate,
const half *gradient, const float *momentum,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, float *scale, float *variable,
float *accumulation, const float *learning_rate, const float *gradient,
const float *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, float *scale, float *variable,
float *accumulation, const float *learning_rate, const half *gradient,
const float *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, half *scale, half *variable,
half *accumulation, const half *learning_rate, const half *gradient,
const half *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, float *scale, half *variable,
half *accumulation, const float *learning_rate, const half *gradient,
const float *momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedWeightDecayScaleMomentum(
const size_t max, const size_t num, const size_t *elements, float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, float **gradient, float **momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedWeightDecayScaleMomentum(
const size_t max, const size_t num, const size_t *elements, float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, half **gradient, float **momentum, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **scale, float **variable, float **accumulation,
float **learning_rate, float **gradient, float **momentum,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **scale, float **variable, float **accumulation,
float **learning_rate, half **gradient, float **momentum,
hipStream_t cuda_stream);
| 386fd9bedcf02f509d0c26994d7c47be3c5b6ed3.cu | /**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/momentum_impl.cuh"
#include <iostream>
#include "include/cuda_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/elementwise/elementswise_pub_impl.cuh"
template <typename T, typename S, typename G>
struct MomentumUpdateVariableFunctor {
const S *learning_rate_;
const S *momentum_;
MomentumUpdateVariableFunctor(const S *learning_rate, const S *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + gradient[0];
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
template <typename T, typename S, typename G>
struct MomentumUpdateVariableWithNesterovFunctor {
const S *learning_rate_;
const S *momentum_;
MomentumUpdateVariableWithNesterovFunctor(const S *learning_rate, const S *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + gradient[0];
variable[0] -= gradient[0] * learning_rate_[0] + accumulation[0] * momentum_[0] * learning_rate_[0];
}
};
template <>
struct MomentumUpdateVariableFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + gradient[0];
variable[0] -= __float2half(learning_rate_[0]) * accumulation[0];
}
};
template <>
struct MomentumUpdateVariableWithNesterovFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableWithNesterovFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + gradient[0];
variable[0] -= gradient[0] * __float2half(learning_rate_[0]) +
accumulation[0] * __float2half(momentum_[0]) * __float2half(learning_rate_[0]);
}
};
template <>
struct MomentumUpdateVariableFunctor<float, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(float *variable, float *accumulation, const half *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + __half2float(gradient[0]);
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
template <>
struct MomentumUpdateVariableWithNesterovFunctor<float, float, half> {
const float *learning_rate_;
const float *momentum_;
MomentumUpdateVariableWithNesterovFunctor(const float *learning_rate, const float *momentum) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
}
__device__ __forceinline__ void operator()(float *variable, float *accumulation, const half *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + __half2float(gradient[0]);
variable[0] -= __half2float(gradient[0]) * learning_rate_[0] + accumulation[0] * momentum_[0] * learning_rate_[0];
}
};
template <typename T, typename S, typename G>
struct FusedMomentumWeightDecayScaleFunctor {
const S *learning_rate_;
const S *momentum_;
const S *weight_decay_;
const S *scale_;
FusedMomentumWeightDecayScaleFunctor(const S *learning_rate, const S *momentum, const S *weight_decay,
const S *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->weight_decay_ = weight_decay;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
T grad = (variable[0] * weight_decay_[0] + static_cast<T>(gradient[0])) * scale_[0];
accumulation[0] = momentum_[0] * accumulation[0] + grad;
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
template <>
struct FusedMomentumWeightDecayScaleFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
const float *weight_decay_;
const float *scale_;
FusedMomentumWeightDecayScaleFunctor(const float *learning_rate, const float *momentum, const float *weight_decay,
const float *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->weight_decay_ = weight_decay;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, const half *gradient) const {
half grad = (variable[0] * __float2half(weight_decay_[0]) + gradient[0]) * __float2half(scale_[0]);
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + grad;
variable[0] -= __float2half(learning_rate_[0]) * accumulation[0];
}
};
template <typename T, typename S, typename G>
struct FusedMomentumScaleFunctor {
const S *learning_rate_;
const S *momentum_;
const S *scale_;
FusedMomentumScaleFunctor(const S *learning_rate, const S *momentum, const S *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
accumulation[0] = momentum_[0] * accumulation[0] + static_cast<T>(gradient[0]) * scale_[0];
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
template <>
struct FusedMomentumScaleFunctor<half, float, half> {
const float *learning_rate_;
const float *momentum_;
const float *scale_;
FusedMomentumScaleFunctor(const float *learning_rate, const float *momentum, const float *scale) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->scale_ = scale;
}
__device__ __forceinline__ void operator()(half *variable, half *accumulation, half *gradient) const {
accumulation[0] = __float2half(momentum_[0]) * accumulation[0] + gradient[0] * __float2half(scale_[0]);
variable[0] -= __float2half(learning_rate_[0]) * accumulation[0];
}
};
template <typename T, typename S, typename G>
struct FusedWeightDecayMomentumFunctor {
const S *learning_rate_;
const S *momentum_;
const S *weight_decay_;
FusedWeightDecayMomentumFunctor(const S *learning_rate, const S *momentum, const S *weight_decay) {
this->learning_rate_ = learning_rate;
this->momentum_ = momentum;
this->weight_decay_ = weight_decay;
}
__device__ __forceinline__ void operator()(T *variable, T *accumulation, const G *gradient) const {
T grad = variable[0] * weight_decay_[0] + static_cast<T>(gradient[0]);
accumulation[0] = momentum_[0] * accumulation[0] + grad;
variable[0] -= learning_rate_[0] * accumulation[0];
}
};
template <typename T, typename S, typename G>
void MomentumUpdateVariable(const size_t size, T *variable, T *accumulation, const S *learning_rate, const G *gradient,
const S *momentum, bool use_nesterov, cudaStream_t cuda_stream) {
if (use_nesterov) {
MomentumUpdateVariableWithNesterovFunctor<T, S, G> functor{learning_rate, momentum};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
} else {
MomentumUpdateVariableFunctor<T, S, G> functor{learning_rate, momentum};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
}
template <typename T, typename S, typename G>
void FusedWeightDecayScaleMomentum(const size_t size, S *weight_decay, S *scale, T *variable, T *accumulation,
const S *learning_rate, const G *gradient, const S *momentum,
cudaStream_t cuda_stream) {
FusedMomentumWeightDecayScaleFunctor<T, S, G> functor{learning_rate, momentum, weight_decay, scale};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
template <typename T, typename S, typename G>
void FusedScaleMomentum(const size_t size, S *scale, T *variable, T *accumulation, const S *learning_rate,
const G *gradient, const S *momentum, cudaStream_t cuda_stream) {
FusedMomentumScaleFunctor<T, S, G> functor{learning_rate, momentum, scale};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
template <typename T, typename S, typename G>
void FusedWeightDecayMomentum(const size_t size, S *weight_decay, T *variable, T *accumulation, const S *learning_rate,
const G *gradient, const S *momentum, cudaStream_t cuda_stream) {
FusedWeightDecayMomentumFunctor<T, S, G> functor{learning_rate, momentum, weight_decay};
cuda::elementwise::EltWiseCudaOpsFunc(functor, (uint)(size), variable, accumulation, gradient, cuda_stream);
}
// CombineFusedScaleMomentum
template <typename T, typename S, typename G>
__global__ void CombineFusedMomentumScaleKernel(const size_t num, const size_t *element_num, S **scale, T **variable,
T **accumulation, S **learning_rate, G **gradient, S **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + static_cast<T>(gradient[idx][i]) * scale[idx][0];
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
template <typename T, typename S, typename G>
void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, S **scale, T **variable,
T **accumulation, S **learning_rate, G **gradient, S **momentum,
cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;
CombineFusedMomentumScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
num, elements, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedScaleMomentum
// CombineFusedWeightDecayScaleMomentum
template <typename T, typename S, typename G>
__global__ void CombineFusedMomentumWeightDecayScaleKernel(const size_t num, const size_t *element_num,
S **weight_decay, S **scale, T **variable, T **accumulation,
S **learning_rate, G **gradient, S **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
T grad = (variable[idx][i] * weight_decay[idx][0] + static_cast<T>(gradient[idx][i])) * scale[idx][0];
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + grad;
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
template <typename T, typename S, typename G>
void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *element_num,
S **weight_decay, S **scale, T **variable, T **accumulation,
S **learning_rate, G **gradient, S **momentum, cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;
CombineFusedMomentumWeightDecayScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
num, element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedWeightDecayScaleMomentum
template CUDA_LIB_EXPORT void MomentumUpdateVariable<float, float, float>(const size_t size, float *variable,
float *accumulation,
const float *learning_rate,
const float *gradient, const float *momentum,
bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<half, half, half>(const size_t size, half *variable,
half *accumulation, const half *learning_rate,
const half *gradient, const half *momentum,
bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<half, float, half>(const size_t size, half *variable,
half *accumulation, const float *learning_rate,
const half *gradient, const float *momentum,
bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<float, float, half>(const size_t size, float *variable,
float *accumulation,
const float *learning_rate,
const half *gradient, const float *momentum,
bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int8_t, int8_t, int8_t>(
const size_t size, int8_t *variable, int8_t *accumulation, const int8_t *learning_rate, const int8_t *gradient,
const int8_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint8_t, uint8_t, uint8_t>(
const size_t size, uint8_t *variable, uint8_t *accumulation, const uint8_t *learning_rate, const uint8_t *gradient,
const uint8_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int16_t, int16_t, int16_t>(
const size_t size, int16_t *variable, int16_t *accumulation, const int16_t *learning_rate, const int16_t *gradient,
const int16_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint16_t, uint16_t, uint16_t>(
const size_t size, uint16_t *variable, uint16_t *accumulation, const uint16_t *learning_rate,
const uint16_t *gradient, const uint16_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint32_t, uint32_t, uint32_t>(
const size_t size, uint32_t *variable, uint32_t *accumulation, const uint32_t *learning_rate,
const uint32_t *gradient, const uint32_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int32_t, int32_t, int32_t>(
const size_t size, int32_t *variable, int32_t *accumulation, const int32_t *learning_rate, const int32_t *gradient,
const int32_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<int64_t, int64_t, int64_t>(
const size_t size, int64_t *variable, int64_t *accumulation, const int64_t *learning_rate, const int64_t *gradient,
const int64_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<uint64_t, uint64_t, uint64_t>(
const size_t size, uint64_t *variable, uint64_t *accumulation, const uint64_t *learning_rate,
const uint64_t *gradient, const uint64_t *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<double, double, double>(
const size_t size, double *variable, double *accumulation, const double *learning_rate, const double *gradient,
const double *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<Complex<float>, Complex<float>, Complex<float>>(
const size_t size, Complex<float> *variable, Complex<float> *accumulation, const Complex<float> *learning_rate,
const Complex<float> *gradient, const Complex<float> *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void MomentumUpdateVariable<Complex<double>, Complex<double>, Complex<double>>(
const size_t size, Complex<double> *variable, Complex<double> *accumulation, const Complex<double> *learning_rate,
const Complex<double> *gradient, const Complex<double> *momentum, bool use_nesterov, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation,
const float *learning_rate, const float *gradient,
const float *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, half *weight_decay, half *scale,
half *variable, half *accumulation,
const half *learning_rate, const half *gradient,
const half *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
half *variable, half *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate,
const float *gradient, const float *momentum,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate,
const half *gradient, const float *momentum,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, float *scale, float *variable,
float *accumulation, const float *learning_rate, const float *gradient,
const float *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, float *scale, float *variable,
float *accumulation, const float *learning_rate, const half *gradient,
const float *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, half *scale, half *variable,
half *accumulation, const half *learning_rate, const half *gradient,
const half *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void FusedScaleMomentum(const size_t element_num, float *scale, half *variable,
half *accumulation, const float *learning_rate, const half *gradient,
const float *momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedWeightDecayScaleMomentum(
const size_t max, const size_t num, const size_t *elements, float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, float **gradient, float **momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedWeightDecayScaleMomentum(
const size_t max, const size_t num, const size_t *elements, float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, half **gradient, float **momentum, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **scale, float **variable, float **accumulation,
float **learning_rate, float **gradient, float **momentum,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **scale, float **variable, float **accumulation,
float **learning_rate, half **gradient, float **momentum,
cudaStream_t cuda_stream);
|
03b11ca4415864bb31462120e5efa6f188206de5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas);
void imprimirTablero(int *tablero, int filas, int columnas);
void imprimirColumnas(int columnas);
void generarSemillas(int *tablero, int filas, int columnas);
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
//AUX
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
int main(void){
//Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//Propiedades del tablero
int *tablero;
int filas = 0;
int columnas = 0;
int dificultad = 0;
//Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo
cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
char partida = 'X';
cin >> partida;
while (partida != 'C' && partida != 'N') {
cout << "Introduce un valor valido para iniciar el juego\n";
cin >> partida;
}
if (partida == 'N'){
//Recogemos los datos de filas y columnas del tablero que vamos a usar
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
//Tablero mnimo de 4 por 4
while (filas < 4) {
cout << "El numero de filas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> filas;
}
while (columnas < 4) {
cout << "El numero de columnas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> columnas;
}
while (prop.maxThreadsPerBlock < (filas * columnas)) {
cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
}
//Reservamos la memoria del tablero y lo inicializamos con generar tablero
tablero = new int[filas * columnas];
generarTablero(tablero, filas, columnas);
modoManual(tablero, filas, columnas);
}
else {
cargarPartida();
}
system("PAUSE");
}
//Generar tablero con nmeros aleatorios
void generarTablero(int *tablero, int filas, int columnas){
srand(time(0));
int tamao = filas * columnas;
for (int i = 0; i < tamao; i++){
tablero[i] = 0;
}
generarSemillas(tablero, filas, columnas);
}
//Genera los nmeros para jugar en el tablero
void generarSemillas(int *tablero, int filas, int columnas){
int tamao = filas * columnas;
int contador = 0;
while (contador < 3){
int aux = rand() % 3;
int i = rand() % tamao;
//cout << "POSICION: " << i+1 << "\n";
if (tablero[i] == 0){
switch (aux){
case 0:
tablero[i] = 2;
break;
case 1:
tablero[i] = 4;
break;
case 2:
tablero[i] = 8;
break;
}
contador++;
}
}
}
//Funcin que imprime el nmero de columnas que va a tener el tablero para que sea ms facil elegir semillas
void imprimirColumnas(int columnas) {
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " " << i + 1;
}
else {
if (i < 9) {
cout << " " << i + 1;
}
else {
cout << " " << i + 1;
}
}
}
cout << "\n";
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " |";
}
else {
cout << " |";
}
}
cout << "\n";
}
//Imprimimos el tablero
void imprimirTablero(int *tablero, int filas, int columnas) {
cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
imprimirColumnas(columnas);
for (int i = 0; i < filas; i++) {
if (i < 9) {
cout << i + 1 << " - ";
}
else {
cout << i + 1 << " - ";
}
for (int k = 0; k < columnas; k++) {
//Damos color en funcin del nmero imprimido
int bloque = tablero[i * filas + k];
switch (bloque) {
case 2:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
break;
case 4:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
break;
case 8:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
break;
case 16:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
break;
default:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
}
if (bloque < 10) cout << "| " << bloque << " |";
else cout << "| " << bloque << "|";
}
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
cout << "\n";
}
}
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
switch (movimiento){
case 'W':
compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'S':
compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'D':
compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'A':
//compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
break;
}
}
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
if (movimiento == 'W'){
for (int i = filas - 1; i > 0; i--){
for (int j = i; j > 0; j--){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
if (movimiento == 'S'){
for (int i = 0; i < filas - 1; i++){
for (int j = i; j < filas - 1; j++){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
if (movimiento == 'D'){
for (int i = 0; i < columnas - 1; i++){
for (int j = i; j < columnas - 1; j++){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0){
tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
}
}
}
if (movimiento == 'A'){
for (int i = columnas; i > 0; i--){
int contador = 0;
for (int j = i; j > 0; j--){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){
tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
else if (tablero[(fila * columnas) + (j - 1)] != 0 && tablero[(fila * columnas) + j] == tablero[(fila * columnas) + (j - 1)] && contador == 0){
tablero[(fila * columnas) + (j - 1)] = tablero[fila * columnas + (j - 1)] * 2;
tablero[fila * columnas + j] = 0;
contador++;
}
}
}
}
}
/*
for (int i = filas - 1; i > 0; i--){
if (tablero[(i * columnas) + columna] == 0){
tablero[(i * columnas) + columna] = tablero[((i - 1) * columnas) + columna];
tablero[((i - 1) * columnas) + columna] = 0;
}
}*/
/*for (int i = filas - 1; i > 0; i--){
if (tablero[((i - 1) * columnas) + columna] != 0){
int a = i;
while (tablero[((a - 1) * columnas) + columna] == 0){
tablero[(a * columnas) + columna] = tablero[((a - 1) * columnas) + columna];
tablero[((a - 1) * columnas) + columna] = 0;
}
}
}*/
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila - 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
//compruebaArriba(tablero, fila - 1, columna, filas, columnas);
}
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila + 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna + 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna - 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
//Guardamos la columna y la fila del hilo
int columnaHilo = threadIdx.x + blockIdx.x;
int filaHilo = threadIdx.y + blockIdx.y;
compruebaSemillas(tablero, filaHilo, filaHilo, filas, columnas, movimiento);
__syncthreads();
}
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/) {
ofstream doc;
doc.open("partida.txt");
doc << filas << "\n";
doc << columnas << "\n";
//doc << dificultad << "\n";
for (int i = 0; i < filas * columnas; i++) {
doc << tablero[i] << " ";
}
doc.close();
system("cls");
cout << "Guardado correctamente.\n\n";
}
void cargarPartida() { //NO FUNCIONA LEE
const string fichero = "partida.txt";
ifstream leer;
leer.open(fichero.c_str());
int d, *tablero;
int i = 0;
int n = 48;
int f = 0;
int c = 0;
char fila[80];
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
f = f * 10 + (n - 48);
}
}
}
n = 48;
i = 0;
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
c = c * 10 + (n - 48);
}
}
}
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
d = (int)fila[0] - 48;
}
tablero = new int[f*c];
for (int i = 0; i < f * c; i++) {
leer.getline(fila, 80, ' ');
tablero[i] = (int)fila[0] - 48;
}
leer.close();
modoManual(tablero, f, c);
}
void modoManual(int *tablero, int filas, int columnas){
//system("cls");
char movimiento = ' ';
while (movimiento != 'Z'){
imprimirTablero(tablero, filas, columnas);
cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
cin >> movimiento;
//while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) {
while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
cout << "Tecla no valida, introduzca una valida:\n";
cin >> movimiento;
}
//CUDA
int *tablero_gpu;
//Reservamos memoria y copiamos tablero en GPU
hipMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
hipMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), hipMemcpyHostToDevice);
//Creamos los hilos en un solo bloque
dim3 DimGrid(1, 1);
dim3 DimBlock(filas, columnas);
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
hipMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, hipMemcpyDeviceToHost);
//system("cls");
generarSemillas(tablero, filas, columnas);
hipFree(tablero_gpu);
}
//system("cls");
cout << "Deseas guardar la partida? (S/N)\n";
char guardar = 'x';
cin >> guardar;
while (guardar != 'S' && guardar != 'N') {
system("cls");
cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
cin >> guardar;
}
if (guardar == 'S') {
guardarPartida(tablero, filas, columnas/*, dificultad*/);
}
else {
cout << "Saliendo sin guardar...\n \n";
}
} | 03b11ca4415864bb31462120e5efa6f188206de5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas);
void imprimirTablero(int *tablero, int filas, int columnas);
void imprimirColumnas(int columnas);
void generarSemillas(int *tablero, int filas, int columnas);
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
//AUX
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
int main(void){
//Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
//Propiedades del tablero
int *tablero;
int filas = 0;
int columnas = 0;
int dificultad = 0;
//Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo
cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
char partida = 'X';
cin >> partida;
while (partida != 'C' && partida != 'N') {
cout << "Introduce un valor valido para iniciar el juego\n";
cin >> partida;
}
if (partida == 'N'){
//Recogemos los datos de filas y columnas del tablero que vamos a usar
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
//Tablero mínimo de 4 por 4
while (filas < 4) {
cout << "El numero de filas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
cin >> filas;
}
while (columnas < 4) {
cout << "El numero de columnas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
cin >> columnas;
}
while (prop.maxThreadsPerBlock < (filas * columnas)) {
cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
}
//Reservamos la memoria del tablero y lo inicializamos con generar tablero
tablero = new int[filas * columnas];
generarTablero(tablero, filas, columnas);
modoManual(tablero, filas, columnas);
}
else {
cargarPartida();
}
system("PAUSE");
}
//Generar tablero con números aleatorios
void generarTablero(int *tablero, int filas, int columnas){
srand(time(0));
int tamaño = filas * columnas;
for (int i = 0; i < tamaño; i++){
tablero[i] = 0;
}
generarSemillas(tablero, filas, columnas);
}
//Genera los números para jugar en el tablero
void generarSemillas(int *tablero, int filas, int columnas){
int tamaño = filas * columnas;
int contador = 0;
while (contador < 3){
int aux = rand() % 3;
int i = rand() % tamaño;
//cout << "POSICION: " << i+1 << "\n";
if (tablero[i] == 0){
switch (aux){
case 0:
tablero[i] = 2;
break;
case 1:
tablero[i] = 4;
break;
case 2:
tablero[i] = 8;
break;
}
contador++;
}
}
}
//Función que imprime el número de columnas que va a tener el tablero para que sea más facil elegir semillas
void imprimirColumnas(int columnas) {
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " " << i + 1;
}
else {
if (i < 9) {
cout << " " << i + 1;
}
else {
cout << " " << i + 1;
}
}
}
cout << "\n";
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " |";
}
else {
cout << " |";
}
}
cout << "\n";
}
//Imprimimos el tablero
void imprimirTablero(int *tablero, int filas, int columnas) {
cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
imprimirColumnas(columnas);
for (int i = 0; i < filas; i++) {
if (i < 9) {
cout << i + 1 << " - ";
}
else {
cout << i + 1 << " - ";
}
for (int k = 0; k < columnas; k++) {
//Damos color en función del número imprimido
int bloque = tablero[i * filas + k];
switch (bloque) {
case 2:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
break;
case 4:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
break;
case 8:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
break;
case 16:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
break;
default:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
}
if (bloque < 10) cout << "| " << bloque << " |";
else cout << "| " << bloque << "|";
}
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
cout << "\n";
}
}
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
switch (movimiento){
case 'W':
compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'S':
compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'D':
compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
break;
case 'A':
//compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
break;
}
}
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
if (movimiento == 'W'){
for (int i = filas - 1; i > 0; i--){
for (int j = i; j > 0; j--){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
if (movimiento == 'S'){
for (int i = 0; i < filas - 1; i++){
for (int j = i; j < filas - 1; j++){
if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
tablero[(j * columnas) + columna] = 0;
}
}
}
}
if (movimiento == 'D'){
for (int i = 0; i < columnas - 1; i++){
for (int j = i; j < columnas - 1; j++){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0){
tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
}
}
}
if (movimiento == 'A'){
for (int i = columnas; i > 0; i--){
int contador = 0;
for (int j = i; j > 0; j--){
if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){
tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j];
tablero[fila * columnas + j] = 0;
}
else if (tablero[(fila * columnas) + (j - 1)] != 0 && tablero[(fila * columnas) + j] == tablero[(fila * columnas) + (j - 1)] && contador == 0){
tablero[(fila * columnas) + (j - 1)] = tablero[fila * columnas + (j - 1)] * 2;
tablero[fila * columnas + j] = 0;
contador++;
}
}
}
}
}
/*
for (int i = filas - 1; i > 0; i--){
if (tablero[(i * columnas) + columna] == 0){
tablero[(i * columnas) + columna] = tablero[((i - 1) * columnas) + columna];
tablero[((i - 1) * columnas) + columna] = 0;
}
}*/
/*for (int i = filas - 1; i > 0; i--){
if (tablero[((i - 1) * columnas) + columna] != 0){
int a = i;
while (tablero[((a - 1) * columnas) + columna] == 0){
tablero[(a * columnas) + columna] = tablero[((a - 1) * columnas) + columna];
tablero[((a - 1) * columnas) + columna] = 0;
}
}
}*/
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila - 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
//compruebaArriba(tablero, fila - 1, columna, filas, columnas);
}
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[((fila + 1) * columnas) + columna] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna + 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
tablero[(fila * columnas) + (columna - 1)] = 0;
moverCeros(tablero, fila, columna, filas, columnas, movimiento);
}
}
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
//Guardamos la columna y la fila del hilo
int columnaHilo = threadIdx.x + blockIdx.x;
int filaHilo = threadIdx.y + blockIdx.y;
compruebaSemillas(tablero, filaHilo, filaHilo, filas, columnas, movimiento);
__syncthreads();
}
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/) {
ofstream doc;
doc.open("partida.txt");
doc << filas << "\n";
doc << columnas << "\n";
//doc << dificultad << "\n";
for (int i = 0; i < filas * columnas; i++) {
doc << tablero[i] << " ";
}
doc.close();
system("cls");
cout << "Guardado correctamente.\n\n";
}
void cargarPartida() { //NO FUNCIONA LEÑE
const string fichero = "partida.txt";
ifstream leer;
leer.open(fichero.c_str());
int d, *tablero;
int i = 0;
int n = 48;
int f = 0;
int c = 0;
char fila[80];
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
f = f * 10 + (n - 48);
}
}
}
n = 48;
i = 0;
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
while (n > 47 && n < 58) {
n = (int)fila[i];
i++;
if (n > 47 && n < 58) {
c = c * 10 + (n - 48);
}
}
}
if (!leer.fail()) {
leer.getline(fila, 80, '\n');
d = (int)fila[0] - 48;
}
tablero = new int[f*c];
for (int i = 0; i < f * c; i++) {
leer.getline(fila, 80, ' ');
tablero[i] = (int)fila[0] - 48;
}
leer.close();
modoManual(tablero, f, c);
}
void modoManual(int *tablero, int filas, int columnas){
//system("cls");
char movimiento = ' ';
while (movimiento != 'Z'){
imprimirTablero(tablero, filas, columnas);
cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
cin >> movimiento;
//while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) {
while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
cout << "Tecla no valida, introduzca una valida:\n";
cin >> movimiento;
}
//CUDA
int *tablero_gpu;
//Reservamos memoria y copiamos tablero en GPU
cudaMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
cudaMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), cudaMemcpyHostToDevice);
//Creamos los hilos en un solo bloque
dim3 DimGrid(1, 1);
dim3 DimBlock(filas, columnas);
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
cudaMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, cudaMemcpyDeviceToHost);
//system("cls");
generarSemillas(tablero, filas, columnas);
cudaFree(tablero_gpu);
}
//system("cls");
cout << "Deseas guardar la partida? (S/N)\n";
char guardar = 'x';
cin >> guardar;
while (guardar != 'S' && guardar != 'N') {
system("cls");
cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
cin >> guardar;
}
if (guardar == 'S') {
guardarPartida(tablero, filas, columnas/*, dificultad*/);
}
else {
cout << "Saliendo sin guardar...\n \n";
}
} |
a7d63cf80e7ccacb218fbb1ae298c8f0790dade3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
int nedges = g_graph_nodes[tid].no_of_edges;
int starting = g_graph_nodes[tid].starting;
int left = nedges - nedges / 4 * 4;
int cost = g_cost[tid];
for (int i=starting; i<(left + starting); i+=1)
{
int id1 = g_graph_edges[i];
if(!g_graph_visited[id1])
{
g_cost[id1]=cost + 1;
g_updating_graph_mask[id1]=true;
}
}
for (int i=starting + left; i<(nedges + starting); i+=4)
{
int id1 = g_graph_edges[i];
int id2 = g_graph_edges[i + 1];
int id3 = g_graph_edges[i + 2];
int id4 = g_graph_edges[i + 3];
if(!g_graph_visited[id1])
{
g_cost[id1]= cost + 1;
g_updating_graph_mask[id1]=true;
}
if (!g_graph_visited[id2]) {
g_cost[id2]= cost + 1;
g_updating_graph_mask[id2]=true;
}
if (!g_graph_visited[id3]) {
g_cost[id3]= cost + 1;
g_updating_graph_mask[id3]=true;
}
if (!g_graph_visited[id4]) {
g_cost[id4]= cost + 1;
g_updating_graph_mask[id4]=true;
}
}
}
}
#endif
| a7d63cf80e7ccacb218fbb1ae298c8f0790dade3.cu | /*********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
The CUDA Kernel for Applying BFS on a loaded Graph. Created By Pawan Harish
**********************************************************************************/
#ifndef _KERNEL_H_
#define _KERNEL_H_
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_updating_graph_mask, bool *g_graph_visited, int* g_cost, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
int nedges = g_graph_nodes[tid].no_of_edges;
int starting = g_graph_nodes[tid].starting;
int left = nedges - nedges / 4 * 4;
int cost = g_cost[tid];
for (int i=starting; i<(left + starting); i+=1)
{
int id1 = g_graph_edges[i];
if(!g_graph_visited[id1])
{
g_cost[id1]=cost + 1;
g_updating_graph_mask[id1]=true;
}
}
for (int i=starting + left; i<(nedges + starting); i+=4)
{
int id1 = g_graph_edges[i];
int id2 = g_graph_edges[i + 1];
int id3 = g_graph_edges[i + 2];
int id4 = g_graph_edges[i + 3];
if(!g_graph_visited[id1])
{
g_cost[id1]= cost + 1;
g_updating_graph_mask[id1]=true;
}
if (!g_graph_visited[id2]) {
g_cost[id2]= cost + 1;
g_updating_graph_mask[id2]=true;
}
if (!g_graph_visited[id3]) {
g_cost[id3]= cost + 1;
g_updating_graph_mask[id3]=true;
}
if (!g_graph_visited[id4]) {
g_cost[id4]= cost + 1;
g_updating_graph_mask[id4]=true;
}
}
}
}
#endif
|
ffa15333da8cdc1dc6920e507f5fbac15e86d16c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sqrt_kernel_large.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
unsigned int len = 1;
unsigned int rowsz = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
sqrt_kernel_large), dim3(gridBlock),dim3(threadBlock), 0, 0, x,len,rowsz);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sqrt_kernel_large), dim3(gridBlock),dim3(threadBlock), 0, 0, x,len,rowsz);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sqrt_kernel_large), dim3(gridBlock),dim3(threadBlock), 0, 0, x,len,rowsz);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ffa15333da8cdc1dc6920e507f5fbac15e86d16c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sqrt_kernel_large.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
unsigned int len = 1;
unsigned int rowsz = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sqrt_kernel_large<<<gridBlock,threadBlock>>>(x,len,rowsz);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sqrt_kernel_large<<<gridBlock,threadBlock>>>(x,len,rowsz);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sqrt_kernel_large<<<gridBlock,threadBlock>>>(x,len,rowsz);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
68f2377b4fe885a595c1f37881b9fada0d5f87ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include <assert.h>
#include <hip/hip_fp16.h>
#include "common_cuda_helper.hpp"
#include "modulated_deform_conv/modulated_deform_conv_cuda.cuh"
#include "trt_modulated_deform_conv_kernel.hpp"
#include "trt_plugin_helper.hpp"
template <typename T>
void trt_modulated_deformable_im2col(const T* data_im_, const T* data_offset_, const T* data_mask_,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, T* data_col_,
hipStream_t stream) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel<T>)
, dim3(GET_BLOCKS(num_kernels)), dim3(THREADS_PER_BLOCK), 0, stream,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
cudaCheckError();
}
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t* output, const scalar_t* bias, size_t step_batch,
size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) { output[index] += bias[(index % step_batch) / step_channel]; }
}
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __hadd(o, b);
}
}
#else
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __float2half(__half2float(o) + __half2float(b));
}
}
#endif
template <typename scalar_t>
static void output_add_bias(scalar_t* output, const scalar_t* bias, size_t batch, size_t channel,
size_t height, size_t width, hipStream_t stream) {
size_t step_channel = height * width;
size_t step_batch = step_channel * channel;
size_t n = step_batch * batch;
hipLaunchKernelGGL(( output_add_bias_kernel), dim3(GET_BLOCKS(n)), dim3(THREADS_PER_BLOCK), 0, stream, output, bias, step_batch,
step_channel, n);
}
template <typename scalar_t>
void ModulatedDeformConvForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* weight, const scalar_t* bias, const scalar_t* offset,
const scalar_t* mask, scalar_t* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
hipblasHandle_t cublas_handle, hipStream_t stream) {
bool with_bias = (bias != nullptr);
im2col_step = ::min(int(batch), im2col_step);
assert(batch % im2col_step == 0);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
scalar_t* columns = (scalar_t*)workspace;
const size_t input_step = channels * height * width;
const size_t offset_step = deformable_group * kernel_h * kernel_w * 2 * height * width;
const size_t mask_step = deformable_group * kernel_h * kernel_w * height * width;
const size_t out_step = channels_out * height_out * width_out;
const size_t out_group_step = out_step / group;
const size_t col_g_step = channels * kernel_w * kernel_h / group * height_out * width_out;
const size_t weight_g_step = channels_out / group * channels / group * kernel_h * kernel_w;
const int m = channels_out / group;
const int n = height_out * width_out;
const int k = channels / group * kernel_h * kernel_w;
scalar_t alpha = 1.;
scalar_t beta = 0.;
for (int b = 0; b < batch; b++) {
const scalar_t* input_start = input + b * input_step;
const scalar_t* offset_start = offset + b * offset_step;
const scalar_t* mask_start = mask + b * mask_step;
trt_modulated_deformable_im2col<scalar_t>(
input_start, offset_start, mask_start, 1, channels, height, width, height_out, width_out,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group, columns, stream);
for (int g = 0; g < group; g++) {
const scalar_t* weight_start = weight + g * weight_g_step;
scalar_t* col_start = columns + g * col_g_step;
scalar_t* out_buffer_start = output + b * out_step + g * out_group_step;
cublasGemmWrap<scalar_t>(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, &alpha, col_start,
n, weight_start, k, &beta, out_buffer_start, n);
cudaCheckError();
}
}
if (with_bias) {
output_add_bias<scalar_t>(output, bias, batch, channels_out, height_out, width_out, stream);
}
}
template void ModulatedDeformConvForwardCUDAKernelLauncher<float>(
const float* input, const float* weight, const float* bias, const float* offset,
const float* mask, float* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
hipblasHandle_t cublas_handle, hipStream_t stream);
template void ModulatedDeformConvForwardCUDAKernelLauncher<__half>(
const __half* input, const __half* weight, const __half* bias, const __half* offset,
const __half* mask, __half* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
hipblasHandle_t cublas_handle, hipStream_t stream);
| 68f2377b4fe885a595c1f37881b9fada0d5f87ca.cu | // Copyright (c) OpenMMLab. All rights reserved
#include <assert.h>
#include <cuda_fp16.h>
#include "common_cuda_helper.hpp"
#include "modulated_deform_conv/modulated_deform_conv_cuda.cuh"
#include "trt_modulated_deform_conv_kernel.hpp"
#include "trt_plugin_helper.hpp"
template <typename T>
void trt_modulated_deformable_im2col(const T* data_im_, const T* data_offset_, const T* data_mask_,
const int batch_size, const int channels, const int height_im,
const int width_im, const int height_col, const int width_col,
const int kernel_h, const int kenerl_w, const int pad_h,
const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, T* data_col_,
cudaStream_t stream) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
modulated_deformable_im2col_gpu_kernel<T>
<<<GET_BLOCKS(num_kernels), THREADS_PER_BLOCK, 0, stream>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
cudaCheckError();
}
template <typename scalar_t>
__global__ void output_add_bias_kernel(scalar_t* output, const scalar_t* bias, size_t step_batch,
size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) { output[index] += bias[(index % step_batch) / step_channel]; }
}
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __hadd(o, b);
}
}
#else
template <>
__global__ void output_add_bias_kernel<__half>(__half* output, const __half* bias,
size_t step_batch, size_t step_channel, size_t n) {
CUDA_1D_KERNEL_LOOP(index, n) {
const __half b = bias[(index % step_batch) / step_channel];
const __half o = output[index];
output[index] = __float2half(__half2float(o) + __half2float(b));
}
}
#endif
template <typename scalar_t>
static void output_add_bias(scalar_t* output, const scalar_t* bias, size_t batch, size_t channel,
size_t height, size_t width, cudaStream_t stream) {
size_t step_channel = height * width;
size_t step_batch = step_channel * channel;
size_t n = step_batch * batch;
output_add_bias_kernel<<<GET_BLOCKS(n), THREADS_PER_BLOCK, 0, stream>>>(output, bias, step_batch,
step_channel, n);
}
// Forward pass of modulated deformable convolution (DCNv2).
// For each sample: expand the input into a column buffer with the deformable
// im2col transform (applying per-location offsets and modulation masks), then
// run one GEMM per convolution group, and finally add the bias if present.
// `workspace` must hold at least channels * kernel_h * kernel_w * height_out
// * width_out scalars for the column buffer.
template <typename scalar_t>
void ModulatedDeformConvForwardCUDAKernelLauncher(
const scalar_t* input, const scalar_t* weight, const scalar_t* bias, const scalar_t* offset,
const scalar_t* mask, scalar_t* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
cublasHandle_t cublas_handle, cudaStream_t stream) {
bool with_bias = (bias != nullptr);
// NOTE(review): im2col_step is clamped and asserted here, but the loop below
// always processes one sample at a time (im2col is called with batch = 1).
im2col_step = std::min(int(batch), im2col_step);
assert(batch % im2col_step == 0);
// standard convolution output geometry
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
scalar_t* columns = (scalar_t*)workspace;
// flat strides between consecutive samples / groups in each buffer (NCHW)
const size_t input_step = channels * height * width;
const size_t offset_step = deformable_group * kernel_h * kernel_w * 2 * height * width;
const size_t mask_step = deformable_group * kernel_h * kernel_w * height * width;
const size_t out_step = channels_out * height_out * width_out;
const size_t out_group_step = out_step / group;
const size_t col_g_step = channels * kernel_w * kernel_h / group * height_out * width_out;
const size_t weight_g_step = channels_out / group * channels / group * kernel_h * kernel_w;
// GEMM shapes per group: out(m x n) = weight(m x k) * columns(k x n)
const int m = channels_out / group;
const int n = height_out * width_out;
const int k = channels / group * kernel_h * kernel_w;
scalar_t alpha = 1.;
scalar_t beta = 0.;
for (int b = 0; b < batch; b++) {
const scalar_t* input_start = input + b * input_step;
const scalar_t* offset_start = offset + b * offset_step;
const scalar_t* mask_start = mask + b * mask_step;
trt_modulated_deformable_im2col<scalar_t>(
input_start, offset_start, mask_start, 1, channels, height, width, height_out, width_out,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group, columns, stream);
for (int g = 0; g < group; g++) {
const scalar_t* weight_start = weight + g * weight_g_step;
scalar_t* col_start = columns + g * col_g_step;
scalar_t* out_buffer_start = output + b * out_step + g * out_group_step;
// Row-major out = weight * columns expressed through cuBLAS's
// column-major convention by swapping the operands (n, m, k order).
// NOTE(review): assumes cublasGemmWrap forwards to cublas*gemm
// semantics — confirm against its definition.
cublasGemmWrap<scalar_t>(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, &alpha, col_start,
n, weight_start, k, &beta, out_buffer_start, n);
cudaCheckError();
}
}
if (with_bias) {
output_add_bias<scalar_t>(output, bias, batch, channels_out, height_out, width_out, stream);
}
}
// Explicit instantiations for the two supported element types (fp32 and
// fp16), so the template definition can stay in this translation unit.
template void ModulatedDeformConvForwardCUDAKernelLauncher<float>(
const float* input, const float* weight, const float* bias, const float* offset,
const float* mask, float* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
cublasHandle_t cublas_handle, cudaStream_t stream);
template void ModulatedDeformConvForwardCUDAKernelLauncher<__half>(
const __half* input, const __half* weight, const __half* bias, const __half* offset,
const __half* mask, __half* output, void* workspace, int batch, int channels, int height,
int width, int channels_out, int kernel_w, int kernel_h, int stride_w, int stride_h, int pad_w,
int pad_h, int dilation_w, int dilation_h, int group, int deformable_group, int im2col_step,
cublasHandle_t cublas_handle, cudaStream_t stream);
|
eb8354991216510f56a997bce2cc09e2bc84862b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepBDGPU.cuh"
#include "hoomd/extern/saruprngCUDA.h"
#include "hoomd/VectorMath.h"
#include "hoomd/HOOMDMath.h"
#include <assert.h>
/*! \file TwoSteBDGPU.cu
\brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Takes the second half-step forward in the Langevin integration on a group of particles with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertial of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
// Launch expectations (review): 1D grid with one thread per group member;
// dynamic shared memory must provide at least 2 * n_types Scalars
// (s_gammas[0:n_types] = gamma, s_gammas[n_types:2*n_types] = gamma_r).
// The per-thread Saru stream is consumed in a fixed order (three uniform
// draws, then the velocity gaussians, then the rotational gaussians);
// reordering any draw changes trajectories for a given seed.
extern "C" __global__
void gpu_brownian_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const Scalar *d_gamma_r,
Scalar4 *d_orientation,
Scalar4 *d_torque,
const Scalar3 *d_inertia,
Scalar4 *d_angmom,
const Scalar *d_gamma,
const unsigned int n_types,
const bool use_lambda,
const Scalar lambda,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const bool aniso,
const Scalar deltaT,
unsigned int D,
const bool d_noiseless_t,
const bool d_noiseless_r)
{
if (!use_lambda)
{
// read in the gamma (1 dimensional array), stored in s_gammas[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// read in the gamma_r, stored in s_gammas[n_type: 2 * n_type], which is s_gamma_r[0:n_type]
Scalar * s_gammas_r = s_gammas + n_types;
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x];
}
__syncthreads();
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
// determine the particle to work on
unsigned int idx = d_group_members[group_idx];
Scalar4 postype = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
int3 image = d_image[idx];
// read in the tag of our particle.
unsigned int ptag = d_tag[idx];
// compute the random force
// Keying the RNG on (tag, timestep + seed) makes the stream independent
// across particles and across time steps.
SaruGPU saru(ptag, timestep + seed, 0x9977665);
Scalar rx = saru.s<Scalar>(-1,1);
Scalar ry = saru.s<Scalar>(-1,1);
Scalar rz = saru.s<Scalar>(-1,1);
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// determine gamma from diameter
gamma = lambda*d_diameter[idx];
}
else
{
// determine gamma from type
unsigned int typ = __scalar_as_int(postype.w);
gamma = s_gammas[typ];
}
// compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 in the uniform -1,1 distribution
// it is not the dimensionality of the system
Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT);
if (d_noiseless_t)
coeff = Scalar(0.0);
Scalar Fr_x = rx*coeff;
Scalar Fr_y = ry*coeff;
Scalar Fr_z = rz*coeff;
if (D < 3)
Fr_z = Scalar(0.0);
// update position
postype.x += (net_force.x + Fr_x) * deltaT / gamma;
postype.y += (net_force.y + Fr_y) * deltaT / gamma;
postype.z += (net_force.z + Fr_z) * deltaT / gamma;
// particles may have been moved slightly outside the box by the above steps, wrap them back into place
box.wrap(postype, image);
// draw a new random velocity for particle j
Scalar mass = vel.w;
Scalar sigma = fast::sqrt(T/mass);
vel.x = gaussian_rng(saru, sigma);
vel.y = gaussian_rng(saru, sigma);
if (D > 2)
vel.z = gaussian_rng(saru, sigma);
else
vel.z = 0;
// write out data
d_pos[idx] = postype;
d_vel[idx] = vel;
d_image[idx] = image;
// rotational random force and orientation quaternion updates
if (aniso)
{
unsigned int type_r = __scalar_as_int(d_pos[idx].w);
// gamma_r is stored in the second half of s_gammas a.k.a s_gammas_r
Scalar gamma_r = s_gammas_r[type_r];
// gamma_r <= 0 disables rotational integration for this particle type
if (gamma_r > 0)
{
vec3<Scalar> p_vec;
quat<Scalar> q(d_orientation[idx]);
vec3<Scalar> t(d_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// check if the shape is degenerate
bool x_zero, y_zero, z_zero;
x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON);
Scalar sigma_r = fast::sqrt(Scalar(2.0)*gamma_r*T/deltaT);
if (d_noiseless_r)
sigma_r = Scalar(0.0);
// original Gaussian random torque
// Gaussian random distribution is preferred in terms of preserving the exact math
vec3<Scalar> bf_torque;
bf_torque.x = gaussian_rng(saru, sigma_r);
bf_torque.y = gaussian_rng(saru, sigma_r);
bf_torque.z = gaussian_rng(saru, sigma_r);
if (x_zero) bf_torque.x = 0;
if (y_zero) bf_torque.y = 0;
if (z_zero) bf_torque.z = 0;
// use the damping by gamma_r and rotate back to lab frame
// For Future Updates: take special care when have anisotropic gamma_r
bf_torque = rotate(q, bf_torque);
if (D < 3)
{
bf_torque.x = 0;
bf_torque.y = 0;
t.x = 0;
t.y = 0;
}
// do the integration for quaternion
q += Scalar(0.5) * deltaT * ((t + bf_torque) / gamma_r) * q ;
// renormalize to counteract drift introduced by the first-order update
q = q * (Scalar(1.0) / slow::sqrt(norm2(q)));
d_orientation[idx] = quat_to_scalar4(q);
// draw a new random ang_mom for particle j in body frame
p_vec.x = gaussian_rng(saru, fast::sqrt(T * I.x));
p_vec.y = gaussian_rng(saru, fast::sqrt(T * I.y));
p_vec.z = gaussian_rng(saru, fast::sqrt(T * I.z));
if (x_zero) p_vec.x = 0;
if (y_zero) p_vec.y = 0;
if (z_zero) p_vec.z = 0;
// !! Note this ang_mom isn't well-behaving in 2D,
// !! because may have effective non-zero ang_mom in x,y
// store ang_mom quaternion
quat<Scalar> p = Scalar(2.0) * q * p_vec;
d_angmom[idx] = quat_to_scalar4(p);
}
}
}
}
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertial of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param langevin_args Collected arguments for gpu_brownian_step_one_kernel()
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
This is just a driver for gpu_brownian_step_one_kernel(), see it for details.
*/
//! Host-side driver for gpu_brownian_step_one_kernel(); see the kernel for
//! parameter semantics. Launches a 1D grid sized by the caller-supplied
//! langevin_args (num_blocks x block_size, one thread per group member).
//! Dynamic shared memory must cover the two per-type drag tables read by the
//! kernel (2 * n_types Scalars); the max() with block_size * sizeof(Scalar)
//! can only over-allocate beyond that requirement.
//! Cleanup (review): removed the unused locals grid1/threads1 that were
//! declared but never referenced.
hipError_t gpu_brownian_step_one(Scalar4 *d_pos,
                                 Scalar4 *d_vel,
                                 int3 *d_image,
                                 const BoxDim& box,
                                 const Scalar *d_diameter,
                                 const unsigned int *d_tag,
                                 const unsigned int *d_group_members,
                                 const unsigned int group_size,
                                 const Scalar4 *d_net_force,
                                 const Scalar *d_gamma_r,
                                 Scalar4 *d_orientation,
                                 Scalar4 *d_torque,
                                 const Scalar3 *d_inertia,
                                 Scalar4 *d_angmom,
                                 const langevin_step_two_args& langevin_args,
                                 const bool aniso,
                                 const Scalar deltaT,
                                 const unsigned int D,
                                 const bool d_noiseless_t,
                                 const bool d_noiseless_r)
    {
    // setup the grid to run the kernel: one thread per group member
    dim3 grid(langevin_args.num_blocks, 1, 1);
    dim3 threads(langevin_args.block_size, 1, 1);
    // run the kernel
    hipLaunchKernelGGL(( gpu_brownian_step_one_kernel), dim3(grid), dim3(threads), max( (unsigned int)(sizeof(Scalar)*langevin_args.n_types) * 2,
                                                                                        (unsigned int)(langevin_args.block_size*sizeof(Scalar))
                                                                                        ), 0,
                       d_pos,
                       d_vel,
                       d_image,
                       box,
                       d_diameter,
                       d_tag,
                       d_group_members,
                       group_size,
                       d_net_force,
                       d_gamma_r,
                       d_orientation,
                       d_torque,
                       d_inertia,
                       d_angmom,
                       langevin_args.d_gamma,
                       langevin_args.n_types,
                       langevin_args.use_lambda,
                       langevin_args.lambda,
                       langevin_args.timestep,
                       langevin_args.seed,
                       langevin_args.T,
                       aniso,
                       deltaT,
                       D,
                       d_noiseless_t,
                       d_noiseless_r);
    return hipSuccess;
    }
| eb8354991216510f56a997bce2cc09e2bc84862b.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "TwoStepBDGPU.cuh"
#include "hoomd/extern/saruprngCUDA.h"
#include "hoomd/VectorMath.h"
#include "hoomd/HOOMDMath.h"
#include <assert.h>
/*! \file TwoSteBDGPU.cu
\brief Defines GPU kernel code for Brownian integration on the GPU. Used by TwoStepBDGPU.
*/
//! Shared memory array for gpu_langevin_step_two_kernel()
extern __shared__ Scalar s_gammas[];
//! Takes the second half-step forward in the Langevin integration on a group of particles with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertial of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param use_lambda If true, gamma = lambda * diameter
\param lambda Scale factor to convert diameter to lambda (when use_lambda is true)
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
// Launch expectations (review): 1D grid with one thread per group member;
// dynamic shared memory must provide at least 2 * n_types Scalars
// (s_gammas[0:n_types] = gamma, s_gammas[n_types:2*n_types] = gamma_r).
// The per-thread Saru stream is consumed in a fixed order (three uniform
// draws, then the velocity gaussians, then the rotational gaussians);
// reordering any draw changes trajectories for a given seed.
extern "C" __global__
void gpu_brownian_step_one_kernel(Scalar4 *d_pos,
Scalar4 *d_vel,
int3 *d_image,
const BoxDim box,
const Scalar *d_diameter,
const unsigned int *d_tag,
const unsigned int *d_group_members,
const unsigned int group_size,
const Scalar4 *d_net_force,
const Scalar *d_gamma_r,
Scalar4 *d_orientation,
Scalar4 *d_torque,
const Scalar3 *d_inertia,
Scalar4 *d_angmom,
const Scalar *d_gamma,
const unsigned int n_types,
const bool use_lambda,
const Scalar lambda,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const bool aniso,
const Scalar deltaT,
unsigned int D,
const bool d_noiseless_t,
const bool d_noiseless_r)
{
if (!use_lambda)
{
// read in the gamma (1 dimensional array), stored in s_gammas[0: n_type] (Pythonic convention)
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
}
__syncthreads();
}
// read in the gamma_r, stored in s_gammas[n_type: 2 * n_type], which is s_gamma_r[0:n_type]
Scalar * s_gammas_r = s_gammas + n_types;
for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
{
if (cur_offset + threadIdx.x < n_types)
s_gammas_r[cur_offset + threadIdx.x] = d_gamma_r[cur_offset + threadIdx.x];
}
__syncthreads();
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size)
{
// determine the particle to work on
unsigned int idx = d_group_members[group_idx];
Scalar4 postype = d_pos[idx];
Scalar4 vel = d_vel[idx];
Scalar4 net_force = d_net_force[idx];
int3 image = d_image[idx];
// read in the tag of our particle.
unsigned int ptag = d_tag[idx];
// compute the random force
// Keying the RNG on (tag, timestep + seed) makes the stream independent
// across particles and across time steps.
SaruGPU saru(ptag, timestep + seed, 0x9977665);
Scalar rx = saru.s<Scalar>(-1,1);
Scalar ry = saru.s<Scalar>(-1,1);
Scalar rz = saru.s<Scalar>(-1,1);
// calculate the magnitude of the random force
Scalar gamma;
if (use_lambda)
{
// determine gamma from diameter
gamma = lambda*d_diameter[idx];
}
else
{
// determine gamma from type
unsigned int typ = __scalar_as_int(postype.w);
gamma = s_gammas[typ];
}
// compute the bd force (the extra factor of 3 is because <rx^2> is 1/3 in the uniform -1,1 distribution
// it is not the dimensionality of the system
Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*gamma*T/deltaT);
if (d_noiseless_t)
coeff = Scalar(0.0);
Scalar Fr_x = rx*coeff;
Scalar Fr_y = ry*coeff;
Scalar Fr_z = rz*coeff;
if (D < 3)
Fr_z = Scalar(0.0);
// update position
postype.x += (net_force.x + Fr_x) * deltaT / gamma;
postype.y += (net_force.y + Fr_y) * deltaT / gamma;
postype.z += (net_force.z + Fr_z) * deltaT / gamma;
// particles may have been moved slightly outside the box by the above steps, wrap them back into place
box.wrap(postype, image);
// draw a new random velocity for particle j
Scalar mass = vel.w;
Scalar sigma = fast::sqrt(T/mass);
vel.x = gaussian_rng(saru, sigma);
vel.y = gaussian_rng(saru, sigma);
if (D > 2)
vel.z = gaussian_rng(saru, sigma);
else
vel.z = 0;
// write out data
d_pos[idx] = postype;
d_vel[idx] = vel;
d_image[idx] = image;
// rotational random force and orientation quaternion updates
if (aniso)
{
unsigned int type_r = __scalar_as_int(d_pos[idx].w);
// gamma_r is stored in the second half of s_gammas a.k.a s_gammas_r
Scalar gamma_r = s_gammas_r[type_r];
// gamma_r <= 0 disables rotational integration for this particle type
if (gamma_r > 0)
{
vec3<Scalar> p_vec;
quat<Scalar> q(d_orientation[idx]);
vec3<Scalar> t(d_torque[idx]);
vec3<Scalar> I(d_inertia[idx]);
// check if the shape is degenerate
bool x_zero, y_zero, z_zero;
x_zero = (I.x < EPSILON); y_zero = (I.y < EPSILON); z_zero = (I.z < EPSILON);
Scalar sigma_r = fast::sqrt(Scalar(2.0)*gamma_r*T/deltaT);
if (d_noiseless_r)
sigma_r = Scalar(0.0);
// original Gaussian random torque
// Gaussian random distribution is preferred in terms of preserving the exact math
vec3<Scalar> bf_torque;
bf_torque.x = gaussian_rng(saru, sigma_r);
bf_torque.y = gaussian_rng(saru, sigma_r);
bf_torque.z = gaussian_rng(saru, sigma_r);
if (x_zero) bf_torque.x = 0;
if (y_zero) bf_torque.y = 0;
if (z_zero) bf_torque.z = 0;
// use the damping by gamma_r and rotate back to lab frame
// For Future Updates: take special care when have anisotropic gamma_r
bf_torque = rotate(q, bf_torque);
if (D < 3)
{
bf_torque.x = 0;
bf_torque.y = 0;
t.x = 0;
t.y = 0;
}
// do the integration for quaternion
q += Scalar(0.5) * deltaT * ((t + bf_torque) / gamma_r) * q ;
// renormalize to counteract drift introduced by the first-order update
q = q * (Scalar(1.0) / slow::sqrt(norm2(q)));
d_orientation[idx] = quat_to_scalar4(q);
// draw a new random ang_mom for particle j in body frame
p_vec.x = gaussian_rng(saru, fast::sqrt(T * I.x));
p_vec.y = gaussian_rng(saru, fast::sqrt(T * I.y));
p_vec.z = gaussian_rng(saru, fast::sqrt(T * I.z));
if (x_zero) p_vec.x = 0;
if (y_zero) p_vec.y = 0;
if (z_zero) p_vec.z = 0;
// !! Note this ang_mom isn't well-behaving in 2D,
// !! because may have effective non-zero ang_mom in x,y
// store ang_mom quaternion
quat<Scalar> p = Scalar(2.0) * q * p_vec;
d_angmom[idx] = quat_to_scalar4(p);
}
}
}
}
/*! \param d_pos array of particle positions and types
\param d_vel array of particle positions and masses
\param d_image array of particle images
\param box simulation box
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma_r List of per-type gamma_rs (rotational drag coeff.)
\param d_orientation Device array of orientation quaternion
\param d_torque Device array of net torque on each particle
\param d_inertia Device array of moment of inertial of each particle
\param d_angmom Device array of transformed angular momentum quaternion of each particle (see online documentation)
\param langevin_args Collected arguments for gpu_brownian_step_one_kernel()
\param aniso If set true, the system would go through rigid body updates for its orientation
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
\param d_noiseless_t If set true, there will be no translational noise (random force)
\param d_noiseless_r If set true, there will be no rotational noise (random torque)
This is just a driver for gpu_brownian_step_one_kernel(), see it for details.
*/
//! Host-side driver for gpu_brownian_step_one_kernel(); see the kernel for
//! parameter semantics. Launches a 1D grid sized by the caller-supplied
//! langevin_args (num_blocks x block_size, one thread per group member).
//! Dynamic shared memory must cover the two per-type drag tables read by the
//! kernel (2 * n_types Scalars); the max() with block_size * sizeof(Scalar)
//! can only over-allocate beyond that requirement.
//! Cleanup (review): removed the unused locals grid1/threads1 that were
//! declared but never referenced.
cudaError_t gpu_brownian_step_one(Scalar4 *d_pos,
                                  Scalar4 *d_vel,
                                  int3 *d_image,
                                  const BoxDim& box,
                                  const Scalar *d_diameter,
                                  const unsigned int *d_tag,
                                  const unsigned int *d_group_members,
                                  const unsigned int group_size,
                                  const Scalar4 *d_net_force,
                                  const Scalar *d_gamma_r,
                                  Scalar4 *d_orientation,
                                  Scalar4 *d_torque,
                                  const Scalar3 *d_inertia,
                                  Scalar4 *d_angmom,
                                  const langevin_step_two_args& langevin_args,
                                  const bool aniso,
                                  const Scalar deltaT,
                                  const unsigned int D,
                                  const bool d_noiseless_t,
                                  const bool d_noiseless_r)
    {
    // setup the grid to run the kernel: one thread per group member
    dim3 grid(langevin_args.num_blocks, 1, 1);
    dim3 threads(langevin_args.block_size, 1, 1);
    // run the kernel
    gpu_brownian_step_one_kernel<<< grid, threads, max( (unsigned int)(sizeof(Scalar)*langevin_args.n_types) * 2,
                                                        (unsigned int)(langevin_args.block_size*sizeof(Scalar))
                                                        )>>>
        (d_pos,
         d_vel,
         d_image,
         box,
         d_diameter,
         d_tag,
         d_group_members,
         group_size,
         d_net_force,
         d_gamma_r,
         d_orientation,
         d_torque,
         d_inertia,
         d_angmom,
         langevin_args.d_gamma,
         langevin_args.n_types,
         langevin_args.use_lambda,
         langevin_args.lambda,
         langevin_args.timestep,
         langevin_args.seed,
         langevin_args.T,
         aniso,
         deltaT,
         D,
         d_noiseless_t,
         d_noiseless_r);
    return cudaSuccess;
    }
|
629c976eccc86d87396c58d67d3d59f1b2cc86b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Computes one Mandelbrot pixel per thread. Rows are striped across MPI
// ranks (real_row = fila * num_processes + my_pid) and across successive
// launches by `rw` chunks of MAX_ROWS_PER_KERNEL rows. Writes the escape
// iteration count into coloresd[fila * img_width + columna], with 0 marking
// points that never escaped within maxIteracionesd iterations.
// Fix (review): the column index was never bounds-checked, so launch grids
// whose x extent does not divide img_width wrote out of bounds; both axes
// are now guarded.
__global__ void mandelKernel(double planoFactorXd, double planoFactorYd, double planoVxd, double planoVyd, int maxIteracionesd, unsigned int *coloresd, int img_width, int img_height, int num_processes, int my_pid, int rw) {
// pixel handled by this thread
int columna = blockIdx.x * blockDim.x + threadIdx.x;
int fila = (rw * MAX_ROWS_PER_KERNEL) + (blockIdx.y * blockDim.y) + threadIdx.y;
int real_row = (fila * num_processes) + my_pid;
// guard both axes against launch-grid overshoot
if (columna >= img_width || real_row >= img_height)
return;
// map the pixel to its point (X, Y) of the complex plane
double X = (planoFactorXd * (double)columna) + planoVxd;
double Y = (planoFactorYd * ((double)(img_height - 1) - (double)real_row)) + planoVyd;
// iterate z = z^2 + c until divergence (|z|^2 > 4) or the iteration cap
double pReal = 0.0;
double pImag = 0.0;
double pRealAnt, pImagAnt, distancia;
int i = 0;
do {
pRealAnt = pReal;
pImagAnt = pImag;
pReal = ((pRealAnt * pRealAnt) - (pImagAnt * pImagAnt)) + X;
pImag = (2.0 * (pRealAnt * pImagAnt)) + Y;
i++;
distancia = pReal*pReal + pImag*pImag;
}while ((i < maxIteracionesd) && (distancia <= 4.0));
// interior points (never escaped) are flagged with 0
if(i == maxIteracionesd) i = 0;
coloresd[(fila * img_width) + columna] = i;
} | 629c976eccc86d87396c58d67d3d59f1b2cc86b0.cu | #include "includes.h"
// Computes one Mandelbrot pixel per thread. Rows are striped across MPI
// ranks (real_row = fila * num_processes + my_pid) and across successive
// launches by `rw` chunks of MAX_ROWS_PER_KERNEL rows. Writes the escape
// iteration count into coloresd[fila * img_width + columna], with 0 marking
// points that never escaped within maxIteracionesd iterations.
// Fix (review): the column index was never bounds-checked, so launch grids
// whose x extent does not divide img_width wrote out of bounds; both axes
// are now guarded.
__global__ void mandelKernel(double planoFactorXd, double planoFactorYd, double planoVxd, double planoVyd, int maxIteracionesd, unsigned int *coloresd, int img_width, int img_height, int num_processes, int my_pid, int rw) {
// pixel handled by this thread
int columna = blockIdx.x * blockDim.x + threadIdx.x;
int fila = (rw * MAX_ROWS_PER_KERNEL) + (blockIdx.y * blockDim.y) + threadIdx.y;
int real_row = (fila * num_processes) + my_pid;
// guard both axes against launch-grid overshoot
if (columna >= img_width || real_row >= img_height)
return;
// map the pixel to its point (X, Y) of the complex plane
double X = (planoFactorXd * (double)columna) + planoVxd;
double Y = (planoFactorYd * ((double)(img_height - 1) - (double)real_row)) + planoVyd;
// iterate z = z^2 + c until divergence (|z|^2 > 4) or the iteration cap
double pReal = 0.0;
double pImag = 0.0;
double pRealAnt, pImagAnt, distancia;
int i = 0;
do {
pRealAnt = pReal;
pImagAnt = pImag;
pReal = ((pRealAnt * pRealAnt) - (pImagAnt * pImagAnt)) + X;
pImag = (2.0 * (pRealAnt * pImagAnt)) + Y;
i++;
distancia = pReal*pReal + pImag*pImag;
}while ((i < maxIteracionesd) && (distancia <= 4.0));
// interior points (never escaped) are flagged with 0
if(i == maxIteracionesd) i = 0;
coloresd[(fila * img_width) + columna] = i;
} |
afa4536dc3a35c1f85c200d32fd079ab5ead39d3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File: chronoGPU.cu
* Author: Maxime MARIA
*/
#include "chronoGPU.hpp"
#include "common.hpp"
#include <iostream>
// Creates the two HIP events that bracket the timed region; the chrono
// begins in the stopped state.
ChronoGPU::ChronoGPU() : m_started(false) {
    HANDLE_ERROR(hipEventCreate(&m_start));
    HANDLE_ERROR(hipEventCreate(&m_end));
}
// Stops a still-running chrono (with a warning) before releasing both events.
ChronoGPU::~ChronoGPU() {
    if (m_started) {
        stop();
        std::cerr << "ChronoGPU::~ChronoGPU(): chrono wasn't turned off!" << std::endl;
    }
    HANDLE_ERROR(hipEventDestroy(m_start));
    HANDLE_ERROR(hipEventDestroy(m_end));
}
// Records the start event on the default stream; warns and does nothing if
// the chrono is already running.
void ChronoGPU::start() {
    if (m_started) {
        std::cerr << "ChronoGPU::start(): chrono is already started!" << std::endl;
        return;
    }
    HANDLE_ERROR(hipEventRecord(m_start, 0));
    m_started = true;
}
// Records the end event and blocks until it completes; warns and does
// nothing if the chrono was never started.
void ChronoGPU::stop() {
    if (!m_started) {
        std::cerr << "ChronoGPU::stop(): chrono wasn't started!" << std::endl;
        return;
    }
    HANDLE_ERROR(hipEventRecord(m_end, 0));
    HANDLE_ERROR(hipEventSynchronize(m_end));
    m_started = false;
}
// Returns the time elapsed between the recorded start and end events,
// in milliseconds.
float ChronoGPU::elapsedTime() {
    float ms = 0.f;
    HANDLE_ERROR(hipEventElapsedTime(&ms, m_start, m_end));
    return ms;
}
| afa4536dc3a35c1f85c200d32fd079ab5ead39d3.cu | /*
* File: chronoGPU.cu
* Author: Maxime MARIA
*/
#include "chronoGPU.hpp"
#include "common.hpp"
#include <iostream>
// Creates the two CUDA events that bracket the timed region; the chrono
// begins in the stopped state.
ChronoGPU::ChronoGPU() : m_started(false) {
    HANDLE_ERROR(cudaEventCreate(&m_start));
    HANDLE_ERROR(cudaEventCreate(&m_end));
}
// Stops a still-running chrono (with a warning) before releasing both events.
ChronoGPU::~ChronoGPU() {
    if (m_started) {
        stop();
        std::cerr << "ChronoGPU::~ChronoGPU(): chrono wasn't turned off!" << std::endl;
    }
    HANDLE_ERROR(cudaEventDestroy(m_start));
    HANDLE_ERROR(cudaEventDestroy(m_end));
}
// Records the start event on the default stream; warns and does nothing if
// the chrono is already running.
void ChronoGPU::start() {
    if (m_started) {
        std::cerr << "ChronoGPU::start(): chrono is already started!" << std::endl;
        return;
    }
    HANDLE_ERROR(cudaEventRecord(m_start, 0));
    m_started = true;
}
// Records the end event and blocks until it completes; warns and does
// nothing if the chrono was never started.
void ChronoGPU::stop() {
    if (!m_started) {
        std::cerr << "ChronoGPU::stop(): chrono wasn't started!" << std::endl;
        return;
    }
    HANDLE_ERROR(cudaEventRecord(m_end, 0));
    HANDLE_ERROR(cudaEventSynchronize(m_end));
    m_started = false;
}
// Returns the time elapsed between the recorded start and end events,
// in milliseconds.
float ChronoGPU::elapsedTime() {
    float ms = 0.f;
    HANDLE_ERROR(cudaEventElapsedTime(&ms, m_start, m_end));
    return ms;
}
|
f3023196fd23781507a688d45bbcbbc5844fe0ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/EmptyTensor.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/native/hip/Resize.h>
#include <ATen/native/TensorFactories.h>
#include <c10/util/accumulate.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_native.h>
#endif
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace at::native {
// Square case: writes an n x n identity into `result` by delegating to the
// two-argument overload below.
Tensor& eye_out_cuda(int64_t n, Tensor& result) {
// the default value of `m` equals to `n`
return at::native::eye_out_cuda(n, n, result);
}
// Writes an n x m identity matrix into `result` (resizing it as needed):
// zero everything, then set the main diagonal to one through a strided view
// of the output storage.
Tensor& eye_out_cuda(int64_t n, int64_t m, Tensor& result) {
  TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
  TORCH_CHECK(m >= 0, "m must be greater or equal to 0, got ", m);
  result.resize_({n, m});
  result.zero_();
  // The main diagonal holds min(n, m) entries, each one row plus one column
  // apart in memory.
  const int64_t diag_len = std::min<int64_t>(n, m);
  const int64_t diag_stride = result.stride(0) + result.stride(1);
  result.as_strided({diag_len}, {diag_stride}).fill_(1);
  return result;
}
// Thin CUDA dispatch shim: forwards to the shared at::detail::empty_cuda
// implementation.
Tensor empty_cuda(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
return at::detail::empty_cuda(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
}
// Builds an "efficient zero tensor": its storage is backed by the
// ZeroTensorAllocator (no real device allocation) and it carries the
// ZeroTensor dispatch key in addition to CUDA, so kernels can treat every
// element as zero without materializing memory.
Tensor _efficientzerotensor_cuda(IntArrayRef size,
                                 c10::optional<ScalarType> dtype,
                                 c10::optional<Layout> layout,
                                 c10::optional<Device> device,
                                 c10::optional<bool> pin_memory) {
  // Pin the device to a concrete index so the allocator targets the right GPU.
  auto target_device = device_or_default(device);
  if (!target_device.has_index()) {
    target_device.set_index(at::hip::current_device());
  }
  auto zero_allocator = at::native::ZeroTensorAllocator(target_device);
  const auto key_set = at::DispatchKeySet(c10::DispatchKey::CUDA) |
                       at::DispatchKeySet(c10::DispatchKey::ZeroTensor);
  return at::detail::empty_generic(size, &zero_allocator, key_set,
                                   dtype_or_default(dtype), c10::nullopt);
}
// Thin CUDA dispatch shim: forwards to the shared
// at::detail::empty_strided_cuda implementation.
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
return at::detail::empty_strided_cuda(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
// Returns the largest integer not exceeding the quadratic root
// (-b + sign * sqrt(b*b - cX4)) / 2. A double-precision sqrt is used for
// speed; when the 63-bit discriminant cannot be represented exactly in a
// double, a short binary search recovers the exact floor.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is <
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#if defined(USE_ROCM)
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_INDEX_TYPES(tensor.scalar_type(), "tril_indices_cuda", [&] {
hipLaunchKernelGGL(( tril_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.mutable_data_ptr<index_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_INDEX_TYPES(tensor.scalar_type(), "triu_indices_cuda", [&] {
hipLaunchKernelGGL(( triu_indices_kernel),
dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
tensor.mutable_data_ptr<index_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
} // namespace at::native
| f3023196fd23781507a688d45bbcbbc5844fe0ce.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/EmptyTensor.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/native/cuda/Resize.h>
#include <ATen/native/TensorFactories.h>
#include <c10/util/accumulate.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_efficientzerotensor_native.h>
#include <ATen/ops/empty_native.h>
#include <ATen/ops/empty_strided_native.h>
#include <ATen/ops/eye_native.h>
#include <ATen/ops/tril_indices_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_indices_native.h>
#include <ATen/ops/triu_native.h>
#endif
#include <algorithm>
#include <cmath>
#include <cstddef>
namespace at::native {
Tensor& eye_out_cuda(int64_t n, Tensor& result) {
// the default value of `m` equals to `n`
return at::native::eye_out_cuda(n, n, result);
}
Tensor& eye_out_cuda(int64_t n, int64_t m, Tensor& result) {
TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
TORCH_CHECK(m >= 0, "m must be greater or equal to 0, got ", m);
result.resize_({n, m});
result.zero_();
int64_t sz = std::min<int64_t>(n, m);
int64_t stride = result.stride(0) + result.stride(1);
Tensor diag = result.as_strided({sz}, {stride});
diag.fill_(1);
return result;
}
Tensor empty_cuda(IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt) {
return at::detail::empty_cuda(size, dtype_opt, layout_opt, device_opt, pin_memory_opt, memory_format_opt);
}
Tensor _efficientzerotensor_cuda(IntArrayRef size,
c10::optional<ScalarType> dtype,
c10::optional<Layout> layout,
c10::optional<Device> device,
c10::optional<bool> pin_memory) {
auto device_ = device_or_default(device);
if (!device_.has_index()) {
device_.set_index(at::cuda::current_device());
}
auto allocator = at::native::ZeroTensorAllocator(device_);
auto dtype_ = dtype_or_default(dtype);
auto zero_ks = at::DispatchKeySet(c10::DispatchKey::CUDA) | at::DispatchKeySet(c10::DispatchKey::ZeroTensor);
auto out = at::detail::empty_generic(size, &allocator, zero_ks, dtype_, c10::nullopt);
return out;
}
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
return at::detail::empty_strided_cuda(size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
__device__
inline int64_t resolve_root_int(
int64_t b, int64_t cX4, int64_t x, int32_t sign) {
int64_t bXb_cX4 = b*b - cX4;
// potential precision loss could occur here when casting int64_t (63 bits
// precision) to double (52 bits precision)
double sr = ::sqrt((double)bXb_cX4);
int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
// have to cast double to int64_t, otherwise it would only compare up to the
// precision of a double variable, ignoring the precision loss
if (bXb_cX4 != (int64_t) (sr * sr)) {
// handle precision loss by using binary search
int64_t llsr = ::__double2ll_rd(sr);
// Use the following math to reduce search space.
// Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
// let d = abs(bXb_cX4 - llsr * llsr), then we have:
// z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
// z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
// Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
// And the true value of row would also be with in range,
// [res - sqrt(d), res + sqrt(d) + 1)
// as the denominator would only reduce the precision penalty.
int64_t diff =
::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
// l never exceeds (could equal to) the target row index
auto l = res > diff ? res - diff : 0;
// r is always larger than the target row index
auto r = res + diff + 1;
// binary search for the correct answer
x <<= 1; // the loop always compares with 2x, so do it once here
while (l + 1 < r) {
auto m = (l + r) >> 1;
// for tril:
// b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
// for triu:
// b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
if (sign * (b + m) * m > x) {
r = m;
} else {
l = m;
}
}
res = l;
}
return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
__device__
inline void get_coordinate_in_tril_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = f - 1;
auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x;
row = resolve_root_int(b, cX4, x, 1);
col = x - ((f + row - 1) * row >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is <
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
__device__
inline void get_coordinate_in_triu_trapezoid(
int64_t f, int64_t x, int64_t & row, int64_t & col) {
f <<= 1; // all statements use 2f, so only calculate it once here.
auto b = -1 - f;
auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x;
row = resolve_root_int(b, cX4, x, -1);
col = x - ((f - row + 1) * row >> 1) + row;
}
} // namespace
template <typename scalar_t>
__global__
#if defined(USE_ROCM)
C10_LAUNCH_BOUNDS_1(512)
#endif
void tril_indices_kernel(scalar_t * tensor,
int64_t row_offset,
int64_t m_first_row,
int64_t col,
int64_t trapezoid_size,
int64_t tril_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < tril_size) {
int64_t r, c;
if (linear_index < trapezoid_size) {
// the coordinate is within the top trapezoid
get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
} else {
// the coordinate falls in the bottom rectangle
auto surplus = linear_index - trapezoid_size;
// add the height of trapezoid: m_last_row (col) - m_first_row + 1
r = surplus / col + col - m_first_row + 1;
c = surplus % col;
}
r += row_offset;
tensor[linear_index] = r;
tensor[linear_index + tril_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor tril_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto tril_size = get_tril_size(row, col, offset);
auto tensor = empty_cuda({2, tril_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (tril_size > 0) {
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
int64_t rectangle_size = 0;
if (rectangle_row_offset < row) {
rectangle_size = (row - rectangle_row_offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using tril_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_INDEX_TYPES(tensor.scalar_type(), "tril_indices_cuda", [&] {
tril_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.mutable_data_ptr<index_t>(),
trapezoid_row_offset,
m_first_row,
col,
tril_size - rectangle_size,
tril_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
int64_t col_offset,
int64_t m_first_row,
int64_t col,
int64_t rectangle_size,
int64_t triu_size) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index < triu_size) {
int64_t r, c;
if (linear_index < rectangle_size) {
// the coordinate is within the top rectangle
r = linear_index / col;
c = linear_index % col;
} else {
// the coordinate falls in the bottom trapezoid
get_coordinate_in_triu_trapezoid(
m_first_row, linear_index - rectangle_size, r, c);
r += rectangle_size / col;
}
c += col_offset;
tensor[linear_index] = r;
tensor[linear_index + triu_size] = c;
}
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
Tensor triu_indices_cuda(
int64_t row, int64_t col, int64_t offset, c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt) {
check_args(row, col, layout_opt);
auto triu_size = row * col - get_tril_size(row, col, offset - 1);
auto tensor = empty_cuda({2, triu_size}, dtype_opt, layout_opt, device_opt, pin_memory_opt);
if (triu_size > 0) {
// # of triu elements in the first row
auto m_first_row = offset > 0 ?
std::max<int64_t>(col - offset, 0) : // upper bounded by col
col;
// size of the top rectangle
int64_t rectangle_size = 0;
if (offset < 0) {
rectangle_size = std::min<int64_t>(row, -offset) * col;
}
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
// using triu_size instead of tensor.numel(), as each thread takes care of
// two elements in the tensor.
TORCH_CHECK(
cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
"unable to get dim grid");
AT_DISPATCH_INDEX_TYPES(tensor.scalar_type(), "triu_indices_cuda", [&] {
triu_indices_kernel<<<
dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
tensor.mutable_data_ptr<index_t>(),
std::max<int64_t>(0, offset),
m_first_row,
col,
rectangle_size,
triu_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
return tensor;
}
} // namespace at::native
|
45bbe0851883d4efdd1e58c867f425ada8c9822d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _3D_CUBIC_BSPLINE_PREFILTER_H_
#define _3D_CUBIC_BSPLINE_PREFILTER_H_
#include <stdio.h>
#include <cutil.h>
#include "cubicPrefilter_kernel.cu"
#define MAX_DIMENSION 512
#define MEM_INTERLEAVE 32
//--------------------------------------------------------------------------
// Global CUDA procedures
//--------------------------------------------------------------------------
__global__ void SamplesToCoefficients3DX_simple(
float* volume, // in-place processing
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = (z * height + y) * width;
float* line = volume + startIdx; //direct access
ConvertToInterpolationCoefficients(line, width);
}
__global__ void SamplesToCoefficients3DX(
float* volume, // in-place processing
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in x-direction
const uint y = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = (z * height + y) * width;
float line[MAX_DIMENSION];
// access the memory in an interleaved manner, to gain some performance
for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
line[x] = volume[i];
ConvertToInterpolationCoefficients(line, width);
for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
volume[i] = line[x];
}
__global__ void SamplesToCoefficients3DY(
float* volume, // in-place processing
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in y-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint z = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = z * height * width + x;
float line[MAX_DIMENSION];
// copy the line to fast local memory
for (uint y = 0, i = startIdx; y < height; y++) {
line[y] = volume[i];
i += width;
}
ConvertToInterpolationCoefficients(line, height);
// copy the line back to the volume
for (uint y = 0, i = startIdx; y < height; y++) {
volume[i] = line[y];
i += width;
}
}
__global__ void SamplesToCoefficients3DZ(
float* volume, // in-place processing
uint width, // width of the volume
uint height, // height of the volume
uint depth) // depth of the volume
{
// process lines in z-direction
const uint x = blockIdx.x * blockDim.x + threadIdx.x;
const uint y = blockIdx.y * blockDim.y + threadIdx.y;
const uint startIdx = y * width + x;
const uint slice = height * width;
float line[MAX_DIMENSION];
// copy the line to fast local memory
for (uint z = 0, i = startIdx; z < depth; z++) {
line[z] = volume[i];
i += slice;
}
ConvertToInterpolationCoefficients(line, height);
// copy the line back to the volume
for (uint z = 0, i = startIdx; z < depth; z++) {
volume[i] = line[z];
i += slice;
}
}
#undef MAX_DIMENSION
#undef MEM_INTERLEAVE
//--------------------------------------------------------------------------
// Exported functions
//--------------------------------------------------------------------------
//! Convert the voxel values into cubic b-spline coefficients
//! @param volume pointer to the voxel volume in GPU (device) memory
//! @param width volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth volume depth in number of voxels
extern "C"
void CubicBSplinePrefilter3D(float* volume, uint width, uint height, uint depth)
{
// Try to determine the optimal block dimensions
uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64);
uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX);
dim3 dimBlock(dimX, dimY);
// Replace the voxel values by the b-spline coefficients
dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y);
hipLaunchKernelGGL(( SamplesToCoefficients3DX), dim3(dimGridX), dim3(dimBlock), 0, 0, volume, width, height, depth);
CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed");
dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y);
hipLaunchKernelGGL(( SamplesToCoefficients3DY), dim3(dimGridY), dim3(dimBlock), 0, 0, volume, width, height, depth);
CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed");
dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y);
hipLaunchKernelGGL(( SamplesToCoefficients3DZ), dim3(dimGridZ), dim3(dimBlock), 0, 0, volume, width, height, depth);
CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed");
}
//! Convert the voxel values into cubic b-spline coefficients
//! @param volume pointer to the voxel volume in GPU (device) memory
//! @param width volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth volume depth in number of voxels
//! @note Prints stopwatch feedback
extern "C"
void CubicBSplinePrefilter3DTimer(float* volume, uint width, uint height, uint depth)
{
printf("\nCubic B-Spline Prefilter timer:\n");
uint hTimer;
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
// Try to determine the optimal block dimensions
uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64);
uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX);
dim3 dimBlock(dimX, dimY);
// Replace the voxel values by the b-spline coefficients
dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y);
hipLaunchKernelGGL(( SamplesToCoefficients3DX), dim3(dimGridX), dim3(dimBlock), 0, 0, volume, width, height, depth);
CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueX = cutGetTimerValue(hTimer);
printf("x-direction : %f msec\n", timerValueX);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y);
hipLaunchKernelGGL(( SamplesToCoefficients3DY), dim3(dimGridY), dim3(dimBlock), 0, 0, volume, width, height, depth);
CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueY = cutGetTimerValue(hTimer);
printf("y-direction : %f msec\n", timerValueY);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y);
hipLaunchKernelGGL(( SamplesToCoefficients3DZ), dim3(dimGridZ), dim3(dimBlock), 0, 0, volume, width, height, depth);
CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed");
CUT_SAFE_CALL(cutStopTimer(hTimer));
double timerValueZ = cutGetTimerValue(hTimer);
printf("z-direction : %f msec\n", timerValueZ);
printf("total : %f msec\n\n", timerValueX+timerValueY+timerValueZ);
}
#endif //_3D_CUBIC_BSPLINE_PREFILTER_H_
| 45bbe0851883d4efdd1e58c867f425ada8c9822d.cu | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _3D_CUBIC_BSPLINE_PREFILTER_H_
#define _3D_CUBIC_BSPLINE_PREFILTER_H_
#include <stdio.h>
#include <cutil.h>
#include "cubicPrefilter_kernel.cu"
#define MAX_DIMENSION 512
#define MEM_INTERLEAVE 32
//--------------------------------------------------------------------------
// Global CUDA procedures
//--------------------------------------------------------------------------
//! Convert one x-line of voxel samples into cubic B-spline coefficients,
//! operating directly on global memory (no local staging buffer).
//! Launch: one thread per (y, z) line; the grid must cover height x depth
//! exactly, since there is no bounds guard.
__global__ void SamplesToCoefficients3DX_simple(
	float* volume,		// in-place processing
	uint width,			// width of the volume
	uint height,		// height of the volume
	uint depth)			// depth of the volume
{
	// process lines in x-direction
	const uint y = blockIdx.x * blockDim.x + threadIdx.x;
	const uint z = blockIdx.y * blockDim.y + threadIdx.y;
	const uint startIdx = (z * height + y) * width;	// first voxel of this x-line
	float* line = volume + startIdx;  //direct access
	ConvertToInterpolationCoefficients(line, width);
}
//! Same conversion as the _simple variant, but stages the x-line in
//! per-thread local memory, touching global memory with a MEM_INTERLEAVE
//! stride (a hand-tuned access pattern chosen by the original author).
//! Requires width <= MAX_DIMENSION; grid must cover height x depth exactly.
__global__ void SamplesToCoefficients3DX(
	float* volume, // in-place processing
	uint width, // width of the volume
	uint height, // height of the volume
	uint depth) // depth of the volume
{
	// process lines in x-direction
	const uint y = blockIdx.x * blockDim.x + threadIdx.x;
	const uint z = blockIdx.y * blockDim.y + threadIdx.y;
	const uint startIdx = (z * height + y) * width;
	float line[MAX_DIMENSION];	// per-thread staging buffer for one x-line
	// access the memory in an interleaved manner, to gain some performance
	for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
	for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
	line[x] = volume[i];
	ConvertToInterpolationCoefficients(line, width);
	// write the coefficients back with the same interleaved pattern
	for (uint offset=0; offset < MEM_INTERLEAVE; offset++)
	for (uint x=offset, i=startIdx+offset; x < width; x+=MEM_INTERLEAVE, i+=MEM_INTERLEAVE)
	volume[i] = line[x];
}
//! Convert one y-line of voxel samples into cubic B-spline coefficients,
//! staging the line (length 'height') in per-thread local memory.
//! Requires height <= MAX_DIMENSION; grid must cover width x depth exactly.
__global__ void SamplesToCoefficients3DY(
	float* volume, // in-place processing
	uint width, // width of the volume
	uint height, // height of the volume
	uint depth) // depth of the volume
{
	// process lines in y-direction
	const uint x = blockIdx.x * blockDim.x + threadIdx.x;
	const uint z = blockIdx.y * blockDim.y + threadIdx.y;
	const uint startIdx = z * height * width + x;	// first voxel of this y-line
	float line[MAX_DIMENSION];
	// copy the line to fast local memory
	for (uint y = 0, i = startIdx; y < height; y++) {
	line[y] = volume[i];
	i += width;	// consecutive y-samples are one row apart
	}
	ConvertToInterpolationCoefficients(line, height);
	// copy the line back to the volume
	for (uint y = 0, i = startIdx; y < height; y++) {
	volume[i] = line[y];
	i += width;
	}
}
//! Convert one z-line of voxel samples into cubic B-spline coefficients,
//! staging the line (length 'depth') in per-thread local memory.
//! Requires depth <= MAX_DIMENSION; grid must cover width x height exactly.
__global__ void SamplesToCoefficients3DZ(
	float* volume, // in-place processing
	uint width, // width of the volume
	uint height, // height of the volume
	uint depth) // depth of the volume
{
	// process lines in z-direction
	const uint x = blockIdx.x * blockDim.x + threadIdx.x;
	const uint y = blockIdx.y * blockDim.y + threadIdx.y;
	const uint startIdx = y * width + x;	// first voxel of this z-line
	const uint slice = height * width;	// voxels per z-slice
	float line[MAX_DIMENSION];
	// copy the line to fast local memory
	for (uint z = 0, i = startIdx; z < depth; z++) {
	line[z] = volume[i];
	i += slice;
	}
	// BUG FIX: the staged line holds 'depth' samples, so the prefilter must
	// run over 'depth' elements, not 'height' (was wrong for any volume with
	// height != depth).
	ConvertToInterpolationCoefficients(line, depth);
	// copy the line back to the volume
	for (uint z = 0, i = startIdx; z < depth; z++) {
	volume[i] = line[z];
	i += slice;
	}
}
#undef MAX_DIMENSION
#undef MEM_INTERLEAVE
//--------------------------------------------------------------------------
// Exported functions
//--------------------------------------------------------------------------
//! Convert the voxel values into cubic b-spline coefficients
//! @param volume pointer to the voxel volume in GPU (device) memory
//! @param width volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth volume depth in number of voxels
extern "C"
void CubicBSplinePrefilter3D(float* volume, uint width, uint height, uint depth)
{
	// Try to determine the optimal block dimensions
	// (PowTwoDivider is defined in cubicPrefilter_kernel.cu; presumably it
	// returns the largest power-of-two divisor of its argument - TODO confirm)
	uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64);
	uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX);
	dim3 dimBlock(dimX, dimY);
	// Replace the voxel values by the b-spline coefficients, one axis at a
	// time. NOTE: the integer divisions below drop any remainder and the
	// kernels carry no bounds guard, so each extent must be divisible by the
	// chosen block dimensions.
	dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y);
	SamplesToCoefficients3DX<<<dimGridX, dimBlock>>>(volume, width, height, depth);
	CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed");
	dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y);
	SamplesToCoefficients3DY<<<dimGridY, dimBlock>>>(volume, width, height, depth);
	CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed");
	dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y);
	SamplesToCoefficients3DZ<<<dimGridZ, dimBlock>>>(volume, width, height, depth);
	CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed");
}
//! Convert the voxel values into cubic b-spline coefficients
//! @param volume pointer to the voxel volume in GPU (device) memory
//! @param width volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth volume depth in number of voxels
//! @note Prints stopwatch feedback
extern "C"
void CubicBSplinePrefilter3DTimer(float* volume, uint width, uint height, uint depth)
{
	// Same three-pass pipeline as CubicBSplinePrefilter3D, but wraps each
	// axis pass in a cutil stopwatch and prints per-axis and total timings.
	// NOTE(review): no explicit device synchronize before cutStopTimer; the
	// readings rely on cutil's timing behavior - confirm if precise numbers
	// are needed.
	printf("\nCubic B-Spline Prefilter timer:\n");
	uint hTimer;
	CUT_SAFE_CALL(cutCreateTimer(&hTimer));
	CUT_SAFE_CALL(cutResetTimer(hTimer));
	CUT_SAFE_CALL(cutStartTimer(hTimer));
	// Try to determine the optimal block dimensions
	uint dimX = min(min(PowTwoDivider(width), PowTwoDivider(height)), 64);
	uint dimY = min(min(PowTwoDivider(depth), PowTwoDivider(height)), 512/dimX);
	dim3 dimBlock(dimX, dimY);
	// Replace the voxel values by the b-spline coefficients
	dim3 dimGridX(height / dimBlock.x, depth / dimBlock.y);
	SamplesToCoefficients3DX<<<dimGridX, dimBlock>>>(volume, width, height, depth);
	CUT_CHECK_ERROR("SamplesToCoefficients3DX kernel failed");
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	double timerValueX = cutGetTimerValue(hTimer);
	printf("x-direction : %f msec\n", timerValueX);
	CUT_SAFE_CALL(cutResetTimer(hTimer));
	CUT_SAFE_CALL(cutStartTimer(hTimer));
	dim3 dimGridY(width / dimBlock.x, depth / dimBlock.y);
	SamplesToCoefficients3DY<<<dimGridY, dimBlock>>>(volume, width, height, depth);
	CUT_CHECK_ERROR("SamplesToCoefficients3DY kernel failed");
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	double timerValueY = cutGetTimerValue(hTimer);
	printf("y-direction : %f msec\n", timerValueY);
	CUT_SAFE_CALL(cutResetTimer(hTimer));
	CUT_SAFE_CALL(cutStartTimer(hTimer));
	dim3 dimGridZ(width / dimBlock.x, height / dimBlock.y);
	SamplesToCoefficients3DZ<<<dimGridZ, dimBlock>>>(volume, width, height, depth);
	CUT_CHECK_ERROR("SamplesToCoefficients3DZ kernel failed");
	CUT_SAFE_CALL(cutStopTimer(hTimer));
	double timerValueZ = cutGetTimerValue(hTimer);
	printf("z-direction : %f msec\n", timerValueZ);
	printf("total : %f msec\n\n", timerValueX+timerValueY+timerValueZ);
}
#endif //_3D_CUBIC_BSPLINE_PREFILTER_H_
|
b805e7c40d00344885fc70a21ed7aaeefa6454fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Vecotr adder *
//Description: This program is for testing GPU performance with multi- *
// dimention multiple function_coaleced + tiling. *
// *
// *
//File Name: MM_gpu4.cu *
//File Version: 1.0 *
//Baseline: Homework_1 *
// *
//Course: ECGR6090- Heterogeneous Computing *
// *
//Programmed by: Roy Liu *
//Under Suppervision of: Dr. Hamed Tabkhi *
// *
//Input file: No *
// *
//Output:Time of program running *
//**********************************************************************
/*
Matrix:
A: M x N M(height), N(width),
B: N x K N(height), K(width),
C: M X K M(height), K(width),
*/
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define M 100
#define N 100
#define K 100
#define BLOCK_SIZE 16
#define TILE_SIZE 16
// kernel on GPU device, coalesced
// Matrix multiply C = A x B: a shared-memory "coalescing" pass over A and B
// followed by a tiled inner product. One thread computes one element of C.
// A: A_rows x A_columns, B: A_columns x B_columns, C: A_rows x B_columns.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and TILE_SIZE == BLOCK_SIZE.
__global__ void matrixMul_coalesced(int *A_gpu,int *B_gpu, int *C_gpu, int A_rows, int A_columns, int B_columns)
{
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int col = blockIdx.y * BLOCK_SIZE + threadIdx.x;
	int row = blockIdx.x * BLOCK_SIZE + threadIdx.y;
	int width = gridDim.x * BLOCK_SIZE;
	int i, j;
	int sum = 0;	// BUG FIX: was declared uninitialized and then read via '+='
	__shared__ int shared_A_coal[BLOCK_SIZE*BLOCK_SIZE];
	__shared__ int shared_B_coal[BLOCK_SIZE*BLOCK_SIZE];
	__shared__ int shared_A_tile[TILE_SIZE][TILE_SIZE];
	__shared__ int shared_B_tile[TILE_SIZE][TILE_SIZE];
	//coalescing
	// NOTE(review): the store index (tx*BLOCK_SIZE + ty) and the load index
	// (tx + ty*BLOCK_SIZE) are transposed relative to each other, so this
	// pass transposes each block of A and B in place - confirm intent.
	for (i = 0; i< BLOCK_SIZE; i += BLOCK_SIZE) //Blocksize X= Block size Y
	{
	shared_A_coal[threadIdx.x*BLOCK_SIZE + threadIdx.y + i] = A_gpu[(row+i)*width + col];
	shared_B_coal[threadIdx.x*BLOCK_SIZE + threadIdx.y + i] = B_gpu[(row+i)*width + col];
	}
	__syncthreads();
	for (j = 0; j < BLOCK_SIZE; j += BLOCK_SIZE)
	{
	A_gpu[(row+j)*width + col] = shared_A_coal[threadIdx.x + (threadIdx.y+j)*BLOCK_SIZE];
	B_gpu[(row+j)*width + col] = shared_B_coal[threadIdx.x + (threadIdx.y+j)*BLOCK_SIZE];
	}
	__syncthreads();
	//Tiling
	row = blockIdx.y * blockDim.y + threadIdx.y;
	col = blockIdx.x * blockDim.x + threadIdx.x;
	// NOTE(review): threads failing this guard skip the __syncthreads()
	// below (pre-existing divergent-barrier hazard in edge blocks) and do
	// not help load the tiles - confirm against the launch configuration.
	if (row<A_rows && col<B_columns)
	{
	for (i = 0; i <= A_columns/TILE_SIZE; i++)
	{
	// Tile both matrix A and B
	shared_A_tile[ty][tx] = A_gpu[row * A_columns + i* TILE_SIZE + tx];
	shared_B_tile[ty][tx] = B_gpu[(i * TILE_SIZE + ty) * B_columns + col];
	__syncthreads();
	for (j = 0; j < TILE_SIZE; j++)
	{
	if (j + (i * TILE_SIZE) < A_columns)
	{
	sum += (shared_A_tile[ty][j] * shared_B_tile[j][tx]);
	}
	}
	// BUG FIX: barrier before the next iteration overwrites the tiles;
	// without it fast threads reload shared memory while slow threads
	// are still reading it (data race).
	__syncthreads();
	}
	C_gpu[row * B_columns + col] = sum;
	}
}
void random_ints(int* r, int a, int b);
// Host driver: builds two random M x N and N x K integer matrices, multiplies
// them on the GPU, and prints the wall-clock time of the whole pipeline
// (allocation, transfers, kernel and cleanup are all inside the timed region).
// NOTE(review): no HIP API call or kernel launch is error-checked, and the
// result matrix is never validated.
int main()
{
	int *A_cpu, *B_cpu, *C_cpu;	// host-side matrices
	int *A_gpu, *B_gpu, *C_gpu;	// device-side matrices
	//for counting run time
	struct timeval start, end;
	float timer;
	gettimeofday(&start, NULL);
	// memcopy A_cpu to A_gpu
	//memcopy B_cpu to B_gpu
	A_cpu =(int*)malloc(sizeof(int)*M*N);
	random_ints(A_cpu,M,N);
	B_cpu =(int*)malloc(sizeof(int)*K*N);
	random_ints(B_cpu,N,K);
	C_cpu =(int*)malloc(sizeof(int)*M*K);
	hipMalloc((void**)&A_gpu, sizeof(int)*M*N);
	hipMalloc((void**)&B_gpu, sizeof(int)*N*K);
	hipMalloc((void**)&C_gpu, sizeof(int)*M*K);
	hipMemcpy(A_gpu, A_cpu, sizeof(int)*M*N, hipMemcpyHostToDevice);
	hipMemcpy(B_gpu, B_cpu, sizeof(int)*N*K, hipMemcpyHostToDevice);
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	// ceil-div grid so partial edge blocks are launched too
	dim3 dimGrid((M + BLOCK_SIZE - 1) / BLOCK_SIZE, (K + BLOCK_SIZE - 1) / BLOCK_SIZE);
	hipLaunchKernelGGL(( matrixMul_coalesced), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,M,N,K); //C matrix: K(width) x M(height)
	//memcopy C_gpu to C_cpu (blocking copy on the default stream)
	hipMemcpy(C_cpu, C_gpu, sizeof(int)*M*K, hipMemcpyDeviceToHost);
	free(A_cpu);
	free(B_cpu);
	free(C_cpu);
	hipFree(A_gpu);
	hipFree(B_gpu);
	hipFree(C_gpu);
	// NOTE(review): this synchronize runs after the buffers are already
	// freed; it only fences the timer measurement below.
	hipDeviceSynchronize();
	gettimeofday(&end, NULL);
	timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
	printf("Number of loop is: %dx%dx%d.\nRunning time is: %f ms\n", M,N,K,timer/1000);
	return 0;
}
// Fill the a x b row-major matrix 'r' with pseudo-random values in [0, 100),
// reseeding the C PRNG from the wall clock on every call.
void random_ints(int* r, int a, int b)
{
	srand(time(0));
	int total = a * b;	// row-major fill == one linear pass
	for (int idx = 0; idx < total; ++idx) {
		r[idx] = rand() % 100;
	}
}
| b805e7c40d00344885fc70a21ed7aaeefa6454fd.cu | //**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Vecotr adder *
//Description: This program is for testing GPU performance with multi- *
// dimention multiple function_coaleced + tiling. *
// *
// *
//File Name: MM_gpu4.cu *
//File Version: 1.0 *
//Baseline: Homework_1 *
// *
//Course: ECGR6090- Heterogeneous Computing *
// *
//Programmed by: Roy Liu *
//Under Suppervision of: Dr. Hamed Tabkhi *
// *
//Input file: No *
// *
//Output:Time of program running *
//**********************************************************************
/*
Matrix:
A: M x N M(height), N(width),
B: N x K N(height), K(width),
C: M X K M(height), K(width),
*/
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define M 100
#define N 100
#define K 100
#define BLOCK_SIZE 16
#define TILE_SIZE 16
// kernel on GPU device, coalesced
// Matrix multiply C = A x B: a shared-memory "coalescing" pass over A and B
// followed by a tiled inner product. One thread computes one element of C.
// A: A_rows x A_columns, B: A_columns x B_columns, C: A_rows x B_columns.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and TILE_SIZE == BLOCK_SIZE.
__global__ void matrixMul_coalesced(int *A_gpu,int *B_gpu, int *C_gpu, int A_rows, int A_columns, int B_columns)
{
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int col = blockIdx.y * BLOCK_SIZE + threadIdx.x;
	int row = blockIdx.x * BLOCK_SIZE + threadIdx.y;
	int width = gridDim.x * BLOCK_SIZE;
	int i, j;
	int sum = 0;	// BUG FIX: was declared uninitialized and then read via '+='
	__shared__ int shared_A_coal[BLOCK_SIZE*BLOCK_SIZE];
	__shared__ int shared_B_coal[BLOCK_SIZE*BLOCK_SIZE];
	__shared__ int shared_A_tile[TILE_SIZE][TILE_SIZE];
	__shared__ int shared_B_tile[TILE_SIZE][TILE_SIZE];
	//coalescing
	// NOTE(review): the store index (tx*BLOCK_SIZE + ty) and the load index
	// (tx + ty*BLOCK_SIZE) are transposed relative to each other, so this
	// pass transposes each block of A and B in place - confirm intent.
	for (i = 0; i< BLOCK_SIZE; i += BLOCK_SIZE) //Blocksize X= Block size Y
	{
	shared_A_coal[threadIdx.x*BLOCK_SIZE + threadIdx.y + i] = A_gpu[(row+i)*width + col];
	shared_B_coal[threadIdx.x*BLOCK_SIZE + threadIdx.y + i] = B_gpu[(row+i)*width + col];
	}
	__syncthreads();
	for (j = 0; j < BLOCK_SIZE; j += BLOCK_SIZE)
	{
	A_gpu[(row+j)*width + col] = shared_A_coal[threadIdx.x + (threadIdx.y+j)*BLOCK_SIZE];
	B_gpu[(row+j)*width + col] = shared_B_coal[threadIdx.x + (threadIdx.y+j)*BLOCK_SIZE];
	}
	__syncthreads();
	//Tiling
	row = blockIdx.y * blockDim.y + threadIdx.y;
	col = blockIdx.x * blockDim.x + threadIdx.x;
	// NOTE(review): threads failing this guard skip the __syncthreads()
	// below (pre-existing divergent-barrier hazard in edge blocks) and do
	// not help load the tiles - confirm against the launch configuration.
	if (row<A_rows && col<B_columns)
	{
	for (i = 0; i <= A_columns/TILE_SIZE; i++)
	{
	// Tile both matrix A and B
	shared_A_tile[ty][tx] = A_gpu[row * A_columns + i* TILE_SIZE + tx];
	shared_B_tile[ty][tx] = B_gpu[(i * TILE_SIZE + ty) * B_columns + col];
	__syncthreads();
	for (j = 0; j < TILE_SIZE; j++)
	{
	if (j + (i * TILE_SIZE) < A_columns)
	{
	sum += (shared_A_tile[ty][j] * shared_B_tile[j][tx]);
	}
	}
	// BUG FIX: barrier before the next iteration overwrites the tiles;
	// without it fast threads reload shared memory while slow threads
	// are still reading it (data race).
	__syncthreads();
	}
	C_gpu[row * B_columns + col] = sum;
	}
}
void random_ints(int* r, int a, int b);
// Host driver: builds two random M x N and N x K integer matrices, multiplies
// them on the GPU, and prints the wall-clock time of the whole pipeline
// (allocation, transfers, kernel and cleanup are all inside the timed region).
// NOTE(review): no CUDA API call or kernel launch is error-checked, and the
// result matrix is never validated.
int main()
{
	int *A_cpu, *B_cpu, *C_cpu;	// host-side matrices
	int *A_gpu, *B_gpu, *C_gpu;	// device-side matrices
	//for counting run time
	struct timeval start, end;
	float timer;
	gettimeofday(&start, NULL);
	// memcopy A_cpu to A_gpu
	//memcopy B_cpu to B_gpu
	A_cpu =(int*)malloc(sizeof(int)*M*N);
	random_ints(A_cpu,M,N);
	B_cpu =(int*)malloc(sizeof(int)*K*N);
	random_ints(B_cpu,N,K);
	C_cpu =(int*)malloc(sizeof(int)*M*K);
	cudaMalloc((void**)&A_gpu, sizeof(int)*M*N);
	cudaMalloc((void**)&B_gpu, sizeof(int)*N*K);
	cudaMalloc((void**)&C_gpu, sizeof(int)*M*K);
	cudaMemcpy(A_gpu, A_cpu, sizeof(int)*M*N, cudaMemcpyHostToDevice);
	cudaMemcpy(B_gpu, B_cpu, sizeof(int)*N*K, cudaMemcpyHostToDevice);
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	// ceil-div grid so partial edge blocks are launched too
	dim3 dimGrid((M + BLOCK_SIZE - 1) / BLOCK_SIZE, (K + BLOCK_SIZE - 1) / BLOCK_SIZE);
	matrixMul_coalesced<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,M,N,K); //C matrix: K(width) x M(height)
	//memcopy C_gpu to C_cpu (blocking copy on the default stream)
	cudaMemcpy(C_cpu, C_gpu, sizeof(int)*M*K, cudaMemcpyDeviceToHost);
	free(A_cpu);
	free(B_cpu);
	free(C_cpu);
	cudaFree(A_gpu);
	cudaFree(B_gpu);
	cudaFree(C_gpu);
	// NOTE(review): this synchronize runs after the buffers are already
	// freed; it only fences the timer measurement below.
	cudaDeviceSynchronize();
	gettimeofday(&end, NULL);
	timer = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
	printf("Number of loop is: %dx%dx%d.\nRunning time is: %f ms\n", M,N,K,timer/1000);
	return 0;
}
{ srand(time(0));
int i,j;
for (i = 0; i < a; ++i) {
for (j = 0; j < b; ++j) {
r[i * b + j] = rand() % 100;
}
}
}
|
95b67f8a1c169a8f9520151f233da317d1161142.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (addWithCuda launches 5 x (size/5) threads);
// any surplus thread would write out of bounds.
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
c[i] = a[i] + b[i];
}
// Entry point: fills a with 1..arraySize and b with zeros, adds them on the
// GPU via addWithCuda, and prints every element of the result.
int main()
{
	const int arraySize = 5000;	// kept divisible by 5 to match the kernel launch in addWithCuda
	int some = 0;
	int i = 0;
	//const int a[arraySize] = { 1, 2, 3, 4, 5 };
	//const int b[arraySize] = { 10, 20, 30, 40, 50 };
	int a[arraySize];
	int b[arraySize];
	for (i=0 ;i < arraySize; i++){
	b[i] = 0;
	}
	for (i=0 ;i < arraySize; i++){
	a[i] = ++some;	// a[i] == i + 1
	}
	int c[arraySize] = { 0 };
	// Add vectors in parallel.
	hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "addWithCuda failed!");
	return 1;
	}
	// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
	// c[0], c[1], c[2], c[3], c[4]);
	for (i=0 ;i < arraySize; i++){
	printf("%d ", c[i]);
	}
	// hipDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = hipDeviceReset();
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipDeviceReset failed!");
	return 1;
	}
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs, launches
// addKernel with 5 blocks of (size / 5) threads, and copies the result back.
// NOTE(review): integer division means only 5 * (size / 5) elements are
// processed - size must be a multiple of 5 to cover the whole vector.
// All cleanup funnels through the Error: label so the device buffers are
// freed on every path; returns the first failing status (or hipSuccess).
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
	int *dev_a = 0;
	int *dev_b = 0;
	int *dev_c = 0;
	hipError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = hipSetDevice(0);
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
	goto Error;
	}
	// Allocate GPU buffers for three vectors (two input, one output) .
	cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipMalloc failed!");
	goto Error;
	}
	cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipMalloc failed!");
	goto Error;
	}
	cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipMalloc failed!");
	goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipMemcpy failed!");
	goto Error;
	}
	cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipMemcpy failed!");
	goto Error;
	}
	// Launch a kernel on the GPU with one thread for each element.
	hipLaunchKernelGGL(( addKernel), dim3(5), dim3(size/5), 0, 0, dev_c, dev_a, dev_b);
	// Check for any errors launching the kernel
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
	goto Error;
	}
	// hipDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
	goto Error;
	}
	// Copy output vector from GPU buffer to host memory.
	cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) {
	fprintf(stderr, "hipMemcpy failed!");
	goto Error;
	}
Error:
	hipFree(dev_c);
	hipFree(dev_a);
	hipFree(dev_b);
	return cudaStatus;
}
| 95b67f8a1c169a8f9520151f233da317d1161142.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (addWithCuda launches 5 x (size/5) threads);
// any surplus thread would write out of bounds.
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
c[i] = a[i] + b[i];
}
// Entry point: fills a with 1..arraySize and b with zeros, adds them on the
// GPU via addWithCuda, and prints every element of the result.
int main()
{
	const int arraySize = 5000;	// kept divisible by 5 to match the kernel launch in addWithCuda
	int some = 0;
	int i = 0;
	//const int a[arraySize] = { 1, 2, 3, 4, 5 };
	//const int b[arraySize] = { 10, 20, 30, 40, 50 };
	int a[arraySize];
	int b[arraySize];
	for (i=0 ;i < arraySize; i++){
	b[i] = 0;
	}
	for (i=0 ;i < arraySize; i++){
	a[i] = ++some;	// a[i] == i + 1
	}
	int c[arraySize] = { 0 };
	// Add vectors in parallel.
	cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "addWithCuda failed!");
	return 1;
	}
	// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
	// c[0], c[1], c[2], c[3], c[4]);
	for (i=0 ;i < arraySize; i++){
	printf("%d ", c[i]);
	}
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceReset failed!");
	return 1;
	}
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers for a, b and c, copies the inputs, launches
// addKernel with 5 blocks of (size / 5) threads, and copies the result back.
// NOTE(review): integer division means only 5 * (size / 5) elements are
// processed - size must be a multiple of 5 to cover the whole vector.
// All cleanup funnels through the Error: label so the device buffers are
// freed on every path; returns the first failing status (or cudaSuccess).
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
	int *dev_a = 0;
	int *dev_b = 0;
	int *dev_c = 0;
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
	goto Error;
	}
	// Allocate GPU buffers for three vectors (two input, one output) .
	cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed!");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed!");
	goto Error;
	}
	cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMalloc failed!");
	goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed!");
	goto Error;
	}
	cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed!");
	goto Error;
	}
	// Launch a kernel on the GPU with one thread for each element.
	addKernel<<<5, size/5>>>(dev_c, dev_a, dev_b);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	goto Error;
	}
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
	goto Error;
	}
	// Copy output vector from GPU buffer to host memory.
	cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
	fprintf(stderr, "cudaMemcpy failed!");
	goto Error;
	}
Error:
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
	return cudaStatus;
}
|
5123f5600a81b090c13b9d094d9b2226ce468a43.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <float.h>
#include <string.h>
#include <vector>
#include <string>
#include <cmath>
#include <chrono>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "omp.h"
#include "mpi.h"
using namespace std;
#define CUDA_ERROR(err) { \
if (err != hipSuccess) { \
fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(err)); \
return(1); \
} \
} \
typedef unsigned char uchar;

// 3-component double-precision vector; also reused to carry points and RGB
// colors throughout the ray tracer.
struct vector_cords {
double x;
double y;
double z;
};

// Triangle primitive: three vertices plus a flat RGB color (scaled to 0..255
// per channel by normalise_color before being stored here).
struct polygon {
vector_cords p1;
vector_cords p2;
vector_cords p3;
vector_cords color;
};
// Component-wise vector addition.
__host__ __device__ vector_cords operator + (vector_cords v1, vector_cords v2) {
return vector_cords{v1.x + v2.x,
v1.y + v2.y,
v1.z + v2.z};
}

// Component-wise vector subtraction.
__host__ __device__ vector_cords operator - (vector_cords v1, vector_cords v2) {
return vector_cords{v1.x - v2.x,
v1.y - v2.y,
v1.z - v2.z};
}

// Scalar multiplication.
__host__ __device__ vector_cords operator * (vector_cords v, double num) {
return vector_cords{v.x * num,
v.y * num,
v.z * num};
}

// Dot product of two 3-vectors.
__host__ __device__ double scal_mul(vector_cords v1, vector_cords v2) {
return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
}

// Euclidean length of v.
__host__ __device__ double len(vector_cords v) {
return sqrt(scal_mul(v, v));
}

// Unit vector in the direction of v (undefined for the zero vector).
__host__ __device__ vector_cords norm(vector_cords v) {
double num = len(v);
return vector_cords{v.x / num,
v.y / num,
v.z / num};
}

// Cross product v1 x v2.
__host__ __device__ vector_cords crossing(vector_cords v1, vector_cords v2) {
return {v1.y * v2.z - v1.z * v2.y,
v1.z * v2.x - v1.x * v2.z,
v1.x * v2.y - v1.y * v2.x};
}

// Multiply the 3x3 matrix whose columns are a, b, c by vector v
// (used to map camera-basis directions into world space).
__host__ __device__ vector_cords multiply(vector_cords a, vector_cords b, vector_cords c, vector_cords v) {
return { a.x * v.x + b.x * v.y + c.x * v.z,
a.y * v.x + b.y * v.y + c.y * v.z,
a.z * v.x + b.z * v.y + c.z * v.z };
}

// Scale a unit-range RGB color (0..1 per channel) into the 0..255 range.
vector_cords normalise_color(vector_cords color) {
return {color.x * 255.,
color.y * 255.,
color.z * 255.};
}
// Trace one ray from 'pos' along unit direction 'dir' against all 'n'
// triangles (Moller-Trumbore intersection). Returns the shaded color of the
// nearest hit, lit by a single point light at 'light_pos' with intensity
// 'light_color' (per-channel multiplier), or black if the ray misses or the
// hit point is shadowed. Assumes polygons[i].color is already 0..255.
__host__ __device__ uchar4 ray_aux(vector_cords pos, vector_cords dir, vector_cords light_pos,
vector_cords light_color, polygon *polygons, int n) {
int min_value = -1;	// index of the closest intersected triangle
double ts_min;	// ray parameter of the closest hit (valid iff min_value != -1)
for (int i = 0; i < n; ++i) {
vector_cords e1 = polygons[i].p2 - polygons[i].p1;	// triangle edge vectors
vector_cords e2 = polygons[i].p3 - polygons[i].p1;
vector_cords p = crossing(dir, e2);
double div = scal_mul(p, e1);	// determinant; ~0 => ray parallel to the triangle plane
if (fabs(div) < 1e-10)
continue;
vector_cords t = pos - polygons[i].p1;
double u = scal_mul(p, t) / div;	// first barycentric coordinate
if (u < 0.0 || u > 1.0)
continue;
vector_cords q = crossing(t, e1);
double v = scal_mul(q, dir) / div;	// second barycentric coordinate
if (v < 0.0 || v + u > 1.0)
continue;
double ts = scal_mul(q, e2) / div;	// distance along the ray
if (ts < 0.0)
continue;	// intersection lies behind the ray origin
if (min_value == -1 || ts < ts_min) {
min_value = i;
ts_min = ts;
}
}
if (min_value == -1)
return {0, 0, 0, 0};	// ray escaped the scene
// Advance to the hit point and re-aim the ray at the light (shadow ray).
pos = dir * ts_min + pos;
dir = light_pos - pos;
double length = len(dir);	// distance from the hit point to the light
dir = norm(dir);
for (int i = 0; i < n; i++) {
vector_cords e1 = polygons[i].p2 - polygons[i].p1;
vector_cords e2 = polygons[i].p3 - polygons[i].p1;
vector_cords p = crossing(dir, e2);
double div = scal_mul(p, e1);
if (fabs(div) < 1e-10)
continue;
vector_cords t = pos - polygons[i].p1;
double u = scal_mul(p, t) / div;
if (u < 0.0 || u > 1.0)
continue;
vector_cords q = crossing(t, e1);
double v = scal_mul(q, dir) / div;
if (v < 0.0 || v + u > 1.0)
continue;
double ts = scal_mul(q, e2) / div;
// Any other triangle between the hit point and the light => in shadow.
if (ts > 0.0 && ts < length && i != min_value) {
return {0, 0, 0, 0};
}
}
// Modulate the surface color by the light color.
uchar4 color_min;
color_min.x = polygons[min_value].color.x;
color_min.y = polygons[min_value].color.y;
color_min.z = polygons[min_value].color.z;
color_min.x *= light_color.x;
color_min.y *= light_color.y;
color_min.z *= light_color.z;
color_min.w = 0;
return color_min;
}
// CPU renderer: traces a w x h image into 'pixels' from camera position p_c
// looking at p_v, with field of view 'fov' in degrees, over 'n' triangles lit
// by a single point light. Rows are flipped so pixels[0] is the top-left.
void render_cpu(vector_cords p_c, vector_cords p_v, int w, int h, double fov, uchar4* pixels, vector_cords light_pos,
vector_cords light_col, polygon* polygons, int n) {
	double dw = (double)2.0 / (double)(w - 1.0);	// NDC step per pixel
	double dh = (double)2.0 / (double)(h - 1.0);
	double z = 1.0 / tan(fov * M_PI / 360.0);	// focal distance from the FOV
	// Orthonormal camera basis: b_z forward, b_x right, b_y up.
	vector_cords b_z = norm(p_v - p_c);
	vector_cords b_x = norm(crossing(b_z, {0.0, 0.0, 1.0}));
	vector_cords b_y = norm(crossing(b_x, b_z));
	// BUG FIX: a bare '#pragma omp parallel' makes every thread execute the
	// entire loop nest redundantly; 'parallel for' actually divides the
	// columns across threads (iterations are independent, so this is safe
	// and produces identical pixels).
	#pragma omp parallel for
	for (int i = 0; i < w; i++)
		for (int j = 0; j < h; j++) {
			vector_cords v;
			v.x = (double)-1.0 + dw * (double)i;
			v.y = ((double)-1.0 + dh * (double)j) * (double)h / (double)w;	// aspect-corrected
			v.z = z;
			vector_cords dir = multiply(b_x, b_y, b_z, v);	// camera -> world direction
			pixels[(h - 1 - j) * w + i] = ray_aux(p_c, norm(dir), light_pos, light_col, polygons, n);
		}
}
// GPU counterpart of render_cpu: each thread walks a 2D grid-stride loop over
// image columns and rows (i += offsetX, j += offsetY), so any launch
// configuration covers the whole w x h image.
__global__ void render_gpu(vector_cords p_c, vector_cords p_v, int w, int h, double fov, uchar4* pixels,
vector_cords light_pos, vector_cords light_col, polygon* polygons, int n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int offsetX = blockDim.x * gridDim.x;	// grid-stride step in x
int offsetY = blockDim.y * gridDim.y;	// grid-stride step in y
double dw = (double)2.0 / (double)(w - 1.0);	// NDC step per pixel
double dh = (double)2.0 / (double)(h - 1.0);
double z = 1.0 / tan(fov * M_PI / 360.0);	// focal distance from the FOV (degrees)
// Orthonormal camera basis: b_z forward, b_x right, b_y up.
vector_cords b_z = norm(p_v - p_c);
vector_cords b_x = norm(crossing(b_z, {0.0, 0.0, 1.0}));
vector_cords b_y = norm(crossing(b_x, b_z));
for (int i = idx; i < w; i += offsetX)
for (int j = idy; j < h; j += offsetY) {
vector_cords v;
v.x = (double)-1.0 + dw * (double)i;
v.y = ((double)-1.0 + dh * (double)j) * (double)h / (double)w;	// aspect-corrected
v.z = z;
vector_cords dir = multiply(b_x, b_y, b_z, v);	// camera -> world direction
pixels[(h - 1 - j) * w + i] = ray_aux(p_c, norm(dir), light_pos, light_col, polygons, n);
}
}
// CPU supersampling downsample: averages each coeff x coeff block of the
// (w*coeff) x (h*coeff) image 'ssaa_pixels' into one pixel of the w x h
// image 'pixels' (box filter).
void ssaa_cpu(uchar4 *pixels, int w, int h, int coeff, uchar4 *ssaa_pixels) {
	// BUG FIX: a bare '#pragma omp parallel' made every thread run the whole
	// loop nest redundantly; 'parallel for' divides the rows across threads
	// (iterations are independent, so the output is unchanged).
	#pragma omp parallel for
	for (int y = 0; y < h; y++) {
		for (int x = 0; x < w; x++) {
			int4 mid_pixel = { 0, 0, 0, 0 };	// per-channel sums over the block
			for (int j = 0; j < coeff; j++) {
				for (int i = 0; i < coeff; i++) {
					int index = y * w * coeff * coeff + x * coeff + j * w * coeff + i;
					mid_pixel.x += ssaa_pixels[index].x;
					mid_pixel.y += ssaa_pixels[index].y;
					mid_pixel.z += ssaa_pixels[index].z;
					mid_pixel.w += 0;
				}
			}
			// Integer average of the coeff*coeff samples.
			pixels[y * w + x].x = (uchar)(int)(mid_pixel.x / (coeff * coeff));
			pixels[y * w + x].y = (uchar)(int)(mid_pixel.y / (coeff * coeff));
			pixels[y * w + x].z = (uchar)(int)(mid_pixel.z / (coeff * coeff));
			pixels[y * w + x].w = 0;
		}
	}
}
// GPU supersampling downsample: averages each coeff x coeff block of the
// (w*coeff) x (h*coeff) image 'ssaa_pixels' into one pixel of the w x h
// image 'pixels'. 2D grid-stride loops make any launch configuration valid.
__global__ void ssaa_gpu(uchar4 *pixels, int w, int h, int coeff, uchar4 *ssaa_pixels) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int offsetX = blockDim.x * gridDim.x;	// grid-stride step in x
int offsetY = blockDim.y * gridDim.y;	// grid-stride step in y
for (int y = idy; y < h; y += offsetY) {
for (int x = idx; x < w; x += offsetX) {
int4 mid = { 0, 0, 0, 0 };	// per-channel sums over the block
for (int j = 0; j < coeff; j++) {
for (int i = 0; i < coeff; i++) {
int index = y * w * coeff * coeff + x * coeff + j * w * coeff + i;
mid.x += ssaa_pixels[index].x;
mid.y += ssaa_pixels[index].y;
mid.z += ssaa_pixels[index].z;
mid.w += 0;
}
}
// Integer average of the coeff*coeff samples.
pixels[y * w + x].x = (uchar)(mid.x / (coeff * coeff));
pixels[y * w + x].y = (uchar)(mid.y / (coeff * coeff));
pixels[y * w + x].z = (uchar)(mid.z / (coeff * coeff));
pixels[y * w + x].w = 0;
}
}
}
// Append 12 triangles (6 faces) forming a cube centered at 'center' and
// inscribed in a sphere of radius r: each corner component is +-1/sqrt(3),
// so every corner has unit length before scaling by r and shifting.
// 'color' is given in 0..1 per channel and converted to 0..255 here.
void cube(vector_cords center, double r, vector_cords color, vector<polygon> &polygons) {
	// cout << "Creating cube\n";
	color = normalise_color(color);
	// Create all vertices
	// (removed an unused 'vector<vector_cords> vertices(8)' local - the
	// named corner points below are what is actually used)
	vector_cords point_a {-1 / sqrt(3), -1 / sqrt(3), -1 / sqrt(3)};
	vector_cords point_b {-1 / sqrt(3), -1 / sqrt(3), 1 / sqrt(3)};
	vector_cords point_c {-1 / sqrt(3), 1 / sqrt(3), -1 / sqrt(3)};
	vector_cords point_d {-1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)};
	vector_cords point_e {1 / sqrt(3), -1 / sqrt(3), -1 / sqrt(3)};
	vector_cords point_f {1 / sqrt(3), -1 / sqrt(3), 1 / sqrt(3)};
	vector_cords point_g {1 / sqrt(3), 1 / sqrt(3), -1 / sqrt(3)};
	vector_cords point_h {1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)};
	// 6 sides means 12 polygons of triangles
	// Create with shifting
	polygons.push_back({point_a * r + center, point_b * r + center, point_d * r + center, color});
	polygons.push_back({point_a * r + center, point_c * r + center, point_d * r + center, color});
	polygons.push_back({point_b * r + center, point_f * r + center, point_h * r + center, color});
	polygons.push_back({point_b * r + center, point_d * r + center, point_h * r + center, color});
	polygons.push_back({point_e * r + center, point_f * r + center, point_h * r + center, color});
	polygons.push_back({point_e * r + center, point_g * r + center, point_h * r + center, color});
	polygons.push_back({point_a * r + center, point_e * r + center, point_g * r + center, color});
	polygons.push_back({point_a * r + center, point_c * r + center, point_g * r + center, color});
	polygons.push_back({point_a * r + center, point_b * r + center, point_f * r + center, color});
	polygons.push_back({point_a * r + center, point_e * r + center, point_f * r + center, color});
	polygons.push_back({point_c * r + center, point_d * r + center, point_h * r + center, color});
	polygons.push_back({point_c * r + center, point_g * r + center, point_h * r + center, color});
}
// Append 8 triangles forming a regular octahedron centered at 'center' with
// circumscribed radius r (the six axis-aligned unit vertices are scaled by r
// and shifted). 'color' is given in 0..1 per channel and converted to 0..255.
void octahedron(vector_cords center, double r, vector_cords color, vector<polygon> &polygons) {
// cout << "Creating octahedron\n";
color = normalise_color(color);
// Start from fixed points and shift after
vector<vector_cords> vertices {{1, 0, 0,},
{-1, 0, 0},
{0, 1, 0,},
{0, -1, 0,},
{0, 0, 1,},
{0, 0, -1 }
};
// 8 sides: each row lists the vertex indices of one face
vector<vector<int>> order{{5, 2, 0,},
{5, 0, 3,},
{5, 3, 1,},
{5, 1, 2,},
{4, 3, 0,},
{4, 1, 3,},
{4, 2, 1,},
{4, 0, 2}
};
// Shifting
for(int i = 0; i < 6; i++)
vertices[i] = vertices[i] * r + center;
// 8 polygons (they are triangles)
for(int i = 0; i < 8; i++)
polygons.push_back({vertices[order[i][0]], vertices[order[i][1]], vertices[order[i][2]], color});
}
// Appends a dodecahedron (center, circumradius r) to the polygon list.
// Each of the 12 pentagonal faces is fan-triangulated into 3 triangles,
// 36 triangles in total. `color` is per channel in [0, 1] and is scaled
// to 0..255 before being stored per face.
void dodecahedron(vector_cords center, double r, vector_cords color, vector<polygon> &polygons) {
    color = normalise_color(color);
    double a = (1 + sqrt(5)) / 2;   // golden ratio phi
    double b = 2 / (1 + sqrt(5));   // 1 / phi
    // The 20 canonical dodecahedron vertices (cube corners + golden rectangles).
    vector<vector_cords> pts {
        {-b,  0,  a}, { b,  0,  a}, {-1,  1,  1}, { 1,  1,  1}, { 1, -1,  1},
        {-1, -1,  1}, { 0, -a,  b}, { 0,  a,  b}, {-a, -b,  0}, {-a,  b,  0},
        { a,  b,  0}, { a, -b,  0}, { 0, -a, -b}, { 0,  a, -b}, { 1,  1, -1},
        { 1, -1, -1}, {-1, -1, -1}, {-1,  1, -1}, { b,  0, -a}, {-b,  0, -a}};
    // Normalize onto the unit sphere (same per-component order as before
    // so the floating-point results are bit-identical).
    for (auto &p : pts) {
        p.x /= sqrt(3);
        p.y /= sqrt(3);
        p.z /= sqrt(3);
    }
    // Scale to the requested radius and translate to the center.
    for (auto &p : pts) {
        p.x = p.x * r + center.x;
        p.y = p.y * r + center.y;
        p.z = p.z * r + center.z;
    }
    // Vertex indices of the 36 triangles, in the original emission order.
    static const int faces[36][3] = {
        { 4,  0,  6}, { 0,  5,  6}, { 0,  4,  1}, { 0,  3,  7}, { 2,  0,  7}, { 0,  1,  3},
        {10,  1, 11}, { 3,  1, 10}, { 1,  4, 11}, { 5,  0,  8}, { 0,  2,  9}, { 8,  0,  9},
        { 5,  8, 16}, { 6,  5, 12}, {12,  5, 16}, { 4, 12, 15}, { 4,  6, 12}, {11,  4, 15},
        { 2, 13, 17}, { 2,  7, 13}, { 9,  2, 17}, {13,  3, 14}, { 7,  3, 13}, { 3, 10, 14},
        { 8, 17, 19}, {16,  8, 19}, { 8,  9, 17}, {14, 11, 18}, {11, 15, 18}, {10, 11, 14},
        {12, 19, 18}, {15, 12, 18}, {12, 16, 19}, {19, 13, 18}, {17, 13, 19}, {13, 14, 18}};
    for (const auto &f : faces)
        polygons.push_back({pts[f[0]], pts[f[1]], pts[f[2]], color});
}
// Adds the floor quad (a, b, c, d) to the polygon list as two triangles
// sharing the a-c diagonal. `color` is per channel in [0, 1] and is scaled
// to 0..255 before being stored.
void scene(vector_cords a, vector_cords b, vector_cords c, vector_cords d, vector_cords color,
           vector<polygon> &polygons) {
    vector_cords face_color = normalise_color(color);
    polygons.push_back(polygon{a, b, c, face_color});
    polygons.push_back(polygon{c, d, a, face_color});
}
// CPU pipeline for one frame: ray-trace into the supersampled buffer
// (ssaa_w x ssaa_h), then box-filter it down into the final w x h image
// in `pixels`. Always returns 0.
int cpu_mode(vector_cords p_c, vector_cords p_v, int w, int ssaa_w, int h, int ssaa_h, double fov, uchar4* pixels,
    uchar4* pixels_ssaa, vector_cords light_pos, vector_cords light_col, polygon* polygons, int n, int ssaa_multiplier) {
    render_cpu(p_c, p_v, ssaa_w, ssaa_h, fov, pixels_ssaa, light_pos, light_col, polygons, n);
    ssaa_cpu(pixels, w, h, ssaa_multiplier, pixels_ssaa);
    return 0;
}
// Renders one frame on the GPU (HIP): uploads the scene polygons, ray-traces
// into the supersampled buffer, box-filters it down by ssaa_multiplier, and
// copies the final w x h image back into `pixels`. Returns 0 on success,
// 1 if any HIP call fails (via the CUDA_ERROR macro).
int gpu_mode(vector_cords p_c, vector_cords p_v, int w, int ssaa_w, int h, int ssaa_h, double fov, uchar4* pixels,
    uchar4* pixels_ssaa, vector_cords light_pos, vector_cords light_col, polygon* polygons, int n, int ssaa_multiplier) {
    // cerr << "Allocate pixels\n";
    // Allocating on gpu: final image, supersampled image, and polygon list.
    uchar4* gpu_pixels;
    CUDA_ERROR(hipMalloc((uchar4**)(&gpu_pixels), w * h * sizeof(uchar4)));
    CUDA_ERROR(hipMemcpy(gpu_pixels, pixels, w * h * sizeof(uchar4), hipMemcpyHostToDevice));
    // cerr << "Allocate ssaa pixels\n";
    uchar4* gpu_pixels_ssaa;
    CUDA_ERROR(hipMalloc((uchar4**)(&gpu_pixels_ssaa), ssaa_w * ssaa_h * sizeof(uchar4)));
    CUDA_ERROR(hipMemcpy(gpu_pixels_ssaa, pixels_ssaa, ssaa_w * ssaa_h * sizeof(uchar4), hipMemcpyHostToDevice));
    // cerr << "Allocate polygons\n";
    polygon* gpu_polygons;
    CUDA_ERROR(hipMalloc((polygon**)(&gpu_polygons), n * sizeof(polygon)));
    CUDA_ERROR(hipMemcpy(gpu_polygons, polygons, n * sizeof(polygon), hipMemcpyHostToDevice));
    // cerr << "Start render\n";
    // Rendering; the kernels use grid-stride loops, so this fixed 128x128
    // launch covers any image size.
    hipLaunchKernelGGL(( render_gpu) , dim3(128), dim3(128) , 0, 0, p_c, p_v, ssaa_w, ssaa_h, fov, gpu_pixels_ssaa, light_pos, light_col, gpu_polygons, n);
    hipDeviceSynchronize();
    CUDA_ERROR(hipGetLastError());
    // cerr << "Start ssaa\n";
    // Ssaa smoothing algo: box-filter the supersampled buffer down to w x h.
    hipLaunchKernelGGL(( ssaa_gpu) , dim3(128), dim3(128) , 0, 0, gpu_pixels, w, h, ssaa_multiplier, gpu_pixels_ssaa);
    hipDeviceSynchronize();
    CUDA_ERROR(hipGetLastError());
    CUDA_ERROR(hipMemcpy(pixels, gpu_pixels, w * h * sizeof(uchar4), hipMemcpyDeviceToHost));
    // Free memory
    CUDA_ERROR(hipFree(gpu_pixels));
    CUDA_ERROR(hipFree(gpu_pixels_ssaa));
    CUDA_ERROR(hipFree(gpu_polygons));
    return 0;
}
int main(int argc, char* argv[]) {
string mode;
if (argv[1])
mode = argv[1];
bool is_gpu = true;
if (argc > 2) {
cout << "Incorrect params. Please use '--help' for help\n";
return 0;
}
if (argc == 1 || mode == "--gpu")
is_gpu = true;
if (mode == "--cpu")
is_gpu = false;
if (mode == "--default") {
cout << "100\n"
"./frames_data\n"
"640 480 120\n"
"7.0 3.0 0.0 2.0 1.0 2.0 6.0 1.0 0.0 0.0\n"
"2.0 0.0 0.0 0.5 0.1 1.0 4.0 1.0 0.0 0.0\n"
"4.0 4.0 0.0 1.0 0.0 1.0 2.0 0.0 0.0 0.0\n"
"1.0 1.0 0.0 1.0 1.0 0.0 2.0 0.0 0.0 0.0\n"
"-2.5 -2.5 0.0 0.0 1.0 1.0 2.0 0.0 0.0 0.0\n"
"-10.0 -10.0 -1.0 -10.0 10.0 -1.0 10.0 10.0 -1.0 10.0 -10.0 -1.0 ./folder 0.0 0.9 0.0 0.5\n"
"1\n"
"100 100 100 1.0 1.0 1.0\n"
"1 3\n";
return 0;
}
if (mode == "--help") {
cout << "<--------------- HELP --------------->\n"
"Start program without args will cause computation in gpu mode\n"
"--cpu For computation with using cpu\n"
"--gpu For computation with using gpu\n"
"--default Print best configuration for input data\n"
"--help For help\n"
"<---------------END OF HELP--------------->\n";
return 0;
}
int total_frames, width, height, fov;
string path_to_frames;
double r_0c, z_0c, phi_0c;
double A_rc, A_zc;
double w_rc, w_zc, w_phic;
double p_rc, p_zc;
double r_0v, z_0v, phi_0v;
double A_rv, A_zv;
double w_rv, w_zv, w_phiv;
double p_rv, p_zv;
vector_cords color;
vector_cords cube_center, cube_color;
vector_cords octahedron_center, octahedron_color;
vector_cords dodecahedron_center, dodecahedron_color;
double cube_radius, octahedron_radius, dodecahedron_radius;
string unused;
vector_cords scene_a, scene_b, scene_c, scene_d;
vector_cords light_pos, light_col;
vector<polygon> polygons;
polygon *polygons_as_array;
uchar4 *pixels = nullptr;
uchar4 *pixels_ssaa = nullptr;
int n_lights; // Should be 1 (1 light)
int recursion_step; // Should be 1 (unused)
int ssaa_multiplier;
// MPI variables
char proc_name[MPI_MAX_PROCESSOR_NAME];
int numproc, proc_name_len, id;
// Get properties of device
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
// MPI initialise
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
MPI_Get_processor_name(proc_name, &proc_name_len);
fprintf(stderr, "proc %2d(%d) on %s(%s)\n", id, numproc, proc_name, devProp.name);
fflush(stderr);
// Each process using available video card
// int device_cnt;
// hipGetDeviceCount(&device_cnt);
// hipSetDevice(id % device_cnt);
// Read data only from 0 process
if (id == 0) {
// Frames
cin >> total_frames;
cin >> path_to_frames;
cin >> width >> height >> fov;
// Camera trajectory
cin >> r_0c >> z_0c >> phi_0c;
cin >> A_rc >> A_zc;
cin >> w_rc >> w_zc >> w_phic;
cin >> p_rc >> p_zc;
// cerr << r_0c << " " << z_0c << " " << phi_0c << "\n";
// cerr << A_rc << " " << A_zc << "\n";
// cerr << w_rc << " " << w_zc<< " " << w_phic << "\n";
// cerr << p_rc << " " << p_zc << "\n";
cin >> r_0v >> z_0v >> phi_0v;
cin >> A_rv >> A_zv;
cin >> w_rv >> w_zv >> w_phiv;
cin >> p_rv >> p_zv;
// cerr << r_0v << " " << z_0v << " " << phi_0v << "\n";
// cerr << A_rv << " " << A_zv << "\n";
// cerr << w_rv << " " << w_zv << " " << w_phiv << "\n";
// cerr << p_rv << " " << p_zv << "\n";
// Figures params without creating
cin >> cube_center.x >> cube_center.y >> cube_center.z;
cin >> cube_color.x >> cube_color.y >> cube_color.z;
cin >> cube_radius >> unused >> unused >> unused;
// cube(center, radius, color, polygons);
cin >> octahedron_center.x >> octahedron_center.y >> octahedron_center.z;
cin >> octahedron_color.x >> octahedron_color.y >> octahedron_color.z;
cin >> octahedron_radius >> unused >> unused >> unused;
// octahedron(center, radius, color, polygons);
cin >> dodecahedron_center.x >> dodecahedron_center.y >> dodecahedron_center.z;
cin >> dodecahedron_color.x >> dodecahedron_color.y >> dodecahedron_color.z;
cin >> dodecahedron_radius >> unused >> unused >> unused;
// dodecahedron(center, radius, color, polygons);
// cin >> center.x >> center.y >> center.z >> color.x >> color.y >> color.z >> radius >> unused >> unused
// >> unused;
// cube(center, radius, color, polygons);
// cin >> center.x >> center.y >> center.z >> color.x >> color.y >> color.z >> radius >> unused >> unused
// >> unused;
// octahedron(center, radius, color, polygons);
// cin >> center.x >> center.y >> center.z >> color.x >> color.y >> color.z >> radius >> unused >> unused
// >> unused;
// dodecahedron(center, radius, color, polygons);
// Scene
cin >> scene_a.x >> scene_a.y >> scene_a.z;
cin >> scene_b.x >> scene_b.y >> scene_b.z;
cin >> scene_c.x >> scene_c.y >> scene_c.z;
cin >> scene_d.x >> scene_d.y >> scene_d.z;
cin >> unused;
cin >> color.x >> color.y >> color.z;
cin >> unused;
// scene(scene_a, scene_b, scene_c, scene_d, color, polygons);
// Lights
cin >> n_lights;
cin >> light_pos.x >> light_pos.y >> light_pos.z;
cin >> light_col.x >> light_col.y >> light_col.z;
// Recursion
cin >> recursion_step;
// cerr << recursion_step << "\n";
// SSAA params
cin >> ssaa_multiplier;
// cerr << ssaa_multiplier << "\n";
}
char filename_as_array[256];
int filename_size = path_to_frames.size();
strcpy(filename_as_array, path_to_frames.c_str());
// Send info to all processes
MPI_Bcast(&total_frames, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(filename_as_array, 256, MPI_CHAR, 0, MPI_COMM_WORLD);
MPI_Bcast(&filename_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&width, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&height, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&fov, 1, MPI_INT, 0, MPI_COMM_WORLD);
// Camera
MPI_Bcast(&r_0c, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&z_0c, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&phi_0c, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&A_rc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&A_zc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&w_rc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&w_zc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&w_phic, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&p_rc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&p_zc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&r_0v, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&z_0v, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&phi_0v, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&A_rv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&A_zv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&w_rv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&w_zv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&w_phiv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&p_rv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&p_zv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Cube params
MPI_Bcast(&cube_center.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&cube_center.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&cube_center.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&cube_color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&cube_color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&cube_color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&cube_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Octahedron params
MPI_Bcast(&octahedron_center.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&octahedron_center.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&octahedron_center.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&octahedron_color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&octahedron_color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&octahedron_color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&octahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Dodecahedron params
MPI_Bcast(&dodecahedron_center.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dodecahedron_center.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dodecahedron_center.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dodecahedron_color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dodecahedron_color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dodecahedron_color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dodecahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Scene params
MPI_Bcast(&scene_a.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_a.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_a.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_b.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_b.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_b.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_c.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_c.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_c.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_d.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_d.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&scene_d.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Lights
MPI_Bcast(&n_lights, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&light_pos.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&light_pos.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&light_pos.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&light_col.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&light_col.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&light_col.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
int ssaa_width = width * ssaa_multiplier;
int ssaa_height = height * ssaa_multiplier;
// SSAA params
MPI_Bcast(&ssaa_multiplier, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&ssaa_width, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&ssaa_height, 1, MPI_INT, 0, MPI_COMM_WORLD);
pixels = new uchar4[ssaa_width * ssaa_height];
pixels_ssaa = new uchar4[ssaa_width * ssaa_height]; // cpu
MPI_Barrier(MPI_COMM_WORLD);
// Figures and scene creating
cube(cube_center, cube_radius, cube_color, polygons);
octahedron(octahedron_center, octahedron_radius, octahedron_color, polygons);
dodecahedron(dodecahedron_center, dodecahedron_radius, dodecahedron_color, polygons);
scene(scene_a, scene_b, scene_c, scene_d, color, polygons);
polygons_as_array = polygons.data();
int total_polygons = polygons.size();
// for (int i = 0; i < polygons.size(); ++i) {
// cerr << "p1:" << polygons[i].p1.x << " " << polygons[i].p1.y << " " << polygons[i].p1.z << "\n";
// cerr << "p2:" << polygons[i].p2.x << " " << polygons[i].p2.y << " " << polygons[i].p2.z << "\n";
// cerr << "p3:" << polygons[i].p3.x << " " << polygons[i].p3.y << " " << polygons[i].p3.z << "\n";
// cerr << "color:" << polygons[i].color.x << " " << polygons[i].color.y << " " << polygons[i].color.z << "\n";
// }
if (id == 0) {
cout << "Start rendering. Total polygons: " << total_polygons << ". Frame size: " << width << "x" << height;
cout << ". Total frames: " << total_frames << "\n";
cout << "|\tIteration number\t|\t time in ms\t|\ttotal rays |\n";
}
double r_c, z_c, phi_c , r_v, z_v, phi_v;
vector_cords p_c, p_v;
int sum_of_rays;
double total_duration_time = 0;
for (int i = id; i < total_frames; i += numproc) {
auto start = chrono::steady_clock::now();
double time_step = 2.0 * M_PI / total_frames;
double cur_time = i * time_step;
// Movement
r_c = r_0c + A_rc * sin(w_rc * cur_time + p_rc);
z_c = z_0c + A_zc * sin(w_zc * cur_time + p_zc);
phi_c = phi_0c + w_phic * cur_time;
r_v = r_0v + A_rv * sin(w_rv * cur_time + p_rv);
z_v = z_0v + A_zv * sin(w_zv * cur_time + p_zv);
phi_v = phi_0v + w_phiv * cur_time;
p_c = { r_c * cos(phi_c), r_c * sin(phi_c), z_c };
p_v = { r_v * cos(phi_v), r_v * sin(phi_v), z_v };
// Total sum of rays (will be the same coz of recursion)
sum_of_rays = ssaa_width * ssaa_height;
int res;
if (is_gpu)
res = gpu_mode(p_c, p_v, width, ssaa_width, height, ssaa_height, (double)fov, pixels, pixels_ssaa,
light_pos, light_col, polygons_as_array, total_polygons, ssaa_multiplier);
else
res = cpu_mode(p_c, p_v, width, ssaa_width, height, ssaa_height, (double)fov, pixels, pixels_ssaa,
light_pos, light_col, polygons_as_array, total_polygons, ssaa_multiplier);
if (res)
cout << "An error occurred. Check output\n";
auto end = chrono::steady_clock::now();
cout << "|\tIteration " << i + 1 << " of " << total_frames << "\t|\t";
double iteration_time = ((double)chrono::duration_cast<chrono::microseconds>(end - start).count()) / 1000.0;
total_duration_time += iteration_time;
cout << iteration_time << "ms\t|\t";
cout << sum_of_rays << "\t|\n";
string frame_name = string(filename_as_array);
frame_name += "/" + to_string(i) + ".data";
FILE* f = fopen(frame_name.c_str(), "wb");
// fwrite(&ssaa_width, sizeof(int), 1, f);
// fwrite(&ssaa_height, sizeof(int), 1, f);
// fwrite(pixels_ssaa, sizeof(uchar4), ssaa_width * ssaa_height, f);
fwrite(&width, sizeof(int), 1, f);
fwrite(&height, sizeof(int), 1, f);
fwrite(pixels, sizeof(uchar4), width * height, f);
fclose(f);
}
if (pixels)
delete[] pixels;
if (pixels_ssaa)
delete[] pixels_ssaa;
if (id == 0)
cout << "Done with total duration: " << total_duration_time << "ms\n";
MPI_Finalize();
return 0;
} | 5123f5600a81b090c13b9d094d9b2226ce468a43.cu | #include <iostream>
#include <stdio.h>
#include <float.h>
#include <string.h>
#include <vector>
#include <string>
#include <cmath>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "omp.h"
#include "mpi.h"
using namespace std;
// Checks the result of a CUDA API call. On failure, logs file/line and the
// error string to stderr and returns 1 from the ENCLOSING function, so the
// macro is only usable inside functions returning int.
// Fixes over the previous version:
//  * `err` is evaluated exactly once (the old macro re-executed the API call
//    inside cudaGetErrorString on the failure path);
//  * do { } while (0) makes it a single statement, safe in unbraced if/else;
//  * the final line carries no trailing '\', so the continuation no longer
//    swallows the next source line.
#define CUDA_ERROR(err)                                                   \
    do {                                                                  \
        cudaError_t err_ = (err);                                         \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n",          \
                    __FILE__, __LINE__, cudaGetErrorString(err_));        \
            return 1;                                                     \
        }                                                                 \
    } while (0)
// Shorthand for an 8-bit color channel value.
typedef unsigned char uchar;
// 3D vector; also reused as an RGB triple (x = red, y = green, z = blue).
struct vector_cords {
    double x;
    double y;
    double z;
};
// Flat-shaded triangle: three vertices plus one face color
// (channels are scaled to 0..255 by normalise_color before storage).
struct polygon {
    vector_cords p1;
    vector_cords p2;
    vector_cords p3;
    vector_cords color;
};
// Component-wise vector sum; callable from both host and device code.
__host__ __device__ vector_cords operator + (vector_cords v1, vector_cords v2) {
    vector_cords sum;
    sum.x = v1.x + v2.x;
    sum.y = v1.y + v2.y;
    sum.z = v1.z + v2.z;
    return sum;
}
// Component-wise vector difference; callable from both host and device code.
__host__ __device__ vector_cords operator - (vector_cords v1, vector_cords v2) {
    vector_cords diff;
    diff.x = v1.x - v2.x;
    diff.y = v1.y - v2.y;
    diff.z = v1.z - v2.z;
    return diff;
}
// Scales a vector by a scalar (scalar on the right-hand side).
__host__ __device__ vector_cords operator * (vector_cords v, double num) {
    vector_cords scaled;
    scaled.x = v.x * num;
    scaled.y = v.y * num;
    scaled.z = v.z * num;
    return scaled;
}
// Dot (scalar) product of two 3D vectors.
__host__ __device__ double scal_mul(vector_cords v1, vector_cords v2) {
    return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
}
// Euclidean length: |v| = sqrt(v . v).
__host__ __device__ double len(vector_cords v) {
    return sqrt(scal_mul(v, v));
}
// Returns v scaled to unit length (result is undefined for the zero vector,
// which would divide by zero).
__host__ __device__ vector_cords norm(vector_cords v) {
    double length = len(v);
    vector_cords unit;
    unit.x = v.x / length;
    unit.y = v.y / length;
    unit.z = v.z / length;
    return unit;
}
// Cross product v1 x v2.
__host__ __device__ vector_cords crossing(vector_cords v1, vector_cords v2) {
    return {v1.y * v2.z - v1.z * v2.y,
            v1.z * v2.x - v1.x * v2.z,
            v1.x * v2.y - v1.y * v2.x};
}
// Applies the 3x3 matrix whose columns are (a, b, c) to vector v,
// i.e. returns a*v.x + b*v.y + c*v.z — used here as a change of basis
// from camera space to world space.
__host__ __device__ vector_cords multiply(vector_cords a, vector_cords b, vector_cords c, vector_cords v) {
    return { a.x * v.x + b.x * v.y + c.x * v.z,
             a.y * v.x + b.y * v.y + c.y * v.z,
             a.z * v.x + b.z * v.y + c.z * v.z };
}
// Scales each color channel by 255 (input channels expected in [0, 1]).
vector_cords normalise_color(vector_cords color) {
    vector_cords scaled;
    scaled.x = color.x * 255.;
    scaled.y = color.y * 255.;
    scaled.z = color.z * 255.;
    return scaled;
}
// Traces a single ray from `pos` along (normalized) `dir`: finds the nearest
// intersected triangle, then casts a shadow ray from the hit point toward the
// point light. Returns the face color modulated channel-wise by the light
// color, or black {0,0,0,0} when nothing is hit or the hit point is occluded.
__host__ __device__ uchar4 ray_aux(vector_cords pos, vector_cords dir, vector_cords light_pos,
    vector_cords light_color, polygon *polygons, int n) {
    int min_value = -1;   // index of the closest intersected polygon, -1 = none
    double ts_min;        // ray parameter t of that hit; only valid when min_value != -1
    // Pass 1: closest-hit search (Moller-Trumbore ray/triangle intersection).
    for (int i = 0; i < n; ++i) {
        // Triangle edges from p1.
        vector_cords e1 = polygons[i].p2 - polygons[i].p1;
        vector_cords e2 = polygons[i].p3 - polygons[i].p1;
        vector_cords p = crossing(dir, e2);
        double div = scal_mul(p, e1);
        // Ray parallel to the triangle plane -> no intersection.
        if (fabs(div) < 1e-10)
            continue;
        vector_cords t = pos - polygons[i].p1;
        // Barycentric coordinates (u, v) must stay inside the triangle.
        double u = scal_mul(p, t) / div;
        if (u < 0.0 || u > 1.0)
            continue;
        vector_cords q = crossing(t, e1);
        double v = scal_mul(q, dir) / div;
        if (v < 0.0 || v + u > 1.0)
            continue;
        // Hit must be in front of the ray origin.
        double ts = scal_mul(q, e2) / div;
        if (ts < 0.0)
            continue;
        if (min_value == -1 || ts < ts_min) {
            min_value = i;
            ts_min = ts;
        }
    }
    if (min_value == -1)
        return {0, 0, 0, 0};
    // Move to the hit point and aim the shadow ray at the light.
    pos = dir * ts_min + pos;
    dir = light_pos - pos;
    double length = len(dir);   // distance to the light
    dir = norm(dir);
    // Pass 2: occlusion test — any triangle (other than the one we hit)
    // between the hit point and the light puts the point in shadow.
    for (int i = 0; i < n; i++) {
        vector_cords e1 = polygons[i].p2 - polygons[i].p1;
        vector_cords e2 = polygons[i].p3 - polygons[i].p1;
        vector_cords p = crossing(dir, e2);
        double div = scal_mul(p, e1);
        if (fabs(div) < 1e-10)
            continue;
        vector_cords t = pos - polygons[i].p1;
        double u = scal_mul(p, t) / div;
        if (u < 0.0 || u > 1.0)
            continue;
        vector_cords q = crossing(t, e1);
        double v = scal_mul(q, dir) / div;
        if (v < 0.0 || v + u > 1.0)
            continue;
        double ts = scal_mul(q, e2) / div;
        if (ts > 0.0 && ts < length && i != min_value) {
            return {0, 0, 0, 0};
        }
    }
    // Lit: modulate the stored face color (0..255 range) by the light color.
    uchar4 color_min;
    color_min.x = polygons[min_value].color.x;
    color_min.y = polygons[min_value].color.y;
    color_min.z = polygons[min_value].color.z;
    color_min.x *= light_color.x;
    color_min.y *= light_color.y;
    color_min.z *= light_color.z;
    color_min.w = 0;
    return color_min;
}
// Renders one frame on the CPU into `pixels` (w x h, row 0 at the top).
// p_c is the camera position, p_v the look-at point, fov the field of view
// in degrees. One primary ray is traced per pixel via ray_aux.
void render_cpu(vector_cords p_c, vector_cords p_v, int w, int h, double fov, uchar4* pixels, vector_cords light_pos,
    vector_cords light_col, polygon* polygons, int n) {
    // Step from pixel indices to [-1, 1] image-plane coordinates.
    double dw = (double)2.0 / (double)(w - 1.0);
    double dh = (double)2.0 / (double)(h - 1.0);
    // Image-plane distance for the given field of view.
    double z = 1.0 / tan(fov * M_PI / 360.0);
    // Orthonormal camera basis: b_z looks from the camera toward the target.
    vector_cords b_z = norm(p_v - p_c);
    vector_cords b_x = norm(crossing(b_z, {0.0, 0.0, 1.0}));
    vector_cords b_y = norm(crossing(b_x, b_z));
    // `parallel for` splits the columns between threads; the previous bare
    // `#pragma omp parallel` made every thread redo (and race on) the whole
    // image, producing no speedup.
    #pragma omp parallel for
    for (int i = 0; i < w; i++)
        for (int j = 0; j < h; j++) {
            vector_cords v;
            v.x = (double)-1.0 + dw * (double)i;
            v.y = ((double)-1.0 + dh * (double)j) * (double)h / (double)w;
            v.z = z;
            vector_cords dir = multiply(b_x, b_y, b_z, v);
            // j = 0 is the bottom of the image plane, so flip vertically.
            pixels[(h - 1 - j) * w + i] = ray_aux(p_c, norm(dir), light_pos, light_col, polygons, n);
        }
}
// GPU counterpart of render_cpu: one primary ray per pixel of the w x h
// image. Uses 2D grid-stride loops over columns and rows, so any launch
// configuration (including a 1D one, where blockDim.y == gridDim.y == 1)
// covers the whole image.
__global__ void render_gpu(vector_cords p_c, vector_cords p_v, int w, int h, double fov, uchar4* pixels,
    vector_cords light_pos, vector_cords light_col, polygon* polygons, int n) {
    // This thread's starting pixel and the stride of the whole grid.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int idy = blockDim.y * blockIdx.y + threadIdx.y;
    int offsetX = blockDim.x * gridDim.x;
    int offsetY = blockDim.y * gridDim.y;
    // Step from pixel indices to [-1, 1] image-plane coordinates.
    double dw = (double)2.0 / (double)(w - 1.0);
    double dh = (double)2.0 / (double)(h - 1.0);
    // Image-plane distance for the given field of view (degrees).
    double z = 1.0 / tan(fov * M_PI / 360.0);
    // Orthonormal camera basis: b_z looks from the camera toward the target.
    vector_cords b_z = norm(p_v - p_c);
    vector_cords b_x = norm(crossing(b_z, {0.0, 0.0, 1.0}));
    vector_cords b_y = norm(crossing(b_x, b_z));
    for (int i = idx; i < w; i += offsetX)
        for (int j = idy; j < h; j += offsetY) {
            vector_cords v;
            v.x = (double)-1.0 + dw * (double)i;
            v.y = ((double)-1.0 + dh * (double)j) * (double)h / (double)w;
            v.z = z;
            vector_cords dir = multiply(b_x, b_y, b_z, v);
            // j = 0 is the bottom of the image plane, so flip vertically.
            pixels[(h - 1 - j) * w + i] = ray_aux(p_c, norm(dir), light_pos, light_col, polygons, n);
        }
}
// Box-filter downsampling (SSAA resolve): averages each coeff x coeff block
// of the supersampled image `ssaa_pixels` into one pixel of the w x h output
// `pixels`. The alpha channel is forced to 0.
void ssaa_cpu(uchar4 *pixels, int w, int h, int coeff, uchar4 *ssaa_pixels) {
    // `parallel for` divides the rows between threads; the previous bare
    // `#pragma omp parallel` made every thread process the full image
    // redundantly, racing on identical writes.
    #pragma omp parallel for
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            // Accumulate channel sums in 32-bit ints to avoid uchar overflow.
            int4 mid_pixel = { 0, 0, 0, 0 };
            for (int j = 0; j < coeff; j++) {
                for (int i = 0; i < coeff; i++) {
                    // Sub-pixel (i, j) of block (x, y); the supersampled
                    // image's row pitch is w * coeff.
                    int index = y * w * coeff * coeff + x * coeff + j * w * coeff + i;
                    mid_pixel.x += ssaa_pixels[index].x;
                    mid_pixel.y += ssaa_pixels[index].y;
                    mid_pixel.z += ssaa_pixels[index].z;
                    mid_pixel.w += 0;
                }
            }
            pixels[y * w + x].x = (uchar)(int)(mid_pixel.x / (coeff * coeff));
            pixels[y * w + x].y = (uchar)(int)(mid_pixel.y / (coeff * coeff));
            pixels[y * w + x].z = (uchar)(int)(mid_pixel.z / (coeff * coeff));
            pixels[y * w + x].w = 0;
        }
    }
}
// GPU counterpart of ssaa_cpu: averages each coeff x coeff block of the
// supersampled image into one output pixel. 2D grid-stride loops make the
// kernel correct for any launch configuration.
__global__ void ssaa_gpu(uchar4 *pixels, int w, int h, int coeff, uchar4 *ssaa_pixels) {
    // This thread's starting output pixel and the stride of the whole grid.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int offsetX = blockDim.x * gridDim.x;
    int offsetY = blockDim.y * gridDim.y;
    for (int y = idy; y < h; y += offsetY) {
        for (int x = idx; x < w; x += offsetX) {
            // Accumulate channel sums in 32-bit ints to avoid uchar overflow.
            int4 mid = { 0, 0, 0, 0 };
            for (int j = 0; j < coeff; j++) {
                for (int i = 0; i < coeff; i++) {
                    // Sub-pixel (i, j) of block (x, y); the supersampled
                    // image's row pitch is w * coeff.
                    int index = y * w * coeff * coeff + x * coeff + j * w * coeff + i;
                    mid.x += ssaa_pixels[index].x;
                    mid.y += ssaa_pixels[index].y;
                    mid.z += ssaa_pixels[index].z;
                    mid.w += 0;
                }
            }
            pixels[y * w + x].x = (uchar)(mid.x / (coeff * coeff));
            pixels[y * w + x].y = (uchar)(mid.y / (coeff * coeff));
            pixels[y * w + x].z = (uchar)(mid.z / (coeff * coeff));
            pixels[y * w + x].w = 0;
        }
    }
}
// Appends the 12 triangles (6 faces, 2 triangles each) of an axis-aligned
// cube with the given center and circumradius r. `color` is per channel
// (expected in [0, 1]) and is scaled to 0..255 before being stored per face.
void cube(vector_cords center, double r, vector_cords color, vector<polygon> &polygons) {
    color = normalise_color(color);
    // Unit-cube corners projected onto the unit sphere (each coordinate is
    // +-1/sqrt(3)). The unused `vector<vector_cords> vertices(8)` local from
    // the previous version has been removed.
    vector_cords point_a {-1 / sqrt(3), -1 / sqrt(3), -1 / sqrt(3)};
    vector_cords point_b {-1 / sqrt(3), -1 / sqrt(3), 1 / sqrt(3)};
    vector_cords point_c {-1 / sqrt(3), 1 / sqrt(3), -1 / sqrt(3)};
    vector_cords point_d {-1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)};
    vector_cords point_e {1 / sqrt(3), -1 / sqrt(3), -1 / sqrt(3)};
    vector_cords point_f {1 / sqrt(3), -1 / sqrt(3), 1 / sqrt(3)};
    vector_cords point_g {1 / sqrt(3), 1 / sqrt(3), -1 / sqrt(3)};
    vector_cords point_h {1 / sqrt(3), 1 / sqrt(3), 1 / sqrt(3)};
    // 6 sides means 12 polygons of triangles; each vertex is scaled to the
    // requested radius and shifted to the center inline.
    polygons.push_back({point_a * r + center, point_b * r + center, point_d * r + center, color});
    polygons.push_back({point_a * r + center, point_c * r + center, point_d * r + center, color});
    polygons.push_back({point_b * r + center, point_f * r + center, point_h * r + center, color});
    polygons.push_back({point_b * r + center, point_d * r + center, point_h * r + center, color});
    polygons.push_back({point_e * r + center, point_f * r + center, point_h * r + center, color});
    polygons.push_back({point_e * r + center, point_g * r + center, point_h * r + center, color});
    polygons.push_back({point_a * r + center, point_e * r + center, point_g * r + center, color});
    polygons.push_back({point_a * r + center, point_c * r + center, point_g * r + center, color});
    polygons.push_back({point_a * r + center, point_b * r + center, point_f * r + center, color});
    polygons.push_back({point_a * r + center, point_e * r + center, point_f * r + center, color});
    polygons.push_back({point_c * r + center, point_d * r + center, point_h * r + center, color});
    polygons.push_back({point_c * r + center, point_g * r + center, point_h * r + center, color});
}
// Appends the 8 triangular faces of an octahedron with the given center and
// circumradius r to the polygon list. `color` is given per channel (expected
// in [0, 1]) and is scaled to the 0..255 range before being stored per face.
void octahedron(vector_cords center, double r, vector_cords color, vector<polygon> &polygons) {
    color = normalise_color(color);
    // Unit octahedron: one vertex on each coordinate half-axis.
    vector<vector_cords> corners {{ 1,  0,  0},
                                  {-1,  0,  0},
                                  { 0,  1,  0},
                                  { 0, -1,  0},
                                  { 0,  0,  1},
                                  { 0,  0, -1}};
    // Scale to the requested radius and translate to the center.
    for (auto &corner : corners)
        corner = corner * r + center;
    // Vertex indices of the 8 triangular faces.
    vector<vector<int>> faces {{5, 2, 0}, {5, 0, 3}, {5, 3, 1}, {5, 1, 2},
                               {4, 3, 0}, {4, 1, 3}, {4, 2, 1}, {4, 0, 2}};
    for (const auto &f : faces)
        polygons.push_back({corners[f[0]], corners[f[1]], corners[f[2]], color});
}
// Appends a dodecahedron (center, circumradius r) to the polygon list.
// Each of the 12 pentagonal faces is fan-triangulated into 3 triangles,
// 36 triangles in total. `color` is per channel in [0, 1] and is scaled
// to 0..255 before being stored per face.
void dodecahedron(vector_cords center, double r, vector_cords color, vector<polygon> &polygons) {
    color = normalise_color(color);
    double a = (1 + sqrt(5)) / 2;   // golden ratio phi
    double b = 2 / (1 + sqrt(5));   // 1 / phi
    // The 20 canonical dodecahedron vertices (cube corners + golden rectangles).
    vector<vector_cords> pts {
        {-b,  0,  a}, { b,  0,  a}, {-1,  1,  1}, { 1,  1,  1}, { 1, -1,  1},
        {-1, -1,  1}, { 0, -a,  b}, { 0,  a,  b}, {-a, -b,  0}, {-a,  b,  0},
        { a,  b,  0}, { a, -b,  0}, { 0, -a, -b}, { 0,  a, -b}, { 1,  1, -1},
        { 1, -1, -1}, {-1, -1, -1}, {-1,  1, -1}, { b,  0, -a}, {-b,  0, -a}};
    // Normalize onto the unit sphere (same per-component order as before
    // so the floating-point results are bit-identical).
    for (auto &p : pts) {
        p.x /= sqrt(3);
        p.y /= sqrt(3);
        p.z /= sqrt(3);
    }
    // Scale to the requested radius and translate to the center.
    for (auto &p : pts) {
        p.x = p.x * r + center.x;
        p.y = p.y * r + center.y;
        p.z = p.z * r + center.z;
    }
    // Vertex indices of the 36 triangles, in the original emission order.
    static const int faces[36][3] = {
        { 4,  0,  6}, { 0,  5,  6}, { 0,  4,  1}, { 0,  3,  7}, { 2,  0,  7}, { 0,  1,  3},
        {10,  1, 11}, { 3,  1, 10}, { 1,  4, 11}, { 5,  0,  8}, { 0,  2,  9}, { 8,  0,  9},
        { 5,  8, 16}, { 6,  5, 12}, {12,  5, 16}, { 4, 12, 15}, { 4,  6, 12}, {11,  4, 15},
        { 2, 13, 17}, { 2,  7, 13}, { 9,  2, 17}, {13,  3, 14}, { 7,  3, 13}, { 3, 10, 14},
        { 8, 17, 19}, {16,  8, 19}, { 8,  9, 17}, {14, 11, 18}, {11, 15, 18}, {10, 11, 14},
        {12, 19, 18}, {15, 12, 18}, {12, 16, 19}, {19, 13, 18}, {17, 13, 19}, {13, 14, 18}};
    for (const auto &f : faces)
        polygons.push_back({pts[f[0]], pts[f[1]], pts[f[2]], color});
}
// Adds the floor quad (a, b, c, d) to the polygon list as two triangles
// sharing the a-c diagonal. `color` is per channel in [0, 1] and is scaled
// to 0..255 before being stored.
void scene(vector_cords a, vector_cords b, vector_cords c, vector_cords d, vector_cords color,
           vector<polygon> &polygons) {
    vector_cords face_color = normalise_color(color);
    polygons.push_back(polygon{a, b, c, face_color});
    polygons.push_back(polygon{c, d, a, face_color});
}
// CPU pipeline for one frame: ray-trace into the supersampled buffer
// (ssaa_w x ssaa_h), then box-filter it down into the final w x h image
// in `pixels`. Always returns 0.
int cpu_mode(vector_cords p_c, vector_cords p_v, int w, int ssaa_w, int h, int ssaa_h, double fov, uchar4* pixels,
    uchar4* pixels_ssaa, vector_cords light_pos, vector_cords light_col, polygon* polygons, int n, int ssaa_multiplier) {
    render_cpu(p_c, p_v, ssaa_w, ssaa_h, fov, pixels_ssaa, light_pos, light_col, polygons, n);
    ssaa_cpu(pixels, w, h, ssaa_multiplier, pixels_ssaa);
    return 0;
}
// Renders one frame on the GPU: uploads the scene polygons, ray-traces into
// the supersampled buffer, box-filters it down by ssaa_multiplier, and copies
// the final w x h image back into `pixels`. Returns 0 on success, 1 if any
// CUDA call fails (via the CUDA_ERROR macro).
int gpu_mode(vector_cords p_c, vector_cords p_v, int w, int ssaa_w, int h, int ssaa_h, double fov, uchar4* pixels,
    uchar4* pixels_ssaa, vector_cords light_pos, vector_cords light_col, polygon* polygons, int n, int ssaa_multiplier) {
    // Device buffers: final image, supersampled image, and polygon list.
    uchar4* gpu_pixels;
    CUDA_ERROR(cudaMalloc((uchar4**)(&gpu_pixels), w * h * sizeof(uchar4)));
    CUDA_ERROR(cudaMemcpy(gpu_pixels, pixels, w * h * sizeof(uchar4), cudaMemcpyHostToDevice));
    uchar4* gpu_pixels_ssaa;
    CUDA_ERROR(cudaMalloc((uchar4**)(&gpu_pixels_ssaa), ssaa_w * ssaa_h * sizeof(uchar4)));
    CUDA_ERROR(cudaMemcpy(gpu_pixels_ssaa, pixels_ssaa, ssaa_w * ssaa_h * sizeof(uchar4), cudaMemcpyHostToDevice));
    polygon* gpu_polygons;
    CUDA_ERROR(cudaMalloc((polygon**)(&gpu_polygons), n * sizeof(polygon)));
    CUDA_ERROR(cudaMemcpy(gpu_polygons, polygons, n * sizeof(polygon), cudaMemcpyHostToDevice));
    // Ray-trace at supersampled resolution; the kernels use grid-stride
    // loops, so this fixed <<<128, 128>>> launch covers any image size.
    render_gpu <<< 128, 128 >>> (p_c, p_v, ssaa_w, ssaa_h, fov, gpu_pixels_ssaa, light_pos, light_col, gpu_polygons, n);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and surfaces asynchronous kernel failures before
    // cudaGetLastError() is queried.
    cudaDeviceSynchronize();
    CUDA_ERROR(cudaGetLastError());
    // SSAA resolve: box-filter the supersampled buffer down to w x h.
    ssaa_gpu <<< 128, 128 >>> (gpu_pixels, w, h, ssaa_multiplier, gpu_pixels_ssaa);
    cudaDeviceSynchronize();
    CUDA_ERROR(cudaGetLastError());
    CUDA_ERROR(cudaMemcpy(pixels, gpu_pixels, w * h * sizeof(uchar4), cudaMemcpyDeviceToHost));
    // Free memory
    CUDA_ERROR(cudaFree(gpu_pixels));
    CUDA_ERROR(cudaFree(gpu_pixels_ssaa));
    CUDA_ERROR(cudaFree(gpu_polygons));
    return 0;
}
// Entry point of the MPI + CUDA animation renderer.
// Rank 0 reads the full scene/camera/animation configuration from stdin and
// broadcasts every parameter; frames are then rendered round-robin (frame i
// on rank i % numproc) on CPU or GPU and each is written as a raw binary
// file: int width, int height, then width*height uchar4 pixels.
int main(int argc, char* argv[]) {
    string mode;
    if (argv[1])  // argv[argc] is guaranteed NULL, so this is safe when argc == 1
        mode = argv[1];
    bool is_gpu = true;
    if (argc > 2) {
        cout << "Incorrect params. Please use '--help' for help\n";
        return 0;
    }
    if (argc == 1 || mode == "--gpu")
        is_gpu = true;
    if (mode == "--cpu")
        is_gpu = false;
    if (mode == "--default") {
        // Print a known-good sample configuration and exit.
        cout << "100\n"
            "./frames_data\n"
            "640 480 120\n"
            "7.0 3.0 0.0 2.0 1.0 2.0 6.0 1.0 0.0 0.0\n"
            "2.0 0.0 0.0 0.5 0.1 1.0 4.0 1.0 0.0 0.0\n"
            "4.0 4.0 0.0 1.0 0.0 1.0 2.0 0.0 0.0 0.0\n"
            "1.0 1.0 0.0 1.0 1.0 0.0 2.0 0.0 0.0 0.0\n"
            "-2.5 -2.5 0.0 0.0 1.0 1.0 2.0 0.0 0.0 0.0\n"
            "-10.0 -10.0 -1.0 -10.0 10.0 -1.0 10.0 10.0 -1.0 10.0 -10.0 -1.0 ./folder 0.0 0.9 0.0 0.5\n"
            "1\n"
            "100 100 100 1.0 1.0 1.0\n"
            "1 3\n";
        return 0;
    }
    if (mode == "--help") {
        cout << "<--------------- HELP --------------->\n"
            "Start program without args will cause computation in gpu mode\n"
            "--cpu For computation with using cpu\n"
            "--gpu For computation with using gpu\n"
            "--default Print best configuration for input data\n"
            "--help For help\n"
            "<---------------END OF HELP--------------->\n";
        return 0;
    }
    // Animation / camera parameters (read on rank 0, broadcast below).
    int total_frames, width, height, fov;
    string path_to_frames;
    double r_0c, z_0c, phi_0c;   // camera trajectory (cylindrical coordinates)
    double A_rc, A_zc;
    double w_rc, w_zc, w_phic;
    double p_rc, p_zc;
    double r_0v, z_0v, phi_0v;   // look-at point trajectory
    double A_rv, A_zv;
    double w_rv, w_zv, w_phiv;
    double p_rv, p_zv;
    vector_cords color;
    vector_cords cube_center, cube_color;
    vector_cords octahedron_center, octahedron_color;
    vector_cords dodecahedron_center, dodecahedron_color;
    double cube_radius, octahedron_radius, dodecahedron_radius;
    string unused;               // swallows input fields this renderer ignores
    vector_cords scene_a, scene_b, scene_c, scene_d;
    vector_cords light_pos, light_col;
    vector<polygon> polygons;
    polygon *polygons_as_array;
    uchar4 *pixels = nullptr;
    uchar4 *pixels_ssaa = nullptr;
    int n_lights;          // read but unused: renderer supports exactly one light
    int recursion_step;    // read but unused
    int ssaa_multiplier;
    // MPI bookkeeping.
    char proc_name[MPI_MAX_PROCESSOR_NAME];
    int numproc, proc_name_len, id;
    // Get properties of device 0 (every rank uses the same device here).
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    // MPI initialise.
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    MPI_Get_processor_name(proc_name, &proc_name_len);
    fprintf(stderr, "proc %2d(%d) on %s(%s)\n", id, numproc, proc_name, devProp.name);
    fflush(stderr);
    // Rank 0 parses the whole configuration from stdin.
    if (id == 0) {
        // Frames.
        cin >> total_frames;
        cin >> path_to_frames;
        cin >> width >> height >> fov;
        // Camera position trajectory.
        cin >> r_0c >> z_0c >> phi_0c;
        cin >> A_rc >> A_zc;
        cin >> w_rc >> w_zc >> w_phic;
        cin >> p_rc >> p_zc;
        // Look-at point trajectory.
        cin >> r_0v >> z_0v >> phi_0v;
        cin >> A_rv >> A_zv;
        cin >> w_rv >> w_zv >> w_phiv;
        cin >> p_rv >> p_zv;
        // Figure parameters (geometry itself is built after the broadcast so
        // every rank constructs an identical polygon list).
        cin >> cube_center.x >> cube_center.y >> cube_center.z;
        cin >> cube_color.x >> cube_color.y >> cube_color.z;
        cin >> cube_radius >> unused >> unused >> unused;
        cin >> octahedron_center.x >> octahedron_center.y >> octahedron_center.z;
        cin >> octahedron_color.x >> octahedron_color.y >> octahedron_color.z;
        cin >> octahedron_radius >> unused >> unused >> unused;
        cin >> dodecahedron_center.x >> dodecahedron_center.y >> dodecahedron_center.z;
        cin >> dodecahedron_color.x >> dodecahedron_color.y >> dodecahedron_color.z;
        cin >> dodecahedron_radius >> unused >> unused >> unused;
        // Scene (floor quad).
        cin >> scene_a.x >> scene_a.y >> scene_a.z;
        cin >> scene_b.x >> scene_b.y >> scene_b.z;
        cin >> scene_c.x >> scene_c.y >> scene_c.z;
        cin >> scene_d.x >> scene_d.y >> scene_d.z;
        cin >> unused;
        cin >> color.x >> color.y >> color.z;
        cin >> unused;
        // Lights.
        cin >> n_lights;
        cin >> light_pos.x >> light_pos.y >> light_pos.z;
        cin >> light_col.x >> light_col.y >> light_col.z;
        // Recursion depth (unused by this renderer).
        cin >> recursion_step;
        // SSAA multiplier.
        cin >> ssaa_multiplier;
    }
    // NOTE(review): strcpy overflows if the path is >= 256 chars — verify input.
    char filename_as_array[256];
    int filename_size = path_to_frames.size();
    strcpy(filename_as_array, path_to_frames.c_str());
    // Broadcast everything that was read on rank 0.
    MPI_Bcast(&total_frames, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(filename_as_array, 256, MPI_CHAR, 0, MPI_COMM_WORLD);
    MPI_Bcast(&filename_size, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&width, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&height, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&fov, 1, MPI_INT, 0, MPI_COMM_WORLD);
    // Camera.
    MPI_Bcast(&r_0c, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&z_0c, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&phi_0c, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&A_rc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&A_zc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&w_rc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&w_zc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&w_phic, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&p_rc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&p_zc, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&r_0v, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&z_0v, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&phi_0v, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&A_rv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&A_zv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&w_rv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&w_zv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&w_phiv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&p_rv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&p_zv, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // Cube params.
    MPI_Bcast(&cube_center.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&cube_center.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&cube_center.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&cube_color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&cube_color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&cube_color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&cube_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // Octahedron params.
    MPI_Bcast(&octahedron_center.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&octahedron_center.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&octahedron_center.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&octahedron_color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&octahedron_color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&octahedron_color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&octahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // Dodecahedron params.
    MPI_Bcast(&dodecahedron_center.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&dodecahedron_center.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&dodecahedron_center.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&dodecahedron_color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&dodecahedron_color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&dodecahedron_color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&dodecahedron_radius, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // Scene params.
    MPI_Bcast(&scene_a.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_a.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_a.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_b.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_b.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_b.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_c.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_c.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_c.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_d.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_d.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&scene_d.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&color.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&color.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&color.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // Lights.
    MPI_Bcast(&n_lights, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&light_pos.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&light_pos.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&light_pos.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&light_col.x, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&light_col.y, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&light_col.z, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    // SSAA params.
    // FIX: broadcast the multiplier *before* deriving the supersampled sizes.
    // Previously ssaa_width/ssaa_height were computed from ssaa_multiplier
    // while it was still uninitialized on every rank except rank 0 (the
    // subsequent size broadcasts masked the symptom, but reading an
    // uninitialized int is undefined behavior).
    MPI_Bcast(&ssaa_multiplier, 1, MPI_INT, 0, MPI_COMM_WORLD);
    int ssaa_width = width * ssaa_multiplier;
    int ssaa_height = height * ssaa_multiplier;
    // NOTE(review): `pixels` only needs width * height entries; the SSAA-sized
    // allocation is oversized but harmless.
    pixels = new uchar4[ssaa_width * ssaa_height];
    pixels_ssaa = new uchar4[ssaa_width * ssaa_height];
    MPI_Barrier(MPI_COMM_WORLD);
    // Build the scene geometry identically on every rank.
    cube(cube_center, cube_radius, cube_color, polygons);
    octahedron(octahedron_center, octahedron_radius, octahedron_color, polygons);
    dodecahedron(dodecahedron_center, dodecahedron_radius, dodecahedron_color, polygons);
    scene(scene_a, scene_b, scene_c, scene_d, color, polygons);
    polygons_as_array = polygons.data();
    int total_polygons = polygons.size();
    if (id == 0) {
        cout << "Start rendering. Total polygons: " << total_polygons << ". Frame size: " << width << "x" << height;
        cout << ". Total frames: " << total_frames << "\n";
        cout << "|\tIteration number\t|\t time in ms\t|\ttotal rays |\n";
    }
    double r_c, z_c, phi_c , r_v, z_v, phi_v;
    vector_cords p_c, p_v;
    int sum_of_rays;
    double total_duration_time = 0;
    // Round-robin frame distribution across ranks: rank `id` renders frames
    // id, id + numproc, id + 2*numproc, ...
    for (int i = id; i < total_frames; i += numproc) {
        auto start = chrono::steady_clock::now();
        double time_step = 2.0 * M_PI / total_frames;
        double cur_time = i * time_step;
        // Camera and look-at positions at this time step (cylindrical motion).
        r_c = r_0c + A_rc * sin(w_rc * cur_time + p_rc);
        z_c = z_0c + A_zc * sin(w_zc * cur_time + p_zc);
        phi_c = phi_0c + w_phic * cur_time;
        r_v = r_0v + A_rv * sin(w_rv * cur_time + p_rv);
        z_v = z_0v + A_zv * sin(w_zv * cur_time + p_zv);
        phi_v = phi_0v + w_phiv * cur_time;
        p_c = { r_c * cos(phi_c), r_c * sin(phi_c), z_c };
        p_v = { r_v * cos(phi_v), r_v * sin(phi_v), z_v };
        // One primary ray per supersampled pixel.
        sum_of_rays = ssaa_width * ssaa_height;
        int res;
        if (is_gpu)
            res = gpu_mode(p_c, p_v, width, ssaa_width, height, ssaa_height, (double)fov, pixels, pixels_ssaa,
                light_pos, light_col, polygons_as_array, total_polygons, ssaa_multiplier);
        else
            res = cpu_mode(p_c, p_v, width, ssaa_width, height, ssaa_height, (double)fov, pixels, pixels_ssaa,
                light_pos, light_col, polygons_as_array, total_polygons, ssaa_multiplier);
        if (res)
            cout << "An error occurred. Check output\n";
        auto end = chrono::steady_clock::now();
        cout << "|\tIteration " << i + 1 << " of " << total_frames << "\t|\t";
        double iteration_time = ((double)chrono::duration_cast<chrono::microseconds>(end - start).count()) / 1000.0;
        total_duration_time += iteration_time;
        cout << iteration_time << "ms\t|\t";
        cout << sum_of_rays << "\t|\n";
        // Dump the frame: int width, int height, then the raw pixel payload.
        string frame_name = string(filename_as_array);
        frame_name += "/" + to_string(i) + ".data";
        FILE* f = fopen(frame_name.c_str(), "wb");
        fwrite(&width, sizeof(int), 1, f);
        fwrite(&height, sizeof(int), 1, f);
        fwrite(pixels, sizeof(uchar4), width * height, f);
        fclose(f);
    }
    if (pixels)
        delete[] pixels;
    if (pixels_ssaa)
        delete[] pixels_ssaa;
    // NOTE(review): this total only covers rank 0's own frames.
    if (id == 0)
        cout << "Done with total duration: " << total_duration_time << "ms\n";
    MPI_Finalize();
    return 0;
}
} |
2d9f6e15f4f5d3d267d174f7edfa565f2db300ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#define DIM 128
/*
* An example of using shared memory to optimize performance of a parallel
* reduction by constructing partial results for a thread block in shared memory
* before flushing to global memory.
*/
// Recursive Implementation of Interleaved Pair Approach
// Recursive interleaved-pair reduction on the host (CPU reference result).
// Destroys `data` in place and returns the sum of its first `size` elements.
// Fix: for odd sizes the original halved the range and silently dropped the
// last element; the unpaired tail element is now folded into data[0] first,
// so results for even / power-of-two sizes are unchanged and odd sizes are
// summed correctly.
int recursiveReduce(int *data, int const size)
{
    // terminate check
    if (size == 1) return data[0];

    // renew the stride
    int const stride = size / 2;

    // fold the unpaired trailing element (odd sizes) so no value is lost
    if (size % 2 != 0) data[0] += data[size - 1];

    // in-place reduction
    for (int i = 0; i < stride; i++)
        data[i] += data[i + stride];

    // call recursively
    return recursiveReduce(data, stride);
}
// unroll4 + complete unroll for loop + gmem
// Block-level sum reduction performed entirely in global memory.
// Each block reduces its own blockDim.x-element slice of g_idata in place
// (destructively) and writes the block's partial sum to g_odata[blockIdx.x].
// Assumes blockDim.x is a power of two <= 1024; the launcher in this file
// uses DIM (128) threads and n a multiple of blockDim.x — partial tail
// blocks would read elements past n. TODO confirm callers respect this.
__global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of the input
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // boundary check
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    // in-place reduction in global memory: fully unrolled halving tree,
    // one __syncthreads() per level while more than one warp participates
    if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
    __syncthreads();
    // unrolling warp: the last 32 elements are reduced without barriers,
    // relying on volatile accesses plus implicit warp-synchronous execution.
    // NOTE(review): this idiom is only safe pre-Volta; on Volta+ independent
    // thread scheduling requires __syncwarp() between steps — confirm the
    // target architecture before relying on this kernel.
    if (tid < 32)
    {
        volatile int *vsmem = idata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Block-level sum reduction staged through static shared memory.
// Each block copies its blockDim.x-element slice of g_idata into smem, runs
// the same unrolled halving tree as reduceGmem there, and writes the block
// partial sum to g_odata[blockIdx.x]. g_idata is left unmodified.
// Requires blockDim.x == DIM (the static smem size) — larger blocks would
// index smem out of bounds; assumes n is a multiple of blockDim.x.
__global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n)
{
    // per-block scratch; sized to the expected block size
    __shared__ int smem[DIM];
    // set thread ID
    unsigned int tid = threadIdx.x;
    // boundary check
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // stage this block's slice into shared memory, one element per thread
    smem[tid] = idata[tid];
    __syncthreads();
    // in-place reduction in shared memory (fully unrolled halving tree)
    if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
    __syncthreads();
    // unrolling warp: barrier-free final 32 steps via volatile accesses.
    // NOTE(review): relies on implicit warp-synchronous execution — safe
    // pre-Volta only; Volta+ would need __syncwarp() between steps.
    if (tid < 32)
    {
        volatile int *vsmem = smem;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// Driver (HIP port): builds a random int array, computes a CPU reference sum,
// then runs and validates the global-memory and shared-memory reduction
// kernels. Fixes over the original: `tmp` is now freed (it leaked), launch
// errors are surfaced via hipGetLastError(), and the unused dynamic
// shared-memory request for reduceSmem is dropped (the kernel only uses its
// static __shared__ array).
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));
    bool bResult = false;
    // initialization: total number of elements to reduce
    int size = 1 << 24;
    printf(" with array size %d ", size);
    // execution configuration
    int blocksize = DIM;   // initial block size
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);   // ceil-div
    printf("grid %d block %d\n", grid.x, block.x);
    // allocate host memory
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    int *h_odata = (int *) malloc(grid.x * sizeof(int));   // per-block partial sums
    int *tmp = (int *) malloc(bytes);                      // scratch for the CPU reference
    // initialize the array
    for (int i = 0; i < size; i++)
        h_idata[i] = (int)( rand() & 0xFF );
    memcpy (tmp, h_idata, bytes);
    int gpu_sum = 0;
    // allocate device memory
    int *d_idata = NULL;
    int *d_odata = NULL;
    CHECK(hipMalloc((void **) &d_idata, bytes));
    CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
    // cpu reduction (destroys tmp in place)
    int cpu_sum = recursiveReduce (tmp, size);
    printf("cpu reduce : %d\n", cpu_sum);
    // reduce gmem
    CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( reduceGmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size);
    CHECK(hipGetLastError());   // surface launch-configuration errors
    CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                    hipMemcpyDeviceToHost));   // blocking copy also syncs the kernel
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("reduceGmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
           block.x);
    // reduce smem: no dynamic shared memory needed — the kernel's smem array
    // is statically sized (the original requested DIM bytes that went unused)
    CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( reduceSmem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, size);
    CHECK(hipGetLastError());
    CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                    hipMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("reduceSmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
           block.x);
    // free host memory (tmp was previously leaked)
    free(h_idata);
    free(h_odata);
    free(tmp);
    // free device memory
    CHECK(hipFree(d_idata));
    CHECK(hipFree(d_odata));
    // reset device
    CHECK(hipDeviceReset());
    // check the results (compares the last kernel's sum only)
    bResult = (gpu_sum == cpu_sum);
    if(!bResult) printf("Test failed!\n");
    return EXIT_SUCCESS;
}
| 2d9f6e15f4f5d3d267d174f7edfa565f2db300ee.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#define DIM 128
/*
* An example of using shared memory to optimize performance of a parallel
* reduction by constructing partial results for a thread block in shared memory
* before flushing to global memory.
*/
// Recursive Implementation of Interleaved Pair Approach
// Recursive interleaved-pair reduction on the host (CPU reference result).
// Destroys `data` in place and returns the sum of its first `size` elements.
// Fix: for odd sizes the original halved the range and silently dropped the
// last element; the unpaired tail element is now folded into data[0] first,
// so results for even / power-of-two sizes are unchanged and odd sizes are
// summed correctly.
int recursiveReduce(int *data, int const size)
{
    // terminate check
    if (size == 1) return data[0];

    // renew the stride
    int const stride = size / 2;

    // fold the unpaired trailing element (odd sizes) so no value is lost
    if (size % 2 != 0) data[0] += data[size - 1];

    // in-place reduction
    for (int i = 0; i < stride; i++)
        data[i] += data[i + stride];

    // call recursively
    return recursiveReduce(data, stride);
}
// unroll4 + complete unroll for loop + gmem
// Block-level sum reduction performed entirely in global memory.
// Each block reduces its own blockDim.x-element slice of g_idata in place
// (destructively) and writes the block's partial sum to g_odata[blockIdx.x].
// Assumes blockDim.x is a power of two <= 1024; the launcher in this file
// uses DIM (128) threads and n a multiple of blockDim.x — partial tail
// blocks would read elements past n. TODO confirm callers respect this.
__global__ void reduceGmem(int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of the input
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // boundary check
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    // in-place reduction in global memory: fully unrolled halving tree,
    // one __syncthreads() per level while more than one warp participates
    if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
    __syncthreads();
    // unrolling warp: the last 32 elements are reduced without barriers,
    // relying on volatile accesses plus implicit warp-synchronous execution.
    // NOTE(review): this idiom is only safe pre-Volta; on Volta+ independent
    // thread scheduling requires __syncwarp() between steps — confirm the
    // target architecture before relying on this kernel.
    if (tid < 32)
    {
        volatile int *vsmem = idata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Block-level sum reduction staged through static shared memory.
// Each block copies its blockDim.x-element slice of g_idata into smem, runs
// the same unrolled halving tree as reduceGmem there, and writes the block
// partial sum to g_odata[blockIdx.x]. g_idata is left unmodified.
// Requires blockDim.x == DIM (the static smem size) — larger blocks would
// index smem out of bounds; assumes n is a multiple of blockDim.x.
__global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n)
{
    // per-block scratch; sized to the expected block size
    __shared__ int smem[DIM];
    // set thread ID
    unsigned int tid = threadIdx.x;
    // boundary check
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // stage this block's slice into shared memory, one element per thread
    smem[tid] = idata[tid];
    __syncthreads();
    // in-place reduction in shared memory (fully unrolled halving tree)
    if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
    __syncthreads();
    // unrolling warp: barrier-free final 32 steps via volatile accesses.
    // NOTE(review): relies on implicit warp-synchronous execution — safe
    // pre-Volta only; Volta+ would need __syncwarp() between steps.
    if (tid < 32)
    {
        volatile int *vsmem = smem;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// Driver: builds a random int array, computes a CPU reference sum, then runs
// and validates the global-memory and shared-memory reduction kernels.
// Fixes over the original: `tmp` is now freed (it leaked), launch errors are
// surfaced via cudaGetLastError(), and the unused dynamic shared-memory
// request for reduceSmem is dropped (the kernel only uses its static
// __shared__ array).
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    bool bResult = false;
    // initialization: total number of elements to reduce
    int size = 1 << 24;
    printf(" with array size %d ", size);
    // execution configuration
    int blocksize = DIM;   // initial block size
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);   // ceil-div
    printf("grid %d block %d\n", grid.x, block.x);
    // allocate host memory
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    int *h_odata = (int *) malloc(grid.x * sizeof(int));   // per-block partial sums
    int *tmp = (int *) malloc(bytes);                      // scratch for the CPU reference
    // initialize the array
    for (int i = 0; i < size; i++)
        h_idata[i] = (int)( rand() & 0xFF );
    memcpy (tmp, h_idata, bytes);
    int gpu_sum = 0;
    // allocate device memory
    int *d_idata = NULL;
    int *d_odata = NULL;
    CHECK(cudaMalloc((void **) &d_idata, bytes));
    CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
    // cpu reduction (destroys tmp in place)
    int cpu_sum = recursiveReduce (tmp, size);
    printf("cpu reduce : %d\n", cpu_sum);
    // reduce gmem
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    reduceGmem<<<grid.x, block>>>(d_idata, d_odata, size);
    CHECK(cudaGetLastError());   // surface launch-configuration errors
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                     cudaMemcpyDeviceToHost));   // blocking copy also syncs the kernel
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("reduceGmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
           block.x);
    // reduce smem: no dynamic shared memory needed — the kernel's smem array
    // is statically sized (the original requested DIM bytes that went unused)
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    reduceSmem<<<grid.x, block>>>(d_idata, d_odata, size);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("reduceSmem : %d <<<grid %d block %d>>>\n", gpu_sum, grid.x,
           block.x);
    // free host memory (tmp was previously leaked)
    free(h_idata);
    free(h_odata);
    free(tmp);
    // free device memory
    CHECK(cudaFree(d_idata));
    CHECK(cudaFree(d_odata));
    // reset device
    CHECK(cudaDeviceReset());
    // check the results (compares the last kernel's sum only)
    bResult = (gpu_sum == cpu_sum);
    if(!bResult) printf("Test failed!\n");
    return EXIT_SUCCESS;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.