hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
b3f07fdcfcc2b5619631ee92d370817da5ac22ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/host_device_vector.h"
#include "../common/timer.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
using GradientPairSumT = GradientPairPrecise;
// Sum all histogram bins in [begin, end) cooperatively across one thread
// block and broadcast the block-wide total to every thread.
// Each thread strides over the range accumulating a private partial sum,
// which is then combined with a CUB block reduce.
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT>
__device__ GradientPairSumT ReduceFeature(const GradientPairSumT* begin,
                                          const GradientPairSumT* end,
                                          TempStorageT* temp_storage) {
  // cub::Uninitialized avoids default-constructing a non-trivial type in
  // __shared__ memory.
  __shared__ cub::Uninitialized<GradientPairSumT> uninitialized_sum;
  GradientPairSumT& shared_sum = uninitialized_sum.Alias();
  GradientPairSumT local_sum = GradientPairSumT();
  // Block-stride loop: thread t reads begin + k*BLOCK_THREADS + t each pass.
  for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
    bool thread_active = itr + threadIdx.x < end;
    // Threads past the end of the range contribute a zero-initialised pair.
    GradientPairSumT bin = thread_active ? *(itr + threadIdx.x) : GradientPairSumT();
    local_sum += bin;
  }
  // Block-wide reduction; only thread 0 receives the complete result.
  local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
  if (threadIdx.x == 0) {
    shared_sum = local_sum;
  }
  __syncthreads();  // make shared_sum visible to all threads before returning
  return shared_sum;
}
// Scan one feature's histogram bins and evaluate every possible split
// position, updating *best_split with the best candidate found.
// Runs cooperatively across a single thread block; each thread handles one
// bin per scan tile of BLOCK_THREADS bins.
template <int BLOCK_THREADS, typename ReduceT, typename scan_t,
          typename max_ReduceT, typename TempStorageT>
__device__ void EvaluateFeature(int fidx, const GradientPairSumT* hist,
                                const int* feature_segments, float min_fvalue,
                                const float* gidx_fvalue_map,
                                DeviceSplitCandidate* best_split,
                                const DeviceNodeStats& node,
                                const GPUTrainingParam& param,
                                TempStorageT* temp_storage, int constraint,
                                const ValueConstraint& value_constraint) {
  // Bin range [gidx_begin, gidx_end) owned by feature fidx.
  int gidx_begin = feature_segments[fidx];
  int gidx_end = feature_segments[fidx + 1];
  // Total gradient mass recorded in this feature's bins.
  GradientPairSumT feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
      hist + gidx_begin, hist + gidx_end, temp_storage);
  // Carries the running prefix from one scan tile to the next.
  auto prefix_op = SumCallbackOp<GradientPairSumT>();
  for (int scan_begin = gidx_begin; scan_begin < gidx_end;
       scan_begin += BLOCK_THREADS) {
    bool thread_active = scan_begin + threadIdx.x < gidx_end;
    GradientPairSumT bin =
        thread_active ? hist[scan_begin + threadIdx.x] : GradientPairSumT();
    // Exclusive scan: afterwards `bin` holds the sum of all bins strictly to
    // the left of this thread's bin, i.e. the left-child statistics.
    scan_t(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
    // Calculate gain
    GradientPairSumT parent_sum = GradientPairSumT(node.sum_gradients);
    // Gradient mass of rows where this feature is missing.
    GradientPairSumT missing = parent_sum - feature_sum;
    // Direction for missing values; passed as the final argument to
    // LossChangeMissing (presumably an out-param — TODO confirm).
    bool missing_left = true;
    const float null_gain = -FLT_MAX;
    float gain = null_gain;
    if (thread_active) {
      gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
                               constraint, value_constraint, missing_left);
    }
    __syncthreads();  // scan temp storage reused below via the union
    // Find thread with best gain
    hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
    hipcub::KeyValuePair<int, float> best =
        max_ReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
    __shared__ hipcub::KeyValuePair<int, float> block_max;
    if (threadIdx.x == 0) {
      block_max = best;
    }
    __syncthreads();
    // Best thread updates split
    if (threadIdx.x == block_max.key) {
      int gidx = scan_begin + threadIdx.x;
      // Split value is the cut immediately left of this bin (the feature
      // minimum for the first bin).
      float fvalue =
          gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1];
      GradientPairSumT left = missing_left ? bin + missing : bin;
      GradientPairSumT right = parent_sum - left;
      best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx,
                         GradientPair(left), GradientPair(right), param);
    }
    __syncthreads();  // protect temp_storage / block_max before next tile
  }
}
// One thread block per feature: block blockIdx.x evaluates all candidate
// splits of that feature for node `nidx` and writes the block-best candidate
// to d_split[blockIdx.x].
template <int BLOCK_THREADS>
__global__ void evaluate_split_kernel(
    const GradientPairSumT* d_hist, int nidx, uint64_t n_features,
    DeviceNodeStats nodes, const int* d_feature_segments,
    const float* d_fidx_min_map, const float* d_gidx_fvalue_map,
    GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split,
    ValueConstraint value_constraint, int* d_monotonic_constraints) {
  typedef hipcub::KeyValuePair<int, float> ArgMaxT;
  typedef hipcub::BlockScan<GradientPairSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
      BlockScanT;
  typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;
  typedef hipcub::BlockReduce<GradientPairSumT, BLOCK_THREADS> SumReduceT;
  // The three CUB temp storages are never live simultaneously (the callee
  // separates their uses with __syncthreads), so one shared allocation is
  // shared via a union.
  union TempStorage {
    typename BlockScanT::TempStorage scan;
    typename MaxReduceT::TempStorage max_reduce;
    typename SumReduceT::TempStorage sum_reduce;
  };
  // Avoid default-constructing a non-trivial type in __shared__ memory.
  __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
  DeviceSplitCandidate& best_split = uninitialized_split.Alias();
  __shared__ TempStorage temp_storage;
  if (threadIdx.x == 0) {
    best_split = DeviceSplitCandidate();
  }
  __syncthreads();
  auto fidx = blockIdx.x;  // one feature per block
  auto constraint = d_monotonic_constraints[fidx];
  EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
      fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
      &best_split, nodes, gpu_param, &temp_storage, constraint,
      value_constraint);
  __syncthreads();
  if (threadIdx.x == 0) {
    // Record best loss
    d_split[fidx] = best_split;
  }
}
// Find a gidx value for a given feature otherwise return -1 if not found.
// Binary-searches the row's entries in [begin, end) for a bin id falling in
// the feature's bin range [fidx_begin, fidx_end); works because a row's bin
// ids are ordered by feature.
template <typename GidxIterT>
__device__ int BinarySearchRow(bst_uint begin, bst_uint end, GidxIterT data,
                               int fidx_begin, int fidx_end) {
  bst_uint previous_middle = UINT32_MAX;
  while (end != begin) {
    auto middle = begin + (end - begin) / 2;
    // `begin = middle` below does not shrink a two-element range, so a
    // repeated midpoint is detected to guarantee termination.
    if (middle == previous_middle) {
      break;
    }
    previous_middle = middle;
    auto gidx = data[middle];
    if (gidx >= fidx_begin && gidx < fidx_end) {
      return gidx;  // bin belongs to the requested feature
    } else if (gidx < fidx_begin) {
      begin = middle;
    } else {
      end = middle;
    }
  }
  // Value is missing
  return -1;
}
// Per-GPU histogram storage: one flat device buffer holding `n_bins`
// gradient-pair bins for each of up to `max_nodes` tree nodes, node-major.
struct DeviceHistogram {
  dh::BulkAllocator<dh::MemoryType::kDevice> ba;
  dh::DVec<GradientPairSumT> data;  // max_nodes * n_bins bins
  int n_bins;                       // bins per node
  // Allocate the device buffer; must be called before any other member.
  void Init(int device_idx, int max_nodes, int n_bins, bool silent) {
    this->n_bins = n_bins;
    ba.Allocate(device_idx, silent, &data, size_t(max_nodes) * size_t(n_bins));
  }
  // Zero all bins of all nodes.
  void Reset() { data.Fill(GradientPairSumT()); }
  // Pointer to the first bin of node `nidx`. The offset is computed in
  // size_t: the buffer is sized with a size_t product in Init(), so an int
  // `nidx * n_bins` could overflow for large trees / bin counts.
  GradientPairSumT* GetHistPtr(int nidx) {
    return data.Data() + size_t(nidx) * size_t(n_bins);
  }
  // Debugging helper: copy node `nidx`'s bins to the host and print them.
  void PrintNidx(int nidx) const {
    auto h_data = data.AsVector();
    std::cout << "nidx " << nidx << ":\n";
    // size_t indices for the same overflow reason as GetHistPtr.
    for (size_t i = size_t(n_bins) * nidx; i < size_t(n_bins) * (nidx + 1); i++) {
      std::cout << h_data[i] << " ";
    }
    std::cout << "\n";
  }
};
// Trivially-copyable subset of TrainParam usable inside device code; this
// file passes it to CalcWeight in the UpdatePredictionCache kernel.
struct CalcWeightTrainParam {
  float min_child_weight;
  float reg_alpha;
  float reg_lambda;
  float max_delta_step;
  float learning_rate;
  // Copy the needed fields out of the full host-side training parameters.
  XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
      : min_child_weight(p.min_child_weight),
        reg_alpha(p.reg_alpha),
        reg_lambda(p.reg_lambda),
        max_delta_step(p.max_delta_step),
        learning_rate(p.learning_rate) {}
};
// Index of the first element of the sorted array `cuts` that is strictly
// greater than v; returns n when every element is <= v.
__device__ int upper_bound(const float* __restrict__ cuts, int n, float v) {
  // Classic half-open binary search. Invariant: cuts[0..lo) <= v and
  // cuts[hi..n) > v; terminates with lo == hi == answer.
  int lo = 0;
  int hi = n;
  while (lo < hi) {
    int mid = lo + (hi - lo) / 2;
    if (cuts[mid] <= v) {
      lo = mid + 1;
    } else {
      hi = mid;
    }
  }
  return lo;
}
// Bin one batch of CSR rows into the compressed ELLPACK buffer.
// 2-D launch: x indexes rows of the batch, y indexes the (padded) feature
// slot within a row; slots past a row's true length get null_gidx_value.
__global__ void compress_bin_ellpack_k
(common::CompressedBufferWriter wr, common::CompressedByteT* __restrict__ buffer,
 const size_t* __restrict__ row_ptrs,
 const Entry* __restrict__ entries,
 const float* __restrict__ cuts, const size_t* __restrict__ cut_rows,
 size_t base_row, size_t n_rows, size_t row_ptr_begin, size_t row_stride,
 unsigned int null_gidx_value) {
  // size_t arithmetic on the row index avoids 32-bit overflow on large grids.
  size_t irow = threadIdx.x + size_t(blockIdx.x) * blockDim.x;
  int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
  if (irow >= n_rows || ifeature >= row_stride)
    return;
  int row_size = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
  unsigned int bin = null_gidx_value;  // default: padding / missing slot
  if (ifeature < row_size) {
    // row_ptrs hold absolute offsets into the full matrix while `entries`
    // only holds this batch, hence the row_ptr_begin correction.
    Entry entry = entries[row_ptrs[irow] - row_ptr_begin + ifeature];
    int feature = entry.index;
    float fvalue = entry.fvalue;
    // Cut points belonging to this feature.
    const float *feature_cuts = &cuts[cut_rows[feature]];
    int ncuts = cut_rows[feature + 1] - cut_rows[feature];
    // Local bin id within the feature; values beyond the last cut are
    // clamped into the last bin.
    bin = upper_bound(feature_cuts, ncuts, fvalue);
    if (bin >= ncuts)
      bin = ncuts - 1;
    // Convert the local bin id to a global one.
    bin += cut_rows[feature];
  }
  wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Accumulate a node's gradient histogram in shared memory, then flush it to
// the node's global histogram. Dynamic shared memory must be sized for
// null_gidx_value GradientPairSumT bins (checked by the caller).
__global__ void sharedMemHistKernel(size_t row_stride,
                                    const bst_uint* d_ridx,
                                    common::CompressedIterator<uint32_t> d_gidx,
                                    int null_gidx_value,
                                    GradientPairSumT* d_node_hist,
                                    const GradientPair* d_gpair,
                                    size_t segment_begin,
                                    size_t n_elements) {
  extern __shared__ char smem[];
  GradientPairSumT* smem_arr = reinterpret_cast<GradientPairSumT*>(smem); // NOLINT
  // Zero the shared histogram (shared memory is uninitialised).
  for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
    smem_arr[i] = GradientPairSumT();
  }
  __syncthreads();
  // Grid-stride over every (row, slot) element of the node's segment.
  for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
    int ridx = d_ridx[idx / row_stride + segment_begin];
    int gidx = d_gidx[ridx * row_stride + idx % row_stride];
    if (gidx != null_gidx_value) {  // skip padding / missing entries
      AtomicAddGpair(smem_arr + gidx, d_gpair[ridx]);
    }
  }
  __syncthreads();
  // Merge the block-local histogram into the global node histogram.
  for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
    AtomicAddGpair(d_node_hist + i, smem_arr[i]);
  }
}
// Manage memory for a single GPU: holds the shard's compressed feature
// matrix, per-row state (gradient pairs, node positions), per-node
// histograms, and the device kernels that operate on them.
struct DeviceShard {
  // Half-open range [begin, end) of rows within this shard's ridx buffer.
  struct Segment {
    size_t begin;
    size_t end;
    Segment() : begin(0), end(0) {}
    Segment(size_t begin, size_t end) : begin(begin), end(end) {
      CHECK_GE(end, begin);
    }
    size_t Size() const { return end - begin; }
  };

  int device_idx;
  int normalised_device_idx;  // Device index counting from param.gpu_id
  dh::BulkAllocator<dh::MemoryType::kDevice> ba;
  dh::DVec<common::CompressedByteT> gidx_buffer;  // compressed ELLPACK matrix
  dh::DVec<GradientPair> gpair;
  dh::DVec2<bst_uint> ridx;  // Row index relative to this shard
  dh::DVec2<int> position;   // node id each row currently belongs to
  std::vector<Segment> ridx_segments;  // per-node row ranges within ridx
  dh::DVec<int> feature_segments;
  dh::DVec<float> gidx_fvalue_map;
  dh::DVec<float> min_fvalue;
  dh::DVec<int> monotone_constraints;
  dh::DVec<bst_float> prediction_cache;
  std::vector<GradientPair> node_sum_gradients;
  dh::DVec<GradientPair> node_sum_gradients_d;
  common::CompressedIterator<uint32_t> gidx;
  size_t row_stride;       // width of the compressed (ELLPACK) matrix
  bst_uint row_begin_idx;  // The row offset for this shard
  bst_uint row_end_idx;
  bst_uint n_rows;
  int n_bins;
  int null_gidx_value;  // bin id used to mark missing / padded entries
  DeviceHistogram hist;
  TrainParam param;
  bool prediction_cache_initialised;
  bool can_use_smem_atomics;
  int64_t* tmp_pinned;  // Small amount of staging memory
  std::vector<hipStream_t> streams;
  dh::CubMemory temp_memory;

  DeviceShard(int device_idx, int normalised_device_idx,
              bst_uint row_begin, bst_uint row_end, int n_bins, TrainParam param)
      : device_idx(device_idx),
        normalised_device_idx(normalised_device_idx),
        row_begin_idx(row_begin),
        row_end_idx(row_end),
        n_rows(row_end - row_begin),
        n_bins(n_bins),
        null_gidx_value(n_bins),
        param(param),
        prediction_cache_initialised(false),
        can_use_smem_atomics(false) {}

  // Bin the shard's rows against the quantile cuts, store them as a
  // compressed ELLPACK matrix, then allocate all remaining device buffers.
  void Init(const common::HistCutMatrix& hmat, const SparsePage& row_batch) {
    // copy cuts to the GPU
    dh::safe_cuda(hipSetDevice(device_idx));
    thrust::device_vector<float> cuts_d(hmat.cut);
    thrust::device_vector<size_t> cut_row_ptrs_d(hmat.row_ptr);
    // find the maximum row size
    thrust::device_vector<size_t> row_ptr_d(
        row_batch.offset.data() + row_begin_idx, row_batch.offset.data() + row_end_idx + 1);
    auto row_iter = row_ptr_d.begin();
    auto get_size = [=] __device__(size_t row) {
      return row_iter[row + 1] - row_iter[row];
    }; // NOLINT
    auto counting = thrust::make_counting_iterator(size_t(0));
    using TransformT = thrust::transform_iterator<decltype(get_size),
                                                  decltype(counting), size_t>;
    TransformT row_size_iter = TransformT(counting, get_size);
    // The init value must be size_t: thrust::reduce accumulates in the type
    // of the init argument, so a plain int 0 would truncate the size_t row
    // lengths.
    row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows,
                                size_t(0), thrust::maximum<size_t>());
    int num_symbols = n_bins + 1;  // +1 for the null (missing) symbol
    size_t compressed_size_bytes =
        common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
                                                            num_symbols);
    CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
        << "Max leaves and max depth cannot both be unconstrained for "
           "gpu_hist.";
    ba.Allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes);
    gidx_buffer.Fill(0);
    // bin and compress entries in batches of rows
    // use no more than 1/16th of GPU memory per batch
    size_t gpu_batch_nrows = dh::TotalMemory(device_idx) /
        (16 * row_stride * sizeof(Entry));
    if (gpu_batch_nrows > n_rows) {
      gpu_batch_nrows = n_rows;
    }
    thrust::device_vector<Entry> entries_d(gpu_batch_nrows * row_stride);
    size_t gpu_nbatches = dh::DivRoundUp(n_rows, gpu_batch_nrows);
    for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
      size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
      size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows;
      if (batch_row_end > n_rows) {
        batch_row_end = n_rows;
      }
      size_t batch_nrows = batch_row_end - batch_row_begin;
      size_t n_entries =
          row_batch.offset[row_begin_idx + batch_row_end] -
          row_batch.offset[row_begin_idx + batch_row_begin];
      // Copy this batch's entries to the device staging buffer.
      dh::safe_cuda
          (hipMemcpy
           (entries_d.data().get(),
            &row_batch.data[row_batch.offset[row_begin_idx + batch_row_begin]],
            n_entries * sizeof(Entry), hipMemcpyDefault));
      dim3 block3(32, 8, 1);
      dim3 grid3(dh::DivRoundUp(n_rows, block3.x),
                 dh::DivRoundUp(row_stride, block3.y), 1);
      hipLaunchKernelGGL(( compress_bin_ellpack_k), dim3(grid3), dim3(block3), 0, 0,
          common::CompressedBufferWriter(num_symbols), gidx_buffer.Data(),
          row_ptr_d.data().get() + batch_row_begin,
          entries_d.data().get(), cuts_d.data().get(), cut_row_ptrs_d.data().get(),
          batch_row_begin, batch_nrows,
          row_batch.offset[row_begin_idx + batch_row_begin],
          row_stride, null_gidx_value);
      dh::safe_cuda(hipGetLastError());
      dh::safe_cuda(hipDeviceSynchronize());
    }
    // free the memory that is no longer needed
    row_ptr_d.resize(0);
    row_ptr_d.shrink_to_fit();
    entries_d.resize(0);
    entries_d.shrink_to_fit();
    gidx = common::CompressedIterator<uint32_t>(gidx_buffer.Data(), num_symbols);
    // allocate the rest
    int max_nodes =
        param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
    ba.Allocate(device_idx, param.silent,
                &gpair, n_rows, &ridx, n_rows, &position, n_rows,
                &prediction_cache, n_rows, &node_sum_gradients_d, max_nodes,
                &feature_segments, hmat.row_ptr.size(), &gidx_fvalue_map,
                hmat.cut.size(), &min_fvalue, hmat.min_val.size(),
                &monotone_constraints, param.monotone_constraints.size());
    gidx_fvalue_map = hmat.cut;
    min_fvalue = hmat.min_val;
    feature_segments = hmat.row_ptr;
    monotone_constraints = param.monotone_constraints;
    node_sum_gradients.resize(max_nodes);
    ridx_segments.resize(max_nodes);
    // check if we can use shared memory for building histograms
    // (assuming at least we need 2 CTAs per SM to maintain decent latency hiding)
    auto histogram_size = sizeof(GradientPairSumT) * null_gidx_value;
    auto max_smem = dh::MaxSharedMemory(device_idx);
    can_use_smem_atomics = histogram_size <= max_smem;
    // Init histogram
    hist.Init(device_idx, max_nodes, hmat.row_ptr.back(), param.silent);
    dh::safe_cuda(hipHostMalloc(&tmp_pinned, sizeof(int64_t)));
  }

  ~DeviceShard() {
    for (auto& stream : streams) {
      dh::safe_cuda(hipStreamDestroy(stream));
    }
    dh::safe_cuda(hipHostFree(tmp_pinned));
  }

  // Get vector of at least n initialised streams
  std::vector<hipStream_t>& GetStreams(int n) {
    // size_t cast avoids a signed/unsigned comparison (n is never negative
    // at the call sites — it is a container size).
    if (static_cast<size_t>(n) > streams.size()) {
      // Destroy and recreate rather than append, so the vector never holds a
      // mix of live and default-initialised handles.
      for (auto& stream : streams) {
        dh::safe_cuda(hipStreamDestroy(stream));
      }
      streams.clear();
      streams.resize(n);
      for (auto& stream : streams) {
        dh::safe_cuda(hipStreamCreate(&stream));
      }
    }
    return streams;
  }

  // Reset values for each update iteration
  void Reset(HostDeviceVector<GradientPair>* dh_gpair) {
    dh::safe_cuda(hipSetDevice(device_idx));
    position.CurrentDVec().Fill(0);  // every row starts at the root node
    std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
              GradientPair());
    thrust::sequence(ridx.CurrentDVec().tbegin(), ridx.CurrentDVec().tend());
    std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0));
    ridx_segments.front() = Segment(0, ridx.Size());
    this->gpair.copy(dh_gpair->tbegin(device_idx), dh_gpair->tend(device_idx));
    SubsampleGradientPair(&gpair, param.subsample, row_begin_idx);
    hist.Reset();
  }

  // Histogram accumulation entirely through global-memory atomics; fallback
  // for when the node histogram does not fit in shared memory.
  void BuildHistUsingGlobalMem(int nidx) {
    auto segment = ridx_segments[nidx];
    auto d_node_hist = hist.GetHistPtr(nidx);
    auto d_gidx = gidx;
    auto d_ridx = ridx.Current();
    auto d_gpair = gpair.Data();
    auto row_stride = this->row_stride;
    auto null_gidx_value = this->null_gidx_value;
    auto n_elements = segment.Size() * row_stride;
    dh::LaunchN(device_idx, n_elements, [=] __device__(size_t idx) {
      int ridx = d_ridx[(idx / row_stride) + segment.begin];
      int gidx = d_gidx[ridx * row_stride + idx % row_stride];
      if (gidx != null_gidx_value) {
        AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]);
      }
    });
  }

  // Histogram accumulation via a shared-memory staging histogram; see
  // sharedMemHistKernel.
  void BuildHistUsingSharedMem(int nidx) {
    auto segment = ridx_segments[nidx];
    auto segment_begin = segment.begin;
    auto d_node_hist = hist.GetHistPtr(nidx);
    auto d_gidx = gidx;
    auto d_ridx = ridx.Current();
    auto d_gpair = gpair.Data();
    auto row_stride = this->row_stride;
    auto null_gidx_value = this->null_gidx_value;
    auto n_elements = segment.Size() * row_stride;
    const size_t smem_size = sizeof(GradientPairSumT) * null_gidx_value;
    const int items_per_thread = 8;
    const int block_threads = 256;
    const int grid_size =
        static_cast<int>(dh::DivRoundUp(n_elements,
                                        items_per_thread * block_threads));
    if (grid_size <= 0) {
      return;  // empty segment: nothing to accumulate
    }
    dh::safe_cuda(hipSetDevice(device_idx));
    hipLaunchKernelGGL(( sharedMemHistKernel), dim3(grid_size), dim3(block_threads), smem_size, 0,
        row_stride, d_ridx, d_gidx, null_gidx_value, d_node_hist, d_gpair,
        segment_begin, n_elements);
  }

  // Dispatch to the shared-memory kernel when the histogram fits, otherwise
  // fall back to global-memory atomics.
  void BuildHist(int nidx) {
    if (can_use_smem_atomics) {
      BuildHistUsingSharedMem(nidx);
    } else {
      BuildHistUsingGlobalMem(nidx);
    }
  }

  // hist(subtraction) = hist(parent) - hist(histogram): derive a sibling's
  // histogram without rebuilding it from rows.
  void SubtractionTrick(int nidx_parent, int nidx_histogram,
                        int nidx_subtraction) {
    auto d_node_hist_parent = hist.GetHistPtr(nidx_parent);
    auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram);
    auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction);
    dh::LaunchN(device_idx, hist.n_bins, [=] __device__(size_t idx) {
      d_node_hist_subtraction[idx] =
          d_node_hist_parent[idx] - d_node_hist_histogram[idx];
    });
  }

  // Per-warp count of rows routed to the left child: ballot over the warp,
  // then one atomic per 32 lanes.
  // NOTE(review): on AMD wave64 targets __ballot returns a 64-bit mask and a
  // wavefront has 64 lanes; the `unsigned` truncation and `% 32` lane check
  // look wave32-specific — confirm behaviour on wave64 hardware.
  __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) {
    unsigned ballot = __ballot(val == left_nidx);
    if (threadIdx.x % 32 == 0) {
      atomicAdd(reinterpret_cast<unsigned long long*>(d_count),    // NOLINT
                static_cast<unsigned long long>(__popc(ballot)));  // NOLINT
    }
  }

  // Route every row of node `nidx` to its left or right child according to
  // the chosen split, count the left rows, and update the child segments.
  void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx,
                      int split_gidx, bool default_dir_left, bool is_dense,
                      int fidx_begin, int fidx_end) {
    dh::safe_cuda(hipSetDevice(device_idx));
    temp_memory.LazyAllocate(sizeof(int64_t));
    auto d_left_count = temp_memory.Pointer<int64_t>();
    dh::safe_cuda(hipMemset(d_left_count, 0, sizeof(int64_t)));
    auto segment = ridx_segments[nidx];
    auto d_ridx = ridx.Current();
    auto d_position = position.Current();
    auto d_gidx = gidx;
    auto row_stride = this->row_stride;
    dh::LaunchN<1, 512>(
        device_idx, segment.Size(), [=] __device__(bst_uint idx) {
          idx += segment.begin;
          auto ridx = d_ridx[idx];
          auto row_begin = row_stride * ridx;
          auto row_end = row_begin + row_stride;
          auto gidx = -1;
          if (is_dense) {
            // Dense rows store every feature at a fixed offset.
            gidx = d_gidx[row_begin + fidx];
          } else {
            gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin,
                                   fidx_end);
          }
          int position;
          if (gidx >= 0) {
            // Feature is found
            position = gidx <= split_gidx ? left_nidx : right_nidx;
          } else {
            // Feature is missing
            position = default_dir_left ? left_nidx : right_nidx;
          }
          CountLeft(d_left_count, position, left_nidx);
          d_position[idx] = position;
        });
    // Blocking copy into pinned staging memory; also synchronises the
    // preceding kernel.
    dh::safe_cuda(hipMemcpy(tmp_pinned, d_left_count, sizeof(int64_t),
                            hipMemcpyDeviceToHost));
    auto left_count = *tmp_pinned;
    SortPosition(segment, left_nidx, right_nidx);
    // Rows are now ordered left-child-first within the parent's segment.
    ridx_segments[left_nidx] =
        Segment(segment.begin, segment.begin + left_count);
    ridx_segments[right_nidx] =
        Segment(segment.begin + left_count, segment.end);
  }

  // Stable-partition the segment's rows by child node id using a radix sort
  // keyed on position, then copy the sorted halves of the double buffers
  // back into the "current" buffers.
  void SortPosition(const Segment& segment, int left_nidx, int right_nidx) {
    int min_bits = 0;
    // Only enough key bits to distinguish the two child ids.
    int max_bits = static_cast<int>(
        ::ceil(std::log2((std::max)(left_nidx, right_nidx) + 1)));
    // First call sizes the temp storage, second performs the sort.
    size_t temp_storage_bytes = 0;
    hipcub::DeviceRadixSort::SortPairs(
        nullptr, temp_storage_bytes, position.Current() + segment.begin,
        position.other() + segment.begin, ridx.Current() + segment.begin,
        ridx.other() + segment.begin, segment.Size(), min_bits, max_bits);
    temp_memory.LazyAllocate(temp_storage_bytes);
    hipcub::DeviceRadixSort::SortPairs(
        temp_memory.d_temp_storage, temp_memory.temp_storage_bytes,
        position.Current() + segment.begin, position.other() + segment.begin,
        ridx.Current() + segment.begin, ridx.other() + segment.begin,
        segment.Size(), min_bits, max_bits);
    dh::safe_cuda(hipMemcpy(
        position.Current() + segment.begin, position.other() + segment.begin,
        segment.Size() * sizeof(int), hipMemcpyDeviceToDevice));
    dh::safe_cuda(hipMemcpy(
        ridx.Current() + segment.begin, ridx.other() + segment.begin,
        segment.Size() * sizeof(bst_uint), hipMemcpyDeviceToDevice));
  }

  // Add this iteration's leaf weights into the cached predictions and copy
  // the result back to out_preds_d.
  void UpdatePredictionCache(bst_float* out_preds_d) {
    dh::safe_cuda(hipSetDevice(device_idx));
    if (!prediction_cache_initialised) {
      // Seed the cache from the caller's predictions on first use.
      dh::safe_cuda(hipMemcpy(
          prediction_cache.Data(), out_preds_d,
          prediction_cache.Size() * sizeof(bst_float), hipMemcpyDefault));
    }
    prediction_cache_initialised = true;
    CalcWeightTrainParam param_d(param);
    dh::safe_cuda(hipMemcpy(node_sum_gradients_d.Data(),
                            node_sum_gradients.data(),
                            sizeof(GradientPair) * node_sum_gradients.size(),
                            hipMemcpyHostToDevice));
    auto d_position = position.Current();
    auto d_ridx = ridx.Current();
    auto d_node_sum_gradients = node_sum_gradients_d.Data();
    auto d_prediction_cache = prediction_cache.Data();
    dh::LaunchN(
        device_idx, prediction_cache.Size(), [=] __device__(int local_idx) {
          int pos = d_position[local_idx];
          bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
          d_prediction_cache[d_ridx[local_idx]] +=
              weight * param_d.learning_rate;
        });
    dh::safe_cuda(hipMemcpy(
        out_preds_d, prediction_cache.Data(),
        prediction_cache.Size() * sizeof(bst_float), hipMemcpyDefault));
  }
};
class GPUHistMaker : public TreeUpdater {
public:
struct ExpandEntry;
GPUHistMaker() : initialised_(false), p_last_fmat_(nullptr) {}
// Parse updater arguments, select the device set, and configure the node
// expansion queue and performance monitor.
void Init(
    const std::vector<std::pair<std::string, std::string>>& args) override {
  param_.InitAllowUnknown(args);
  CHECK(param_.n_gpus != 0) << "Must have at least one device";
  n_devices_ = param_.n_gpus;
  devices_ = GPUSet::Range(param_.gpu_id, dh::NDevicesAll(param_.n_gpus));
  dh::CheckComputeCapability();
  // The queue's ordering function decides the growth strategy.
  if (param_.grow_policy == TrainParam::kLossGuide) {
    qexpand_.reset(new ExpandQueue(LossGuide));
  } else {
    qexpand_.reset(new ExpandQueue(DepthWise));
  }
  monitor_.Init("updater_gpu_hist", param_.debug_verbose);
}
// Grow every tree in `trees` one iteration using the supplied gradients.
// The learning rate is temporarily divided by the number of trees so the
// ensemble step size stays constant, and restored afterwards.
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
            const std::vector<RegTree*>& trees) override {
  monitor_.Start("Update", device_list_);
  GradStats::CheckInfo(dmat->Info());
  // rescale learning rate according to size of trees
  float lr = param_.learning_rate;
  param_.learning_rate = lr / trees.size();
  // Fix for corrupted source: `&param_` had been mangled into `¶m_`.
  ValueConstraint::Init(&param_, dmat->Info().num_col_);
  // build tree
  try {
    for (size_t i = 0; i < trees.size(); ++i) {
      this->UpdateTree(gpair, dmat, trees[i]);
    }
  } catch (const std::exception& e) {
    LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl;
  }
  param_.learning_rate = lr;  // restore the unscaled learning rate
  monitor_.Stop("Update", device_list_);
}
// One-off initialisation that depends only on the data matrix: quantile
// sketch, partitioning rows across devices, and constructing one DeviceShard
// per device (which bins and compresses its rows).
void InitDataOnce(DMatrix* dmat) {
  info_ = &dmat->Info();
  monitor_.Start("Quantiles", device_list_);
  hmat_.Init(dmat, param_.max_bin);
  monitor_.Stop("Quantiles", device_list_);
  n_bins_ = hmat_.row_ptr.back();
  int n_devices = dh::NDevices(param_.n_gpus, info_->num_row_);
  bst_uint row_begin = 0;
  // Rows per shard, rounded up so the last shard may be smaller.
  bst_uint shard_size =
      ::ceil(static_cast<double>(info_->num_row_) / n_devices);
  device_list_.resize(n_devices);
  for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
    // Wrap around so gpu_id may start anywhere in the visible device list.
    int device_idx = (param_.gpu_id + d_idx) % dh::NVisibleDevices();
    device_list_[d_idx] = device_idx;
  }
  reducer_.Init(device_list_);
  // Partition input matrix into row segments
  std::vector<size_t> row_segments;
  shards_.resize(n_devices);
  row_segments.push_back(0);
  for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
    bst_uint row_end =
        ::min(static_cast<size_t>(row_begin + shard_size), info_->num_row_);
    row_segments.push_back(row_end);
    row_begin = row_end;
  }
  monitor_.Start("BinningCompression", device_list_);
  {
    // Only single in-memory batches are supported by this updater.
    dmlc::DataIter<SparsePage>* iter = dmat->RowIterator();
    iter->BeforeFirst();
    CHECK(iter->Next()) << "Empty batches are not supported";
    const SparsePage& batch = iter->Value();
    // Create device shards
    dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) {
      shard = std::unique_ptr<DeviceShard>
          (new DeviceShard(device_list_[i], i,
                           row_segments[i], row_segments[i + 1], n_bins_, param_));
      shard->Init(hmat_, batch);
    });
    CHECK(!iter->Next()) << "External memory not supported";
  }
  monitor_.Stop("BinningCompression", device_list_);
  p_last_fmat_ = dmat;
  initialised_ = true;
}
// Per-iteration initialisation: lazily run the one-off setup, then reset
// per-shard state and distribute this iteration's gradients to the devices.
void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              const RegTree& tree) {
  monitor_.Start("InitDataOnce", device_list_);
  if (!initialised_) {
    this->InitDataOnce(dmat);
  }
  monitor_.Stop("InitDataOnce", device_list_);
  column_sampler_.Init(info_->num_col_, param_);
  // Copy gpair & reset memory
  monitor_.Start("InitDataReset", device_list_);
  gpair->Reshard(devices_);
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {shard->Reset(gpair); });
  monitor_.Stop("InitDataReset", device_list_);
}
// Sum node `nidx`'s histogram across all shards in place, treating each
// histogram as a flat array of GradientPairSumT's underlying value type.
void AllReduceHist(int nidx) {
  reducer_.GroupStart();
  for (auto& shard : shards_) {
    auto d_node_hist = shard->hist.GetHistPtr(nidx);
    // In-place all-reduce (same source and destination pointer).
    reducer_.AllReduceSum(
        shard->normalised_device_idx,
        reinterpret_cast<GradientPairSumT::ValueT*>(d_node_hist),
        reinterpret_cast<GradientPairSumT::ValueT*>(d_node_hist),
        n_bins_ * (sizeof(GradientPairSumT) / sizeof(GradientPairSumT::ValueT)));
  }
  reducer_.GroupEnd();
  reducer_.Synchronize();
}
// Build the histograms of both children of a split node. Only the child
// with fewer rows (across all shards) is built from scratch; the sibling is
// derived via the subtraction trick from the parent's histogram.
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) {
  // Largest per-shard row count of each child decides which is cheaper.
  size_t max_left = 0;
  size_t max_right = 0;
  for (auto& shard : shards_) {
    max_left = (std::max)(max_left, shard->ridx_segments[nidx_left].Size());
    max_right = (std::max)(max_right, shard->ridx_segments[nidx_right].Size());
  }
  const bool build_right = max_right < max_left;
  const int build_hist_nidx = build_right ? nidx_right : nidx_left;
  const int subtraction_trick_nidx = build_right ? nidx_left : nidx_right;
  // Build the cheaper child's histogram on every shard.
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->BuildHist(build_hist_nidx);
  });
  // Make the built histogram globally consistent before subtracting.
  this->AllReduceHist(build_hist_nidx);
  // sibling = parent - built child.
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->SubtractionTrick(nidx_parent, build_hist_nidx,
                            subtraction_trick_nidx);
  });
}
// Evaluate split candidates for every node in `nidx_set` and return the best
// split per node. All evaluation runs on the first device; one kernel per
// node is issued on its own stream so nodes are processed concurrently.
std::vector<DeviceSplitCandidate> EvaluateSplits(
    const std::vector<int>& nidx_set, RegTree* p_tree) {
  auto columns = info_->num_col_;
  std::vector<DeviceSplitCandidate> best_splits(nidx_set.size());
  // One candidate per (node, feature) pair.
  std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() *
                                                     columns);
  // Use first device
  auto& shard = shards_.front();
  dh::safe_cuda(hipSetDevice(shard->device_idx));
  shard->temp_memory.LazyAllocate(sizeof(DeviceSplitCandidate) * columns *
                                  nidx_set.size());
  auto d_split = shard->temp_memory.Pointer<DeviceSplitCandidate>();
  auto& streams = shard->GetStreams(static_cast<int>(nidx_set.size()));
  // Use streams to process nodes concurrently
  for (size_t i = 0; i < nidx_set.size(); i++) {
    auto nidx = nidx_set[i];
    DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param_);
    const int BLOCK_THREADS = 256;
    // One block per feature (see evaluate_split_kernel).
    hipLaunchKernelGGL(( evaluate_split_kernel<BLOCK_THREADS>)
        , dim3(uint32_t(columns)), dim3(BLOCK_THREADS), 0, streams[i],
        shard->hist.GetHistPtr(nidx), nidx, info_->num_col_, node,
        shard->feature_segments.Data(), shard->min_fvalue.Data(),
        shard->gidx_fvalue_map.Data(), GPUTrainingParam(param_),
        d_split + i * columns, node_value_constraints_[nidx],
        shard->monotone_constraints.Data());
  }
  // NOTE(review): this relies on the blocking default-stream hipMemcpy to
  // synchronise the per-node streams before reading the results — confirm.
  dh::safe_cuda(
      hipMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage,
                sizeof(DeviceSplitCandidate) * columns * nidx_set.size(),
                hipMemcpyDeviceToHost));
  // Reduce the per-feature candidates to a per-node best, honouring column
  // sampling.
  for (size_t i = 0; i < nidx_set.size(); i++) {
    auto nidx = nidx_set[i];
    DeviceSplitCandidate nidx_best;
    for (size_t fidx = 0; fidx < columns; fidx++) {
      auto& candidate = candidate_splits[i * columns + fidx];
      if (column_sampler_.ColumnUsed(candidate.findex,
                                     p_tree->GetDepth(nidx))) {
        nidx_best.Update(candidate, param_);
      }
    }
    best_splits[i] = nidx_best;
  }
  // Return by name (no std::move) so NRVO / copy elision applies.
  return best_splits;
}
// Initialise the root node: total gradient sum, root histogram, root leaf
// weight, and the first split candidate pushed onto the expansion queue.
void InitRoot(RegTree* p_tree) {
  auto root_nidx = 0;
  // Sum gradients
  std::vector<GradientPair> tmp_sums(shards_.size());
  dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) {
    dh::safe_cuda(hipSetDevice(shard->device_idx));
    tmp_sums[i] =
        dh::SumReduction(shard->temp_memory, shard->gpair.Data(),
                         shard->gpair.Size());
  });
  // Combine the per-shard partial sums on the host.
  auto sum_gradient =
      std::accumulate(tmp_sums.begin(), tmp_sums.end(), GradientPair());
  // Generate root histogram
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->BuildHist(root_nidx);
  });
  this->AllReduceHist(root_nidx);
  // Remember root stats
  p_tree->Stat(root_nidx).sum_hess = sum_gradient.GetHess();
  auto weight = CalcWeight(param_, sum_gradient);
  p_tree->Stat(root_nidx).base_weight = weight;
  (*p_tree)[root_nidx].SetLeaf(param_.learning_rate * weight);
  // Store sum gradients
  for (auto& shard : shards_) {
    shard->node_sum_gradients[root_nidx] = sum_gradient;
  }
  // Initialise root constraint
  node_value_constraints_.resize(p_tree->GetNodes().size());
  // Generate first split
  auto splits = this->EvaluateSplits({root_nidx}, p_tree);
  qexpand_->push(
      ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), splits.front(), 0));
}
// Re-partition the rows of a freshly split node between its two children on
// every shard. The floating-point split value is first translated to its
// bin id, since the device code compares compressed bin ids.
void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) {
  auto nidx = candidate.nid;
  auto left_nidx = (*p_tree)[nidx].LeftChild();
  auto right_nidx = (*p_tree)[nidx].RightChild();
  // convert floating-point split_pt into corresponding bin_id
  // split_cond = -1 indicates that split_pt is less than all known cut points
  auto split_gidx = -1;
  auto fidx = candidate.split.findex;
  auto default_dir_left = candidate.split.dir == kLeftDir;
  // Linear scan over this feature's cut points for an exact fvalue match.
  auto fidx_begin = hmat_.row_ptr[fidx];
  auto fidx_end = hmat_.row_ptr[fidx + 1];
  for (auto i = fidx_begin; i < fidx_end; ++i) {
    if (candidate.split.fvalue == hmat_.cut[i]) {
      split_gidx = static_cast<int32_t>(i);
    }
  }
  // Dense matrices allow direct indexing instead of a per-row binary search.
  auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_;
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->UpdatePosition(nidx, left_nidx, right_nidx, fidx,
                          split_gidx, default_dir_left,
                          is_dense, fidx_begin, fidx_end);
  });
}
// Materialise a queued split candidate in the tree: add the child nodes,
// propagate value constraints and statistics to them, then move rows to
// their new children on every shard.
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
  // Add new leaves
  RegTree& tree = *p_tree;
  tree.AddChilds(candidate.nid);
  auto& parent = tree[candidate.nid];
  parent.SetSplit(candidate.split.findex, candidate.split.fvalue,
                  candidate.split.dir == kLeftDir);
  tree.Stat(candidate.nid).loss_chg = candidate.split.loss_chg;
  // Set up child constraints
  node_value_constraints_.resize(tree.GetNodes().size());
  GradStats left_stats(param_);
  left_stats.Add(candidate.split.left_sum);
  GradStats right_stats(param_);
  right_stats.Add(candidate.split.right_sum);
  node_value_constraints_[candidate.nid].SetChild(
      param_, parent.SplitIndex(), left_stats, right_stats,
      &node_value_constraints_[parent.LeftChild()],
      &node_value_constraints_[parent.RightChild()]);
  // Configure left child
  auto left_weight =
      node_value_constraints_[parent.LeftChild()].CalcWeight(param_, left_stats);
  tree[parent.LeftChild()].SetLeaf(left_weight * param_.learning_rate, 0);
  tree.Stat(parent.LeftChild()).base_weight = left_weight;
  tree.Stat(parent.LeftChild()).sum_hess = candidate.split.left_sum.GetHess();
  // Configure right child
  auto right_weight =
      node_value_constraints_[parent.RightChild()].CalcWeight(param_, right_stats);
  tree[parent.RightChild()].SetLeaf(right_weight * param_.learning_rate, 0);
  tree.Stat(parent.RightChild()).base_weight = right_weight;
  tree.Stat(parent.RightChild()).sum_hess = candidate.split.right_sum.GetHess();
  // Store sum gradients
  for (auto& shard : shards_) {
    shard->node_sum_gradients[parent.LeftChild()] = candidate.split.left_sum;
    shard->node_sum_gradients[parent.RightChild()] = candidate.split.right_sum;
  }
  // Partition rows between the new children on every shard.
  this->UpdatePosition(candidate, p_tree);
}
// Grow one tree: initialise data and the root, then repeatedly pop the best
// candidate off the expansion queue, apply its split, and (when the children
// may still legally be expanded) build their histograms and enqueue them.
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
                RegTree* p_tree) {
  auto& tree = *p_tree;

  monitor_.Start("InitData", device_list_);
  this->InitData(gpair, p_fmat, *p_tree);
  monitor_.Stop("InitData", device_list_);
  monitor_.Start("InitRoot", device_list_);
  this->InitRoot(p_tree);
  monitor_.Stop("InitRoot", device_list_);

  // Monotonically increasing timestamps break ties between candidates the
  // queue comparator considers equal (FIFO within equal priority).
  auto timestamp = qexpand_->size();
  auto num_leaves = 1;

  while (!qexpand_->empty()) {
    auto candidate = qexpand_->top();
    qexpand_->pop();
    if (!candidate.IsValid(param_, num_leaves)) continue;

    monitor_.Start("ApplySplit", device_list_);
    this->ApplySplit(candidate, p_tree);
    monitor_.Stop("ApplySplit", device_list_);
    num_leaves++;  // one leaf was replaced by two

    auto left_child_nidx = tree[candidate.nid].LeftChild();
    auto right_child_nidx = tree[candidate.nid].RightChild();

    // Only create child entries if needed
    if (ExpandEntry::ChildIsValid(param_, tree.GetDepth(left_child_nidx),
                                  num_leaves)) {
      monitor_.Start("BuildHist", device_list_);
      this->BuildHistLeftRight(candidate.nid, left_child_nidx,
                               right_child_nidx);
      monitor_.Stop("BuildHist", device_list_);

      monitor_.Start("EvaluateSplits", device_list_);
      auto splits =
          this->EvaluateSplits({left_child_nidx, right_child_nidx}, p_tree);
      qexpand_->push(ExpandEntry(left_child_nidx,
                                 tree.GetDepth(left_child_nidx), splits[0],
                                 timestamp++));
      qexpand_->push(ExpandEntry(right_child_nidx,
                                 tree.GetDepth(right_child_nidx), splits[1],
                                 timestamp++));
      monitor_.Stop("EvaluateSplits", device_list_);
    }
  }
}
// Fast-path prediction update: instead of re-running full prediction, shift
// each cached row prediction by the weight of the leaf the row landed in.
// Returns false when the cache cannot be used (no shards yet, or `data` is
// not the matrix this updater last trained on).
bool UpdatePredictionCache(
    const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
  // Check eligibility before starting the timer, so the early return does
  // not leave an unbalanced monitor_.Start() with no matching Stop().
  if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
    return false;
  }
  monitor_.Start("UpdatePredictionCache", device_list_);
  p_out_preds->Reshard(devices_);
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->UpdatePredictionCache(p_out_preds->DevicePointer(shard->device_idx));
  });
  monitor_.Stop("UpdatePredictionCache", device_list_);
  return true;
}
// A node queued for expansion. Ordering is decided by the queue's comparator
// (DepthWise or LossGuide); `timestamp` provides FIFO tie-breaking.
struct ExpandEntry {
  int nid;
  int depth;
  DeviceSplitCandidate split;
  uint64_t timestamp;

  ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split,
              uint64_t timestamp)
      : nid(nid), depth(depth), split(split), timestamp(timestamp) {}

  // A candidate may be applied only if its split carries real gain, both
  // children receive hessian mass, and neither the depth limit nor the leaf
  // budget is already exhausted.
  bool IsValid(const TrainParam& param, int num_leaves) const {
    bool has_gain = split.loss_chg > kRtEps;
    bool fills_both_children = split.left_sum.GetHess() != 0 &&
                               split.right_sum.GetHess() != 0;
    bool depth_ok = !(param.max_depth > 0 && depth == param.max_depth);
    bool leaves_ok = !(param.max_leaves > 0 && num_leaves == param.max_leaves);
    return has_gain && fills_both_children && depth_ok && leaves_ok;
  }

  // Whether a child created at `depth` with `num_leaves` leaves present may
  // itself still be expanded later.
  static bool ChildIsValid(const TrainParam& param, int depth,
                           int num_leaves) {
    bool depth_ok = !(param.max_depth > 0 && depth == param.max_depth);
    bool leaves_ok = !(param.max_leaves > 0 && num_leaves == param.max_leaves);
    return depth_ok && leaves_ok;
  }

  friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
    os << "ExpandEntry: \n"
       << "nidx: " << e.nid << "\n"
       << "depth: " << e.depth << "\n"
       << "loss: " << e.split.loss_chg << "\n"
       << "left_sum: " << e.split.left_sum << "\n"
       << "right_sum: " << e.split.right_sum << "\n";
    return os;
  }
};
// Priority-queue comparator for depth-wise growth: yield the shallowest
// entry first, breaking depth ties in favour of the earliest timestamp.
inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) {
  return lhs.depth == rhs.depth ? lhs.timestamp > rhs.timestamp
                                : lhs.depth > rhs.depth;
}
// Priority-queue comparator for loss-guided growth: yield the entry with the
// largest loss reduction first, breaking ties by earliest timestamp.
inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) {
  return lhs.split.loss_chg == rhs.split.loss_chg
             ? lhs.timestamp > rhs.timestamp
             : lhs.split.loss_chg < rhs.split.loss_chg;
}
TrainParam param_;            // training hyper-parameters
common::HistCutMatrix hmat_;  // per-feature quantile cut points
common::GHistIndexMatrix gmat_;
MetaInfo* info_;              // borrowed metadata of the current DMatrix
bool initialised_;            // InitDataOnce() has run
int n_devices_;
int n_bins_;                  // total bins over all features (hmat_.row_ptr.back())
std::vector<std::unique_ptr<DeviceShard>> shards_;  // one shard per GPU
ColumnSampler column_sampler_;
typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
                            std::function<bool(ExpandEntry, ExpandEntry)>>
    ExpandQueue;
std::unique_ptr<ExpandQueue> qexpand_;  // frontier of nodes awaiting expansion
common::Monitor monitor_;               // phase timing
dh::AllReducer reducer_;                // multi-GPU histogram all-reduce
std::vector<ValueConstraint> node_value_constraints_;  // per-node monotonic bounds
std::vector<int> device_list_;  // physical device ordinals in use
DMatrix* p_last_fmat_;          // matrix of the last update; prediction-cache key
GPUSet devices_;
};
// Register this updater under the name used by `tree_method=gpu_hist`.
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
    .describe("Grow tree with GPU.")
    .set_body([]() { return new GPUHistMaker(); });
} // namespace tree
} // namespace xgboost
| b3f07fdcfcc2b5619631ee92d370817da5ac22ad.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "../common/compressed_iterator.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/host_device_vector.h"
#include "../common/timer.h"
#include "param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
using GradientPairSumT = GradientPairPrecise;
// Block-wide sum of one feature's histogram bins [begin, end). Must be
// called by every thread of the block; the result is broadcast to all
// threads via shared memory.
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT>
__device__ GradientPairSumT ReduceFeature(const GradientPairSumT* begin,
                                          const GradientPairSumT* end,
                                          TempStorageT* temp_storage) {
  __shared__ cub::Uninitialized<GradientPairSumT> uninitialized_sum;
  GradientPairSumT& shared_sum = uninitialized_sum.Alias();

  // Each thread strides over the bin range, accumulating a private partial.
  GradientPairSumT local_sum = GradientPairSumT();
  for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
    bool thread_active = itr + threadIdx.x < end;
    // Scan histogram
    GradientPairSumT bin = thread_active ? *(itr + threadIdx.x) : GradientPairSumT();
    local_sum += bin;
  }
  // CUB block reduce: only thread 0 receives the valid total, so it
  // publishes the value through shared memory for the rest of the block.
  local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
  if (threadIdx.x == 0) {
    shared_sum = local_sum;
  }
  __syncthreads();  // make shared_sum visible to every thread before returning
  return shared_sum;
}
// Evaluate every split point of feature `fidx` for one node and fold the
// best into *best_split. Must be executed by a full thread block; all CUB
// primitives share `temp_storage` (each use is fenced by __syncthreads()).
template <int BLOCK_THREADS, typename ReduceT, typename scan_t,
          typename max_ReduceT, typename TempStorageT>
__device__ void EvaluateFeature(int fidx, const GradientPairSumT* hist,
                                const int* feature_segments, float min_fvalue,
                                const float* gidx_fvalue_map,
                                DeviceSplitCandidate* best_split,
                                const DeviceNodeStats& node,
                                const GPUTrainingParam& param,
                                TempStorageT* temp_storage, int constraint,
                                const ValueConstraint& value_constraint) {
  // Bin range [gidx_begin, gidx_end) owned by this feature.
  int gidx_begin = feature_segments[fidx];
  int gidx_end = feature_segments[fidx + 1];

  // Total gradient mass seen for this feature; the difference to the node's
  // parent sum is the "missing" mass routed down the default direction.
  GradientPairSumT feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
      hist + gidx_begin, hist + gidx_end, temp_storage);

  auto prefix_op = SumCallbackOp<GradientPairSumT>();
  for (int scan_begin = gidx_begin; scan_begin < gidx_end;
       scan_begin += BLOCK_THREADS) {
    bool thread_active = scan_begin + threadIdx.x < gidx_end;

    GradientPairSumT bin =
        thread_active ? hist[scan_begin + threadIdx.x] : GradientPairSumT();
    // Exclusive prefix sum across the block; prefix_op carries the running
    // total between loop iterations.
    scan_t(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);

    // Calculate gain
    GradientPairSumT parent_sum = GradientPairSumT(node.sum_gradients);

    GradientPairSumT missing = parent_sum - feature_sum;

    bool missing_left = true;
    const float null_gain = -FLT_MAX;
    float gain = null_gain;
    if (thread_active) {
      // LossChangeMissing also decides (via missing_left, passed by
      // reference) which side missing values should default to.
      gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
                               constraint, value_constraint, missing_left);
    }

    __syncthreads();

    // Find thread with best gain
    cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
    cub::KeyValuePair<int, float> best =
        max_ReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());

    __shared__ cub::KeyValuePair<int, float> block_max;
    if (threadIdx.x == 0) {
      block_max = best;
    }

    __syncthreads();

    // Best thread updates split
    if (threadIdx.x == block_max.key) {
      int gidx = scan_begin + threadIdx.x;
      // Split value is the left edge of this bin (the feature minimum for
      // the very first bin of the feature).
      float fvalue =
          gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1];

      GradientPairSumT left = missing_left ? bin + missing : bin;
      GradientPairSumT right = parent_sum - left;

      best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue, fidx,
                         GradientPair(left), GradientPair(right), param);
    }
    __syncthreads();
  }
}
// One block per feature: evaluate all split candidates of feature
// blockIdx.x for node `nidx` and write the per-feature best to
// d_split[fidx]. Launch with gridDim.x == n_features, BLOCK_THREADS threads.
template <int BLOCK_THREADS>
__global__ void evaluate_split_kernel(
    const GradientPairSumT* d_hist, int nidx, uint64_t n_features,
    DeviceNodeStats nodes, const int* d_feature_segments,
    const float* d_fidx_min_map, const float* d_gidx_fvalue_map,
    GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split,
    ValueConstraint value_constraint, int* d_monotonic_constraints) {
  typedef cub::KeyValuePair<int, float> ArgMaxT;
  typedef cub::BlockScan<GradientPairSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>
      BlockScanT;
  typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT;

  typedef cub::BlockReduce<GradientPairSumT, BLOCK_THREADS> SumReduceT;

  // The scan and the two reductions are used at different, __syncthreads()-
  // separated points inside EvaluateFeature, so they can share one union of
  // temp storage.
  union TempStorage {
    typename BlockScanT::TempStorage scan;
    typename MaxReduceT::TempStorage max_reduce;
    typename SumReduceT::TempStorage sum_reduce;
  };

  __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
  DeviceSplitCandidate& best_split = uninitialized_split.Alias();
  __shared__ TempStorage temp_storage;

  if (threadIdx.x == 0) {
    best_split = DeviceSplitCandidate();
  }

  __syncthreads();

  auto fidx = blockIdx.x;  // one feature per block
  auto constraint = d_monotonic_constraints[fidx];
  EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
      fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map,
      &best_split, nodes, gpu_param, &temp_storage, constraint,
      value_constraint);

  __syncthreads();

  if (threadIdx.x == 0) {
    // Record best loss
    d_split[fidx] = best_split;
  }
}
// Find a gidx value for a given feature otherwise return -1 if not found
// Binary-searches the (sorted) gidx values of one row, positions
// [begin, end), for any bin index in [fidx_begin, fidx_end). The
// `previous_middle` guard compensates for `begin = middle` (rather than
// middle + 1): once the interval stops shrinking the value is absent.
template <typename GidxIterT>
__device__ int BinarySearchRow(bst_uint begin, bst_uint end, GidxIterT data,
                               int fidx_begin, int fidx_end) {
  bst_uint previous_middle = UINT32_MAX;
  while (end != begin) {
    auto middle = begin + (end - begin) / 2;
    if (middle == previous_middle) {
      break;  // interval no longer shrinking -> value not present
    }
    previous_middle = middle;

    auto gidx = data[middle];

    if (gidx >= fidx_begin && gidx < fidx_end) {
      return gidx;  // any bin of the target feature is the answer
    } else if (gidx < fidx_begin) {
      begin = middle;
    } else {
      end = middle;
    }
  }
  // Value is missing
  return -1;
}
// Per-GPU histogram storage: a flat device array holding `n_bins`
// GradientPairSumT entries for each tree node.
struct DeviceHistogram {
  dh::BulkAllocator<dh::MemoryType::kDevice> ba;
  dh::DVec<GradientPairSumT> data;
  int n_bins;

  // Allocate space for `max_nodes` histograms of `n_bins` bins each.
  void Init(int device_idx, int max_nodes, int n_bins, bool silent) {
    this->n_bins = n_bins;
    ba.Allocate(device_idx, silent, &data, size_t(max_nodes) * size_t(n_bins));
  }

  // Zero all histograms before a new iteration.
  void Reset() { data.Fill(GradientPairSumT()); }

  // Pointer to the first bin of node `nidx`. The offset is computed in
  // size_t, matching the size_t-sized allocation in Init(): plain int
  // multiplication could overflow for large node counts.
  GradientPairSumT* GetHistPtr(int nidx) {
    return data.Data() + static_cast<size_t>(nidx) * n_bins;
  }

  // Debugging helper: dump node `nidx`'s histogram to stdout.
  void PrintNidx(int nidx) const {
    auto h_data = data.AsVector();
    std::cout << "nidx " << nidx << ":\n";
    size_t begin = static_cast<size_t>(n_bins) * nidx;
    size_t end = static_cast<size_t>(n_bins) * (nidx + 1);
    for (size_t i = begin; i < end; i++) {
      std::cout << h_data[i] << " ";
    }
    std::cout << "\n";
  }
};
// Trivially-copyable subset of TrainParam, captured by value inside device
// lambdas (see DeviceShard::UpdatePredictionCache).
struct CalcWeightTrainParam {
  float min_child_weight;
  float reg_alpha;
  float reg_lambda;
  float max_delta_step;
  float learning_rate;
  XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
      : min_child_weight(p.min_child_weight),
        reg_alpha(p.reg_alpha),
        reg_lambda(p.reg_lambda),
        max_delta_step(p.max_delta_step),
        learning_rate(p.learning_rate) {}
};
// index of the first element in cuts greater than v, or n if none;
// cuts are ordered, and binary search is used
__device__ int upper_bound(const float* __restrict__ cuts, int n, float v) {
  // Classic half-open binary search. Invariant: cuts[i] <= v for all
  // i < lo, and cuts[i] > v for all i >= hi.
  int lo = 0;
  int hi = n;
  while (lo < hi) {
    int mid = lo + (hi - lo) / 2;
    if (cuts[mid] > v) {
      hi = mid;
    } else {
      lo = mid + 1;
    }
  }
  return lo;
}
// Quantise one batch of CSR rows into the compressed ELLPACK buffer.
// Thread (irow, ifeature) handles entry `ifeature` of row `irow`; slots
// past the end of a row are written as null_gidx_value so every row
// occupies exactly row_stride symbols.
__global__ void compress_bin_ellpack_k
    (common::CompressedBufferWriter wr, common::CompressedByteT* __restrict__ buffer,
     const size_t* __restrict__ row_ptrs,
     const Entry* __restrict__ entries,
     const float* __restrict__ cuts, const size_t* __restrict__ cut_rows,
     size_t base_row, size_t n_rows, size_t row_ptr_begin, size_t row_stride,
     unsigned int null_gidx_value) {
  size_t irow = threadIdx.x + size_t(blockIdx.x) * blockDim.x;
  int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
  if (irow >= n_rows || ifeature >= row_stride)
    return;
  int row_size = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
  unsigned int bin = null_gidx_value;  // default: padding/missing slot
  if (ifeature < row_size) {
    // `entries` holds only this batch; row_ptr_begin is the batch's first
    // global entry index.
    Entry entry = entries[row_ptrs[irow] - row_ptr_begin + ifeature];
    int feature = entry.index;
    float fvalue = entry.fvalue;
    // {feature_cuts, ncuts} span this feature's cut points.
    const float *feature_cuts = &cuts[cut_rows[feature]];
    int ncuts = cut_rows[feature + 1] - cut_rows[feature];
    // Bin = first cut greater than fvalue, clamped to the feature's last
    // bin, then shifted into the global bin index space.
    bin = upper_bound(feature_cuts, ncuts, fvalue);
    if (bin >= ncuts)
      bin = ncuts - 1;
    bin += cut_rows[feature];
  }
  // Pack the symbol into the shared compressed buffer.
  wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Histogram build with a per-block shared-memory staging histogram:
// accumulate into shared memory with block-local atomics, then flush each
// bin to the global node histogram with one atomic per block. Requires
// dynamic shared memory of sizeof(GradientPairSumT) * null_gidx_value bytes.
__global__ void sharedMemHistKernel(size_t row_stride,
                                    const bst_uint* d_ridx,
                                    common::CompressedIterator<uint32_t> d_gidx,
                                    int null_gidx_value,
                                    GradientPairSumT* d_node_hist,
                                    const GradientPair* d_gpair,
                                    size_t segment_begin,
                                    size_t n_elements) {
  extern __shared__ char smem[];
  GradientPairSumT* smem_arr = reinterpret_cast<GradientPairSumT*>(smem);  // NOLINT
  // Zero the shared histogram (shared memory starts uninitialised).
  for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
    smem_arr[i] = GradientPairSumT();
  }
  __syncthreads();
  for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
    int ridx = d_ridx[idx / row_stride + segment_begin];
    int gidx = d_gidx[ridx * row_stride + idx % row_stride];
    if (gidx != null_gidx_value) {  // skip ELLPACK padding slots
      AtomicAddGpair(smem_arr + gidx, d_gpair[ridx]);
    }
  }
  __syncthreads();
  // Merge this block's partial histogram into the global node histogram.
  for (auto i : dh::BlockStrideRange(0, null_gidx_value)) {
    AtomicAddGpair(d_node_hist + i, smem_arr[i]);
  }
}
// Manage memory for a single GPU
// A DeviceShard owns this GPU's slice of rows [row_begin_idx, row_end_idx)
// and all per-tree working state: the compressed quantised matrix, the
// row -> node partition, per-node histograms, gradients and the prediction
// cache.
struct DeviceShard {
  // Half-open range of entries in `ridx` belonging to one tree node.
  struct Segment {
    size_t begin;
    size_t end;

    Segment() : begin(0), end(0) {}

    Segment(size_t begin, size_t end) : begin(begin), end(end) {
      CHECK_GE(end, begin);
    }
    size_t Size() const { return end - begin; }
  };

  int device_idx;             // physical CUDA device ordinal
  int normalised_device_idx;  // Device index counting from param.gpu_id
  dh::BulkAllocator<dh::MemoryType::kDevice> ba;
  dh::DVec<common::CompressedByteT> gidx_buffer;  // compressed ELLPACK matrix
  dh::DVec<GradientPair> gpair;
  dh::DVec2<bst_uint> ridx;  // Row index relative to this shard
  dh::DVec2<int> position;   // node id of each row, parallel to ridx
  std::vector<Segment> ridx_segments;  // per-node ranges within ridx
  dh::DVec<int> feature_segments;
  dh::DVec<float> gidx_fvalue_map;
  dh::DVec<float> min_fvalue;
  dh::DVec<int> monotone_constraints;
  dh::DVec<bst_float> prediction_cache;
  std::vector<GradientPair> node_sum_gradients;  // host copy, one per node
  dh::DVec<GradientPair> node_sum_gradients_d;
  common::CompressedIterator<uint32_t> gidx;  // decoded view of gidx_buffer
  size_t row_stride;       // ELLPACK width: max non-zeros of any row
  bst_uint row_begin_idx;  // The row offset for this shard
  bst_uint row_end_idx;
  bst_uint n_rows;
  int n_bins;
  int null_gidx_value;  // sentinel symbol for padding/missing slots
  DeviceHistogram hist;
  TrainParam param;
  bool prediction_cache_initialised;
  bool can_use_smem_atomics;  // whole histogram fits in shared memory

  int64_t* tmp_pinned;  // Small amount of staging memory

  std::vector<cudaStream_t> streams;

  dh::CubMemory temp_memory;

  DeviceShard(int device_idx, int normalised_device_idx,
              bst_uint row_begin, bst_uint row_end, int n_bins, TrainParam param)
      : device_idx(device_idx),
        normalised_device_idx(normalised_device_idx),
        row_begin_idx(row_begin),
        row_end_idx(row_end),
        n_rows(row_end - row_begin),
        n_bins(n_bins),
        null_gidx_value(n_bins),
        param(param),
        prediction_cache_initialised(false),
        can_use_smem_atomics(false) {}

  // Build the compressed quantised matrix for this shard's rows and
  // allocate all per-node working memory.
  void Init(const common::HistCutMatrix& hmat, const SparsePage& row_batch) {
    // copy cuts to the GPU
    dh::safe_cuda(cudaSetDevice(device_idx));
    thrust::device_vector<float> cuts_d(hmat.cut);
    thrust::device_vector<size_t> cut_row_ptrs_d(hmat.row_ptr);

    // find the maximum row size
    thrust::device_vector<size_t> row_ptr_d(
        row_batch.offset.data() + row_begin_idx, row_batch.offset.data() + row_end_idx + 1);

    auto row_iter = row_ptr_d.begin();
    auto get_size = [=] __device__(size_t row) {
      return row_iter[row + 1] - row_iter[row];
    };  // NOLINT

    auto counting = thrust::make_counting_iterator(size_t(0));
    using TransformT = thrust::transform_iterator<decltype(get_size),
                                                  decltype(counting), size_t>;
    TransformT row_size_iter = TransformT(counting, get_size);
    row_stride = thrust::reduce(row_size_iter, row_size_iter + n_rows, 0,
                                thrust::maximum<size_t>());

    // One extra symbol encodes "missing" (null_gidx_value == n_bins).
    int num_symbols =
        n_bins + 1;
    size_t compressed_size_bytes =
        common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
                                                            num_symbols);

    CHECK(!(param.max_leaves == 0 && param.max_depth == 0))
        << "Max leaves and max depth cannot both be unconstrained for "
           "gpu_hist.";
    ba.Allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes);
    gidx_buffer.Fill(0);

    // bin and compress entries in batches of rows
    // use no more than 1/16th of GPU memory per batch
    size_t gpu_batch_nrows = dh::TotalMemory(device_idx) /
        (16 * row_stride * sizeof(Entry));
    if (gpu_batch_nrows > n_rows) {
      gpu_batch_nrows = n_rows;
    }

    thrust::device_vector<Entry> entries_d(gpu_batch_nrows * row_stride);

    size_t gpu_nbatches = dh::DivRoundUp(n_rows, gpu_batch_nrows);

    for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
      size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
      size_t batch_row_end = (gpu_batch + 1) * gpu_batch_nrows;
      if (batch_row_end > n_rows) {
        batch_row_end = n_rows;
      }
      size_t batch_nrows = batch_row_end - batch_row_begin;
      size_t n_entries =
          row_batch.offset[row_begin_idx + batch_row_end] -
          row_batch.offset[row_begin_idx + batch_row_begin];
      // Stage this batch's raw entries on the device.
      dh::safe_cuda
          (cudaMemcpy
           (entries_d.data().get(),
            &row_batch.data[row_batch.offset[row_begin_idx + batch_row_begin]],
            n_entries * sizeof(Entry), cudaMemcpyDefault));
      // 2D launch: x over rows, y over entry slots within a row.
      dim3 block3(32, 8, 1);
      dim3 grid3(dh::DivRoundUp(n_rows, block3.x),
                 dh::DivRoundUp(row_stride, block3.y), 1);
      compress_bin_ellpack_k<<<grid3, block3>>>
          (common::CompressedBufferWriter(num_symbols), gidx_buffer.Data(),
           row_ptr_d.data().get() + batch_row_begin,
           entries_d.data().get(), cuts_d.data().get(), cut_row_ptrs_d.data().get(),
           batch_row_begin, batch_nrows,
           row_batch.offset[row_begin_idx + batch_row_begin],
           row_stride, null_gidx_value);

      dh::safe_cuda(cudaGetLastError());
      dh::safe_cuda(cudaDeviceSynchronize());
    }

    // free the memory that is no longer needed
    row_ptr_d.resize(0);
    row_ptr_d.shrink_to_fit();
    entries_d.resize(0);
    entries_d.shrink_to_fit();

    gidx = common::CompressedIterator<uint32_t>(gidx_buffer.Data(), num_symbols);

    // allocate the rest
    int max_nodes =
        param.max_leaves > 0 ? param.max_leaves * 2 : MaxNodesDepth(param.max_depth);
    ba.Allocate(device_idx, param.silent,
                &gpair, n_rows, &ridx, n_rows, &position, n_rows,
                &prediction_cache, n_rows, &node_sum_gradients_d, max_nodes,
                &feature_segments, hmat.row_ptr.size(), &gidx_fvalue_map,
                hmat.cut.size(), &min_fvalue, hmat.min_val.size(),
                &monotone_constraints, param.monotone_constraints.size());
    gidx_fvalue_map = hmat.cut;
    min_fvalue = hmat.min_val;
    feature_segments = hmat.row_ptr;
    monotone_constraints = param.monotone_constraints;

    node_sum_gradients.resize(max_nodes);
    ridx_segments.resize(max_nodes);

    // check if we can use shared memory for building histograms
    // (assuming atleast we need 2 CTAs per SM to maintain decent latency hiding)
    auto histogram_size = sizeof(GradientPairSumT) * null_gidx_value;
    auto max_smem = dh::MaxSharedMemory(device_idx);
    can_use_smem_atomics = histogram_size <= max_smem;

    // Init histogram
    hist.Init(device_idx, max_nodes, hmat.row_ptr.back(), param.silent);

    // Pinned host staging for the single left-count value read back in
    // UpdatePosition().
    dh::safe_cuda(cudaMallocHost(&tmp_pinned, sizeof(int64_t)));
  }

  ~DeviceShard() {
    for (auto& stream : streams) {
      dh::safe_cuda(cudaStreamDestroy(stream));
    }
    dh::safe_cuda(cudaFreeHost(tmp_pinned));
  }

  // Get vector of at least n initialised streams
  // NOTE(review): `n` is int vs size_t streams.size() — signed/unsigned
  // comparison; harmless for realistic n but worth confirming.
  std::vector<cudaStream_t>& GetStreams(int n) {
    if (n > streams.size()) {
      // Destroy and recreate: existing streams may be fewer than requested.
      for (auto& stream : streams) {
        dh::safe_cuda(cudaStreamDestroy(stream));
      }

      streams.clear();
      streams.resize(n);

      for (auto& stream : streams) {
        dh::safe_cuda(cudaStreamCreate(&stream));
      }
    }

    return streams;
  }

  // Reset values for each update iteration
  void Reset(HostDeviceVector<GradientPair>* dh_gpair) {
    dh::safe_cuda(cudaSetDevice(device_idx));
    position.CurrentDVec().Fill(0);  // every row starts at the root node
    std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
              GradientPair());
    thrust::sequence(ridx.CurrentDVec().tbegin(), ridx.CurrentDVec().tend());

    std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0));
    ridx_segments.front() = Segment(0, ridx.Size());  // root owns all rows
    this->gpair.copy(dh_gpair->tbegin(device_idx), dh_gpair->tend(device_idx));
    SubsampleGradientPair(&gpair, param.subsample, row_begin_idx);
    hist.Reset();
  }

  // Histogram build entirely with global-memory atomics; fallback when the
  // histogram does not fit in shared memory.
  void BuildHistUsingGlobalMem(int nidx) {
    auto segment = ridx_segments[nidx];
    auto d_node_hist = hist.GetHistPtr(nidx);
    auto d_gidx = gidx;
    auto d_ridx = ridx.Current();
    auto d_gpair = gpair.Data();
    auto row_stride = this->row_stride;
    auto null_gidx_value = this->null_gidx_value;
    auto n_elements = segment.Size() * row_stride;

    dh::LaunchN(device_idx, n_elements, [=] __device__(size_t idx) {
      int ridx = d_ridx[(idx / row_stride) + segment.begin];
      int gidx = d_gidx[ridx * row_stride + idx % row_stride];

      if (gidx != null_gidx_value) {
        AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]);
      }
    });
  }

  // Histogram build staged through shared memory; see sharedMemHistKernel.
  void BuildHistUsingSharedMem(int nidx) {
    auto segment = ridx_segments[nidx];
    auto segment_begin = segment.begin;
    auto d_node_hist = hist.GetHistPtr(nidx);
    auto d_gidx = gidx;
    auto d_ridx = ridx.Current();
    auto d_gpair = gpair.Data();
    auto row_stride = this->row_stride;
    auto null_gidx_value = this->null_gidx_value;
    auto n_elements = segment.Size() * row_stride;

    const size_t smem_size = sizeof(GradientPairSumT) * null_gidx_value;
    const int items_per_thread = 8;
    const int block_threads = 256;
    const int grid_size =
        static_cast<int>(dh::DivRoundUp(n_elements,
                                        items_per_thread * block_threads));
    if (grid_size <= 0) {
      return;  // empty node: nothing to accumulate
    }
    dh::safe_cuda(cudaSetDevice(device_idx));
    sharedMemHistKernel<<<grid_size, block_threads, smem_size>>>
        (row_stride, d_ridx, d_gidx, null_gidx_value, d_node_hist, d_gpair,
         segment_begin, n_elements);
  }

  void BuildHist(int nidx) {
    if (can_use_smem_atomics) {
      BuildHistUsingSharedMem(nidx);
    } else {
      BuildHistUsingGlobalMem(nidx);
    }
  }

  // hist[subtraction] = hist[parent] - hist[sibling]; avoids rebuilding the
  // larger child's histogram from rows.
  void SubtractionTrick(int nidx_parent, int nidx_histogram,
                        int nidx_subtraction) {
    auto d_node_hist_parent = hist.GetHistPtr(nidx_parent);
    auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram);
    auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction);

    dh::LaunchN(device_idx, hist.n_bins, [=] __device__(size_t idx) {
      d_node_hist_subtraction[idx] =
          d_node_hist_parent[idx] - d_node_hist_histogram[idx];
    });
  }

  // Warp-aggregated count of rows routed to the left child: one atomicAdd
  // per warp instead of per thread.
  // NOTE(review): legacy mask-less __ballot was removed for sm_70+ in newer
  // CUDA toolkits — migrate to __ballot_sync(0xffffffff, ...) when the
  // minimum supported toolkit allows.
  __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) {
    unsigned ballot = __ballot(val == left_nidx);
    if (threadIdx.x % 32 == 0) {
      atomicAdd(reinterpret_cast<unsigned long long*>(d_count),    // NOLINT
                static_cast<unsigned long long>(__popc(ballot)));  // NOLINT
    }
  }

  // Route each row of node `nidx` to its left/right child by comparing the
  // row's bin for the split feature against split_gidx, then compact the
  // node's ridx segment so the left child's rows come first.
  void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx,
                      int split_gidx, bool default_dir_left, bool is_dense,
                      int fidx_begin, int fidx_end) {
    dh::safe_cuda(cudaSetDevice(device_idx));
    temp_memory.LazyAllocate(sizeof(int64_t));
    auto d_left_count = temp_memory.Pointer<int64_t>();
    dh::safe_cuda(cudaMemset(d_left_count, 0, sizeof(int64_t)));
    auto segment = ridx_segments[nidx];
    auto d_ridx = ridx.Current();
    auto d_position = position.Current();
    auto d_gidx = gidx;
    auto row_stride = this->row_stride;
    dh::LaunchN<1, 512>(
        device_idx, segment.Size(), [=] __device__(bst_uint idx) {
          idx += segment.begin;
          auto ridx = d_ridx[idx];
          auto row_begin = row_stride * ridx;
          auto row_end = row_begin + row_stride;
          auto gidx = -1;
          if (is_dense) {
            // Dense rows store every feature at a fixed offset.
            gidx = d_gidx[row_begin + fidx];
          } else {
            gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin,
                                   fidx_end);
          }

          int position;
          if (gidx >= 0) {
            // Feature is found
            position = gidx <= split_gidx ? left_nidx : right_nidx;
          } else {
            // Feature is missing
            position = default_dir_left ? left_nidx : right_nidx;
          }

          CountLeft(d_left_count, position, left_nidx);
          d_position[idx] = position;
        });

    // Read the left-row count back through pinned staging memory.
    dh::safe_cuda(cudaMemcpy(tmp_pinned, d_left_count, sizeof(int64_t),
                             cudaMemcpyDeviceToHost));
    auto left_count = *tmp_pinned;

    SortPosition(segment, left_nidx, right_nidx);
    // dh::safe_cuda(cudaStreamSynchronize(stream));
    ridx_segments[left_nidx] =
        Segment(segment.begin, segment.begin + left_count);
    ridx_segments[right_nidx] =
        Segment(segment.begin + left_count, segment.end);
  }

  // Stable partition of one segment by node id: radix-sort (position, ridx)
  // pairs into the double buffers, then copy the results back into the
  // "current" buffers.
  void SortPosition(const Segment& segment, int left_nidx, int right_nidx) {
    // Only the bits needed to distinguish the two node ids are sorted.
    int min_bits = 0;
    int max_bits = static_cast<int>(
        std::ceil(std::log2((std::max)(left_nidx, right_nidx) + 1)));

    // First call with nullptr just sizes the temp storage.
    size_t temp_storage_bytes = 0;
    cub::DeviceRadixSort::SortPairs(
        nullptr, temp_storage_bytes, position.Current() + segment.begin,
        position.other() + segment.begin, ridx.Current() + segment.begin,
        ridx.other() + segment.begin, segment.Size(), min_bits, max_bits);

    temp_memory.LazyAllocate(temp_storage_bytes);

    cub::DeviceRadixSort::SortPairs(
        temp_memory.d_temp_storage, temp_memory.temp_storage_bytes,
        position.Current() + segment.begin, position.other() + segment.begin,
        ridx.Current() + segment.begin, ridx.other() + segment.begin,
        segment.Size(), min_bits, max_bits);
    // Copy back key/value
    dh::safe_cuda(cudaMemcpy(
        position.Current() + segment.begin, position.other() + segment.begin,
        segment.Size() * sizeof(int), cudaMemcpyDeviceToDevice));
    dh::safe_cuda(cudaMemcpy(
        ridx.Current() + segment.begin, ridx.other() + segment.begin,
        segment.Size() * sizeof(bst_uint), cudaMemcpyDeviceToDevice));
  }

  // Add each row's leaf weight (scaled by the learning rate) to the cached
  // prediction and write the result back to out_preds_d.
  void UpdatePredictionCache(bst_float* out_preds_d) {
    dh::safe_cuda(cudaSetDevice(device_idx));
    if (!prediction_cache_initialised) {
      // Seed the cache from the caller's current predictions once.
      dh::safe_cuda(cudaMemcpy(
          prediction_cache.Data(), out_preds_d,
          prediction_cache.Size() * sizeof(bst_float), cudaMemcpyDefault));
    }
    prediction_cache_initialised = true;

    CalcWeightTrainParam param_d(param);

    dh::safe_cuda(cudaMemcpy(node_sum_gradients_d.Data(),
                             node_sum_gradients.data(),
                             sizeof(GradientPair) * node_sum_gradients.size(),
                             cudaMemcpyHostToDevice));
    auto d_position = position.Current();
    auto d_ridx = ridx.Current();
    auto d_node_sum_gradients = node_sum_gradients_d.Data();
    auto d_prediction_cache = prediction_cache.Data();

    dh::LaunchN(
        device_idx, prediction_cache.Size(), [=] __device__(int local_idx) {
          int pos = d_position[local_idx];
          bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
          d_prediction_cache[d_ridx[local_idx]] +=
              weight * param_d.learning_rate;
        });

    dh::safe_cuda(cudaMemcpy(
        out_preds_d, prediction_cache.Data(),
        prediction_cache.Size() * sizeof(bst_float), cudaMemcpyDefault));
  }
};
class GPUHistMaker : public TreeUpdater {
public:
struct ExpandEntry;
  // Construct with no data initialised; real setup happens in Init()/InitData().
  GPUHistMaker() : initialised_(false), p_last_fmat_(nullptr) {}
// Parse updater arguments and set up device list, growth policy (queue
// ordering) and the timing monitor.
void Init(
    const std::vector<std::pair<std::string, std::string>>& args) override {
  param_.InitAllowUnknown(args);
  CHECK(param_.n_gpus != 0) << "Must have at least one device";
  n_devices_ = param_.n_gpus;
  devices_ = GPUSet::Range(param_.gpu_id, dh::NDevicesAll(param_.n_gpus));

  dh::CheckComputeCapability();

  // Queue ordering decides the growth strategy: largest loss reduction
  // first (lossguide) or shallowest node first (depthwise).
  if (param_.grow_policy == TrainParam::kLossGuide) {
    qexpand_.reset(new ExpandQueue(LossGuide));
  } else {
    qexpand_.reset(new ExpandQueue(DepthWise));
  }

  monitor_.Init("updater_gpu_hist", param_.debug_verbose);
}
// Entry point called once per boosting iteration: grow every tree in
// `trees` using the supplied gradients.
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
            const std::vector<RegTree*>& trees) override {
  monitor_.Start("Update", device_list_);
  GradStats::CheckInfo(dmat->Info());
  // rescale learning rate according to size of trees
  float lr = param_.learning_rate;
  param_.learning_rate = lr / trees.size();
  ValueConstraint::Init(&param_, dmat->Info().num_col_);
  // build tree
  try {
    for (size_t i = 0; i < trees.size(); ++i) {
      this->UpdateTree(gpair, dmat, trees[i]);
    }
  } catch (const std::exception& e) {
    LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl;
  }
  param_.learning_rate = lr;  // restore the caller-visible learning rate
  monitor_.Stop("Update", device_list_);
}
// One-time setup on the first Update() call: compute quantile cuts,
// partition rows across GPUs, and build each shard's compressed matrix.
void InitDataOnce(DMatrix* dmat) {
  info_ = &dmat->Info();

  monitor_.Start("Quantiles", device_list_);
  hmat_.Init(dmat, param_.max_bin);
  monitor_.Stop("Quantiles", device_list_);
  n_bins_ = hmat_.row_ptr.back();

  int n_devices = dh::NDevices(param_.n_gpus, info_->num_row_);

  bst_uint row_begin = 0;
  bst_uint shard_size =
      std::ceil(static_cast<double>(info_->num_row_) / n_devices);

  // Map ordinal shard index -> physical device id, starting at gpu_id.
  device_list_.resize(n_devices);
  for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
    int device_idx = (param_.gpu_id + d_idx) % dh::NVisibleDevices();
    device_list_[d_idx] = device_idx;
  }

  reducer_.Init(device_list_);

  // Partition input matrix into row segments
  std::vector<size_t> row_segments;
  shards_.resize(n_devices);
  row_segments.push_back(0);
  for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
    bst_uint row_end =
        std::min(static_cast<size_t>(row_begin + shard_size), info_->num_row_);
    row_segments.push_back(row_end);
    row_begin = row_end;
  }

  monitor_.Start("BinningCompression", device_list_);
  {
    // Exactly one in-memory batch is supported; external memory is rejected.
    dmlc::DataIter<SparsePage>* iter = dmat->RowIterator();
    iter->BeforeFirst();
    CHECK(iter->Next()) << "Empty batches are not supported";
    const SparsePage& batch = iter->Value();
    // Create device shards
    dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) {
      shard = std::unique_ptr<DeviceShard>
          (new DeviceShard(device_list_[i], i,
                           row_segments[i], row_segments[i + 1], n_bins_, param_));
      shard->Init(hmat_, batch);
    });
    CHECK(!iter->Next()) << "External memory not supported";
  }
  monitor_.Stop("BinningCompression", device_list_);

  p_last_fmat_ = dmat;
  initialised_ = true;
}
// Per-iteration initialisation: run one-time setup if needed, re-sample
// columns, and reset every shard with the fresh gradients.
void InitData(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              const RegTree& tree) {
  monitor_.Start("InitDataOnce", device_list_);
  if (!initialised_) {
    this->InitDataOnce(dmat);
  }
  monitor_.Stop("InitDataOnce", device_list_);

  column_sampler_.Init(info_->num_col_, param_);

  // Copy gpair & reset memory
  monitor_.Start("InitDataReset", device_list_);
  gpair->Reshard(devices_);
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {shard->Reset(gpair); });
  monitor_.Stop("InitDataReset", device_list_);
}
// Sum node `nidx`'s histogram across all GPUs in place, so every shard ends
// up with the global histogram. Bins are reduced component-wise (each
// GradientPairSumT is viewed as its underlying ValueT scalars).
void AllReduceHist(int nidx) {
  reducer_.GroupStart();
  for (auto& shard : shards_) {
    auto d_node_hist = shard->hist.GetHistPtr(nidx);
    reducer_.AllReduceSum(
        shard->normalised_device_idx,
        reinterpret_cast<GradientPairSumT::ValueT*>(d_node_hist),
        reinterpret_cast<GradientPairSumT::ValueT*>(d_node_hist),
        n_bins_ * (sizeof(GradientPairSumT) / sizeof(GradientPairSumT::ValueT)));
  }
  reducer_.GroupEnd();

  reducer_.Synchronize();
}
// Build histograms for the two children of `nidx_parent`. Only the smaller
// child (fewer rows on the busiest shard) is built from rows; the larger
// one is derived via the subtraction trick (parent - sibling).
void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) {
  size_t left_node_max_elements = 0;
  size_t right_node_max_elements = 0;
  for (auto& shard : shards_) {
    left_node_max_elements = (std::max)(
        left_node_max_elements, shard->ridx_segments[nidx_left].Size());
    right_node_max_elements = (std::max)(
        right_node_max_elements, shard->ridx_segments[nidx_right].Size());
  }

  auto build_hist_nidx = nidx_left;
  auto subtraction_trick_nidx = nidx_right;

  if (right_node_max_elements < left_node_max_elements) {
    build_hist_nidx = nidx_right;
    subtraction_trick_nidx = nidx_left;
  }

  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->BuildHist(build_hist_nidx);
  });

  // Make the built histogram globally consistent before subtracting.
  this->AllReduceHist(build_hist_nidx);

  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->SubtractionTrick(nidx_parent, build_hist_nidx,
                            subtraction_trick_nidx);
  });
}
// Evaluate the best split for each node in `nidx_set`. Uses only the first
// device: after AllReduceHist every shard holds identical histograms, so one
// GPU suffices. Returns one best candidate per node, honouring column
// sampling.
std::vector<DeviceSplitCandidate> EvaluateSplits(
    const std::vector<int>& nidx_set, RegTree* p_tree) {
  auto columns = info_->num_col_;
  std::vector<DeviceSplitCandidate> best_splits(nidx_set.size());
  // One candidate per (node, feature) pair, filled by the kernel below.
  std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() *
                                                     columns);
  // Use first device
  auto& shard = shards_.front();
  dh::safe_cuda(cudaSetDevice(shard->device_idx));
  shard->temp_memory.LazyAllocate(sizeof(DeviceSplitCandidate) * columns *
                                  nidx_set.size());
  auto d_split = shard->temp_memory.Pointer<DeviceSplitCandidate>();

  auto& streams = shard->GetStreams(static_cast<int>(nidx_set.size()));

  // Use streams to process nodes concurrently
  for (size_t i = 0; i < nidx_set.size(); i++) {
    auto nidx = nidx_set[i];
    DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param_);

    const int BLOCK_THREADS = 256;
    evaluate_split_kernel<BLOCK_THREADS>
        <<<uint32_t(columns), BLOCK_THREADS, 0, streams[i]>>>(
            shard->hist.GetHistPtr(nidx), nidx, info_->num_col_, node,
            shard->feature_segments.Data(), shard->min_fvalue.Data(),
            shard->gidx_fvalue_map.Data(), GPUTrainingParam(param_),
            d_split + i * columns, node_value_constraints_[nidx],
            shard->monotone_constraints.Data());
  }

  // Synchronous copy also waits for the per-stream kernels above.
  dh::safe_cuda(
      cudaMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage,
                 sizeof(DeviceSplitCandidate) * columns * nidx_set.size(),
                 cudaMemcpyDeviceToHost));

  // Host-side argmax over features, skipping columns dropped by sampling.
  for (size_t i = 0; i < nidx_set.size(); i++) {
    auto nidx = nidx_set[i];
    DeviceSplitCandidate nidx_best;
    for (decltype(columns) fidx = 0; fidx < columns; fidx++) {
      auto& candidate = candidate_splits[i * columns + fidx];
      if (column_sampler_.ColumnUsed(candidate.findex,
                                     p_tree->GetDepth(nidx))) {
        nidx_best.Update(candidate_splits[i * columns + fidx], param_);
      }
    }
    best_splits[i] = nidx_best;
  }
  // Plain return allows NRVO; `return std::move(...)` would pessimise it.
  return best_splits;
}
// Initialises the root node: reduces the total gradient sum across all
// shards, builds and all-reduces the root histogram, records root statistics
// on the tree, and enqueues the first split candidate.
void InitRoot(RegTree* p_tree) {
  auto root_nidx = 0;
  // Sum gradients
  std::vector<GradientPair> tmp_sums(shards_.size());
  dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) {
    dh::safe_cuda(cudaSetDevice(shard->device_idx));
    // Per-shard partial sum; combined on the host below.
    tmp_sums[i] =
        dh::SumReduction(shard->temp_memory, shard->gpair.Data(),
                         shard->gpair.Size());
  });
  auto sum_gradient =
      std::accumulate(tmp_sums.begin(), tmp_sums.end(), GradientPair());
  // Generate root histogram
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->BuildHist(root_nidx);
  });
  this->AllReduceHist(root_nidx);
  // Remember root stats
  p_tree->Stat(root_nidx).sum_hess = sum_gradient.GetHess();
  auto weight = CalcWeight(param_, sum_gradient);
  p_tree->Stat(root_nidx).base_weight = weight;
  // Until the root is split, it predicts as a leaf with this weight.
  (*p_tree)[root_nidx].SetLeaf(param_.learning_rate * weight);
  // Store sum gradients
  for (auto& shard : shards_) {
    shard->node_sum_gradients[root_nidx] = sum_gradient;
  }
  // Initialise root constraint
  node_value_constraints_.resize(p_tree->GetNodes().size());
  // Generate first split
  auto splits = this->EvaluateSplits({root_nidx}, p_tree);
  qexpand_->push(
      ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), splits.front(), 0));
}
// Repartitions the rows of candidate.nid between its two children on every
// shard. Must run after ApplySplit has created the children in the tree.
void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) {
  auto nidx = candidate.nid;
  auto left_nidx = (*p_tree)[nidx].LeftChild();
  auto right_nidx = (*p_tree)[nidx].RightChild();
  // convert floating-point split_pt into corresponding bin_id
  // split_cond = -1 indicates that split_pt is less than all known cut points
  auto split_gidx = -1;
  auto fidx = candidate.split.findex;
  auto default_dir_left = candidate.split.dir == kLeftDir;
  auto fidx_begin = hmat_.row_ptr[fidx];
  auto fidx_end = hmat_.row_ptr[fidx + 1];
  // NOTE(review): exact float equality appears intentional — split.fvalue is
  // presumably taken verbatim from the cut values, so a matching entry
  // compares bit-identical. TODO confirm against the split-evaluation kernel.
  for (auto i = fidx_begin; i < fidx_end; ++i) {
    if (candidate.split.fvalue == hmat_.cut[i]) {
      split_gidx = static_cast<int32_t>(i);
    }
  }
  // Dense shortcut: every row has a value for every feature.
  auto is_dense = info_->num_nonzero_ == info_->num_row_ * info_->num_col_;
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->UpdatePosition(nidx, left_nidx, right_nidx, fidx,
                          split_gidx, default_dir_left,
                          is_dense, fidx_begin, fidx_end);
  });
}
// Materialises `candidate` in the tree: adds the two children, records the
// parent's split and loss change, propagates monotone-constraint bounds to
// the children, sets the children's leaf weights/statistics, and finally
// repartitions rows between the children on every shard.
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
  // Add new leaves
  RegTree& tree = *p_tree;
  tree.AddChilds(candidate.nid);
  auto& parent = tree[candidate.nid];
  parent.SetSplit(candidate.split.findex, candidate.split.fvalue,
                  candidate.split.dir == kLeftDir);
  tree.Stat(candidate.nid).loss_chg = candidate.split.loss_chg;
  // Set up child constraints
  node_value_constraints_.resize(tree.GetNodes().size());
  GradStats left_stats(param_);
  left_stats.Add(candidate.split.left_sum);
  GradStats right_stats(param_);
  right_stats.Add(candidate.split.right_sum);
  node_value_constraints_[candidate.nid].SetChild(
      param_, parent.SplitIndex(), left_stats, right_stats,
      &node_value_constraints_[parent.LeftChild()],
      &node_value_constraints_[parent.RightChild()]);
  // Configure left child: constrained weight, scaled by the learning rate.
  auto left_weight =
      node_value_constraints_[parent.LeftChild()].CalcWeight(param_, left_stats);
  tree[parent.LeftChild()].SetLeaf(left_weight * param_.learning_rate, 0);
  tree.Stat(parent.LeftChild()).base_weight = left_weight;
  tree.Stat(parent.LeftChild()).sum_hess = candidate.split.left_sum.GetHess();
  // Configure right child
  auto right_weight =
      node_value_constraints_[parent.RightChild()].CalcWeight(param_, right_stats);
  tree[parent.RightChild()].SetLeaf(right_weight * param_.learning_rate, 0);
  tree.Stat(parent.RightChild()).base_weight = right_weight;
  tree.Stat(parent.RightChild()).sum_hess = candidate.split.right_sum.GetHess();
  // Store sum gradients so child splits can be evaluated on each device.
  for (auto& shard : shards_) {
    shard->node_sum_gradients[parent.LeftChild()] = candidate.split.left_sum;
    shard->node_sum_gradients[parent.RightChild()] = candidate.split.right_sum;
  }
  this->UpdatePosition(candidate, p_tree);
}
// Grows one tree: initialises data and the root node, then repeatedly pops
// the best expansion candidate from the priority queue and applies it until
// the queue is exhausted or the limits (max_depth / max_leaves) are reached.
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
                RegTree* p_tree) {
  // NOTE: the old `int nthread = omp_get_max_threads();` local was removed —
  // it was never read, and its comment promised a restore that never happened.
  auto& tree = *p_tree;
  monitor_.Start("InitData", device_list_);
  this->InitData(gpair, p_fmat, *p_tree);
  monitor_.Stop("InitData", device_list_);
  monitor_.Start("InitRoot", device_list_);
  this->InitRoot(p_tree);
  monitor_.Stop("InitRoot", device_list_);
  // Timestamps break ties in the expansion queue so equal-priority entries
  // are processed in insertion order.
  auto timestamp = qexpand_->size();
  auto num_leaves = 1;
  while (!qexpand_->empty()) {
    auto candidate = qexpand_->top();
    qexpand_->pop();
    if (!candidate.IsValid(param_, num_leaves)) continue;
    monitor_.Start("ApplySplit", device_list_);
    this->ApplySplit(candidate, p_tree);
    monitor_.Stop("ApplySplit", device_list_);
    num_leaves++;
    auto left_child_nidx = tree[candidate.nid].LeftChild();
    auto right_child_nidx = tree[candidate.nid].RightChild();
    // Only create child entries if needed
    if (ExpandEntry::ChildIsValid(param_, tree.GetDepth(left_child_nidx),
                                  num_leaves)) {
      monitor_.Start("BuildHist", device_list_);
      this->BuildHistLeftRight(candidate.nid, left_child_nidx,
                               right_child_nidx);
      monitor_.Stop("BuildHist", device_list_);
      monitor_.Start("EvaluateSplits", device_list_);
      auto splits =
          this->EvaluateSplits({left_child_nidx, right_child_nidx}, p_tree);
      qexpand_->push(ExpandEntry(left_child_nidx,
                                 tree.GetDepth(left_child_nidx), splits[0],
                                 timestamp++));
      qexpand_->push(ExpandEntry(right_child_nidx,
                                 tree.GetDepth(right_child_nidx), splits[1],
                                 timestamp++));
      monitor_.Stop("EvaluateSplits", device_list_);
    }
  }
}
// Writes cached per-row predictions into p_out_preds. Returns false (no-op)
// when this updater has not just trained on `data`, letting the caller fall
// back to a full prediction pass.
bool UpdatePredictionCache(
    const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
  // Check eligibility BEFORE starting the monitor: the original started the
  // timer and then returned early, leaving Start() without a matching Stop().
  if (shards_.empty() || p_last_fmat_ == nullptr || p_last_fmat_ != data)
    return false;
  monitor_.Start("UpdatePredictionCache", device_list_);
  p_out_preds->Reshard(devices_);
  dh::ExecuteShards(&shards_, [&](std::unique_ptr<DeviceShard>& shard) {
    shard->UpdatePredictionCache(p_out_preds->DevicePointer(shard->device_idx));
  });
  monitor_.Stop("UpdatePredictionCache", device_list_);
  return true;
}
// A deferred request to expand (split) one tree node, carrying the best
// split found for it and an insertion timestamp used for tie-breaking.
struct ExpandEntry {
  int nid;                     // tree node index
  int depth;                   // depth of nid in the tree
  DeviceSplitCandidate split;  // best split evaluated for this node
  uint64_t timestamp;          // monotonically increasing insertion order
  ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split,
              uint64_t timestamp)
      : nid(nid), depth(depth), split(split), timestamp(timestamp) {}
  // Whether applying this split is still worthwhile and permitted.
  bool IsValid(const TrainParam& param, int num_leaves) const {
    const bool gain_ok = split.loss_chg > kRtEps;
    const bool children_populated =
        split.left_sum.GetHess() != 0 && split.right_sum.GetHess() != 0;
    const bool depth_ok = !(param.max_depth > 0 && depth == param.max_depth);
    const bool leaves_ok =
        !(param.max_leaves > 0 && num_leaves == param.max_leaves);
    return gain_ok && children_populated && depth_ok && leaves_ok;
  }
  // Whether a child created at `depth` could itself be expanded later.
  static bool ChildIsValid(const TrainParam& param, int depth,
                           int num_leaves) {
    const bool depth_ok = !(param.max_depth > 0 && depth == param.max_depth);
    const bool leaves_ok =
        !(param.max_leaves > 0 && num_leaves == param.max_leaves);
    return depth_ok && leaves_ok;
  }
  friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
    os << "ExpandEntry: \n"
       << "nidx: " << e.nid << "\n"
       << "depth: " << e.depth << "\n"
       << "loss: " << e.split.loss_chg << "\n"
       << "left_sum: " << e.split.left_sum << "\n"
       << "right_sum: " << e.split.right_sum << "\n";
    return os;
  }
};
// Priority-queue comparator for depth-wise growth: prefers smaller depth,
// breaking ties by earlier (smaller) timestamp.
inline static bool DepthWise(ExpandEntry lhs, ExpandEntry rhs) {
  return lhs.depth == rhs.depth ? lhs.timestamp > rhs.timestamp  // favor small timestamp
                                : lhs.depth > rhs.depth;         // favor small depth
}
// Priority-queue comparator for loss-guided growth: prefers larger loss
// reduction, breaking ties by earlier (smaller) timestamp.
inline static bool LossGuide(ExpandEntry lhs, ExpandEntry rhs) {
  return lhs.split.loss_chg == rhs.split.loss_chg
             ? lhs.timestamp > rhs.timestamp          // favor small timestamp
             : lhs.split.loss_chg < rhs.split.loss_chg;  // favor large loss_chg
}
TrainParam param_;            // training hyper-parameters
common::HistCutMatrix hmat_;  // quantile cut points per feature (row_ptr/cut)
common::GHistIndexMatrix gmat_;  // NOTE(review): not referenced in this chunk
MetaInfo* info_;              // borrowed metadata of the current DMatrix
bool initialised_;
int n_devices_;               // number of GPUs in use
int n_bins_;                  // total histogram bins across all features
std::vector<std::unique_ptr<DeviceShard>> shards_;  // per-GPU training state
ColumnSampler column_sampler_;  // feature (column) subsampling
// Expansion queue; the comparator (DepthWise or LossGuide) is chosen at runtime.
typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
                            std::function<bool(ExpandEntry, ExpandEntry)>>
    ExpandQueue;
std::unique_ptr<ExpandQueue> qexpand_;
common::Monitor monitor_;     // phase timing (InitData, BuildHist, ...)
dh::AllReducer reducer_;      // multi-GPU histogram all-reduce
std::vector<ValueConstraint> node_value_constraints_;  // per-node monotone bounds
std::vector<int> device_list_;
DMatrix* p_last_fmat_;        // last matrix trained on; guards prediction cache
GPUSet devices_;
};
// Registers this updater with the global factory under the name
// "grow_gpu_hist" so it can be selected via the training parameters.
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
} // namespace tree
} // namespace xgboost
|
4030dc8c329b65e3f606cb8d117236f44555cae2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ctime>
#include <algorithm>
#include <vector>
#include <numeric>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <helper_timer.h>
#include "naive_kernel.hu"
#include "batchrng_kernel.hu"
#include "misc.hu"
#include "cudaResourceWrapper.hu"
double compute_naivest(dim3 grid, dim3 block, unsigned int device,
unsigned int iterationsperThread);
double compute_naive(dim3 grid, dim3 block, unsigned int device,
unsigned int iterationsperThread);
double compute_batchrng(dim3 grid, dim3 block, unsigned int device,
unsigned int iterationsperThread,
hipDeviceProp_t *const deviceProp);
int main (int argc, char ** argv)
{
unsigned int kernel = 0;
double piest;
hipDeviceProp_t deviceProp;
int device = 0;
unsigned int iterationsPerThread = 1000 * 1000;
dim3 grid = 16;
dim3 block = 64;
parseArgs(argc, argv, &iterationsPerThread, &deviceProp,
&grid.x, &block.x, &kernel, &device);
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
switch (kernel) {
case 0:
piest = compute_naivest(grid, block, device, iterationsPerThread);
break;
case 1:
piest = compute_naive(grid, block, device, iterationsPerThread);
break;
case 2:
piest = compute_batchrng(grid, block, device, iterationsPerThread,
&deviceProp);
break;
}
sdkStopTimer(&timer);
float elapsedTime = sdkGetAverageTimerValue(&timer)/1000.0f;
reportResults(piest, iterationsPerThread, grid.x, block.x, &deviceProp, elapsedTime);
return 0;
}
// Runs the "naivest" Monte-Carlo kernel: one RNG state per thread, one
// partial estimate per block, summed on the host. Returns the pi estimate.
double compute_naivest(dim3 grid, dim3 block, unsigned int device,
                       unsigned int iterationsperThread)
{
    handleCudaErrors(hipSetDevice(device));
    CudaResWrapper<hiprandState_t> d_rngStates(grid.x * block.x);
    CudaResWrapper<float> d_res(grid.x);
    hipLaunchKernelGGL(( initRNG), dim3(grid), dim3(block), 0, 0, d_rngStates.getPtr(), time(NULL));
    handleCudaErrors(hipGetLastError());  // surface launch-configuration errors
    hipLaunchKernelGGL(( naivest_kernel), dim3(grid), dim3(block), block.x * sizeof(unsigned int), 0,
        d_res.getPtr(), d_rngStates.getPtr(), iterationsperThread);
    handleCudaErrors(hipGetLastError());
    std::vector<float> res(grid.x);
    // Blocking copy also synchronises with the kernels above.
    handleCudaErrors(hipMemcpy(&res[0], d_res.getPtr(), grid.x * sizeof(float),
                               hipMemcpyDeviceToHost));
    double estimate = std::accumulate(res.begin(), res.end(), 0.0);
    return estimate;
}
// Runs the "naive" Monte-Carlo kernel: one RNG state per thread, one partial
// estimate per block, summed on the host. Returns the pi estimate.
double compute_naive(dim3 grid, dim3 block, unsigned int device,
                     unsigned int iterationsperThread)
{
    handleCudaErrors(hipSetDevice(device));
    CudaResWrapper<hiprandState_t> d_rngStates(grid.x * block.x);
    CudaResWrapper<float> d_res(grid.x);
    hipLaunchKernelGGL(( initRNG), dim3(grid), dim3(block), 0, 0, d_rngStates.getPtr(), time(NULL));
    handleCudaErrors(hipGetLastError());  // surface launch-configuration errors
    hipLaunchKernelGGL(( naive_kernel), dim3(grid), dim3(block), block.x * sizeof(unsigned int), 0,
        d_res.getPtr(), d_rngStates.getPtr(), iterationsperThread);
    handleCudaErrors(hipGetLastError());
    std::vector<float> res(grid.x);
    // Blocking copy also synchronises with the kernels above.
    handleCudaErrors(hipMemcpy(&res[0], d_res.getPtr(), grid.x * sizeof(float),
                               hipMemcpyDeviceToHost));
    double estimate = std::accumulate(res.begin(), res.end(), 0.0);
    return estimate;
}
// Batched-RNG variant: pre-generates uniform random vectors with hipRAND and
// consumes them in chunks sized to fit within ~90% of free device memory.
// Returns the mean of the per-chunk pi estimates.
double compute_batchrng(dim3 grid, dim3 block, unsigned int device,
                        unsigned int its,
                        hipDeviceProp_t *const deviceProp)
{
    // Honour the device argument (it was previously ignored, unlike the
    // other compute_* entry points).
    handleCudaErrors(hipSetDevice(device));
    //Set up the RNG
    hiprandGenerator_t generator;
    hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    //For partial results
    CudaResWrapper<float> d_res(grid.x);
    //To calculate the final result
    double runningEstimate = 0;
    //Random number vector allocation strategy
    unsigned int numThreads = grid.x * block.x;
    //Total size of *1* vector, I need *2*
    size_t totalSize = sizeof(float) * (size_t)its * numThreads;
    //Get device's free and total global memory
    size_t freeMem = 0;
    size_t totalMem = 0;
    handleCudaErrors(hipMemGetInfo(&freeMem, &totalMem));
    size_t vecSize = 0;
    //Allocate everything at once if we can get away with it
    if (2 * totalSize <= freeMem * 0.9) {
        vecSize = totalSize;
    }
    else {
        // Spare 10% of the device's free memory. Keep the result as size_t:
        // the old cast to unsigned int truncated on cards with > 4 GiB free.
        vecSize = static_cast<size_t>(freeMem * 0.9 / 2);
    }
    size_t vecCount = vecSize / sizeof(float);
    size_t remainSize = totalSize;
    CudaResWrapper<float> d_angleVec(vecCount);
    CudaResWrapper<float> d_distVec(vecCount);
    unsigned int numRuns = 0;
    std::vector<float> res(grid.x);
    //Here we go!
    // >= so a final remainder of exactly one sample is not silently dropped.
    while (remainSize >= sizeof(float)) {
        numRuns++;
        if (remainSize < vecSize) {
            vecCount = remainSize / sizeof(float);
        }
        hiprandGenerateUniform(generator, d_angleVec.getPtr(), vecCount);
        hiprandGenerateUniform(generator, d_distVec.getPtr(), vecCount);
        hipLaunchKernelGGL(( batchrng_kernel), dim3(grid), dim3(block), block.x * sizeof(float), 0,
            d_res.getPtr(), d_angleVec.getPtr(), d_distVec.getPtr(), vecCount);
        handleCudaErrors(hipGetLastError());  // catch launch failures early
        handleCudaErrors(hipMemcpy(&res[0], d_res.getPtr(),
                                   grid.x * sizeof(float),
                                   hipMemcpyDeviceToHost));
        runningEstimate += std::accumulate(res.begin(), res.end(), 0.0);
        if (remainSize > vecSize) {
            remainSize -= vecSize;
        }
        else {
            break;
        }
    }
    hiprandDestroyGenerator(generator);  // was leaked before
    // Guard against division by zero when there was no work at all (its == 0).
    return numRuns == 0 ? 0.0 : runningEstimate / numRuns;
}
| 4030dc8c329b65e3f606cb8d117236f44555cae2.cu | #include <iostream>
#include <ctime>
#include <algorithm>
#include <vector>
#include <numeric>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <helper_timer.h>
#include "naive_kernel.hu"
#include "batchrng_kernel.hu"
#include "misc.hu"
#include "cudaResourceWrapper.hu"
double compute_naivest(dim3 grid, dim3 block, unsigned int device,
unsigned int iterationsperThread);
double compute_naive(dim3 grid, dim3 block, unsigned int device,
unsigned int iterationsperThread);
double compute_batchrng(dim3 grid, dim3 block, unsigned int device,
unsigned int iterationsperThread,
cudaDeviceProp *const deviceProp);
// Parses CLI options, runs the selected Monte-Carlo pi kernel (0 = naivest,
// 1 = naive, 2 = batched RNG), and reports the estimate with elapsed time.
int main (int argc, char ** argv)
{
    unsigned int kernel = 0;
    double piest = 0.0;  // initialised: previously garbage for unknown kernel ids
    cudaDeviceProp deviceProp;
    int device = 0;
    unsigned int iterationsPerThread = 1000 * 1000;
    dim3 grid = 16;
    dim3 block = 64;
    parseArgs(argc, argv, &iterationsPerThread, &deviceProp,
              &grid.x, &block.x, &kernel, &device);
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);
    switch (kernel) {
    case 0:
        piest = compute_naivest(grid, block, device, iterationsPerThread);
        break;
    case 1:
        piest = compute_naive(grid, block, device, iterationsPerThread);
        break;
    case 2:
        piest = compute_batchrng(grid, block, device, iterationsPerThread,
                                 &deviceProp);
        break;
    default:
        // Previously an unrecognised id fell through and reported an
        // uninitialised estimate; fail loudly instead.
        std::cerr << "Unknown kernel id: " << kernel << std::endl;
        sdkDeleteTimer(&timer);
        return 1;
    }
    sdkStopTimer(&timer);
    float elapsedTime = sdkGetAverageTimerValue(&timer)/1000.0f;
    reportResults(piest, iterationsPerThread, grid.x, block.x, &deviceProp, elapsedTime);
    sdkDeleteTimer(&timer);  // release the host timer (was leaked)
    return 0;
}
// Runs the "naivest" Monte-Carlo kernel: one curand state per thread, one
// partial estimate per block, summed on the host. Returns the pi estimate.
double compute_naivest(dim3 grid, dim3 block, unsigned int device,
                       unsigned int iterationsperThread)
{
    handleCudaErrors(cudaSetDevice(device));
    CudaResWrapper<curandState> d_rngStates(grid.x * block.x);
    CudaResWrapper<float> d_res(grid.x);
    initRNG<<<grid, block>>>(d_rngStates.getPtr(), time(NULL));
    handleCudaErrors(cudaGetLastError());  // surface launch-configuration errors
    naivest_kernel<<<grid, block, block.x * sizeof(unsigned int)>>>
        (d_res.getPtr(), d_rngStates.getPtr(), iterationsperThread);
    handleCudaErrors(cudaGetLastError());
    std::vector<float> res(grid.x);
    // Blocking copy also synchronises with the kernels above.
    handleCudaErrors(cudaMemcpy(&res[0], d_res.getPtr(), grid.x * sizeof(float),
                                cudaMemcpyDeviceToHost));
    double estimate = std::accumulate(res.begin(), res.end(), 0.0);
    return estimate;
}
// Runs the "naive" Monte-Carlo kernel: one curand state per thread, one
// partial estimate per block, summed on the host. Returns the pi estimate.
double compute_naive(dim3 grid, dim3 block, unsigned int device,
                     unsigned int iterationsperThread)
{
    handleCudaErrors(cudaSetDevice(device));
    CudaResWrapper<curandState> d_rngStates(grid.x * block.x);
    CudaResWrapper<float> d_res(grid.x);
    initRNG<<<grid, block>>>(d_rngStates.getPtr(), time(NULL));
    handleCudaErrors(cudaGetLastError());  // surface launch-configuration errors
    naive_kernel<<<grid, block, block.x * sizeof(unsigned int)>>>
        (d_res.getPtr(), d_rngStates.getPtr(), iterationsperThread);
    handleCudaErrors(cudaGetLastError());
    std::vector<float> res(grid.x);
    // Blocking copy also synchronises with the kernels above.
    handleCudaErrors(cudaMemcpy(&res[0], d_res.getPtr(), grid.x * sizeof(float),
                                cudaMemcpyDeviceToHost));
    double estimate = std::accumulate(res.begin(), res.end(), 0.0);
    return estimate;
}
// Batched-RNG variant: pre-generates uniform random vectors with cuRAND and
// consumes them in chunks sized to fit within ~90% of free device memory.
// Returns the mean of the per-chunk pi estimates.
double compute_batchrng(dim3 grid, dim3 block, unsigned int device,
                        unsigned int its,
                        cudaDeviceProp *const deviceProp)
{
    // Honour the device argument (it was previously ignored, unlike the
    // other compute_* entry points).
    handleCudaErrors(cudaSetDevice(device));
    //Set up the RNG
    curandGenerator_t generator;
    curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    //For partial results
    CudaResWrapper<float> d_res(grid.x);
    //To calculate the final result
    double runningEstimate = 0;
    //Random number vector allocation strategy
    unsigned int numThreads = grid.x * block.x;
    //Total size of *1* vector, I need *2*
    size_t totalSize = sizeof(float) * (size_t)its * numThreads;
    //Get device's free and total global memory
    size_t freeMem = 0;
    size_t totalMem = 0;
    handleCudaErrors(cudaMemGetInfo(&freeMem, &totalMem));
    size_t vecSize = 0;
    //Allocate everything at once if we can get away with it
    if (2 * totalSize <= freeMem * 0.9) {
        vecSize = totalSize;
    }
    else {
        // Spare 10% of the device's free memory. Keep the result as size_t:
        // the old cast to unsigned int truncated on cards with > 4 GiB free.
        vecSize = static_cast<size_t>(freeMem * 0.9 / 2);
    }
    size_t vecCount = vecSize / sizeof(float);
    size_t remainSize = totalSize;
    CudaResWrapper<float> d_angleVec(vecCount);
    CudaResWrapper<float> d_distVec(vecCount);
    unsigned int numRuns = 0;
    std::vector<float> res(grid.x);
    //Here we go!
    // >= so a final remainder of exactly one sample is not silently dropped.
    while (remainSize >= sizeof(float)) {
        numRuns++;
        if (remainSize < vecSize) {
            vecCount = remainSize / sizeof(float);
        }
        curandGenerateUniform(generator, d_angleVec.getPtr(), vecCount);
        curandGenerateUniform(generator, d_distVec.getPtr(), vecCount);
        batchrng_kernel<<<grid, block, block.x * sizeof(float)>>>
            (d_res.getPtr(), d_angleVec.getPtr(), d_distVec.getPtr(), vecCount);
        handleCudaErrors(cudaGetLastError());  // catch launch failures early
        handleCudaErrors(cudaMemcpy(&res[0], d_res.getPtr(),
                                    grid.x * sizeof(float),
                                    cudaMemcpyDeviceToHost));
        runningEstimate += std::accumulate(res.begin(), res.end(), 0.0);
        if (remainSize > vecSize) {
            remainSize -= vecSize;
        }
        else {
            break;
        }
    }
    curandDestroyGenerator(generator);  // was leaked before
    // Guard against division by zero when there was no work at all (its == 0).
    return numRuns == 0 ? 0.0 : runningEstimate / numRuns;
}
|
253606236cf9e0e2c9eddedf6a6bffa8ceebacf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgetf2.cu normal z -> c, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#define PRECISION_c
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
#define cswap_bs 64
//#if (GPUSHMEM < 200)
#define cgeru_bs 512 // 512 is max threads for 1.x cards
//#else
//#define cgeru_bs 1024
//#endif
void magma_cswap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx);
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda);
/**
CGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
A COMPLEX array, dimension (LDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_cgesv_aux
********************************************************************/
extern "C" magma_int_t
magma_cgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda,
magma_int_t *ipiv,
magma_int_t* info )
{
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > cgeru_bs) {
*info = -2;
} else if (lda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for( j=0; j < min_mn; j++ ) {
hipDeviceSetCacheConfig( hipFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_icamax(m-j, A(j,j), 1);
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of A since it is on GPU
//if ( A(jp, j) != 0.0) {
hipDeviceSetCacheConfig( hipFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_cswap(n, A, j, jp, lda);
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_cscal_cgeru(m-j, n-j, A(j, j), lda);
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
// Exchanges row vectors i and j of an n-column matrix with element stride
// incx between columns; one thread per column.
__global__
void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx)
{
    int col = blockIdx.x * cswap_bs + threadIdx.x;
    if (col >= n) {
        return;
    }
    magmaFloatComplex vi = x[i + incx*col];
    magmaFloatComplex vj = x[j + incx*col];
    x[i + incx*col] = vj;
    x[j + incx*col] = vi;
}
// Host wrapper: swaps row vectors i and j (each of length n, stride incx)
// asynchronously on magma_stream.
void magma_cswap(magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx)
{
    int num_blocks = (n - 1)/cswap_bs + 1;  // ceil(n / cswap_bs)
    dim3 grid(num_blocks, 1);
    dim3 threads(cswap_bs, 1, 1);
    hipLaunchKernelGGL(( kernel_cswap), dim3(grid), dim3(threads), 0, magma_stream , n, x, i, j, incx);
}
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
// Fused scal + geru step of one LU elimination column:
// each thread scales its element of column 0 by 1/pivot and applies the
// rank-1 update to its row of the trailing matrix. Requires n <= blockDim.x
// (= cgeru_bs) so that the first n threads of each block can stage row 0
// (the y vector) into shared memory.
__global__
void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda)
{
    magmaFloatComplex *shared_y = shared_data;  // y = A(0, 0:n-1)
    int tid = blockIdx.x * cgeru_bs + threadIdx.x;
    magmaFloatComplex reg = MAGMA_C_ZERO;
    if (threadIdx.x < n) {
        shared_y[threadIdx.x] = A[lda * threadIdx.x];
    }
    // Barrier outside any divergent branch: all threads reach it, and
    // shared_y must be fully populated before it is read below.
    __syncthreads();
    // tid == 0 is the pivot row itself and is left untouched.
    if (tid < m && tid > 0) {
        reg = A[tid];
        reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);  // scale by 1/pivot
        A[tid] = reg;
        // A(tid, 1:n-1) += (-1) * y[1:n-1] * x[tid]
        #pragma unroll
        for(int i=1; i < n; i++) {
            A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
        }
    }
}
// Host wrapper for the fused scal/geru elimination step (see kernel above):
// 1) scales column 0 of the m-by-n panel A by 1/A(0,0);
// 2) applies the rank-1 update A(1:m-1,1:n-1) += (-1) * A(1:m-1,0) * A(0,1:n-1).
// Launches on magma_stream with n complex values of dynamic shared memory.
void magma_cscal_cgeru(magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda)
{
    int num_blocks = (m - 1)/cgeru_bs + 1;  // ceil(m / cgeru_bs)
    dim3 grid(num_blocks, 1);
    dim3 threads(cgeru_bs, 1, 1);
    size_t shared_size = n * sizeof(magmaFloatComplex);
    hipLaunchKernelGGL(( kernel_cscal_cgeru), dim3(grid), dim3(threads), shared_size, magma_stream, m, n, A, lda);
}
| 253606236cf9e0e2c9eddedf6a6bffa8ceebacf6.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgetf2.cu normal z -> c, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#define PRECISION_c
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
#define cswap_bs 64
//#if (GPUSHMEM < 200)
#define cgeru_bs 512 // 512 is max threads for 1.x cards
//#else
//#define cgeru_bs 1024
//#endif
void magma_cswap(
magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx);
void magma_cscal_cgeru(
magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda);
/**
CGETF2 computes an LU factorization of a general m-by-n matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 2 BLAS version of the algorithm.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0 and N <= 1024.
On CUDA architecture 1.x cards, N <= 512.
@param[in,out]
A COMPLEX array, dimension (LDA,N)
On entry, the m by n matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
ipiv INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -k, the k-th argument had an illegal value
- > 0: if INFO = k, U(k,k) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@ingroup magma_cgesv_aux
********************************************************************/
extern "C" magma_int_t
magma_cgetf2_gpu(
magma_int_t m, magma_int_t n,
magmaFloatComplex *A, magma_int_t lda,
magma_int_t *ipiv,
magma_int_t* info )
{
*info = 0;
if (m < 0) {
*info = -1;
} else if (n < 0 || n > cgeru_bs) {
*info = -2;
} else if (lda < max(1,m)) {
*info = -4;
}
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return *info;
}
// Quick return if possible
if (m == 0 || n == 0) {
return *info;
}
magma_int_t min_mn = min(m, n);
magma_int_t j, jp;
for( j=0; j < min_mn; j++ ) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );
// Find pivot and test for singularity.
jp = j - 1 + magma_icamax(m-j, A(j,j), 1);
ipiv[j] = jp + 1; // ipiv uses Fortran one-based index
// Can't check value of A since it is on GPU
//if ( A(jp, j) != 0.0) {
cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );
// Apply the interchange to columns 1:N.
if (jp != j) {
magma_cswap(n, A, j, jp, lda);
}
// Compute elements J+1:M of J-th column.
if (j < m) {
magma_cscal_cgeru(m-j, n-j, A(j, j), lda);
}
//}
//else if (*info == 0) {
// *info = j;
//}
}
return *info;
}
// Exchanges row vectors i and j of an n-column matrix with element stride
// incx between columns; one thread per column.
__global__
void kernel_cswap(int n, magmaFloatComplex *x, int i, int j, int incx)
{
    int col = blockIdx.x * cswap_bs + threadIdx.x;
    if (col >= n) {
        return;
    }
    magmaFloatComplex vi = x[i + incx*col];
    magmaFloatComplex vj = x[j + incx*col];
    x[i + incx*col] = vj;
    x[j + incx*col] = vi;
}
// Host wrapper: swaps row vectors i and j (each of length n, stride incx)
// asynchronously on magma_stream.
void magma_cswap(magma_int_t n, magmaFloatComplex *x, magma_int_t i, magma_int_t j, magma_int_t incx)
{
    int num_blocks = (n - 1)/cswap_bs + 1;  // ceil(n / cswap_bs)
    dim3 grid(num_blocks, 1);
    dim3 threads(cswap_bs, 1, 1);
    kernel_cswap<<< grid, threads, 0, magma_stream >>>(n, x, i, j, incx);
}
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ magmaFloatComplex shared_data[];
// Fused scal + geru step of one LU elimination column:
// each thread scales its element of column 0 by 1/pivot and applies the
// rank-1 update to its row of the trailing matrix. Requires n <= blockDim.x
// (= cgeru_bs) so that the first n threads of each block can stage row 0
// (the y vector) into shared memory.
__global__
void kernel_cscal_cgeru(int m, int n, magmaFloatComplex *A, int lda)
{
    magmaFloatComplex *shared_y = shared_data;  // y = A(0, 0:n-1)
    int tid = blockIdx.x * cgeru_bs + threadIdx.x;
    magmaFloatComplex reg = MAGMA_C_ZERO;
    if (threadIdx.x < n) {
        shared_y[threadIdx.x] = A[lda * threadIdx.x];
    }
    // Barrier outside any divergent branch: all threads reach it, and
    // shared_y must be fully populated before it is read below.
    __syncthreads();
    // tid == 0 is the pivot row itself and is left untouched.
    if (tid < m && tid > 0) {
        reg = A[tid];
        reg *= MAGMA_C_DIV(MAGMA_C_ONE, shared_y[0]);  // scale by 1/pivot
        A[tid] = reg;
        // A(tid, 1:n-1) += (-1) * y[1:n-1] * x[tid]
        #pragma unroll
        for(int i=1; i < n; i++) {
            A[tid + i*lda] += (MAGMA_C_NEG_ONE) * shared_y[i] * reg;
        }
    }
}
// Host wrapper for the fused scal/geru elimination step (see kernel above):
// 1) scales column 0 of the m-by-n panel A by 1/A(0,0);
// 2) applies the rank-1 update A(1:m-1,1:n-1) += (-1) * A(1:m-1,0) * A(0,1:n-1).
// Launches on magma_stream with n complex values of dynamic shared memory.
void magma_cscal_cgeru(magma_int_t m, magma_int_t n, magmaFloatComplex *A, magma_int_t lda)
{
    int num_blocks = (m - 1)/cgeru_bs + 1;  // ceil(m / cgeru_bs)
    dim3 grid(num_blocks, 1);
    dim3 threads(cgeru_bs, 1, 1);
    size_t shared_size = n * sizeof(magmaFloatComplex);
    kernel_cscal_cgeru<<< grid, threads, shared_size, magma_stream>>>(m, n, A, lda);
}
|
0636f486a333ad0eaf00a37440397ea2b6dd57f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _MATMUL_KERNEL_H_
#define _MATMUL_KERNEL_H_
#include <stdio.h>
#include "matrix.h"
#include "math.h"
////////////////////////UTILITY FUNCTIONS////////////////////////
// Reads element (y, x) of the row-major matrix m.
__device__ float mat_get(Matrix& m, int y, int x)
{
    int idx = y * m.width + x;
    return m.elements[idx];
}
// Writes `value` into element (y, x) of the row-major matrix m.
__device__ void mat_set(Matrix& m, int y, int x, float value)
{
    int idx = y * m.width + x;
    m.elements[idx] = value;
}
////////////////////GPU-BASED IMPLEMENTATION//////////////////////
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Copies A and B to the device, launches MatMulKernel to compute C = A * B,
// and copies the result back into C.elements. Grid dimensions use
// ceil-division, so matrix sizes need not be multiples of the block size.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A to device memory
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    hipMalloc((void**)&d_A.elements, size);
    hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
    // Load B to device memory
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    hipMalloc((void**)&d_B.elements, size);
    hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    hipMalloc((void**)&d_C.elements, size);
    // Invoke kernel
    dim3 dimBlock(min(B.width, 16), min(A.height, 16));
    dim3 dimGrid((int)ceil(1.0 * B.width / dimBlock.x), (int)ceil(1.0 * A.height / dimBlock.y));
    hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
    // Read C from device memory (blocking copy synchronises with the kernel)
    hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
    // Free device memory
    hipFree(d_A.elements);
    hipFree(d_B.elements);
    hipFree(d_C.elements);  // was leaked: only A and B were freed before
}
// Matrix multiplication kernel called by MatrixMul()
// One thread computes one element of C; threads that fall outside C's
// bounds return immediately.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.height || col >= B.width) return;
    float acc = 0;
    for (int k = 0; k < A.width; ++k)
    {
        acc += mat_get(A, row, k) * mat_get(B, k, col);
    }
    mat_set(C, row, col, acc);
}
#endif | 0636f486a333ad0eaf00a37440397ea2b6dd57f4.cu | #ifndef _MATMUL_KERNEL_H_
#define _MATMUL_KERNEL_H_
#include <stdio.h>
#include "matrix.h"
#include "math.h"
////////////////////////UTILITY FUNCTIONS////////////////////////
// Reads element (y, x) of the row-major matrix m.
__device__ float mat_get(Matrix& m, int y, int x)
{
    int idx = y * m.width + x;
    return m.elements[idx];
}
// Writes `value` into element (y, x) of the row-major matrix m.
__device__ void mat_set(Matrix& m, int y, int x, float value)
{
    int idx = y * m.width + x;
    m.elements[idx] = value;
}
////////////////////GPU-BASED IMPLEMENTATION//////////////////////
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host-side matrix multiply C = A * B using MatMulKernel.
// A, B, C are host-resident Matrix structs (row-major float elements);
// C.elements must already be allocated with C.width == B.width and
// C.height == A.height -- assumed from the kernel's indexing, TODO confirm.
// Fix: d_C.elements was previously never freed (leak on every call).
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A to device memory
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc((void**)&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // Load B to device memory
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc((void**)&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc((void**)&d_C.elements, size);
    // Invoke kernel: blocks capped at 16x16; grid rounded up so every
    // element of the A.height x B.width result is covered.
    dim3 dimBlock(min(B.width, 16), min(A.height, 16));
    dim3 dimGrid((int)ceil(1.0 * B.width / dimBlock.x), (int)ceil(1.0 * A.height / dimBlock.y));
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Read C from device memory (blocking copy also synchronizes with the kernel)
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);  // fix: previously leaked
}
// Matrix multiplication kernel called by MatrixMul()
// also see comments to Playground.Conflux\SampleKernels\MatMulKernel.cs
// One thread computes one element C[row][col] of C = A * B.
// Threads mapped past the result's extent exit immediately, so the grid
// may safely over-cover the A.height x B.width output.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= A.height || col >= B.width)
        return;
    float acc = 0;
    // Dot product of row `row` of A with column `col` of B.
    for (int k = 0; k < A.width; ++k)
        acc += mat_get(A, row, k) * mat_get(B, k, col);
    mat_set(C, row, col, acc);
}
#endif |
9a3b708ef3f4a92dce534b54ce44498d9ba13696.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include "helper_math.h"
#include "math_constants.h"
#include "poiseuilleFlowKernel.cuh"
#if USE_TEX
texture<float4, 1, hipReadModeElementType> oldPosTex;
texture<float4, 1, hipReadModeElementType> oldVelTex;
texture<float4, 1, hipReadModeElementType> oldMeasuresTex;
texture<uint, 1, hipReadModeElementType> gridParticleHashTex;
texture<uint, 1, hipReadModeElementType> cellStartTex;
texture<uint, 1, hipReadModeElementType> cellEndTex;
#endif
__constant__ PoiseuilleParams params;
// Maps a world-space position to integer grid coordinates. Each cell spans
// 2 * particleRadius per axis, anchored at params.worldOrigin.
// (Expression kept as (diff * 0.5f) / radius to preserve rounding.)
__device__ int3 calcGridPos(float3 p){
    int3 cell;
    cell.x = floor((p.x - params.worldOrigin.x) * 0.5f / params.particleRadius);
    cell.y = floor((p.y - params.worldOrigin.y) * 0.5f / params.particleRadius);
    cell.z = floor((p.z - params.worldOrigin.z) * 0.5f / params.particleRadius);
    return cell;
}
// Flattens (possibly out-of-range) grid coordinates into a linear cell hash.
// The AND with (size - 1) wraps each coordinate, which is only correct when
// every gridSize component is a power of two.
__device__ uint calcGridHash(int3 gridPos){
    gridPos.x &= params.gridSize.x - 1;
    gridPos.y &= params.gridSize.y - 1;
    gridPos.z &= params.gridSize.z - 1;
    const uint hash = __umul24(__umul24(gridPos.z, params.gridSize.y), params.gridSize.x) + __umul24(gridPos.y, params.gridSize.x) + gridPos.x;
    return hash;
}
// One thread per particle: computes each particle's cell hash and records
// (hash, original index) pairs to be sorted before reordering.
__global__ void calculatePoiseuilleHashD(
    uint* gridParticleHash,  // output: cell hash per particle
    uint* gridParticleIndex, // output: identity index per particle
    float4* pos,             // input: particle positions
    uint numParticles){
    const uint tid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    if (tid >= numParticles) return;
    volatile float4 p = pos[tid];
    const int3 cell = calcGridPos(make_float3(p.x, p.y, p.z));
    gridParticleHash[tid] = calcGridHash(cell);
    gridParticleIndex[tid] = tid;
}
// Kernel: given hash-sorted particle order, finds each cell's [start, end)
// range in the sorted array and gathers pos/vel into sorted order.
// Launch requirement: dynamic shared memory of (blockDim.x + 1) * sizeof(uint).
__global__ void reorderPoiseuilleDataD(
uint* cellStart, // output: first sorted index of each cell (indexed by hash)
uint* cellEnd, // output: one-past-last sorted index of each cell
float4* sortedPos, // output: positions in hash-sorted order
float4* sortedVel, // output: velocities in hash-sorted order
uint * gridParticleHash, // input: sorted cell hashes
uint * gridParticleIndex,// input: original particle index per sorted slot
float4* oldPos, // input: positions in original order
float4* oldVel, // input: velocities in original order
uint numParticles){
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint hash;
if (index < numParticles) {
hash = gridParticleHash[index];
// Hashes are staggered by one slot so each thread can compare against its
// predecessor's hash; thread 0 loads the hash preceding this block.
sharedHash[threadIdx.x+1] = hash;
if (index > 0 && threadIdx.x == 0)
{
sharedHash[0] = gridParticleHash[index-1];
}
}
// Barrier: sharedHash must be fully written before any thread reads it.
__syncthreads();
if (index < numParticles) {
// A hash differing from the predecessor's marks a cell boundary.
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
// Gather pos/vel from original ordering into sorted order.
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex);
float4 vel = FETCH(oldVel, sortedIndex);
sortedPos[index] = pos;
sortedVel[index] = vel;
}
}
// Device helper for calculatePoiseuilleDensityD: accumulates smoothing-kernel
// weights over all particles in one grid cell. The x axis is treated as
// periodic: cells requested beyond the grid edge use a neighbour position
// shifted by the world width. Weight: coeff * (1 - q/2)^4 * (2q + 1) for
// q = dist / smoothingRadius < 2, coeff = 7 / (4*pi*h^2) -- appears to be
// the 2D Wendland C2 kernel, verify against the rest of the solver.
// NOTE(review): index, vel, oldVel and the density2/temp locals are unused.
__device__ float sumParticlesInDomain(
int3 gridPos,
uint index,
float4 pos,
float4* oldPos,
float4 vel,
float4* oldVel,
float4* measures,
uint* cellStart,
uint* cellEnd){
uint gridHash = calcGridHash(gridPos);
uint startIndex = FETCH(cellStart, gridHash);
float sum = 0.0f;
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = FETCH(cellEnd, gridHash);
for(uint j=startIndex; j<endIndex; j++) {
float4 pos2 = FETCH(oldPos, j);
float4 vel2 = FETCH(oldVel, j);
float density2 = measures[j].x;
float temp = 0.0f;
float worldXSize= params.gridSize.x * 2.0f * params.particleRadius;
float3 relPos = make_float3(pos - pos2);
// Periodic wrap in x: shift the neighbour by one world width when the
// requested cell lies outside the grid.
if(gridPos.x < 0)
relPos = make_float3(pos.x - (pos2.x - worldXSize),pos.y - pos2.y,pos.z - pos2.z);
else
if(gridPos.x > params.gridSize.x - 1)
relPos = make_float3(pos.x - (pos2.x + worldXSize),pos.y - pos2.y,pos.z - pos2.z);
float dist = length(relPos);
float q = dist / params.smoothingRadius;
float coeff = 7.0f / 4 / CUDART_PI_F / powf(params.smoothingRadius, 2);
// Kernel has compact support: contributes only for q < 2.
if(q < 2){
sum += coeff *(powf(1 - 0.5f * q, 4) * (2 * q + 1));
}
}
}
return sum;
}
// Kernel: per-particle SPH density and pressure (stored in measures.x / .y).
// Boundary particles (pos.w == 1) are pinned to restDensity; fluid particles
// sum kernel weights over the (2*cellcount+1)^3 neighbouring cells and scale
// by particleMass. Pressure uses the stiff EOS p = soundspeed^2 * density.
__global__ void calculatePoiseuilleDensityD(
float4* measures, //output: .x = density, .y = pressure
float4* oldPos, //input (sorted order)
float4* oldVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles){
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
float4 pos = FETCH(oldPos, index);
float4 vel = FETCH(oldVel, index);
// Boundary particles keep rest-state values.
if(pos.w == 1.0f){
measures[index].x = params.restDensity;
measures[index].y = powf(params.soundspeed, 2) * params.restDensity;
return;
}
int3 gridPos = calcGridPos(make_float3(pos));
float sum = 0.0f;
// Accumulate kernel weights from all neighbouring cells.
for(int z=-params.cellcount; z<=params.cellcount; z++) {
for(int y=-params.cellcount; y<=params.cellcount; y++) {
for(int x=-params.cellcount; x<=params.cellcount; x++) {
int3 neighbourPos = gridPos + make_int3(x, y, z);
sum += sumParticlesInDomain(
neighbourPos,
index,
pos,
oldPos,
vel,
oldVel,
measures,
cellStart,
cellEnd);
}
}
}
float dens = sum * params.particleMass;
measures[index].x = dens;
measures[index].y = powf(params.soundspeed, 2) * dens;
}
// Relative velocity v_i - v_j used by the viscosity term. When the neighbour
// j is a boundary particle (w == 1) lying outside the fluid slab, an
// artificial velocity beta * v_i is returned instead (wall treatment; the
// amplification factor beta is capped at 1000).
__device__ float4 getVelocityDiff(
    float4 iVelocity,
    float4 iPosition,
    float4 jVelocity,
    float4 jPosition)
{
    const float bottom = params.worldOrigin.y + params.boundaryOffset * 2.0f * params.particleRadius;
    const float top = bottom + params.fluidParticlesSize.y * 2.0f * params.particleRadius;
    const bool jIsBoundary = (jPosition.w == 1.0f);
    if (jIsBoundary && jPosition.y > top)
    {
        const float distA = top - iPosition.y;
        const float distB = jPosition.y - top;
        const float beta = fmin(1000.0f, 1 + distB / distA);
        return beta * iVelocity;
    }
    if (jIsBoundary && jPosition.y < bottom)
    {
        const float distA = iPosition.y - bottom;
        const float distB = bottom - jPosition.y;
        const float beta = fmin(1000.0f, 1 + distB / distA);
        return beta * iVelocity;
    }
    return iVelocity - jVelocity;
}
// Device helper for calculatePoiseuilleAccelerationD: accumulates the SPH
// momentum contributions (symmetric pressure term plus viscosity term using
// getVelocityDiff) from all particles in one grid cell. The x axis is
// periodic, mirroring sumParticlesInDomain.
// NOTE(review): texp, pexp and tempExpr are unused locals.
// NOTE(review): no epsilon guard on dist -- coincident distinct particles
// would divide by zero in normalize()/1/dist; confirm this cannot occur.
__device__ float3 sumNavierStokesForces(
int3 gridPos,
uint index,
float4 pos,
float4* oldPos,
float4 vel,
float4* oldVel,
float density,
float pressure,
float4* oldMeasures,
uint* cellStart,
uint* cellEnd){
uint gridHash = calcGridHash(gridPos);
uint startIndex = FETCH(cellStart, gridHash);
float3 tmpForce = make_float3(0.0f);
float texp = 0.0f;
float pexp = 0.0f;
if (startIndex != 0xffffffff) {
uint endIndex = FETCH(cellEnd, gridHash);
for(uint j=startIndex; j<endIndex; j++) {
// Skip self-interaction.
if (j != index) {
float4 pos2 = FETCH(oldPos, j);
float4 vel2 = FETCH(oldVel, j);
float4 measure = FETCH(oldMeasures, j);
float density2 = measure.x;
float pressure2 = measure.y;
float tempExpr = 0.0f;
float worldXSize= params.gridSize.x * 2.0f * params.particleRadius;
float3 relPos = make_float3(pos - pos2);
// Periodic wrap in x for cells beyond the grid edge.
if(gridPos.x < 0)
relPos = make_float3(pos) - make_float3(pos2.x - worldXSize, pos2.y, pos2.z);
else
if(gridPos.x > params.gridSize.x - 1)
relPos = make_float3(pos) - make_float3(pos2.x + worldXSize, pos2.y, pos2.z);
float dist = length(relPos);
float q = dist / params.smoothingRadius;
float coeff = 7.0f / 2 / CUDART_PI_F / powf(params.smoothingRadius, 3);
float temp = 0.0f;
float4 Vab = getVelocityDiff(vel, pos, vel2, pos2);
// temp is the kernel-gradient magnitude; support is q < 2.
if(q < 2){
temp = coeff * (-powf(1 - 0.5f * q,3) * (2 * q + 1) +powf(1 - 0.5f * q, 4));
// Pressure term (symmetric p/rho^2 form) + viscosity term.
tmpForce += -1.0f * params.particleMass *
(pressure / powf(density,2) + pressure2 / powf(density2,2)) *
normalize(relPos) * temp +
params.particleMass * (params.mu + params.mu) *
make_float3(Vab) / (density * density2) * 1.0f / dist * temp;
}
}
}
}
return tmpForce;
}
// Kernel: per-particle acceleration from pressure and viscous forces summed
// over the (2*cellcount+1)^3 neighbouring cells. Reads sorted arrays and
// scatters the result back to each particle's ORIGINAL index via
// gridParticleIndex (so later stages can use unsorted storage).
__global__ void calculatePoiseuilleAccelerationD(
float4* acceleration,
float4* oldMeasures,
float4* oldPos,
float4* oldVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles){
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
float4 pos = FETCH(oldPos, index);
float4 vel = FETCH(oldVel, index);
float4 measure = FETCH(oldMeasures,index);
float density = measure.x;
float pressure = measure.y;
int3 gridPos = calcGridPos(make_float3(pos));
float3 force = make_float3(0.0f);
// Accumulate force contributions from all neighbouring cells.
for(int z=-params.cellcount; z<=params.cellcount; z++) {
for(int y=-params.cellcount; y<=params.cellcount; y++) {
for(int x=-params.cellcount; x<=params.cellcount; x++) {
int3 neighbourPos = gridPos + make_int3(x, y, z);
force += sumNavierStokesForces(neighbourPos,
index,
pos,
oldPos,
vel,
oldVel,
density,
pressure,
oldMeasures,
cellStart,
cellEnd);
}
}
}
// Write back using the pre-sort index.
uint originalIndex = gridParticleIndex[index];
float3 acc = force;
acceleration[originalIndex] = make_float4(acc, 0.0f);
}
// Kernel: semi-implicit Euler time step with gravity. velLeapFrogArray gets
// the average of old and new velocities (leapfrog half-step value). Boundary
// particles (w == 1) are not moved. Positions wrap periodically in +x.
// NOTE(review): only the pos.x > halfWorldXSize side is wrapped -- confirm
// particles can never drift past the -x edge.
__global__ void integratePoiseuilleSystemD(
float4* posArray, // input, output
float4* velArray, // input, output
float4* velLeapFrogArray, // output
float4* acceleration, // input
uint numParticles){
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
volatile float4 posData = posArray[index];
if(posData.w == 1.0f) return;//skip boundary particle
volatile float4 velData = velArray[index];
volatile float4 accData = acceleration[index];
volatile float4 velLeapFrogData = velLeapFrogArray[index];
float3 pos = make_float3(posData.x, posData.y, posData.z);
float3 vel = make_float3(velData.x, velData.y, velData.z);
float3 acc = make_float3(accData.x, accData.y, accData.z);
// v_{n+1} = v_n + (g + a) * dt; position advanced with the new velocity.
float3 nextVel = vel + (params.gravity + acc) * params.deltaTime;
float3 velLeapFrog = vel + nextVel;
velLeapFrog *= 0.5f;
vel = nextVel;
pos += vel * params.deltaTime;
// Periodic wrap across the +x world boundary.
float halfWorldXSize = params.gridSize.x * params.particleRadius;
if(pos.x > halfWorldXSize){
pos.x -= 2 * halfWorldXSize;
}
posArray[index] = make_float4(pos, posData.w);
velArray[index] = make_float4(vel, velData.w);
velLeapFrogArray[index] = make_float4(velLeapFrog, velLeapFrogData.w);
}
| 9a3b708ef3f4a92dce534b54ce44498d9ba13696.cu | #include <stdio.h>
#include <math.h>
#include "helper_math.h"
#include "math_constants.h"
#include "poiseuilleFlowKernel.cuh"
#if USE_TEX
texture<float4, 1, cudaReadModeElementType> oldPosTex;
texture<float4, 1, cudaReadModeElementType> oldVelTex;
texture<float4, 1, cudaReadModeElementType> oldMeasuresTex;
texture<uint, 1, cudaReadModeElementType> gridParticleHashTex;
texture<uint, 1, cudaReadModeElementType> cellStartTex;
texture<uint, 1, cudaReadModeElementType> cellEndTex;
#endif
__constant__ PoiseuilleParams params;
// Maps a world-space position to integer grid coordinates. Each cell spans
// 2 * particleRadius per axis, anchored at params.worldOrigin.
// (Expression kept as (diff * 0.5f) / radius to preserve rounding.)
__device__ int3 calcGridPos(float3 p){
    int3 cell;
    cell.x = floor((p.x - params.worldOrigin.x) * 0.5f / params.particleRadius);
    cell.y = floor((p.y - params.worldOrigin.y) * 0.5f / params.particleRadius);
    cell.z = floor((p.z - params.worldOrigin.z) * 0.5f / params.particleRadius);
    return cell;
}
// Flattens (possibly out-of-range) grid coordinates into a linear cell hash.
// The AND with (size - 1) wraps each coordinate, which is only correct when
// every gridSize component is a power of two.
__device__ uint calcGridHash(int3 gridPos){
    gridPos.x &= params.gridSize.x - 1;
    gridPos.y &= params.gridSize.y - 1;
    gridPos.z &= params.gridSize.z - 1;
    const uint hash = __umul24(__umul24(gridPos.z, params.gridSize.y), params.gridSize.x) + __umul24(gridPos.y, params.gridSize.x) + gridPos.x;
    return hash;
}
// One thread per particle: computes each particle's cell hash and records
// (hash, original index) pairs to be sorted before reordering.
__global__ void calculatePoiseuilleHashD(
    uint* gridParticleHash,  // output: cell hash per particle
    uint* gridParticleIndex, // output: identity index per particle
    float4* pos,             // input: particle positions
    uint numParticles){
    const uint tid = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    if (tid >= numParticles) return;
    volatile float4 p = pos[tid];
    const int3 cell = calcGridPos(make_float3(p.x, p.y, p.z));
    gridParticleHash[tid] = calcGridHash(cell);
    gridParticleIndex[tid] = tid;
}
// Kernel: given hash-sorted particle order, finds each cell's [start, end)
// range in the sorted array and gathers pos/vel into sorted order.
// Launch requirement: dynamic shared memory of (blockDim.x + 1) * sizeof(uint).
__global__ void reorderPoiseuilleDataD(
uint* cellStart, // output: first sorted index of each cell (indexed by hash)
uint* cellEnd, // output: one-past-last sorted index of each cell
float4* sortedPos, // output: positions in hash-sorted order
float4* sortedVel, // output: velocities in hash-sorted order
uint * gridParticleHash, // input: sorted cell hashes
uint * gridParticleIndex,// input: original particle index per sorted slot
float4* oldPos, // input: positions in original order
float4* oldVel, // input: velocities in original order
uint numParticles){
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint hash;
if (index < numParticles) {
hash = gridParticleHash[index];
// Hashes are staggered by one slot so each thread can compare against its
// predecessor's hash; thread 0 loads the hash preceding this block.
sharedHash[threadIdx.x+1] = hash;
if (index > 0 && threadIdx.x == 0)
{
sharedHash[0] = gridParticleHash[index-1];
}
}
// Barrier: sharedHash must be fully written before any thread reads it.
__syncthreads();
if (index < numParticles) {
// A hash differing from the predecessor's marks a cell boundary.
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
// Gather pos/vel from original ordering into sorted order.
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex);
float4 vel = FETCH(oldVel, sortedIndex);
sortedPos[index] = pos;
sortedVel[index] = vel;
}
}
// Device helper for calculatePoiseuilleDensityD: accumulates smoothing-kernel
// weights over all particles in one grid cell. The x axis is treated as
// periodic: cells requested beyond the grid edge use a neighbour position
// shifted by the world width. Weight: coeff * (1 - q/2)^4 * (2q + 1) for
// q = dist / smoothingRadius < 2, coeff = 7 / (4*pi*h^2) -- appears to be
// the 2D Wendland C2 kernel, verify against the rest of the solver.
// NOTE(review): index, vel, oldVel and the density2/temp locals are unused.
__device__ float sumParticlesInDomain(
int3 gridPos,
uint index,
float4 pos,
float4* oldPos,
float4 vel,
float4* oldVel,
float4* measures,
uint* cellStart,
uint* cellEnd){
uint gridHash = calcGridHash(gridPos);
uint startIndex = FETCH(cellStart, gridHash);
float sum = 0.0f;
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = FETCH(cellEnd, gridHash);
for(uint j=startIndex; j<endIndex; j++) {
float4 pos2 = FETCH(oldPos, j);
float4 vel2 = FETCH(oldVel, j);
float density2 = measures[j].x;
float temp = 0.0f;
float worldXSize= params.gridSize.x * 2.0f * params.particleRadius;
float3 relPos = make_float3(pos - pos2);
// Periodic wrap in x: shift the neighbour by one world width when the
// requested cell lies outside the grid.
if(gridPos.x < 0)
relPos = make_float3(pos.x - (pos2.x - worldXSize),pos.y - pos2.y,pos.z - pos2.z);
else
if(gridPos.x > params.gridSize.x - 1)
relPos = make_float3(pos.x - (pos2.x + worldXSize),pos.y - pos2.y,pos.z - pos2.z);
float dist = length(relPos);
float q = dist / params.smoothingRadius;
float coeff = 7.0f / 4 / CUDART_PI_F / powf(params.smoothingRadius, 2);
// Kernel has compact support: contributes only for q < 2.
if(q < 2){
sum += coeff *(powf(1 - 0.5f * q, 4) * (2 * q + 1));
}
}
}
return sum;
}
// Kernel: per-particle SPH density and pressure (stored in measures.x / .y).
// Boundary particles (pos.w == 1) are pinned to restDensity; fluid particles
// sum kernel weights over the (2*cellcount+1)^3 neighbouring cells and scale
// by particleMass. Pressure uses the stiff EOS p = soundspeed^2 * density.
__global__ void calculatePoiseuilleDensityD(
float4* measures, //output: .x = density, .y = pressure
float4* oldPos, //input (sorted order)
float4* oldVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles){
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
float4 pos = FETCH(oldPos, index);
float4 vel = FETCH(oldVel, index);
// Boundary particles keep rest-state values.
if(pos.w == 1.0f){
measures[index].x = params.restDensity;
measures[index].y = powf(params.soundspeed, 2) * params.restDensity;
return;
}
int3 gridPos = calcGridPos(make_float3(pos));
float sum = 0.0f;
// Accumulate kernel weights from all neighbouring cells.
for(int z=-params.cellcount; z<=params.cellcount; z++) {
for(int y=-params.cellcount; y<=params.cellcount; y++) {
for(int x=-params.cellcount; x<=params.cellcount; x++) {
int3 neighbourPos = gridPos + make_int3(x, y, z);
sum += sumParticlesInDomain(
neighbourPos,
index,
pos,
oldPos,
vel,
oldVel,
measures,
cellStart,
cellEnd);
}
}
}
float dens = sum * params.particleMass;
measures[index].x = dens;
measures[index].y = powf(params.soundspeed, 2) * dens;
}
// Relative velocity v_i - v_j used by the viscosity term. When the neighbour
// j is a boundary particle (w == 1) lying outside the fluid slab, an
// artificial velocity beta * v_i is returned instead (wall treatment; the
// amplification factor beta is capped at 1000).
__device__ float4 getVelocityDiff(
    float4 iVelocity,
    float4 iPosition,
    float4 jVelocity,
    float4 jPosition)
{
    const float bottom = params.worldOrigin.y + params.boundaryOffset * 2.0f * params.particleRadius;
    const float top = bottom + params.fluidParticlesSize.y * 2.0f * params.particleRadius;
    const bool jIsBoundary = (jPosition.w == 1.0f);
    if (jIsBoundary && jPosition.y > top)
    {
        const float distA = top - iPosition.y;
        const float distB = jPosition.y - top;
        const float beta = fmin(1000.0f, 1 + distB / distA);
        return beta * iVelocity;
    }
    if (jIsBoundary && jPosition.y < bottom)
    {
        const float distA = iPosition.y - bottom;
        const float distB = bottom - jPosition.y;
        const float beta = fmin(1000.0f, 1 + distB / distA);
        return beta * iVelocity;
    }
    return iVelocity - jVelocity;
}
// Device helper for calculatePoiseuilleAccelerationD: accumulates the SPH
// momentum contributions (symmetric pressure term plus viscosity term using
// getVelocityDiff) from all particles in one grid cell. The x axis is
// periodic, mirroring sumParticlesInDomain.
// NOTE(review): texp, pexp and tempExpr are unused locals.
// NOTE(review): no epsilon guard on dist -- coincident distinct particles
// would divide by zero in normalize()/1/dist; confirm this cannot occur.
__device__ float3 sumNavierStokesForces(
int3 gridPos,
uint index,
float4 pos,
float4* oldPos,
float4 vel,
float4* oldVel,
float density,
float pressure,
float4* oldMeasures,
uint* cellStart,
uint* cellEnd){
uint gridHash = calcGridHash(gridPos);
uint startIndex = FETCH(cellStart, gridHash);
float3 tmpForce = make_float3(0.0f);
float texp = 0.0f;
float pexp = 0.0f;
if (startIndex != 0xffffffff) {
uint endIndex = FETCH(cellEnd, gridHash);
for(uint j=startIndex; j<endIndex; j++) {
// Skip self-interaction.
if (j != index) {
float4 pos2 = FETCH(oldPos, j);
float4 vel2 = FETCH(oldVel, j);
float4 measure = FETCH(oldMeasures, j);
float density2 = measure.x;
float pressure2 = measure.y;
float tempExpr = 0.0f;
float worldXSize= params.gridSize.x * 2.0f * params.particleRadius;
float3 relPos = make_float3(pos - pos2);
// Periodic wrap in x for cells beyond the grid edge.
if(gridPos.x < 0)
relPos = make_float3(pos) - make_float3(pos2.x - worldXSize, pos2.y, pos2.z);
else
if(gridPos.x > params.gridSize.x - 1)
relPos = make_float3(pos) - make_float3(pos2.x + worldXSize, pos2.y, pos2.z);
float dist = length(relPos);
float q = dist / params.smoothingRadius;
float coeff = 7.0f / 2 / CUDART_PI_F / powf(params.smoothingRadius, 3);
float temp = 0.0f;
float4 Vab = getVelocityDiff(vel, pos, vel2, pos2);
// temp is the kernel-gradient magnitude; support is q < 2.
if(q < 2){
temp = coeff * (-powf(1 - 0.5f * q,3) * (2 * q + 1) +powf(1 - 0.5f * q, 4));
// Pressure term (symmetric p/rho^2 form) + viscosity term.
tmpForce += -1.0f * params.particleMass *
(pressure / powf(density,2) + pressure2 / powf(density2,2)) *
normalize(relPos) * temp +
params.particleMass * (params.mu + params.mu) *
make_float3(Vab) / (density * density2) * 1.0f / dist * temp;
}
}
}
}
return tmpForce;
}
// Kernel: per-particle acceleration from pressure and viscous forces summed
// over the (2*cellcount+1)^3 neighbouring cells. Reads sorted arrays and
// scatters the result back to each particle's ORIGINAL index via
// gridParticleIndex (so later stages can use unsorted storage).
__global__ void calculatePoiseuilleAccelerationD(
float4* acceleration,
float4* oldMeasures,
float4* oldPos,
float4* oldVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles){
uint index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
float4 pos = FETCH(oldPos, index);
float4 vel = FETCH(oldVel, index);
float4 measure = FETCH(oldMeasures,index);
float density = measure.x;
float pressure = measure.y;
int3 gridPos = calcGridPos(make_float3(pos));
float3 force = make_float3(0.0f);
// Accumulate force contributions from all neighbouring cells.
for(int z=-params.cellcount; z<=params.cellcount; z++) {
for(int y=-params.cellcount; y<=params.cellcount; y++) {
for(int x=-params.cellcount; x<=params.cellcount; x++) {
int3 neighbourPos = gridPos + make_int3(x, y, z);
force += sumNavierStokesForces(neighbourPos,
index,
pos,
oldPos,
vel,
oldVel,
density,
pressure,
oldMeasures,
cellStart,
cellEnd);
}
}
}
// Write back using the pre-sort index.
uint originalIndex = gridParticleIndex[index];
float3 acc = force;
acceleration[originalIndex] = make_float4(acc, 0.0f);
}
// Kernel: semi-implicit Euler time step with gravity. velLeapFrogArray gets
// the average of old and new velocities (leapfrog half-step value). Boundary
// particles (w == 1) are not moved. Positions wrap periodically in +x.
// NOTE(review): only the pos.x > halfWorldXSize side is wrapped -- confirm
// particles can never drift past the -x edge.
__global__ void integratePoiseuilleSystemD(
float4* posArray, // input, output
float4* velArray, // input, output
float4* velLeapFrogArray, // output
float4* acceleration, // input
uint numParticles){
uint index = __umul24(blockIdx.x,blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
volatile float4 posData = posArray[index];
if(posData.w == 1.0f) return;//skip boundary particle
volatile float4 velData = velArray[index];
volatile float4 accData = acceleration[index];
volatile float4 velLeapFrogData = velLeapFrogArray[index];
float3 pos = make_float3(posData.x, posData.y, posData.z);
float3 vel = make_float3(velData.x, velData.y, velData.z);
float3 acc = make_float3(accData.x, accData.y, accData.z);
// v_{n+1} = v_n + (g + a) * dt; position advanced with the new velocity.
float3 nextVel = vel + (params.gravity + acc) * params.deltaTime;
float3 velLeapFrog = vel + nextVel;
velLeapFrog *= 0.5f;
vel = nextVel;
pos += vel * params.deltaTime;
// Periodic wrap across the +x world boundary.
float halfWorldXSize = params.gridSize.x * params.particleRadius;
if(pos.x > halfWorldXSize){
pos.x -= 2 * halfWorldXSize;
}
posArray[index] = make_float4(pos, posData.w);
velArray[index] = make_float4(vel, velData.w);
velLeapFrogArray[index] = make_float4(velLeapFrog, velLeapFrogData.w);
}
|
dd0d9eace01b3e35a5d3983eb281339f76839504.hip | // !!! This is a file automatically generated by hipify!!!
#include <af/LinearSolver.cuh>
namespace af {
// Creates a dense hipSOLVER handle; the caller owns and must destroy it.
hipsolverDnHandle_t createHandleDn() {
    hipsolverDnHandle_t handle = NULL;
    hipsolverDnCreate(&handle);
    CUDA_CHECK;
    return handle;
}
// Solves the dense SPD system A x = b in place via Cholesky factorization
// (single precision, upper triangle). ADense (device, dim x dim) is
// overwritten with its factor; bDense (device, length dim) is overwritten
// with the solution x. Returns false on bad input or solver failure.
bool linSolvCholDn(hipsolverDnHandle_t handleDn, float* ADense, float* bDense, const int dim) {
if (ADense == NULL || bDense == NULL)
return false;
// throw std::runtime_error("af::linSolvCholDn(): input arrays cannot be empty.");
Timer timer; // NOTE(review): never used afterwards -- confirm whether Timer measures via RAII.
const hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER;
const int nrhs = 1;
const int lda = dim;
const int ldb = dim;
int workSize = 0;
// Query the workspace size needed by the factorization.
hipsolverDnSpotrf_bufferSize(handleDn, uplo, dim, ADense, lda, &workSize);
CUDA_CHECK;
thrust::device_vector<float> workspace_d(workSize);
thrust::device_vector<int> devInfo_d(1, 0);
// Cholesky Factorization
hipsolverDnSpotrf(handleDn, uplo, dim, ADense, lda, workspace_d.data().get(), workSize, devInfo_d.data().get());
if (devInfo_d[0] != 0) {
std::cout << "af::linSolvCholDn(): hipsolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): hipsolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
// Solve Ax = b
hipsolverDnSpotrs(handleDn, uplo, dim, nrhs, ADense, lda, bDense, ldb, devInfo_d.data().get());
if (devInfo_d[0] != 0) {
std::cout << "af::hipsolverDnSpotrs(): hipsolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): hipsolverDnSpotrs failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
return true;
}
// Solves the dense system A x = b in place via LU factorization WITHOUT
// pivoting (the pivot array passed to getrf/getrs is NULL). ADense is
// overwritten with its LU factors, bDense with the solution x. Returns
// false if factorization fails; throws if the solve step fails.
// Fix: the temporary stream was previously leaked on both failure paths.
// NOTE(review): unpivoted LU is unstable for general matrices -- confirm A
// is well conditioned at all call sites.
bool linSolvLUDn(hipsolverDnHandle_t handleDn, float* ADense, float* bDense, const int dim) {
    if (ADense == NULL || bDense == NULL)
        return false;
    hipStream_t stream = NULL;
    hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
    hipsolverDnSetStream(handleDn, stream);
    const int nrhs = 1;
    const int lda = dim;
    const int ldb = dim;
    int workSize = 0;
    hipsolverDnSgetrf_bufferSize(handleDn, dim, dim, ADense, lda, &workSize);
    hipDeviceSynchronize();
    CUDA_CHECK;
    thrust::device_vector<float> workspace_d(workSize);
    thrust::device_vector<int> devInfo_d(1, 0);
    // LU factorization (ipiv == NULL: no pivoting).
    hipsolverDnSgetrf(handleDn, dim, dim, ADense, lda, workspace_d.data().get(), NULL, devInfo_d.data().get());
    hipDeviceSynchronize();
    if (devInfo_d[0] != 0) {
        hipStreamDestroy(stream);  // fix: stream previously leaked here
        return false;
    }
    CUDA_CHECK;
    // Solve Ax = b using the factors.
    hipsolverDnSgetrs(handleDn, HIPBLAS_OP_N, dim, nrhs, ADense, lda, NULL, bDense, ldb, devInfo_d.data().get());
    hipDeviceSynchronize();
    if (devInfo_d[0] != 0) {
        hipStreamDestroy(stream);  // fix: stream previously leaked on throw
        throw std::runtime_error("af::linSolvLUDn(): hipsolverDnSgetrs failed, devinfo : " + std::to_string(devInfo_d[0]));
    }
    CUDA_CHECK;
    if (stream)
        hipStreamDestroy(stream);
    return true;
}
// Solves the dense system A x = b via QR: geqrf factorizes A in place,
// ormqr applies Q^T to b, then a triangular solve (Strsm) with R yields x
// in bDense. Returns false on bad input or solver failure.
// NOTE(review): the workspace sized for Sgeqrf is reused for Sormqr --
// verify Sormqr never needs a larger buffer for these shapes.
bool linSolvQRDn(hipsolverDnHandle_t handleDn, hipblasHandle_t handleCublas, float* ADense, float* bDense, const int dim) {
if (ADense == NULL || bDense == NULL)
return false;
const int m = dim;
const int lda = m;
const int ldb = m;
const int nrhs = 1;
// Householder scalars produced by the factorization.
thrust::device_vector<float> tau_d(m);
int workSize = 0;
hipsolverDnSgeqrf_bufferSize(handleDn, dim, dim, ADense, lda, &workSize);
hipDeviceSynchronize();
CUDA_CHECK;
thrust::device_vector<float> workspace_d(workSize);
thrust::device_vector<int> devInfo_d(1, 0);
// QR factorization: A <- QR (Householder form).
hipsolverDnSgeqrf(handleDn, dim, dim, ADense, lda, tau_d.data().get(), workspace_d.data().get(), workSize,
devInfo_d.data().get());
hipDeviceSynchronize();
if (devInfo_d[0] != 0) {
std::cout << "af::linSolvQRDn(): hipsolverDnSgeqrf failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): hipsolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
// b <- Q^T * b.
hipsolverDnSormqr(handleDn, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_T, m, nrhs, m, ADense, lda, tau_d.data().get(), bDense, ldb,
workspace_d.data().get(), workSize, devInfo_d.data().get());
hipDeviceSynchronize();
if (devInfo_d[0] != 0) {
std::cout << "af::linSolvQRDn(): hipsolverDnSormqr failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): hipsolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
// Back substitution: solve R * x = Q^T b.
const float one = 1.f;
hipblasStrsm(handleCublas, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, m, nrhs, &one, ADense,
lda, bDense, ldb);
hipDeviceSynchronize();
return true;
}
/*
* solve A*x = b by LU with partial pivoting
*
*/
/*
 * Solves A*x = b by LU with partial pivoting.
 * A (device, n x n) is overwritten with its LU factors, b (device) with x,
 * ipiv (device, length n) receives the pivot sequence.
 * Returns false if factorization fails.
 * Fix: info/buffer device allocations were previously leaked on the
 * failure path.
 */
bool linearSolverLU(hipsolverDnHandle_t handle, int n, float* A, int lda, float* b, int* ipiv) {
    int bufferSize = 0;
    int* info = NULL;
    float* buffer = NULL;
    int h_info = 0;
    hipsolverDnSgetrf_bufferSize(handle, n, n, (float*)A, lda, &bufferSize);
    hipMalloc(&info, sizeof(int));
    hipMalloc(&buffer, sizeof(float) * bufferSize);
    hipMemset(info, 0, sizeof(int));
    // getrf will overwrite A with L
    hipsolverDnSgetrf(handle, n, n, A, lda, buffer, ipiv, info);
    hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost);
    if (0 != h_info) {
        fprintf(stderr, "Error: LU factorization failed\n");
        hipFree(info);    // fix: previously leaked on failure
        hipFree(buffer);  // fix: previously leaked on failure
        return false;
    }
    hipsolverDnSgetrs(handle, HIPBLAS_OP_N, n, 1, A, lda, ipiv, b, n, info);
    hipDeviceSynchronize();
    if (info) {
        hipFree(info);
    }
    if (buffer) {
        hipFree(buffer);
    }
    return true;
}
/*
 * Solves A*x = b by LU with partial pivoting without destroying the input:
 * Acopy is left untouched (a device-side working copy is factorized), the
 * solution is written to x. Returns false if factorization fails.
 * NOTE(review): the ipiv parameter is immediately shadowed by a fresh
 * device allocation (the pointer is passed by value), so callers never see
 * the pivot sequence -- confirm whether ipiv was meant to be an output.
 * Fix: info/buffer/A/local ipiv were previously leaked on the failure path,
 * and the locally-allocated ipiv leaked on every call.
 */
bool linearSolverLUStable(hipsolverDnHandle_t handle, int n, const float* Acopy, int lda, const float* b, int* ipiv, float* x) {
    int bufferSize = 0;
    int* info = NULL;
    float* buffer = NULL;
    float* A = NULL;
    int h_info = 0;
    hipsolverDnSgetrf_bufferSize(handle, n, n, (float*)Acopy, lda, &bufferSize);
    hipMalloc(&info, sizeof(int));
    hipMalloc(&buffer, sizeof(float) * bufferSize);
    hipMalloc(&A, sizeof(float) * lda * n);
    hipMalloc(&ipiv, sizeof(int) * n);  // shadows the caller's pointer (see note)
    // prepare a copy of A because getrf will overwrite A with L
    hipMemcpy(A, Acopy, sizeof(float) * lda * n, hipMemcpyDeviceToDevice);
    hipMemset(info, 0, sizeof(int));
    hipsolverDnSgetrf(handle, n, n, A, lda, buffer, ipiv, info);
    hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost);
    if (0 != h_info) {
        fprintf(stderr, "Error: LU factorization failed\n");
        hipFree(info);    // fix: previously leaked on failure
        hipFree(buffer);
        hipFree(A);
        hipFree(ipiv);
        return false;
    }
    hipMemcpy(x, b, sizeof(float) * n, hipMemcpyDeviceToDevice);
    hipsolverDnSgetrs(handle, HIPBLAS_OP_N, n, 1, A, lda, ipiv, x, n, info);
    hipDeviceSynchronize();
    if (info) {
        hipFree(info);
    }
    if (buffer) {
        hipFree(buffer);
    }
    if (A) {
        hipFree(A);
    }
    // fix: the local pivot allocation is unreachable after return, so it
    // must be freed here (it was previously leaked on every call).
    hipFree(ipiv);
    return true;
}
} // namespace af | dd0d9eace01b3e35a5d3983eb281339f76839504.cu | #include <af/LinearSolver.cuh>
namespace af {
cusolverDnHandle_t createHandleDn() {
cusolverDnHandle_t handleDn = NULL;
cusolverDnCreate(&handleDn);
CUDA_CHECK;
return handleDn;
}
bool linSolvCholDn(cusolverDnHandle_t handleDn, float* ADense, float* bDense, const int dim) {
if (ADense == NULL || bDense == NULL)
return false;
// throw std::runtime_error("af::linSolvCholDn(): input arrays cannot be empty.");
Timer timer;
const cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER;
const int nrhs = 1;
const int lda = dim;
const int ldb = dim;
int workSize = 0;
cusolverDnSpotrf_bufferSize(handleDn, uplo, dim, ADense, lda, &workSize);
CUDA_CHECK;
thrust::device_vector<float> workspace_d(workSize);
thrust::device_vector<int> devInfo_d(1, 0);
// Cholesky Factorization
cusolverDnSpotrf(handleDn, uplo, dim, ADense, lda, workspace_d.data().get(), workSize, devInfo_d.data().get());
if (devInfo_d[0] != 0) {
std::cout << "af::linSolvCholDn(): cusolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): cusolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
// Solve Ax = b
cusolverDnSpotrs(handleDn, uplo, dim, nrhs, ADense, lda, bDense, ldb, devInfo_d.data().get());
if (devInfo_d[0] != 0) {
std::cout << "af::cusolverDnSpotrs(): cusolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): cusolverDnSpotrs failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
return true;
}
bool linSolvLUDn(cusolverDnHandle_t handleDn, float* ADense, float* bDense, const int dim) {
if (ADense == NULL || bDense == NULL)
return false;
// throw std::runtime_error("af::linSolvLUDn(): input arrays cannot be empty.");
cudaStream_t stream = NULL;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
cusolverDnSetStream(handleDn, stream);
const int nrhs = 1;
const int lda = dim;
const int ldb = dim;
int workSize = 0;
cusolverDnSgetrf_bufferSize(handleDn, dim, dim, ADense, lda, &workSize);
cudaDeviceSynchronize();
CUDA_CHECK;
thrust::device_vector<float> workspace_d(workSize);
thrust::device_vector<int> devInfo_d(1, 0);
// LU Factorization
cusolverDnSgetrf(handleDn, dim, dim, ADense, lda, workspace_d.data().get(), NULL, devInfo_d.data().get());
cudaDeviceSynchronize();
if (devInfo_d[0] != 0)
return false;
// throw std::runtime_error("af::linSolvLUDn(): cusolverDnSgetrf failed, devinfo : " + std::to_string(devInfo_d[0]));
CUDA_CHECK;
// Solve Ax = b
cusolverDnSgetrs(handleDn, CUBLAS_OP_N, dim, nrhs, ADense, lda, NULL, bDense, ldb, devInfo_d.data().get());
cudaDeviceSynchronize();
if (devInfo_d[0] != 0)
throw std::runtime_error("af::linSolvLUDn(): cusolverDnSgetrs failed, devinfo : " + std::to_string(devInfo_d[0]));
CUDA_CHECK;
if (stream)
cudaStreamDestroy(stream);
return true;
}
bool linSolvQRDn(cusolverDnHandle_t handleDn, cublasHandle_t handleCublas, float* ADense, float* bDense, const int dim) {
if (ADense == NULL || bDense == NULL)
return false;
const int m = dim;
const int lda = m;
const int ldb = m;
const int nrhs = 1;
thrust::device_vector<float> tau_d(m);
int workSize = 0;
cusolverDnSgeqrf_bufferSize(handleDn, dim, dim, ADense, lda, &workSize);
cudaDeviceSynchronize();
CUDA_CHECK;
thrust::device_vector<float> workspace_d(workSize);
thrust::device_vector<int> devInfo_d(1, 0);
cusolverDnSgeqrf(handleDn, dim, dim, ADense, lda, tau_d.data().get(), workspace_d.data().get(), workSize,
devInfo_d.data().get());
cudaDeviceSynchronize();
if (devInfo_d[0] != 0) {
std::cout << "af::linSolvQRDn(): cusolverDnSgeqrf failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): cusolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
cusolverDnSormqr(handleDn, CUBLAS_SIDE_LEFT, CUBLAS_OP_T, m, nrhs, m, ADense, lda, tau_d.data().get(), bDense, ldb,
workspace_d.data().get(), workSize, devInfo_d.data().get());
cudaDeviceSynchronize();
if (devInfo_d[0] != 0) {
std::cout << "af::linSolvQRDn(): cusolverDnSormqr failed, devinfo : " + std::to_string(devInfo_d[0]) << "\n";
return false;
// throw std::runtime_error("af::linSolvCholDn(): cusolverDnSpotrf failed, devinfo : " + std::to_string(devInfo_d[0]));
}
CUDA_CHECK;
const float one = 1.f;
cublasStrsm(handleCublas, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, m, nrhs, &one, ADense,
lda, bDense, ldb);
cudaDeviceSynchronize();
return true;
}
/*
* solve A*x = b by LU with partial pivoting
*
*/
/*
 * Solve A*x = b in-place via LU factorization with partial pivoting (cuSOLVER).
 * A is overwritten with its LU factors; b is overwritten with the solution x.
 * ipiv must point to device memory holding at least n ints (pivot sequence).
 * Returns false when the factorization fails (singular matrix / bad argument).
 */
bool linearSolverLU(cusolverDnHandle_t handle, int n, float* A, int lda, float* b, int* ipiv) {
    int bufferSize = 0;
    int* info = NULL;      // device-side status word written by getrf/getrs
    float* buffer = NULL;  // getrf scratch workspace
    int h_info = 0;
    cusolverDnSgetrf_bufferSize(handle, n, n, A, lda, &bufferSize);
    cudaMalloc(&info, sizeof(int));
    cudaMalloc(&buffer, sizeof(float) * bufferSize);
    cudaMemset(info, 0, sizeof(int));
    // getrf will overwrite A with L and U
    cusolverDnSgetrf(handle, n, n, A, lda, buffer, ipiv, info);
    // cudaMemcpy is synchronizing, so h_info is valid after this call
    cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost);
    if (0 != h_info) {
        fprintf(stderr, "Error: LU factorization failed\n");
        // release scratch allocations on the error path too (previously leaked)
        cudaFree(info);
        cudaFree(buffer);
        return false;
    }
    cusolverDnSgetrs(handle, CUBLAS_OP_N, n, 1, A, lda, ipiv, b, n, info);
    cudaDeviceSynchronize();
    // cudaFree(NULL) is a no-op, so no guards are needed
    cudaFree(info);
    cudaFree(buffer);
    return true;
}
/*
 * Solve A*x = b via LU with partial pivoting without destroying the inputs:
 * Acopy and b are left untouched; the solution is written to x (device memory).
 * NOTE(review): the incoming `ipiv` argument is immediately shadowed by a local
 * cudaMalloc (pass-by-value), so the caller's buffer is never used — kept only
 * for interface compatibility. The local allocation is now freed before
 * returning, which fixes a per-call device-memory leak.
 * Returns false when the factorization fails.
 */
bool linearSolverLUStable(cusolverDnHandle_t handle, int n, const float* Acopy, int lda, const float* b, int* ipiv, float* x) {
    int bufferSize = 0;
    int* info = NULL;     // device-side status word from getrf/getrs
    float* buffer = NULL; // getrf scratch workspace
    float* A = NULL;      // working copy of the matrix (getrf factors in place)
    int h_info = 0;
    cusolverDnSgetrf_bufferSize(handle, n, n, (float*)Acopy, lda, &bufferSize);
    cudaMalloc(&info, sizeof(int));
    cudaMalloc(&buffer, sizeof(float) * bufferSize);
    cudaMalloc(&A, sizeof(float) * lda * n);
    cudaMalloc(&ipiv, sizeof(int) * n);  // shadows the caller's pointer
    // factor a copy so the caller's matrix survives
    cudaMemcpy(A, Acopy, sizeof(float) * lda * n, cudaMemcpyDeviceToDevice);
    cudaMemset(info, 0, sizeof(int));
    cusolverDnSgetrf(handle, n, n, A, lda, buffer, ipiv, info);
    // cudaMemcpy synchronizes, so h_info is valid after this call
    cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost);
    if (0 != h_info) {
        fprintf(stderr, "Error: LU factorization failed\n");
        // free everything on the error path too (previously leaked)
        cudaFree(info);
        cudaFree(buffer);
        cudaFree(A);
        cudaFree(ipiv);
        return false;
    }
    // solve into x so b stays intact
    cudaMemcpy(x, b, sizeof(float) * n, cudaMemcpyDeviceToDevice);
    cusolverDnSgetrs(handle, CUBLAS_OP_N, n, 1, A, lda, ipiv, x, n, info);
    cudaDeviceSynchronize();
    cudaFree(info);
    cudaFree(buffer);
    cudaFree(A);
    cudaFree(ipiv);  // locally allocated above; freeing fixes the leak
    return true;
}
} // namespace af |
931bca53423b677131aedc8fbc0fa89353cec9cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <iostream>
#include <metrics/scores.cuh>
#include <raft/mr/device/allocator.hpp>
#include <raft/random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Score {
class ScoreTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
typedef ScoreTest ScoreTestHighScore;
TEST(ScoreTestHighScore, Result)
{
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.12, 0.22, 0.32, 0.42, 0.52};
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
float* d_y;
raft::allocate(d_y, 5);
float* d_y_hat;
raft::allocate(d_y_hat, 5);
raft::update_device(d_y_hat, y_hat, 5, stream);
raft::update_device(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
ASSERT_TRUE(result == 0.98f);
CUDA_CHECK(hipStreamDestroy(stream));
}
typedef ScoreTest ScoreTestLowScore;
TEST(ScoreTestLowScore, Result)
{
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.012, 0.022, 0.032, 0.042, 0.052};
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
float* d_y;
raft::allocate(d_y, 5);
float* d_y_hat;
raft::allocate(d_y_hat, 5);
raft::update_device(d_y_hat, y_hat, 5, stream);
raft::update_device(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
std::cout << "Result: " << result - -3.4012f << std::endl;
ASSERT_TRUE(result - -3.4012f < 0.00001);
CUDA_CHECK(hipStreamDestroy(stream));
}
// Tests for accuracy_score
struct AccuracyInputs {
/**
* Number of predictions.
*/
int n;
/**
* Number of predictions w/ different values than their corresponding element in reference
* predictions. Valid range [0, n]. changed_n in [0, n] will yield accuracy of (n - changed_n) /
* n.
*/
int changed_n;
/**
* Seed for randomly generated predictions.
*/
unsigned long long int seed;
};
std::ostream& operator<<(::std::ostream& os, const AccuracyInputs& acc_inputs)
{
os << "AccuracyInputs are {" << acc_inputs.n << ", " << acc_inputs.changed_n << ", "
<< acc_inputs.seed << "}" << std::endl;
return os;
}
// Overwrites the first `changed_n` entries of `predictions` with a value that
// differs from the reference (ref + 1), so exactly `changed_n` predictions
// disagree with `ref_predictions`. Expects a 1D launch with at least
// `changed_n` threads in total; surplus threads fall through the guard.
template <typename T>
__global__ void change_vals(T* predictions, T* ref_predictions, const int changed_n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < changed_n) {
predictions[tid] = ref_predictions[tid] + 1; // change first changed_n predictions
}
}
template <typename T>
class AccuracyTest : public ::testing::TestWithParam<AccuracyInputs> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<AccuracyInputs>::GetParam();
ASSERT((params.changed_n <= params.n) && (params.changed_n >= 0), "Invalid params.");
raft::random::Rng r(params.seed);
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<raft::mr::device::allocator> d_allocator(
new raft::mr::device::default_allocator);
raft::allocate(predictions, params.n);
raft::allocate(ref_predictions, params.n);
r.normal(ref_predictions, params.n, (T)0.0, (T)1.0, stream);
raft::copy_async(predictions, ref_predictions, params.n, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
// Modify params.changed_n unique predictions to a different value. New value is irrelevant.
if (params.changed_n > 0) {
int threads = 64;
int blocks = raft::ceildiv(params.changed_n, threads);
//@todo Could also generate params.changed_n unique random positions in [0, n) range, instead
// of changing the first ones.
hipLaunchKernelGGL(( change_vals<T>)
, dim3(blocks), dim3(threads), 0, stream, predictions, ref_predictions, params.changed_n);
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipStreamSynchronize(stream));
}
computed_accuracy = MLCommon::Score::accuracy_score<T>(
predictions, ref_predictions, params.n, d_allocator, stream);
ref_accuracy = (params.n - params.changed_n) * 1.0f / params.n;
// std::cout << "computed_accuracy is " << computed_accuracy << " ref_accuracy is " <<
// ref_accuracy << std::endl;
}
void TearDown() override
{
CUDA_CHECK(hipFree(predictions));
CUDA_CHECK(hipFree(ref_predictions));
CUDA_CHECK(hipStreamDestroy(stream));
computed_accuracy = -1.0f;
ref_accuracy = -1.0f;
}
AccuracyInputs params;
T *predictions, *ref_predictions;
float computed_accuracy, ref_accuracy;
hipStream_t stream;
};
const std::vector<AccuracyInputs> inputs = {
{1, 1, 1234ULL}, // single element, wrong prediction
{1, 0, 1234ULL}, // single element, perfect prediction
{2, 1, 1234ULL}, // multiple elements, 0.5 accuracy
{1000, 0, 1234ULL}, // multiple elements, perfect predictions
{1000, 1000, 1234ULL}, // multiple elements, no correct predictions
{1000, 80, 1234ULL}, // multiple elements, prediction mix
{1000, 45, 1234ULL} // multiple elements, prediction mix
};
typedef AccuracyTest<float> AccuracyTestF;
TEST_P(AccuracyTestF, Result) { ASSERT_TRUE(computed_accuracy == ref_accuracy); }
typedef AccuracyTest<double> AccuracyTestD;
TEST_P(AccuracyTestD, Result) { ASSERT_TRUE(computed_accuracy == ref_accuracy); }
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestF, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestD, ::testing::ValuesIn(inputs));
// Tests for regression_metrics
template <typename T>
struct RegressionInputs {
T tolerance;
int n; // number of predictions
bool hardcoded_preds; // (hardcoded_preds) ? use predictions, ref_predictions : use randomly
// generated arrays.
std::vector<T> predictions;
std::vector<T> ref_predictions;
T predictions_range[2]; // predictions in predictions_range if not hardcoded_preds
T ref_predictions_range[2]; // predictions in ref_predictions_range if not hardcoded_preds
unsigned long long int seed;
};
template <typename T>
std::ostream& operator<<(std::ostream& os, const RegressionInputs<T>& reg_inputs)
{
os << "RegressionInputs are {" << reg_inputs.tolerance << ", " << reg_inputs.n << ", "
<< reg_inputs.hardcoded_preds << ", ";
if (reg_inputs.hardcoded_preds) {
os << "{";
for (int i = 0; i < reg_inputs.n; i++)
os << reg_inputs.predictions[i] << ", ";
os << "}, {";
for (int i = 0; i < reg_inputs.n; i++)
os << reg_inputs.ref_predictions[i] << ", ";
os << "}";
os << "{" << reg_inputs.predictions_range[0] << ", " << reg_inputs.predictions_range[1]
<< "}, ";
os << "{" << reg_inputs.ref_predictions_range[0] << ", " << reg_inputs.ref_predictions_range[1]
<< "}";
} else {
os << "{}, {}, {}, {}";
}
os << ", " << reg_inputs.seed;
return os;
}
// Host-side reference implementation of the metrics computed on the GPU by
// MLCommon::Score::regression_metrics. Fills regression_metrics with:
//   [0] mean absolute error, [1] mean squared error, [2] median absolute error.
// Assumes n >= 1, both prediction vectors hold at least n elements, and
// regression_metrics has at least 3 slots.
template <typename T>
void host_regression_computations(std::vector<T>& predictions,
std::vector<T>& ref_predictions,
const int n,
std::vector<double>& regression_metrics)
{
double abs_difference_sum = 0;
double mse_sum = 0;
std::vector<double> abs_diffs(n);
for (int i = 0; i < n; i++) {
double abs_diff = raft::abs(predictions[i] - ref_predictions[i]);
abs_difference_sum += abs_diff;
mse_sum += pow(predictions[i] - ref_predictions[i], 2);
abs_diffs[i] = abs_diff;
}
regression_metrics[0] = abs_difference_sum / n;
regression_metrics[1] = mse_sum / n;
// median of the absolute differences: average the two middle values for even n
std::sort(abs_diffs.begin(), abs_diffs.end());
int middle = n / 2;
if (n % 2 == 1) {
regression_metrics[2] = abs_diffs[middle];
} else {
regression_metrics[2] = (abs_diffs[middle] + abs_diffs[middle - 1]) / 2;
}
}
template <typename T>
class RegressionMetricsTest : public ::testing::TestWithParam<RegressionInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<RegressionInputs<T>>::GetParam();
computed_regression_metrics.assign(3, -1.0);
ref_regression_metrics.assign(3, -1.0);
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<raft::mr::device::allocator> d_allocator(
new raft::mr::device::default_allocator);
raft::allocate(d_predictions, params.n);
raft::allocate(d_ref_predictions, params.n);
if (params.hardcoded_preds) {
raft::update_device(d_predictions, params.predictions.data(), params.n, stream);
raft::update_device(d_ref_predictions, params.ref_predictions.data(), params.n, stream);
} else {
params.predictions.resize(params.n);
params.ref_predictions.resize(params.n);
raft::random::Rng r(params.seed);
// randomly generate arrays
r.uniform(
d_predictions, params.n, params.predictions_range[0], params.predictions_range[1], stream);
r.uniform(d_ref_predictions,
params.n,
params.ref_predictions_range[0],
params.ref_predictions_range[1],
stream);
// copy to host to compute reference regression metrics
raft::update_host(params.predictions.data(), d_predictions, params.n, stream);
raft::update_host(params.ref_predictions.data(), d_ref_predictions, params.n, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
MLCommon::Score::regression_metrics(d_predictions,
d_ref_predictions,
params.n,
d_allocator,
stream,
computed_regression_metrics[0],
computed_regression_metrics[1],
computed_regression_metrics[2]);
host_regression_computations(
params.predictions, params.ref_predictions, params.n, ref_regression_metrics);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(d_predictions));
CUDA_CHECK(hipFree(d_ref_predictions));
}
RegressionInputs<T> params;
T *d_predictions, *d_ref_predictions;
std::vector<double> computed_regression_metrics;
std::vector<double> ref_regression_metrics;
hipStream_t stream;
};
const std::vector<RegressionInputs<float>> regression_inputs_float = {
{0.00001f, 1, true, {10.2f}, {20.2f}, {}, {}, 1234ULL}, // single element
{0.00001f, 2, true, {10.2f, 40.2f}, {20.2f, 80.2f}, {}, {}, 1234ULL}, // two elements, mean same
// as median
// next three inputs should result in identical regression metrics values
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{20.5f, 40.5f, 55.5f, 80.5f, 100.5f, 120.5f},
{},
{},
1234ULL}, // diffs all negative, reverse sorted
{0.00001f,
6,
true,
{20.5f, 40.5f, 55.5f, 80.5f, 100.5f, 120.5f},
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{},
{},
1234ULL}, // diffs all positive, already sorted
{0.00001f,
6,
true,
{40.5f, 55.5f, 20.5f, 120.5f, 100.5f, 80.5f},
{20.5f, 30.5f, 10.5f, 60.5f, 50.5f, 40.5f},
{},
{},
1234ULL}, // mix
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{},
{},
1234ULL}, // identical predictions (0 error)
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{20.5f, 30.5f, 40.5f, 50.5f, 60.5f, 70.5f},
{},
{},
1234ULL}, // predictions[i] - ref_predictions[i] const for each i
{0.00001f,
2048,
false,
{},
{},
{-2048.0f, 2048.0f},
{-2048.0f, 2048.0f},
1234ULL}, // random mix, even number of elements
{0.00001f,
2049,
false,
{},
{},
{-2048.0f, 2048.0f},
{-2048.0f, 2048.0f},
1234ULL}, // random mix, odd number of elements
{0.00001f,
1024,
false,
{},
{},
{0.0f, 2048.0f},
{8192.0f, 16384.0f},
1234ULL}, // random mix, diffs are all negative
{0.00001f,
1024,
false,
{},
{},
{8192.0f, 16384.0f},
{0.0f, 2048.0f},
1234ULL} // random mix, diffs are all positive
};
const std::vector<RegressionInputs<double>> regression_inputs_double = {
{0.0000001, 1, true, {10.2}, {20.2}, {}, {}, 1234ULL}, // single element
{0.0000001, 2, true, {10.2, 40.2}, {20.2, 80.2}, {}, {}, 1234ULL}, // two elements
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{20.5, 40.5, 55.5, 80.5, 100.5, 120.5},
{},
{},
1234ULL}, // diffs all negative, reverse sorted
{0.0000001,
6,
true,
{20.5, 40.5, 55.5, 80.5, 100.5, 120.5},
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{},
{},
1234ULL}, // diffs all positive, already sorted
{0.0000001,
6,
true,
{40.5, 55.5, 20.5, 120.5, 100.5, 80.5},
{20.5, 30.5, 10.5, 60.5, 50.5, 40.5},
{},
{},
1234ULL}, // mix
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{},
{},
1234ULL}, // identical predictions (0 error)
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{20.5, 30.5, 40.5, 50.5, 60.5, 70.5},
{},
{},
1234ULL}, // predictions[i] - ref_predictions[i] const for each i
{0.0000001,
2048,
false,
{},
{},
{-2048.0, 2048.0},
{-2048.0, 2048.0},
1234ULL}, // random mix, even number of elements
{0.0000001,
2049,
false,
{},
{},
{-2048.0, 2048.0},
{-2048.0, 2048.0},
1234ULL}, // random mix, odd number of elements
{0.0000001, 1024, false, {}, {}, {0, 2048}, {8192.0, 16384.0}, 1234ULL}, // random mix, diffs are
// all negative
{0.0000001, 1024, false, {}, {}, {8192.0, 16384.0}, {0.0, 2048}, 1234ULL} // random mix, diffs
// are all positive
};
typedef RegressionMetricsTest<float> RegressionMetricsTestF;
TEST_P(RegressionMetricsTestF, Result)
{
for (int i = 0; i < 3; i++) {
ASSERT_TRUE(match(computed_regression_metrics[i],
ref_regression_metrics[i],
raft::CompareApprox<float>(params.tolerance)));
}
}
typedef RegressionMetricsTest<double> RegressionMetricsTestD;
TEST_P(RegressionMetricsTestD, Result)
{
for (int i = 0; i < 3; i++) {
ASSERT_TRUE(match(computed_regression_metrics[i],
ref_regression_metrics[i],
raft::CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(RegressionMetricsTests,
RegressionMetricsTestF,
::testing::ValuesIn(regression_inputs_float));
INSTANTIATE_TEST_CASE_P(RegressionMetricsTests,
RegressionMetricsTestD,
::testing::ValuesIn(regression_inputs_double));
} // end namespace Score
} // end namespace MLCommon
| 931bca53423b677131aedc8fbc0fa89353cec9cf.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <iostream>
#include <metrics/scores.cuh>
#include <raft/mr/device/allocator.hpp>
#include <raft/random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Score {
class ScoreTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
typedef ScoreTest ScoreTestHighScore;
TEST(ScoreTestHighScore, Result)
{
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.12, 0.22, 0.32, 0.42, 0.52};
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
float* d_y;
raft::allocate(d_y, 5);
float* d_y_hat;
raft::allocate(d_y_hat, 5);
raft::update_device(d_y_hat, y_hat, 5, stream);
raft::update_device(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
ASSERT_TRUE(result == 0.98f);
CUDA_CHECK(cudaStreamDestroy(stream));
}
typedef ScoreTest ScoreTestLowScore;
TEST(ScoreTestLowScore, Result)
{
float y[5] = {0.1, 0.2, 0.3, 0.4, 0.5};
float y_hat[5] = {0.012, 0.022, 0.032, 0.042, 0.052};
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
float* d_y;
raft::allocate(d_y, 5);
float* d_y_hat;
raft::allocate(d_y_hat, 5);
raft::update_device(d_y_hat, y_hat, 5, stream);
raft::update_device(d_y, y, 5, stream);
float result = MLCommon::Score::r2_score(d_y, d_y_hat, 5, stream);
std::cout << "Result: " << result - -3.4012f << std::endl;
ASSERT_TRUE(result - -3.4012f < 0.00001);
CUDA_CHECK(cudaStreamDestroy(stream));
}
// Tests for accuracy_score
struct AccuracyInputs {
/**
* Number of predictions.
*/
int n;
/**
* Number of predictions w/ different values than their corresponding element in reference
* predictions. Valid range [0, n]. changed_n in [0, n] will yield accuracy of (n - changed_n) /
* n.
*/
int changed_n;
/**
* Seed for randomly generated predictions.
*/
unsigned long long int seed;
};
std::ostream& operator<<(::std::ostream& os, const AccuracyInputs& acc_inputs)
{
os << "AccuracyInputs are {" << acc_inputs.n << ", " << acc_inputs.changed_n << ", "
<< acc_inputs.seed << "}" << std::endl;
return os;
}
template <typename T>
__global__ void change_vals(T* predictions, T* ref_predictions, const int changed_n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < changed_n) {
predictions[tid] = ref_predictions[tid] + 1; // change first changed_n predictions
}
}
template <typename T>
class AccuracyTest : public ::testing::TestWithParam<AccuracyInputs> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<AccuracyInputs>::GetParam();
ASSERT((params.changed_n <= params.n) && (params.changed_n >= 0), "Invalid params.");
raft::random::Rng r(params.seed);
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<raft::mr::device::allocator> d_allocator(
new raft::mr::device::default_allocator);
raft::allocate(predictions, params.n);
raft::allocate(ref_predictions, params.n);
r.normal(ref_predictions, params.n, (T)0.0, (T)1.0, stream);
raft::copy_async(predictions, ref_predictions, params.n, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
// Modify params.changed_n unique predictions to a different value. New value is irrelevant.
if (params.changed_n > 0) {
int threads = 64;
int blocks = raft::ceildiv(params.changed_n, threads);
//@todo Could also generate params.changed_n unique random positions in [0, n) range, instead
// of changing the first ones.
change_vals<T>
<<<blocks, threads, 0, stream>>>(predictions, ref_predictions, params.changed_n);
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaStreamSynchronize(stream));
}
computed_accuracy = MLCommon::Score::accuracy_score<T>(
predictions, ref_predictions, params.n, d_allocator, stream);
ref_accuracy = (params.n - params.changed_n) * 1.0f / params.n;
// std::cout << "computed_accuracy is " << computed_accuracy << " ref_accuracy is " <<
// ref_accuracy << std::endl;
}
void TearDown() override
{
CUDA_CHECK(cudaFree(predictions));
CUDA_CHECK(cudaFree(ref_predictions));
CUDA_CHECK(cudaStreamDestroy(stream));
computed_accuracy = -1.0f;
ref_accuracy = -1.0f;
}
AccuracyInputs params;
T *predictions, *ref_predictions;
float computed_accuracy, ref_accuracy;
cudaStream_t stream;
};
const std::vector<AccuracyInputs> inputs = {
{1, 1, 1234ULL}, // single element, wrong prediction
{1, 0, 1234ULL}, // single element, perfect prediction
{2, 1, 1234ULL}, // multiple elements, 0.5 accuracy
{1000, 0, 1234ULL}, // multiple elements, perfect predictions
{1000, 1000, 1234ULL}, // multiple elements, no correct predictions
{1000, 80, 1234ULL}, // multiple elements, prediction mix
{1000, 45, 1234ULL} // multiple elements, prediction mix
};
typedef AccuracyTest<float> AccuracyTestF;
TEST_P(AccuracyTestF, Result) { ASSERT_TRUE(computed_accuracy == ref_accuracy); }
typedef AccuracyTest<double> AccuracyTestD;
TEST_P(AccuracyTestD, Result) { ASSERT_TRUE(computed_accuracy == ref_accuracy); }
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestF, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AccuracyTests, AccuracyTestD, ::testing::ValuesIn(inputs));
// Tests for regression_metrics
template <typename T>
struct RegressionInputs {
T tolerance;
int n; // number of predictions
bool hardcoded_preds; // (hardcoded_preds) ? use predictions, ref_predictions : use randomly
// generated arrays.
std::vector<T> predictions;
std::vector<T> ref_predictions;
T predictions_range[2]; // predictions in predictions_range if not hardcoded_preds
T ref_predictions_range[2]; // predictions in ref_predictions_range if not hardcoded_preds
unsigned long long int seed;
};
template <typename T>
std::ostream& operator<<(std::ostream& os, const RegressionInputs<T>& reg_inputs)
{
os << "RegressionInputs are {" << reg_inputs.tolerance << ", " << reg_inputs.n << ", "
<< reg_inputs.hardcoded_preds << ", ";
if (reg_inputs.hardcoded_preds) {
os << "{";
for (int i = 0; i < reg_inputs.n; i++)
os << reg_inputs.predictions[i] << ", ";
os << "}, {";
for (int i = 0; i < reg_inputs.n; i++)
os << reg_inputs.ref_predictions[i] << ", ";
os << "}";
os << "{" << reg_inputs.predictions_range[0] << ", " << reg_inputs.predictions_range[1]
<< "}, ";
os << "{" << reg_inputs.ref_predictions_range[0] << ", " << reg_inputs.ref_predictions_range[1]
<< "}";
} else {
os << "{}, {}, {}, {}";
}
os << ", " << reg_inputs.seed;
return os;
}
template <typename T>
void host_regression_computations(std::vector<T>& predictions,
std::vector<T>& ref_predictions,
const int n,
std::vector<double>& regression_metrics)
{
double abs_difference_sum = 0;
double mse_sum = 0;
std::vector<double> abs_diffs(n);
for (int i = 0; i < n; i++) {
double abs_diff = raft::abs(predictions[i] - ref_predictions[i]);
abs_difference_sum += abs_diff;
mse_sum += pow(predictions[i] - ref_predictions[i], 2);
abs_diffs[i] = abs_diff;
}
regression_metrics[0] = abs_difference_sum / n;
regression_metrics[1] = mse_sum / n;
std::sort(abs_diffs.begin(), abs_diffs.end());
int middle = n / 2;
if (n % 2 == 1) {
regression_metrics[2] = abs_diffs[middle];
} else {
regression_metrics[2] = (abs_diffs[middle] + abs_diffs[middle - 1]) / 2;
}
}
template <typename T>
class RegressionMetricsTest : public ::testing::TestWithParam<RegressionInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<RegressionInputs<T>>::GetParam();
computed_regression_metrics.assign(3, -1.0);
ref_regression_metrics.assign(3, -1.0);
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<raft::mr::device::allocator> d_allocator(
new raft::mr::device::default_allocator);
raft::allocate(d_predictions, params.n);
raft::allocate(d_ref_predictions, params.n);
if (params.hardcoded_preds) {
raft::update_device(d_predictions, params.predictions.data(), params.n, stream);
raft::update_device(d_ref_predictions, params.ref_predictions.data(), params.n, stream);
} else {
params.predictions.resize(params.n);
params.ref_predictions.resize(params.n);
raft::random::Rng r(params.seed);
// randomly generate arrays
r.uniform(
d_predictions, params.n, params.predictions_range[0], params.predictions_range[1], stream);
r.uniform(d_ref_predictions,
params.n,
params.ref_predictions_range[0],
params.ref_predictions_range[1],
stream);
// copy to host to compute reference regression metrics
raft::update_host(params.predictions.data(), d_predictions, params.n, stream);
raft::update_host(params.ref_predictions.data(), d_ref_predictions, params.n, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
MLCommon::Score::regression_metrics(d_predictions,
d_ref_predictions,
params.n,
d_allocator,
stream,
computed_regression_metrics[0],
computed_regression_metrics[1],
computed_regression_metrics[2]);
host_regression_computations(
params.predictions, params.ref_predictions, params.n, ref_regression_metrics);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(d_predictions));
CUDA_CHECK(cudaFree(d_ref_predictions));
}
RegressionInputs<T> params;
T *d_predictions, *d_ref_predictions;
std::vector<double> computed_regression_metrics;
std::vector<double> ref_regression_metrics;
cudaStream_t stream;
};
const std::vector<RegressionInputs<float>> regression_inputs_float = {
{0.00001f, 1, true, {10.2f}, {20.2f}, {}, {}, 1234ULL}, // single element
{0.00001f, 2, true, {10.2f, 40.2f}, {20.2f, 80.2f}, {}, {}, 1234ULL}, // two elements, mean same
// as median
// next three inputs should result in identical regression metrics values
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{20.5f, 40.5f, 55.5f, 80.5f, 100.5f, 120.5f},
{},
{},
1234ULL}, // diffs all negative, reverse sorted
{0.00001f,
6,
true,
{20.5f, 40.5f, 55.5f, 80.5f, 100.5f, 120.5f},
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{},
{},
1234ULL}, // diffs all positive, already sorted
{0.00001f,
6,
true,
{40.5f, 55.5f, 20.5f, 120.5f, 100.5f, 80.5f},
{20.5f, 30.5f, 10.5f, 60.5f, 50.5f, 40.5f},
{},
{},
1234ULL}, // mix
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{},
{},
1234ULL}, // identical predictions (0 error)
{0.00001f,
6,
true,
{10.5f, 20.5f, 30.5f, 40.5f, 50.5f, 60.5f},
{20.5f, 30.5f, 40.5f, 50.5f, 60.5f, 70.5f},
{},
{},
1234ULL}, // predictions[i] - ref_predictions[i] const for each i
{0.00001f,
2048,
false,
{},
{},
{-2048.0f, 2048.0f},
{-2048.0f, 2048.0f},
1234ULL}, // random mix, even number of elements
{0.00001f,
2049,
false,
{},
{},
{-2048.0f, 2048.0f},
{-2048.0f, 2048.0f},
1234ULL}, // random mix, odd number of elements
{0.00001f,
1024,
false,
{},
{},
{0.0f, 2048.0f},
{8192.0f, 16384.0f},
1234ULL}, // random mix, diffs are all negative
{0.00001f,
1024,
false,
{},
{},
{8192.0f, 16384.0f},
{0.0f, 2048.0f},
1234ULL} // random mix, diffs are all positive
};
const std::vector<RegressionInputs<double>> regression_inputs_double = {
{0.0000001, 1, true, {10.2}, {20.2}, {}, {}, 1234ULL}, // single element
{0.0000001, 2, true, {10.2, 40.2}, {20.2, 80.2}, {}, {}, 1234ULL}, // two elements
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{20.5, 40.5, 55.5, 80.5, 100.5, 120.5},
{},
{},
1234ULL}, // diffs all negative, reverse sorted
{0.0000001,
6,
true,
{20.5, 40.5, 55.5, 80.5, 100.5, 120.5},
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{},
{},
1234ULL}, // diffs all positive, already sorted
{0.0000001,
6,
true,
{40.5, 55.5, 20.5, 120.5, 100.5, 80.5},
{20.5, 30.5, 10.5, 60.5, 50.5, 40.5},
{},
{},
1234ULL}, // mix
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{},
{},
1234ULL}, // identical predictions (0 error)
{0.0000001,
6,
true,
{10.5, 20.5, 30.5, 40.5, 50.5, 60.5},
{20.5, 30.5, 40.5, 50.5, 60.5, 70.5},
{},
{},
1234ULL}, // predictions[i] - ref_predictions[i] const for each i
{0.0000001,
2048,
false,
{},
{},
{-2048.0, 2048.0},
{-2048.0, 2048.0},
1234ULL}, // random mix, even number of elements
{0.0000001,
2049,
false,
{},
{},
{-2048.0, 2048.0},
{-2048.0, 2048.0},
1234ULL}, // random mix, odd number of elements
{0.0000001, 1024, false, {}, {}, {0, 2048}, {8192.0, 16384.0}, 1234ULL}, // random mix, diffs are
// all negative
{0.0000001, 1024, false, {}, {}, {8192.0, 16384.0}, {0.0, 2048}, 1234ULL} // random mix, diffs
// are all positive
};
typedef RegressionMetricsTest<float> RegressionMetricsTestF;
TEST_P(RegressionMetricsTestF, Result)
{
for (int i = 0; i < 3; i++) {
ASSERT_TRUE(match(computed_regression_metrics[i],
ref_regression_metrics[i],
raft::CompareApprox<float>(params.tolerance)));
}
}
typedef RegressionMetricsTest<double> RegressionMetricsTestD;
TEST_P(RegressionMetricsTestD, Result)
{
for (int i = 0; i < 3; i++) {
ASSERT_TRUE(match(computed_regression_metrics[i],
ref_regression_metrics[i],
raft::CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(RegressionMetricsTests,
RegressionMetricsTestF,
::testing::ValuesIn(regression_inputs_float));
INSTANTIATE_TEST_CASE_P(RegressionMetricsTests,
RegressionMetricsTestD,
::testing::ValuesIn(regression_inputs_double));
} // end namespace Score
} // end namespace MLCommon
|
9f096d3aa115dce52191058b52240bf0644cd812.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include "gaussian.h"
#include <iostream>
#include <cmath>
// Box blur (a plain average, despite the "gaussian" include in this file):
// each thread handles one pixel of an interleaved-RGB image and averages the
// channel values over a (2*fsize+1) x (2*fsize+1) window clipped to the image
// borders. Expects a 1D launch with at least width*height threads in total.
__global__
void blur(unsigned char* input_image, unsigned char* output_image, int width, int height) {
const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;// pixel index (row-major)
int x = offset % width;
int y = offset/width;
int fsize = 32; // Filter size (half-width of the averaging window)
if(offset < width*height) {
float output_red = 0;
float output_green = 0;
float output_blue = 0;
int hits = 0; // count of in-bounds neighbours actually sampled
for(int ox = -fsize; ox < fsize+1; ++ox) {
for(int oy = -fsize; oy < fsize+1; ++oy) {
if((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) {
// offset+ox+oy*width == (y+oy)*width + (x+ox): the neighbour pixel; *3 for RGB
const int currentoffset = (offset+ox+oy*width)*3;
output_red += input_image[currentoffset];
output_green += input_image[currentoffset+1];
output_blue += input_image[currentoffset+2];
hits++;
}
}
}
output_image[offset*3] = output_red/hits;
output_image[offset*3+1] = output_green/hits;
output_image[offset*3+2] = output_blue/hits;
}
}
// Blurs one image from the parameter pack: copies the input to the device,
// launches the `blur` kernel (one thread per pixel), and copies the result
// back. pthread-style entry point: arg points at a ParametersToFilter;
// always returns NULL.
void *filter(void *arg){
ParametersToFilter params = *(ParametersToFilter *)(arg);
unsigned char* input_image = params.images[params.img_id].input_image;
unsigned char* output_image = params.images[params.img_id].output_image;
int width = params.images[params.img_id].width;
int height = params.images[params.img_id].height;
const size_t numBytes = (size_t)width * height * 3 * sizeof(unsigned char);
unsigned char* dev_input;
unsigned char* dev_output;
getError(hipMalloc( (void**) &dev_input, numBytes));
getError(hipMemcpy( dev_input, input_image, numBytes, hipMemcpyHostToDevice ));
getError(hipMalloc( (void**) &dev_output, numBytes));
dim3 blockDims(512,1,1);
// One thread per pixel via integer ceiling division. The old expression,
// ceil((double)(width*height*3/blockDims.x)), truncated before the cast so
// ceil was a no-op, and the spurious *3 over-launched ~3x the needed
// threads (hidden by the kernel's bounds guard).
dim3 gridDims((width * height + blockDims.x - 1) / blockDims.x, 1, 1);
hipLaunchKernelGGL(( blur), dim3(gridDims), dim3(blockDims), 0, 0, dev_input, dev_output, width, height);
getError(hipGetLastError()); // surface launch-configuration errors
getError(hipMemcpy(output_image, dev_output, numBytes, hipMemcpyDeviceToHost ));
getError(hipFree(dev_input));
getError(hipFree(dev_output));
return NULL;
}
| 9f096d3aa115dce52191058b52240bf0644cd812.cu | #include "kernels.h"
#include "gaussian.h"
#include <iostream>
#include <cmath>
__global__
void blur(unsigned char* input_image, unsigned char* output_image, int width, int height) {
const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x;// numer pixela
int x = offset % width;
int y = offset/width;
int fsize = 32; // Filter size
if(offset < width*height) {
float output_red = 0;
float output_green = 0;
float output_blue = 0;
int hits = 0;
for(int ox = -fsize; ox < fsize+1; ++ox) {
for(int oy = -fsize; oy < fsize+1; ++oy) {
if((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) {
const int currentoffset = (offset+ox+oy*width)*3;
output_red += input_image[currentoffset];
output_green += input_image[currentoffset+1];
output_blue += input_image[currentoffset+2];
hits++;
}
}
}
output_image[offset*3] = output_red/hits;
output_image[offset*3+1] = output_green/hits;
output_image[offset*3+2] = output_blue/hits;
}
}
void *filter(void *arg){
ParametersToFilter params = *(ParametersToFilter *)(arg);
unsigned char* input_image = params.images[params.img_id].input_image;
unsigned char* output_image = params.images[params.img_id].output_image;
int width = params.images[params.img_id].width;
int height = params.images[params.img_id].height;
unsigned char* dev_input;
unsigned char* dev_output;
getError(cudaMalloc( (void**) &dev_input, width*height*3*sizeof(unsigned char)));
getError(cudaMemcpy( dev_input, input_image, width*height*3*sizeof(unsigned char), cudaMemcpyHostToDevice ));
getError(cudaMalloc( (void**) &dev_output, width*height*3*sizeof(unsigned char)));
dim3 blockDims(512,1,1);
dim3 gridDims((unsigned int) ceil((double)(width*height*3/blockDims.x)), 1, 1 );
blur<<<gridDims, blockDims>>>(dev_input, dev_output, width, height);
getError(cudaMemcpy(output_image, dev_output, width*height*3*sizeof(unsigned char), cudaMemcpyDeviceToHost ));
getError(cudaFree(dev_input));
getError(cudaFree(dev_output));
return NULL;
}
|
7cf4a4d7e12610841cceda06c644613ab1b5a320.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void expm132(float* A, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = expm1f(A[idx]);
}
#ifdef __cplusplus
}
#endif | 7cf4a4d7e12610841cceda06c644613ab1b5a320.cu | #include <math.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void expm132(float* A, int size)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (idx >= size) {
return;
}
A[idx] = expm1f(A[idx]);
}
#ifdef __cplusplus
}
#endif |
92408e90e72bc1271bd084d438c8190660ed5ef3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_2d_layer_updater_cuda_fermi.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../convolution_layer.h"
texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref;
texture<float, hipTextureType1D, hipReadModeElementType> output_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
struct __align__(4) xy_config
{
xy_config(int y, int x)
{
this->xy_pair = (((unsigned int)y) << 16) | (unsigned int)x;
}
unsigned int xy_pair;
};
struct __align__(4) feature_map_config
{
feature_map_config(int input_feature_map_id, int output_feature_map_id)
{
this->feature_map_pair = (((unsigned int)input_feature_map_id) << 16) | (unsigned int)output_feature_map_id;
}
unsigned int feature_map_pair;
};
struct __align__(4) output_y_weight_y_config
{
output_y_weight_y_config(int output_y, int weight_y)
{
this->output_y_window_y_pair = (((unsigned int)output_y) << 16) | (unsigned int)weight_y;
}
unsigned int output_y_window_y_pair;
};
struct __align__(4) output_y_weight_y_weight_x_config
{
output_y_weight_y_weight_x_config(int output_y, int weight_y, int weight_x)
{
this->output_y_window_y_window_x_pair = (((unsigned int)output_y) << 16) | (((unsigned int)weight_y) << 8) | ((unsigned int)weight_x);
}
unsigned int output_y_window_y_window_x_pair;
};
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_upd_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int base_input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_output_feature_map = window_width * window_height * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_width * window_height);
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
}
input_elem_id += input_width - window_width;
}
input_elem_id += input_width * (input_height - window_height);
}
float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_exact_upd_kernel_fermi(
float * __restrict output,
const float * __restrict weights,
const float * __restrict biases,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int base_input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_output_feature_map = WINDOW_WIDTH * window_height * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * WINDOW_WIDTH * window_height);
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
current_weights++;
input_elem_id++;
}
input_elem_id += input_width - WINDOW_WIDTH;
}
input_elem_id += input_width * (input_height - window_height);
}
float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
extern __shared__ float arr_sh[];
__global__ void convolution_2d_update_biases_upd_kernel_fermi(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict training_speed,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
volatile float * arr = arr_sh;
arr[thread_id] = sum;
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
if (lane_id < tx)
arr[thread_id] += arr[thread_id + tx];
}
sum = arr[thread_id];
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_training_speed_val = training_speed[offset];
atomicAdd(biases + offset, sum * current_training_speed_val);
}
}
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_upd_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_input_feature_map = window_width * window_height;
int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_width * window_height);
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit2 = b_fit1 && (i > min_x_exclusive) && (i <= max_x_inclusive);;
if (b_fit2)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
current_weights++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit2 = b_fit1 && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit2)
{
float inp = tex1Dfetch(output_tex_ref, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * current_weights[weight_offsets[i]];
}
}
current_weights++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
}
current_weights += window_width * window_height * (input_feature_map_count - 1);
output_elem_id += output_width * (output_height + window_height);
}
float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_exact_upd_kernel_fermi(
float * __restrict input_errors,
const float * __restrict weights,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_input_feature_map = WINDOW_WIDTH * window_height;
int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * WINDOW_WIDTH * window_height);
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit2 = b_fit1 && (((1 << i) & mask) != 0);
if (b_fit2)
output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = current_weights[weight_offsets[i]];
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
current_weights++;
}
output_elem_id -= output_width;
}
current_weights += WINDOW_WIDTH * window_height * (input_feature_map_count - 1);
output_elem_id += output_width * (output_height + window_height);
}
float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<bool single_output_y_group>
__global__ void convolution_2d_update_weights_upd_kernel_fermi(
float * __restrict weights,
const float * __restrict output_errors,
const float * __restrict training_speed,
const output_y_weight_y_weight_x_config * __restrict output_y_weight_y_weight_x_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_y_group_count,
int texture_offset,
int entry_count,
bool different_input,
int output_y_weight_y_weight_x_config_count,
int feature_map_config_count)
{
int output_y_weight_y_weight_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((output_y_weight_y_weight_x_config_id < output_y_weight_y_weight_x_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
{
output_y_weight_y_weight_x_config yw = output_y_weight_y_weight_x_config_list[output_y_weight_y_weight_x_config_id];
int weight_x = yw.output_y_window_y_window_x_pair & 0xFF;
int weight_y = (yw.output_y_window_y_window_x_pair & 0xFFFF) >> 8;
int output_y_start_id = yw.output_y_window_y_window_x_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int output_neuron_count_per_feature_map = output_width * output_height;
const float * current_output_errors = output_errors + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset + weight_x;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
{
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
current_output_errors++;
input_elem_id++;
}
current_output_errors += output_width * (output_y_group_count - 1);
input_elem_id += input_width * (output_y_group_count - 1) + (window_width - WINDOW_WIDTH_LOCAL);
}
int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
template<int WINDOW_WIDTH, bool single_output_y_group>
__global__ void convolution_2d_update_weights_exact_upd_kernel_fermi(
float * __restrict weights,
const float * __restrict output_errors,
const float * __restrict training_speed,
const output_y_weight_y_config * __restrict output_y_weight_y_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_y_group_count,
int texture_offset,
int entry_count,
bool different_input,
int output_y_weight_y_config_count,
int feature_map_config_count)
{
int output_y_weight_y_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((output_y_weight_y_config_id < output_y_weight_y_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
{
output_y_weight_y_config yw = output_y_weight_y_config_list[output_y_weight_y_config_id];
int weight_y = yw.output_y_window_y_pair & 0xFFFF;
int output_y_start_id = yw.output_y_window_y_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int output_neuron_count_per_feature_map = output_width * output_height;
const float * current_output_errors = output_errors + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset;
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
int output_offsets[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
{
float input_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = current_output_errors[output_offsets[i]];
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
current_output_errors++;
input_elem_id++;
}
current_output_errors += output_width * (output_y_group_count - 1);
input_elem_id += input_width * (output_y_group_count - 1);
}
int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_height * WINDOW_WIDTH;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
namespace nnforge
{
namespace cuda
{
// Configures the 1D texture references shared by this updater's kernels:
// input_tex_ref (input neurons, forward pass + weight update) and
// output_tex_ref (output errors, backprop). Border addressing makes
// out-of-range fetches return 0.0F; unnormalized coordinates let the kernels
// index by element offset.
convolution_2d_layer_updater_cuda_fermi::convolution_2d_layer_updater_cuda_fermi()
{
	input_tex_ref.addressMode[0] = hipAddressModeBorder;
	input_tex_ref.normalized = false;
	output_tex_ref.addressMode[0] = hipAddressModeBorder;
	output_tex_ref.normalized = false;
	// NOTE(review): the original constructor re-initialized input_tex_ref a
	// second time with identical values; the redundant assignments were removed.
}
// No explicit cleanup: device buffers are owned by the caller, and this class
// allocates no resources of its own in the constructor.
convolution_2d_layer_updater_cuda_fermi::~convolution_2d_layer_updater_cuda_fermi()
{
}
// Maximum per-thread x-block width produced by get_block_size(); each kernel
// thread computes up to this many consecutive output (or input-error) columns.
#define MAX_BLOCK_SIZE 5
// Largest convolution window width for which fully-specialized ("exact")
// template instantiations exist; wider windows fall back to generic kernels
// that take the window width as a runtime argument.
#define MAX_WINDOW_WIDTH 10
// Launches the forward kernel specialized on compile-time window width,
// x-block size, and whether a single input-feature-map group is used (when
// false, groups accumulate into a pre-zeroed output buffer). Note only
// window_sizes[1] (height) is passed at runtime; the width is the template arg.
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, *data[0], *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Dispatches the runtime window width (1..MAX_WINDOW_WIDTH) to the matching
// compile-time instantiation.
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
	switch (window_width) \
		{ \
		case 1: \
			launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
			break; \
		case 2: \
			launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
			break; \
		case 3: \
			launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
			break; \
		case 4: \
			launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
			break; \
		case 5: \
			launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
			break; \
		case 6: \
			launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
			break; \
		case 7: \
			launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
			break; \
		case 8: \
			launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
			break; \
		case 9: \
			launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
			break; \
		case 10: \
			launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
			break; \
		};
// Dispatches the runtime x-block size (1..MAX_BLOCK_SIZE) to the matching
// compile-time instantiation of the exact forward kernel.
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
			break; \
		case 2: \
			launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
			break; \
		case 3: \
			launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
			break; \
		case 4: \
			launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
			break; \
		case 5: \
			launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
			break; \
		};
// Generic forward kernel launch (runtime window width and height), still
// specialized on x-block size and the single-group flag.
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_tex_upd_kernel_fermi<block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, *data[0], *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Runtime x-block-size dispatch for the generic forward kernel.
#define launch_kernel(block_size, single_input_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_kernel_const(1, single_input_feature_map_group); \
			break; \
		case 2: \
			launch_kernel_const(2, single_input_feature_map_group); \
			break; \
		case 3: \
			launch_kernel_const(3, single_input_feature_map_group); \
			break; \
		case 4: \
			launch_kernel_const(4, single_input_feature_map_group); \
			break; \
		case 5: \
			launch_kernel_const(5, single_input_feature_map_group); \
			break; \
		};
// Backprop (input-error) kernel specialized on compile-time window width,
// x-block size and whether a single output-feature-map group is used.
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_deriviative_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *data[0], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
// Runtime window-width dispatch for the exact backprop kernel.
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
	switch (window_width) \
		{ \
		case 1: \
			launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
			break; \
		case 6: \
			launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
			break; \
		case 7: \
			launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
			break; \
		case 8: \
			launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
			break; \
		case 9: \
			launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
			break; \
		case 10: \
			launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
			break; \
		};
// Runtime x-block-size dispatch for the exact backprop kernel.
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
			break; \
		};
// Generic backprop kernel launch (runtime window width and height).
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_deriviative_tex_upd_kernel_fermi<block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *data[0], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
// Runtime x-block-size dispatch for the generic backprop kernel.
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_backprop_kernel_const(1, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_kernel_const(2, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_kernel_const(3, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_kernel_const(4, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_kernel_const(5, single_output_feature_map_group); \
			break; \
		};
// Weight-update kernel specialized on compile-time window width and on
// whether a single output-y group updates each weight (when false the kernel
// must combine partial sums from several groups).
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_y_group_const) \
	hipLaunchKernelGGL(( convolution_2d_update_weights_exact_upd_kernel_fermi<window_width_const, single_output_y_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], *output_errors_buffer, *training_speed[0], output_y_weight_y_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_config_count, feature_map_config_count);
// Runtime window-width dispatch for the exact weight-update kernel.
#define launch_update_weights_exact_kernel(window_width, single_output_y_group_const) \
	switch (window_width) \
		{ \
		case 1: \
			launch_update_weights_exact_kernel_const(1, single_output_y_group_const); \
			break; \
		case 2: \
			launch_update_weights_exact_kernel_const(2, single_output_y_group_const); \
			break; \
		case 3: \
			launch_update_weights_exact_kernel_const(3, single_output_y_group_const); \
			break; \
		case 4: \
			launch_update_weights_exact_kernel_const(4, single_output_y_group_const); \
			break; \
		case 5: \
			launch_update_weights_exact_kernel_const(5, single_output_y_group_const); \
			break; \
		case 6: \
			launch_update_weights_exact_kernel_const(6, single_output_y_group_const); \
			break; \
		case 7: \
			launch_update_weights_exact_kernel_const(7, single_output_y_group_const); \
			break; \
		case 8: \
			launch_update_weights_exact_kernel_const(8, single_output_y_group_const); \
			break; \
		case 9: \
			launch_update_weights_exact_kernel_const(9, single_output_y_group_const); \
			break; \
		case 10: \
			launch_update_weights_exact_kernel_const(10, single_output_y_group_const); \
			break; \
		};
// Generic weight-update kernel launch (runtime window width and height),
// used when the window is split into x blocks (updater_window_x_block_count > 1).
#define launch_update_weights_kernel_const(single_output_y_group_const) \
	hipLaunchKernelGGL(( convolution_2d_update_weights_upd_kernel_fermi<single_output_y_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], *output_errors_buffer, *training_speed[0], output_y_weight_y_weight_x_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_weight_x_config_count, feature_map_config_count);
// Forward pass: binds the input neurons to a 1D texture and launches one of
// the templated forward kernels on the given stream.
// NOTE(review): local variable names (kernel_dims, xy_config_list,
// feature_map_config_list, texture_offset, ...) are referenced by the
// launch_* macros above and must not be renamed.
void convolution_2d_layer_updater_cuda_fermi::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
size_t texture_offset;
// Bind the (entry-offset) input neurons; when the same input is shared by all
// entries (different_input == false) only one entry's worth is bound.
cuda_safe_call(hipBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
// Convert the returned byte offset into a float-element offset for the kernels.
texture_offset /= sizeof(float);
int xy_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1];
const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[0]);
int feature_map_config_count = forward_input_feature_map_group_count * forward_output_feature_map_block_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[1]);
// With more than one input-feature-map group each group contributes a partial
// sum into the output buffer, so it must be zeroed first.
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
xy_config_count,
feature_map_config_count,
entry_count);
// Prefer the window-width-specialized kernel when the width allows it;
// both paths further specialize on the single-input-group flag.
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
// Backward pass: binds the output errors to a 1D texture and launches one of
// the derivative kernels to produce the input errors.
// NOTE(review): local variable names are referenced by the launch_backprop_*
// macros above and must not be renamed.
void convolution_2d_layer_updater_cuda_fermi::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Guard against configurations this implementation cannot serve.
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not configured to do backprop but requested to");
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
int xy_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1];
const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[4]);
int feature_map_config_count = backward_output_feature_map_group_count * backward_input_feature_map_block_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[5]);
// With more than one output-feature-map group each group contributes a
// partial sum into the input-error buffer, so it must be zeroed first.
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
xy_config_count,
feature_map_config_count,
entry_count);
// Same dispatch scheme as the forward pass: exact kernels for small windows.
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
// Parameter update: first updates the biases with a per-feature-map reduction
// kernel, then updates the weights with one of the templated weight-update
// kernels (input neurons read through the texture).
// NOTE(review): local variable names are referenced by the
// launch_update_weights_* macros above and must not be renamed.
void convolution_2d_layer_updater_cuda_fermi::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
// One thread block per (feature map, entry); shared memory holds one float
// per thread for the in-block reduction.
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int smem_size = threadblock_size * sizeof(float);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
hipLaunchKernelGGL(( convolution_2d_update_biases_upd_kernel_fermi), dim3(grid_size), dim3(block_size), smem_size, stream_id,
*data[1],
*output_errors_buffer,
*training_speed[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
size_t texture_offset;
cuda_safe_call(hipBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
// Convert the returned byte offset into a float-element offset for the kernels.
texture_offset /= sizeof(float);
int feature_map_config_count = updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[3]);
// Update weights
{
// Narrow windows fit one x block and use the exact kernel; wider windows use
// the generic kernel with per-(output_y, weight_y, weight_x) task descriptors.
if (updater_window_x_block_count == 1)
{
int output_y_weight_y_config_count = updater_output_y_group_count * window_sizes[1];
const output_y_weight_y_config * output_y_weight_y_config_list = static_cast<const output_y_weight_y_config *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_y_weight_y_config_count,
feature_map_config_count,
entry_count);
// The single_output_y_group template flag selects between a direct weight
// write (one group) and an accumulating path (multiple concurrent groups).
if (updater_output_y_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
int output_y_weight_y_weight_x_config_count = updater_output_y_group_count * window_sizes[1] * updater_window_x_block_count;
const output_y_weight_y_weight_x_config * output_y_weight_y_weight_x_config_list = static_cast<const output_y_weight_y_weight_x_config *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_y_weight_y_weight_x_config_count,
feature_map_config_count,
entry_count);
if (updater_output_y_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
// Chooses the per-thread x-block size (at most MAX_BLOCK_SIZE) so that the
// given width is covered by equally-sized blocks with minimal padding.
int convolution_2d_layer_updater_cuda_fermi::get_block_size(int width)
{
	int blocks_needed = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
	return (width + blocks_needed - 1) / blocks_needed;
}
// Caches layer geometry once the schema and configurations are known: copies
// the convolution window sizes and derives the x-block and feature-map-block
// counts used to build the kernel launch configurations.
void convolution_2d_layer_updater_cuda_fermi::updater_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
// Output feature maps are processed FEATURE_MAP_BLOCK_SIZE at a time.
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
// Windows wider than MAX_WINDOW_WIDTH are split into WINDOW_WIDTH_LOCAL-wide
// x blocks for the generic weight-update kernel.
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
// Backprop reads the output errors through a texture while writing the input
// errors to a separate buffer, so in-place backprop is not supported.
bool convolution_2d_layer_updater_cuda_fermi::is_in_place_backprop() const
{
return false;
}
// Reports the per-entry element counts accessed through 1D textures: the
// input neurons (forward pass / weight update) and the output errors (backprop).
std::vector<unsigned int> convolution_2d_layer_updater_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
	std::vector<unsigned int> res(2);
	res[0] = input_elem_count_per_entry;
	res[1] = output_elem_count_per_entry;
	return res;
}
// Picks the thread-block size for the bias-update reduction kernel: small
// feature maps get a single block rounded up to warp size (32); larger ones
// are spread over ~128-thread chunks before rounding up to warp size.
int convolution_2d_layer_updater_cuda_fermi::get_threadblock_size_biases(int output_neuron_count)
{
	if (output_neuron_count < 128)
		return (output_neuron_count + 31) / 32 * 32;
	int chunk_count = (output_neuron_count + 127) / 128;
	int per_chunk = (output_neuron_count + chunk_count - 1) / chunk_count;
	return (per_chunk + 31) / 32 * 32;
}
// Sizes (in bytes) of the fixed additional buffers, in the index order used
// by enqueue_test / enqueue_update_weights / enqueue_backprop and filled by
// fill_additional_buffers:
//   [0] forward xy task list, [1] forward feature-map task list,
//   [2] weight-update y/weight task list, [3] weight-update feature-map list,
//   [4] backprop xy task list, [5] backprop feature-map list (if backprop).
std::vector<size_t> convolution_2d_layer_updater_cuda_fermi::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(xy_config) * forward_x_block_count * output_configuration_specific.dimension_sizes[1]);
res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
// Buffer [2] holds either (output_y, weight_y, weight_x) or (output_y, weight_y)
// descriptors depending on whether the window is split into x blocks.
if (updater_window_x_block_count > 1)
res.push_back(sizeof(output_y_weight_y_weight_x_config) * window_sizes[1] * output_configuration_specific.dimension_sizes[1] * updater_window_x_block_count);
else
res.push_back(sizeof(output_y_weight_y_config) * window_sizes[1] * output_configuration_specific.dimension_sizes[1]);
res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
{
res.push_back(sizeof(xy_config) * backward_x_block_count * input_configuration_specific.dimension_sizes[1]);
res.push_back(sizeof(feature_map_config) * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
}
return res;
}
// Precomputes the task-descriptor tables consumed by the kernels and uploads
// them into the fixed additional buffers (index map documented in
// get_sizes_of_additional_buffers_fixed).
void convolution_2d_layer_updater_cuda_fermi::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
	// [0] forward pass: (y, starting x) for every x block of every output row.
	{
		std::vector<xy_config> task_list;
		for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
			for(int x = 0; x < forward_x_block_count; ++x)
				task_list.push_back(xy_config(y, x * forward_x_block_size));
		cuda_safe_call(hipMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(xy_config) * task_list.size(), hipMemcpyHostToDevice));
	}
	// [1] forward pass: (first input feature map of group, first output feature map of block).
	{
		std::vector<feature_map_config> task_list;
		for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
			for(int output_feature_map_id = 0; output_feature_map_id < forward_output_feature_map_block_count; ++output_feature_map_id)
				task_list.push_back(feature_map_config(input_feature_map_group_id * forward_input_feature_map_group_size, output_feature_map_id * FEATURE_MAP_BLOCK_SIZE));
		cuda_safe_call(hipMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), hipMemcpyHostToDevice));
	}
	// [2] weight update: descriptor per (output-y group, weight row[, weight-x block]).
	if (updater_window_x_block_count == 1)
	{
		std::vector<output_y_weight_y_config> task_list;
		for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
			for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
				task_list.push_back(output_y_weight_y_config(output_y, weight_y));
		// Fixed: copy size previously used sizeof(xy_config); use the actual
		// element type (both are 4 bytes today, so behavior is unchanged).
		cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(output_y_weight_y_config) * task_list.size(), hipMemcpyHostToDevice));
	}
	else
	{
		std::vector<output_y_weight_y_weight_x_config> task_list;
		for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
			for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
				for(int weight_x = 0; weight_x < updater_window_x_block_count; ++weight_x)
					// Fixed: the x stride between weight blocks matches how
					// updater_window_x_block_count is computed (WINDOW_WIDTH_LOCAL),
					// not FEATURE_MAP_BLOCK_SIZE; both macros are currently 4, so
					// behavior is unchanged.
					task_list.push_back(output_y_weight_y_weight_x_config(output_y, weight_y, weight_x * WINDOW_WIDTH_LOCAL));
		// Fixed: copy size previously used sizeof(xy_config); use the actual element type.
		cuda_safe_call(hipMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(output_y_weight_y_weight_x_config) * task_list.size(), hipMemcpyHostToDevice));
	}
	// [3] weight update: tiled (input feature map, output feature map block) pairs.
	{
		std::vector<std::pair<int, int> > pair_list;
		cuda_util::fill_tiling_pattern(input_configuration_specific.feature_map_count, updater_output_feature_map_block_count, pair_list);
		std::vector<feature_map_config> task_list;
		for(std::vector<std::pair<int, int> >::const_iterator it = pair_list.begin(); it != pair_list.end(); ++it)
			task_list.push_back(feature_map_config(it->first, it->second * FEATURE_MAP_BLOCK_SIZE));
		cuda_safe_call(hipMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), hipMemcpyHostToDevice));
	}
	if (backprop_required)
	{
		// [4] backprop: (y, last x of block) for every x block of every input row.
		{
			std::vector<xy_config> task_list;
			for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
				for(int x = 0; x < backward_x_block_count; ++x)
					task_list.push_back(xy_config(y, x * backward_x_block_size + (backward_x_block_size - 1)));
			cuda_safe_call(hipMemcpy(*additional_buffers[4], &(*task_list.begin()), sizeof(xy_config) * task_list.size(), hipMemcpyHostToDevice));
		}
		// [5] backprop: (first input feature map of block, first output feature map of group).
		{
			std::vector<feature_map_config> task_list;
			for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
				for(int input_feature_map_id = 0; input_feature_map_id < backward_input_feature_map_block_count; ++input_feature_map_id)
					task_list.push_back(feature_map_config(input_feature_map_id * FEATURE_MAP_BLOCK_SIZE, output_feature_map_group_id * backward_output_feature_map_group_size));
			cuda_safe_call(hipMemcpy(*additional_buffers[5], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), hipMemcpyHostToDevice));
		}
	}
}
// Chooses how to split work into groups for the given maximum batch size so
// the device gets enough thread blocks: the forward pass may split the input
// feature maps, the weight update may split the output rows, and backprop may
// split the output feature maps. Each group size is the ceiling division of
// the corresponding dimension by the chosen group count.
void convolution_2d_layer_updater_cuda_fermi::set_max_entry_count(unsigned int max_entry_count)
{
forward_input_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
forward_x_block_count * output_configuration_specific.dimension_sizes[1] * forward_output_feature_map_block_count * max_entry_count,
input_configuration_specific.feature_map_count);
forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
updater_output_y_group_count = cuda_util::get_group_count(
*cuda_config,
updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * window_sizes[1] * max_entry_count * updater_window_x_block_count,
output_configuration_specific.dimension_sizes[1]);
updater_output_y_group_size = (output_configuration_specific.dimension_sizes[1] + updater_output_y_group_count - 1) / updater_output_y_group_count;
if (backprop_required)
{
backward_output_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
backward_x_block_count * input_configuration_specific.dimension_sizes[1] * backward_input_feature_map_block_count * max_entry_count,
output_configuration_specific.feature_map_count);
backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
}
}
}
}
| 92408e90e72bc1271bd084d438c8190660ed5ef3.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_2d_layer_updater_cuda_fermi.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../convolution_layer.h"
texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref;
texture<float, cudaTextureType1D, cudaReadModeElementType> output_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
struct __align__(4) xy_config
{
xy_config(int y, int x)
{
this->xy_pair = (((unsigned int)y) << 16) | (unsigned int)x;
}
unsigned int xy_pair;
};
struct __align__(4) feature_map_config
{
feature_map_config(int input_feature_map_id, int output_feature_map_id)
{
this->feature_map_pair = (((unsigned int)input_feature_map_id) << 16) | (unsigned int)output_feature_map_id;
}
unsigned int feature_map_pair;
};
struct __align__(4) output_y_weight_y_config
{
output_y_weight_y_config(int output_y, int weight_y)
{
this->output_y_window_y_pair = (((unsigned int)output_y) << 16) | (unsigned int)weight_y;
}
unsigned int output_y_window_y_pair;
};
struct __align__(4) output_y_weight_y_weight_x_config
{
output_y_weight_y_weight_x_config(int output_y, int weight_y, int weight_x)
{
this->output_y_window_y_window_x_pair = (((unsigned int)output_y) << 16) | (((unsigned int)weight_y) << 8) | ((unsigned int)weight_x);
}
unsigned int output_y_window_y_window_x_pair;
};
// Forward convolution (generic window size) for Fermi-class devices.
// Each thread computes a horizontal strip of BLOCK_SIZE output pixels for up to
// FEATURE_MAP_BLOCK_SIZE consecutive output feature maps, accumulating over a
// group of input_feature_map_group_size input feature maps read via input_tex_ref.
// single_input_feature_map_group == true  -> this thread owns the complete sum, plain stores;
// single_input_feature_map_group == false -> several groups contribute to the same
// output element, so partial sums are combined with atomicAdd.
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_upd_kernel_fermi(
	float * __restrict output,
	const float * __restrict weights,
	const float * __restrict biases,
	const xy_config * __restrict xy_config_list,
	const feature_map_config * __restrict feature_map_config_list,
	int output_width,
	int output_height,
	int input_width,
	int input_height,
	int window_width,
	int window_height,
	int input_feature_map_count,
	int output_feature_map_count,
	int input_feature_map_group_size,
	int texture_offset,
	int entry_count,
	bool different_input,
	int xy_config_count,
	int feature_map_config_count)
{
	int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
	if (in_bounds)
	{
		// Unpack the 16-bit packed (x,y) output position and the
		// (output feature map, base input feature map) pair.
		xy_config xyc = xy_config_list[xy_config_id];
		int x = xyc.xy_pair & 0xFFFF;
		int y = xyc.xy_pair >> 16;
		feature_map_config fmc = feature_map_config_list[feature_map_config_id];
		int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
		int base_input_feature_map_id = fmc.feature_map_pair >> 16;
		int weight_count_per_output_feature_map = window_width * window_height * input_feature_map_count;
		int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
		const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_width * window_height);
		int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
		// Only the group that starts at input feature map 0 seeds the sums with the bias,
		// so the bias is added exactly once per output element across all groups.
		float initial_values[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			initial_values[i] = 0.0F;
		if (base_input_feature_map_id == 0)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
				if (i < output_feature_map_count - output_feature_map_id)
					initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
		}
		float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			#pragma unroll
			for(int j = 0; j < BLOCK_SIZE; ++j)
				sums[i * BLOCK_SIZE + j] = initial_values[i];
		// Offset 0 is used for feature maps past the end so the reads stay in bounds;
		// the corresponding sums are simply never written out below.
		int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
		for(int i = 0; i < iteration_count; ++i)
		{
			for(int input_y = 0; input_y < window_height; ++input_y)
			{
				#pragma unroll 4
				for(int input_x = 0; input_x < window_width; ++input_x)
				{
					float weight_list[FEATURE_MAP_BLOCK_SIZE];
					#pragma unroll
					for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
						weight_list[i] = current_weights[weight_offsets[i]];
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						// One texture fetch feeds all FEATURE_MAP_BLOCK_SIZE accumulators.
						float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
						#pragma unroll
						for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
							sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
					}
					current_weights++;
					input_elem_id++;
				}
				input_elem_id += input_width - window_width;
			}
			// Advance to the same (x,y) window in the next input feature map.
			input_elem_id += input_width * (input_height - window_height);
		}
		float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
		int output_neuron_count_per_feature_map = output_height * output_width;
		if (single_input_feature_map_group)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						// Guard the strip against the right edge of the output.
						if (j < output_width - x)
							base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
					}
				}
			}
		}
		else
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						if (j < output_width - x)
							atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
					}
				}
			}
		}
	}
}
// Forward convolution specialized for a compile-time window width (WINDOW_WIDTH),
// letting the inner x-loop fully unroll. Otherwise identical in structure to
// convolution_2d_tex_upd_kernel_fermi: each thread accumulates a BLOCK_SIZE-wide
// strip for up to FEATURE_MAP_BLOCK_SIZE output feature maps; plain stores when
// single_input_feature_map_group, atomicAdd otherwise.
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_exact_upd_kernel_fermi(
	float * __restrict output,
	const float * __restrict weights,
	const float * __restrict biases,
	const xy_config * __restrict xy_config_list,
	const feature_map_config * __restrict feature_map_config_list,
	int output_width,
	int output_height,
	int input_width,
	int input_height,
	int window_height,
	int input_feature_map_count,
	int output_feature_map_count,
	int input_feature_map_group_size,
	int texture_offset,
	int entry_count,
	bool different_input,
	int xy_config_count,
	int feature_map_config_count)
{
	int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
	if (in_bounds)
	{
		// Unpack packed 16-bit (x,y) position and feature map pair.
		xy_config xyc = xy_config_list[xy_config_id];
		int x = xyc.xy_pair & 0xFFFF;
		int y = xyc.xy_pair >> 16;
		feature_map_config fmc = feature_map_config_list[feature_map_config_id];
		int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
		int base_input_feature_map_id = fmc.feature_map_pair >> 16;
		int weight_count_per_output_feature_map = WINDOW_WIDTH * window_height * input_feature_map_count;
		int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
		const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * WINDOW_WIDTH * window_height);
		int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
		// Bias is added only by the group containing input feature map 0.
		float initial_values[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			initial_values[i] = 0.0F;
		if (base_input_feature_map_id == 0)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
				if (i < output_feature_map_count - output_feature_map_id)
					initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
		}
		float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			#pragma unroll
			for(int j = 0; j < BLOCK_SIZE; ++j)
				sums[i * BLOCK_SIZE + j] = initial_values[i];
		// Offset 0 keeps reads in bounds for feature maps past the end;
		// those sums are never stored.
		int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			weight_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? weight_count_per_output_feature_map * i : 0;
		for(int i = 0; i < iteration_count; ++i)
		{
			for(int input_y = 0; input_y < window_height; ++input_y)
			{
				#pragma unroll
				for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
				{
					float weight_list[FEATURE_MAP_BLOCK_SIZE];
					#pragma unroll
					for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
						weight_list[i] = current_weights[weight_offsets[i]];
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						float inp = tex1Dfetch(input_tex_ref, input_elem_id + j);
						#pragma unroll
						for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
							sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
					}
					current_weights++;
					input_elem_id++;
				}
				input_elem_id += input_width - WINDOW_WIDTH;
			}
			// Advance to the same window position in the next input feature map.
			input_elem_id += input_width * (input_height - window_height);
		}
		float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
		int output_neuron_count_per_feature_map = output_height * output_width;
		if (single_input_feature_map_group)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						// Guard the strip against the right edge of the output.
						if (j < output_width - x)
							base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
					}
				}
			}
		}
		else
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						if (j < output_width - x)
							atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
					}
				}
			}
		}
	}
}
// Dynamically-sized shared buffer used by the bias-update kernel below
// (one float per thread in the block; size passed at launch).
extern __shared__ float arr_sh[];
// Accumulates the sum of output errors over one output feature map of one entry
// and adds it, scaled by the per-(entry, feature map) training speed, into the
// bias via atomicAdd. Grid: blockIdx.y = output feature map, blockIdx.z = entry;
// blockDim.x threads stride across the feature map's elements.
__global__ void convolution_2d_update_biases_upd_kernel_fermi(
	float * __restrict biases,
	const float * __restrict output_errors,
	const float * __restrict training_speed,
	int output_feature_map_count,
	int output_elem_count_per_feature_map,
	int min_iteration_count)
{
	int thread_id = threadIdx.x;
	int output_feature_map_id = blockIdx.y;
	int entry_id = blockIdx.z;
	int threadblock_size = blockDim.x;
	float sum = 0.0F;
	const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
	int current_output_neuron_id = thread_id;
	// min_iteration_count full block-wide strides, plus at most one tail element per thread.
	for(int i = 0; i < min_iteration_count; ++i)
	{
		sum += current_error[current_output_neuron_id];
		current_output_neuron_id += threadblock_size;
	}
	if (current_output_neuron_id < output_elem_count_per_feature_map)
		sum += current_error[current_output_neuron_id];
	// Legacy warp-synchronous tree reduction through volatile shared memory:
	// each warp reduces its own 32-element slice without __syncthreads().
	// NOTE(review): this relies on pre-Volta lockstep warp execution — fine for
	// the Fermi target of this kernel, unsafe on newer architectures.
	volatile float * arr = arr_sh;
	arr[thread_id] = sum;
	int lane_id = thread_id & 31;
	#pragma unroll
	for(int tx = 16; tx > 0; tx >>= 1)
	{
		if (lane_id < tx)
			arr[thread_id] += arr[thread_id + tx];
	}
	sum = arr[thread_id];
	if (lane_id == 0)
	{
		// One atomic per warp: lane 0 publishes the warp's partial sum.
		int offset = entry_id * output_feature_map_count + output_feature_map_id;
		float current_training_speed_val = training_speed[offset];
		atomicAdd(biases + offset, sum * current_training_speed_val);
	}
}
// Backpropagation of errors through the convolution (generic window size).
// Each thread computes input errors for a BLOCK_SIZE-wide strip over up to
// FEATURE_MAP_BLOCK_SIZE consecutive input feature maps, accumulating over a
// group of output_feature_map_group_size output feature maps read via
// output_tex_ref. The inner x-loop is chunked by WINDOW_WIDTH_LOCAL so a small
// register buffer of output values can be reused across taps; a scalar tail
// loop handles the remaining window columns. When
// single_output_feature_map_group is true the thread owns the full sum and
// stores directly; otherwise partials are combined with atomicAdd.
// Fixes vs. previous revision (no behavior change):
//  - removed a stray duplicate semicolon in the b_fit2 computation;
//  - test the bool template parameter directly instead of comparing it to 1,
//    matching the forward kernels.
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_upd_kernel_fermi(
	float * __restrict input_errors,
	const float * __restrict weights,
	const xy_config * __restrict xy_config_list,
	const feature_map_config * __restrict feature_map_config_list,
	int output_width,
	int output_height,
	int input_width,
	int input_height,
	int window_width,
	int window_height,
	int input_feature_map_count,
	int output_feature_map_count,
	int output_feature_map_group_size,
	int entry_count,
	int xy_config_count,
	int feature_map_config_count)
{
	int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
	if (in_bounds)
	{
		// Unpack packed 16-bit (x,y) input position and feature map pair.
		xy_config xyc = xy_config_list[xy_config_id];
		int x = xyc.xy_pair & 0xFFFF;
		int y = xyc.xy_pair >> 16;
		feature_map_config fmc = feature_map_config_list[feature_map_config_id];
		int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
		int input_feature_map_id = fmc.feature_map_pair >> 16;
		int weight_count_per_input_feature_map = window_width * window_height;
		int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
		const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_width * window_height);
		int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
		float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
			sums[i] = 0.0F;
		// Offset 0 keeps reads in bounds for input feature maps past the end;
		// those sums are never stored.
		int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
		// Valid-range bounds for the "full" correlation: a tap contributes only
		// when the corresponding output coordinate lies inside the output map.
		int min_y_exclusive = y - output_height;
		int max_y_inclusive = y;
		int min_x_exclusive = x - output_width;
		int max_x_inclusive = x;
		for(int i = 0; i < iteration_count; ++i)
		{
			for(int input_y = 0; input_y < window_height; ++input_y)
			{
				bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
				int input_x = 0;
				// Main part: process WINDOW_WIDTH_LOCAL taps at a time with a
				// register buffer of output values shared across the taps.
				#pragma unroll 1
				for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
				{
					float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
					#pragma unroll
					for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
					{
						bool b_fit2 = b_fit1 && (i > min_x_exclusive) && (i <= max_x_inclusive);
						if (b_fit2)
							output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
						else
							output_vals[i] = 0.0F;
					}
					output_elem_id -= WINDOW_WIDTH_LOCAL;
					#pragma unroll
					for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
					{
						float weight_list[FEATURE_MAP_BLOCK_SIZE];
						#pragma unroll
						for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
							weight_list[i] = current_weights[weight_offsets[i]];
						#pragma unroll
						for(int j = 0; j < BLOCK_SIZE; ++j)
						{
							#pragma unroll
							for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
								sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
						}
						current_weights++;
					}
				}
				// Tail: remaining taps, fetched one output value at a time.
				#pragma unroll 1
				for(; input_x < window_width; ++input_x)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						bool b_fit2 = b_fit1 && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
						if (b_fit2)
						{
							float inp = tex1Dfetch(output_tex_ref, output_elem_id - j);
							#pragma unroll
							for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
								sums[i * BLOCK_SIZE + j] += inp * current_weights[weight_offsets[i]];
						}
					}
					current_weights++;
					output_elem_id--;
				}
				// Net effect per row: output_elem_id moves back by one output row.
				output_elem_id += window_width - output_width;
			}
			// Skip weights of the other input feature maps and advance to the
			// same position in the next output feature map.
			current_weights += window_width * window_height * (input_feature_map_count - 1);
			output_elem_id += output_width * (output_height + window_height);
		}
		float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
		int input_neuron_count_per_feature_map = input_height * input_width;
		if (single_output_feature_map_group)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < input_feature_map_count - input_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						// Strip extends leftwards; guard against the left edge.
						if (j > x - input_width)
							*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
					}
				}
			}
		}
		else
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < input_feature_map_count - input_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						if (j > x - input_width)
							atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
					}
				}
			}
		}
	}
}
// Backpropagation of errors specialized for a compile-time window width
// (WINDOW_WIDTH), allowing full unrolling of the tap loop and precomputation of
// the per-column validity mask as a bitmask. Otherwise structurally identical to
// convolution_2d_deriviative_tex_upd_kernel_fermi.
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_exact_upd_kernel_fermi(
	float * __restrict input_errors,
	const float * __restrict weights,
	const xy_config * __restrict xy_config_list,
	const feature_map_config * __restrict feature_map_config_list,
	int output_width,
	int output_height,
	int input_width,
	int input_height,
	int window_height,
	int input_feature_map_count,
	int output_feature_map_count,
	int output_feature_map_group_size,
	int entry_count,
	int xy_config_count,
	int feature_map_config_count)
{
	int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
	if (in_bounds)
	{
		// Unpack packed 16-bit (x,y) input position and feature map pair.
		xy_config xyc = xy_config_list[xy_config_id];
		int x = xyc.xy_pair & 0xFFFF;
		int y = xyc.xy_pair >> 16;
		feature_map_config fmc = feature_map_config_list[feature_map_config_id];
		int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
		int input_feature_map_id = fmc.feature_map_pair >> 16;
		int weight_count_per_input_feature_map = WINDOW_WIDTH * window_height;
		int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
		const float * current_weights = weights + (int)(((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * WINDOW_WIDTH * window_height);
		int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
		float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
			sums[i] = 0.0F;
		// Offset 0 keeps reads in bounds for input feature maps past the end;
		// those sums are never stored.
		int weight_offsets[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			weight_offsets[i] = (i < input_feature_map_count - input_feature_map_id) ? weight_count_per_input_feature_map * i : 0;
		int min_y_exclusive = y - output_height;
		int max_y_inclusive = y;
		int min_x_exclusive = x - output_width;
		int max_x_inclusive = x;
		// Precompute the x-validity of each of the BLOCK_SIZE + WINDOW_WIDTH - 1
		// fetched output columns as one bit each (loop-invariant over rows/maps).
		unsigned int mask = 0;
		for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
			mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
		for(int i = 0; i < iteration_count; ++i)
		{
			for(int input_y = 0; input_y < window_height; ++input_y)
			{
				bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
				// Fetch the whole row of output values needed by this row of taps.
				float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
				#pragma unroll
				for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
				{
					bool b_fit2 = b_fit1 && (((1 << i) & mask) != 0);
					if (b_fit2)
						output_vals[i] = tex1Dfetch(output_tex_ref, output_elem_id - i);
					else
						output_vals[i] = 0.0F;
				}
				#pragma unroll
				for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
				{
					float weight_list[FEATURE_MAP_BLOCK_SIZE];
					#pragma unroll
					for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
						weight_list[i] = current_weights[weight_offsets[i]];
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						#pragma unroll
						for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
							sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
					}
					current_weights++;
				}
				output_elem_id -= output_width;
			}
			// Skip weights of the other input feature maps and advance to the
			// same position in the next output feature map.
			current_weights += WINDOW_WIDTH * window_height * (input_feature_map_count - 1);
			output_elem_id += output_width * (output_height + window_height);
		}
		float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
		int input_neuron_count_per_feature_map = input_height * input_width;
		if (single_output_feature_map_group == 1)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < input_feature_map_count - input_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						// Strip extends leftwards; guard against the left edge.
						if (j > x - input_width)
							*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
					}
				}
			}
		}
		else
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < input_feature_map_count - input_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < BLOCK_SIZE; ++j)
					{
						if (j > x - input_width)
							atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
					}
				}
			}
		}
	}
}
// Weight-gradient update (generic window size). Each thread accumulates, for one
// (weight_y, weight_x-chunk) of one (output fm, input fm) pair, the correlation of
// output errors with the input (read via input_tex_ref) over every output_y_group_count-th
// output row starting at output_y_start_id, then applies it to the weights scaled by
// the per-weight training speed. single_output_y_group == true -> only one row group
// exists, plain read-modify-write; otherwise groups combine via atomicAdd.
template<bool single_output_y_group>
__global__ void convolution_2d_update_weights_upd_kernel_fermi(
	float * __restrict weights,
	const float * __restrict output_errors,
	const float * __restrict training_speed,
	const output_y_weight_y_weight_x_config * __restrict output_y_weight_y_weight_x_config_list,
	const feature_map_config * __restrict feature_map_config_list,
	int output_width,
	int output_height,
	int input_width,
	int input_height,
	int window_width,
	int window_height,
	int input_feature_map_count,
	int output_feature_map_count,
	int output_y_group_count,
	int texture_offset,
	int entry_count,
	bool different_input,
	int output_y_weight_y_weight_x_config_count,
	int feature_map_config_count)
{
	int output_y_weight_y_weight_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	if ((output_y_weight_y_weight_x_config_id < output_y_weight_y_weight_x_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
	{
		// Unpack the 8/8/16-bit packed (weight_x, weight_y, output_y_start) triple.
		output_y_weight_y_weight_x_config yw = output_y_weight_y_weight_x_config_list[output_y_weight_y_weight_x_config_id];
		int weight_x = yw.output_y_window_y_window_x_pair & 0xFF;
		int weight_y = (yw.output_y_window_y_window_x_pair & 0xFFFF) >> 8;
		int output_y_start_id = yw.output_y_window_y_window_x_pair >> 16;
		feature_map_config fmc = feature_map_config_list[feature_map_config_id];
		int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
		int input_feature_map_id = fmc.feature_map_pair >> 16;
		int output_neuron_count_per_feature_map = output_width * output_height;
		const float * current_output_errors = output_errors + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
		int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset + weight_x;
		float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
			sums[i] = 0.0F;
		// Offset 0 keeps reads in bounds for output feature maps past the end;
		// those sums are never applied.
		int output_offsets[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
		for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
		{
			// Sliding-window register buffer of input values. Prime slots 1..end;
			// the shift at the top of each x iteration moves them into place
			// before use (slot 0's initial value is never read).
			float input_buf[WINDOW_WIDTH_LOCAL];
			#pragma unroll
			for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
			{
				input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
				++input_elem_id;
			}
			for(int x = 0; x < output_width; ++x)
			{
				float output_error_list[FEATURE_MAP_BLOCK_SIZE];
				#pragma unroll
				for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
					output_error_list[i] = current_output_errors[output_offsets[i]];
				// Shift the window left by one and fetch the newly exposed input.
				#pragma unroll
				for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
					input_buf[i] = input_buf[i + 1];
				input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
				#pragma unroll
				for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
					#pragma unroll
					for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
						sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
				current_output_errors++;
				input_elem_id++;
			}
			// Jump to this thread's next output row (stride output_y_group_count).
			current_output_errors += output_width * (output_y_group_count - 1);
			input_elem_id += input_width * (output_y_group_count - 1) + (window_width - WINDOW_WIDTH_LOCAL);
		}
		int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * window_width + weight_x;
		int weight_count_per_output_feature_map = input_feature_map_count * window_height * window_width;
		float * cur_weights = weights + offset;
		const float * cur_training_speed = training_speed + offset;
		if (single_output_y_group)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
						if (j < window_width - weight_x)
							cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
				}
			}
		}
		else
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
						if (j < window_width - weight_x)
							atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
				}
			}
		}
	}
}
// Weight-gradient update specialized for a compile-time window width: one thread
// handles the full row of WINDOW_WIDTH weights for one weight_y of one
// (output fm, input fm) pair, so no weight_x chunking or right-edge guard is
// needed. Otherwise structurally identical to
// convolution_2d_update_weights_upd_kernel_fermi.
template<int WINDOW_WIDTH, bool single_output_y_group>
__global__ void convolution_2d_update_weights_exact_upd_kernel_fermi(
	float * __restrict weights,
	const float * __restrict output_errors,
	const float * __restrict training_speed,
	const output_y_weight_y_config * __restrict output_y_weight_y_config_list,
	const feature_map_config * __restrict feature_map_config_list,
	int output_width,
	int output_height,
	int input_width,
	int input_height,
	int window_height,
	int input_feature_map_count,
	int output_feature_map_count,
	int output_y_group_count,
	int texture_offset,
	int entry_count,
	bool different_input,
	int output_y_weight_y_config_count,
	int feature_map_config_count)
{
	int output_y_weight_y_config_id = blockIdx.x * blockDim.x + threadIdx.x;
	int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
	int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
	if ((output_y_weight_y_config_id < output_y_weight_y_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
	{
		// Unpack the 16/16-bit packed (weight_y, output_y_start) pair.
		output_y_weight_y_config yw = output_y_weight_y_config_list[output_y_weight_y_config_id];
		int weight_y = yw.output_y_window_y_pair & 0xFFFF;
		int output_y_start_id = yw.output_y_window_y_pair >> 16;
		feature_map_config fmc = feature_map_config_list[feature_map_config_id];
		int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
		int input_feature_map_id = fmc.feature_map_pair >> 16;
		int output_neuron_count_per_feature_map = output_width * output_height;
		const float * current_output_errors = output_errors + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
		int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset;
		float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
			sums[i] = 0.0F;
		// Offset 0 keeps reads in bounds for output feature maps past the end;
		// those sums are never applied.
		int output_offsets[FEATURE_MAP_BLOCK_SIZE];
		#pragma unroll
		for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			output_offsets[i] = (i < output_feature_map_count - output_feature_map_id) ? output_neuron_count_per_feature_map * i : 0;
		for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
		{
			// Sliding-window register buffer of input values. Prime slots 1..end;
			// the shift at the top of each x iteration moves them into place
			// before use (slot 0's initial value is never read).
			float input_buf[WINDOW_WIDTH];
			#pragma unroll
			for(int i = 1; i < WINDOW_WIDTH; ++i)
			{
				input_buf[i] = tex1Dfetch(input_tex_ref, input_elem_id);
				++input_elem_id;
			}
			for(int x = 0; x < output_width; ++x)
			{
				float output_error_list[FEATURE_MAP_BLOCK_SIZE];
				#pragma unroll
				for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
					output_error_list[i] = current_output_errors[output_offsets[i]];
				// Shift the window left by one and fetch the newly exposed input.
				#pragma unroll
				for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
					input_buf[i] = input_buf[i + 1];
				input_buf[WINDOW_WIDTH - 1] = tex1Dfetch(input_tex_ref, input_elem_id);
				#pragma unroll
				for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
					#pragma unroll
					for(int j = 0; j < WINDOW_WIDTH; ++j)
						sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
				current_output_errors++;
				input_elem_id++;
			}
			// Jump to this thread's next output row (stride output_y_group_count).
			current_output_errors += output_width * (output_y_group_count - 1);
			input_elem_id += input_width * (output_y_group_count - 1);
		}
		int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * WINDOW_WIDTH;
		int weight_count_per_output_feature_map = input_feature_map_count * window_height * WINDOW_WIDTH;
		float * cur_weights = weights + offset;
		const float * cur_training_speed = training_speed + offset;
		if (single_output_y_group)
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < WINDOW_WIDTH; ++j)
						cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
				}
			}
		}
		else
		{
			#pragma unroll
			for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
			{
				if (i < output_feature_map_count - output_feature_map_id)
				{
					#pragma unroll
					for(int j = 0; j < WINDOW_WIDTH; ++j)
						atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
				}
			}
		}
	}
}
namespace nnforge
{
namespace cuda
{
convolution_2d_layer_updater_cuda_fermi::convolution_2d_layer_updater_cuda_fermi()
{
input_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_tex_ref.normalized = false;
output_tex_ref.addressMode[0] = cudaAddressModeBorder;
output_tex_ref.normalized = false;
input_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_tex_ref.normalized = false;
}
		// No resources are owned directly by this class; nothing to release here.
		convolution_2d_layer_updater_cuda_fermi::~convolution_2d_layer_updater_cuda_fermi()
		{
		}
// Upper bounds for the runtime->template dispatch switches below.
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
// Launch the forward kernel specialized for compile-time window width and block size.
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
	convolution_2d_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, *data[0], *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Dispatch runtime window_width (1..MAX_WINDOW_WIDTH) to the matching specialization.
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
	switch (window_width) \
	{ \
	case 1: \
		launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
		break; \
	case 2: \
		launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
		break; \
	case 3: \
		launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
		break; \
	case 4: \
		launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
		break; \
	case 5: \
		launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
		break; \
	case 6: \
		launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
		break; \
	case 7: \
		launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
		break; \
	case 8: \
		launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
		break; \
	case 9: \
		launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
		break; \
	case 10: \
		launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
		break; \
	};
// Dispatch runtime block_size (1..MAX_BLOCK_SIZE).
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
	switch (block_size) \
	{ \
	case 1: \
		launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
		break; \
	case 2: \
		launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
		break; \
	case 3: \
		launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
		break; \
	case 4: \
		launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
		break; \
	case 5: \
		launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
		break; \
	};
// Launch the generic-window forward kernel for a compile-time block size.
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
	convolution_2d_tex_upd_kernel_fermi<block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, *data[0], *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Dispatch runtime block_size (1..MAX_BLOCK_SIZE) for the generic-window forward kernel.
#define launch_kernel(block_size, single_input_feature_map_group) \
	switch (block_size) \
	{ \
	case 1: \
		launch_kernel_const(1, single_input_feature_map_group); \
		break; \
	case 2: \
		launch_kernel_const(2, single_input_feature_map_group); \
		break; \
	case 3: \
		launch_kernel_const(3, single_input_feature_map_group); \
		break; \
	case 4: \
		launch_kernel_const(4, single_input_feature_map_group); \
		break; \
	case 5: \
		launch_kernel_const(5, single_input_feature_map_group); \
		break; \
	};
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
convolution_2d_deriviative_tex_exact_upd_kernel_fermi<window_width_const,block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, *data[0], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
switch (window_width) \
{ \
case 1: \
launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
break; \
case 2: \
launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
break; \
case 3: \
launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
break; \
case 4: \
launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
break; \
case 5: \
launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
break; \
case 6: \
launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
break; \
case 7: \
launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
break; \
case 8: \
launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
break; \
case 9: \
launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
break; \
case 10: \
launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
break; \
};
// Maps a runtime x block size (1..5) onto the compile-time dispatch; together
// with the window-width dispatch this selects one fully-specialised kernel.
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
			break; \
		};
// Launches the generic backprop kernel (runtime window width, compile-time
// x block size and single-group flag); used when the window is wider than
// MAX_WINDOW_WIDTH.
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
	convolution_2d_deriviative_tex_upd_kernel_fermi<block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, *data[0], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
// Runtime block-size (1..5) dispatch for the generic backprop kernel.
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_backprop_kernel_const(1, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_kernel_const(2, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_kernel_const(3, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_kernel_const(4, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_kernel_const(5, single_output_feature_map_group); \
			break; \
		};
// Launches the exact weight-update kernel with compile-time window width and
// single-output-y-group flag.  Expects kernel_dims, stream_id, texture_offset
// and the config lists/counts from the enclosing enqueue_update_weights scope.
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_y_group_const) \
	convolution_2d_update_weights_exact_upd_kernel_fermi<window_width_const, single_output_y_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], *output_errors_buffer, *training_speed[0], output_y_weight_y_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_config_count, feature_map_config_count);
// Runtime window-width (1..10) dispatch for the exact weight-update kernel.
#define launch_update_weights_exact_kernel(window_width, single_output_y_group_const) \
	switch (window_width) \
		{ \
		case 1: \
			launch_update_weights_exact_kernel_const(1, single_output_y_group_const); \
			break; \
		case 2: \
			launch_update_weights_exact_kernel_const(2, single_output_y_group_const); \
			break; \
		case 3: \
			launch_update_weights_exact_kernel_const(3, single_output_y_group_const); \
			break; \
		case 4: \
			launch_update_weights_exact_kernel_const(4, single_output_y_group_const); \
			break; \
		case 5: \
			launch_update_weights_exact_kernel_const(5, single_output_y_group_const); \
			break; \
		case 6: \
			launch_update_weights_exact_kernel_const(6, single_output_y_group_const); \
			break; \
		case 7: \
			launch_update_weights_exact_kernel_const(7, single_output_y_group_const); \
			break; \
		case 8: \
			launch_update_weights_exact_kernel_const(8, single_output_y_group_const); \
			break; \
		case 9: \
			launch_update_weights_exact_kernel_const(9, single_output_y_group_const); \
			break; \
		case 10: \
			launch_update_weights_exact_kernel_const(10, single_output_y_group_const); \
			break; \
		};
// Launches the generic weight-update kernel; used when the window is tiled in
// x (updater_window_x_block_count > 1, see enqueue_update_weights).
#define launch_update_weights_kernel_const(single_output_y_group_const) \
	convolution_2d_update_weights_upd_kernel_fermi<single_output_y_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], *output_errors_buffer, *training_speed[0], output_y_weight_y_weight_x_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_weight_x_config_count, feature_map_config_count);
// Forward pass (Fermi path): convolves entry_count entries on stream_id.
// offset_input_entry_id selects the first entry inside the input buffer; when
// different_input is false, a single shared input entry is bound for all
// entries.  Dispatches to one of the template-specialised kernels via the
// launch_exact_kernel / launch_kernel macros (defined earlier in this file).
void convolution_2d_layer_updater_cuda_fermi::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
size_t texture_offset;
// Bind the input neurons to input_tex_ref; the returned byte offset is
// converted to a float-element offset for use inside the kernels.
cuda_safe_call(cudaBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
texture_offset /= sizeof(float);
// Task lists precomputed in fill_additional_buffers: [0] = (y, x-block-start)
// pairs, [1] = (input-fm-group-start, output-fm-block-start) pairs.
int xy_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1];
const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[0]);
int feature_map_config_count = forward_input_feature_map_group_count * forward_output_feature_map_block_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[1]);
// With more than one input-feature-map group, partial results are combined in
// the output buffer, so it must be zeroed first (presumably the multi-group
// kernel accumulates rather than overwrites).
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
xy_config_count,
feature_map_config_count,
entry_count);
// Small windows get the fully-templated "exact" kernel; the boolean selects
// the single-group specialisation.
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
// Backward pass: propagates output errors to input errors on stream_id.
// Output errors are read through output_tex_ref; dispatch mirrors
// enqueue_test but over the backward task lists.
void convolution_2d_layer_updater_cuda_fermi::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// This implementation requires per-entry inputs and an explicit backprop
// configuration; both are hard errors rather than silent no-ops.
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_fermi is not configured to do backprop but requested to");
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
// Backward task lists from fill_additional_buffers: [4] = (y, x-block-end)
// pairs, [5] = (input-fm-block-start, output-fm-group-start) pairs.
int xy_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1];
const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[4]);
int feature_map_config_count = backward_output_feature_map_group_count * backward_input_feature_map_block_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[5]);
// Zero the destination first when output feature maps are processed in
// several groups (partial results are then combined in the buffer).
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
xy_config_count,
feature_map_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
// Weight/bias update pass: first reduces output errors into bias gradients,
// then accumulates weight gradients, reading input neurons through
// input_tex_ref.  Two weight paths exist depending on whether the window
// fits in a single x block (exact kernel) or is tiled (generic kernel).
void convolution_2d_layer_updater_cuda_fermi::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
// One block per (output feature map, entry); shared memory holds one float
// per thread for the in-block reduction.
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int smem_size = threadblock_size * sizeof(float);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
convolution_2d_update_biases_upd_kernel_fermi<<<grid_size, block_size, smem_size, stream_id>>>(
*data[1],
*output_errors_buffer,
*training_speed[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
size_t texture_offset;
// Bind input neurons starting at the requested entry; byte offset is
// converted to a float-element offset for the kernels.
cuda_safe_call(cudaBindTexture(&texture_offset, input_tex_ref, (const float *)(*input_neurons_buffer) + (offset_input_entry_id * input_elem_count_per_entry), desc, input_elem_count_per_entry * sizeof(float) * (different_input ? entry_count : 1)));
texture_offset /= sizeof(float);
int feature_map_config_count = updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[3]);
// Update weights
{
// Narrow windows (one x block): additional_buffers[2] holds
// (output_y, weight_y) tasks for the exact kernel.
if (updater_window_x_block_count == 1)
{
int output_y_weight_y_config_count = updater_output_y_group_count * window_sizes[1];
const output_y_weight_y_config * output_y_weight_y_config_list = static_cast<const output_y_weight_y_config *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_y_weight_y_config_count,
feature_map_config_count,
entry_count);
if (updater_output_y_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
// Wide windows: additional_buffers[2] holds (output_y, weight_y, weight_x)
// tasks for the generic kernel.
int output_y_weight_y_weight_x_config_count = updater_output_y_group_count * window_sizes[1] * updater_window_x_block_count;
const output_y_weight_y_weight_x_config * output_y_weight_y_weight_x_config_list = static_cast<const output_y_weight_y_weight_x_config *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_y_weight_y_weight_x_config_count,
feature_map_config_count,
entry_count);
if (updater_output_y_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
// Splits `width` into the fewest groups of at most MAX_BLOCK_SIZE elements
// and returns the balanced per-group size:
// ceil(width / ceil(width / MAX_BLOCK_SIZE)).
int convolution_2d_layer_updater_cuda_fermi::get_block_size(int width)
{
    const int group_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
    return (width + group_count - 1) / group_count;
}
// One-time setup after the layer schema is known: caches the window sizes and
// derives block/tile counts for the forward, weight-update and (optionally)
// backward kernels.
void convolution_2d_layer_updater_cuda_fermi::updater_configured()
{
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
// Output feature maps are processed FEATURE_MAP_BLOCK_SIZE at a time.
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
// Windows wider than MAX_WINDOW_WIDTH fall back to the generic updater
// kernel, tiled in x by WINDOW_WIDTH_LOCAL.
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
// Backprop always writes into a separate input-error buffer, never in place
// (enqueue_backprop takes distinct output/input error buffers).
bool convolution_2d_layer_updater_cuda_fermi::is_in_place_backprop() const
{
    const bool supports_in_place = false;
    return supports_in_place;
}
// Reports the per-entry element counts accessed linearly through textures:
// input neurons (forward/update passes) and output errors (backward pass).
std::vector<unsigned int> convolution_2d_layer_updater_cuda_fermi::get_linear_addressing_through_texture_per_entry() const
{
    std::vector<unsigned int> addressing_list(2);
    addressing_list[0] = input_elem_count_per_entry;
    addressing_list[1] = output_elem_count_per_entry;
    return addressing_list;
}
// Picks a bias-update threadblock size, always rounded up to a full warp
// (multiple of 32).  Small maps get one block covering everything; larger
// maps are balanced across ceil(n / 128) blocks.
int convolution_2d_layer_updater_cuda_fermi::get_threadblock_size_biases(int output_neuron_count)
{
    if (output_neuron_count < 128)
        return ((output_neuron_count + 31) / 32) * 32;
    int threadblock_count = (output_neuron_count + 127) / 128;
    int neurons_per_block = (output_neuron_count + threadblock_count - 1) / threadblock_count;
    return ((neurons_per_block + 31) / 32) * 32;
}
// Byte sizes of the device task-list buffers filled by
// fill_additional_buffers, in index order:
//   [0] forward xy tasks, [1] forward feature-map tasks,
//   [2] weight-update tasks (wide or narrow window variant),
//   [3] weight-update feature-map tasks,
//   [4]/[5] backward xy / feature-map tasks (only when backprop is required).
std::vector<size_t> convolution_2d_layer_updater_cuda_fermi::get_sizes_of_additional_buffers_fixed() const
{
std::vector<size_t> res;
res.push_back(sizeof(xy_config) * forward_x_block_count * output_configuration_specific.dimension_sizes[1]);
res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
if (updater_window_x_block_count > 1)
res.push_back(sizeof(output_y_weight_y_weight_x_config) * window_sizes[1] * output_configuration_specific.dimension_sizes[1] * updater_window_x_block_count);
else
res.push_back(sizeof(output_y_weight_y_config) * window_sizes[1] * output_configuration_specific.dimension_sizes[1]);
res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
if (backprop_required)
{
res.push_back(sizeof(xy_config) * backward_x_block_count * input_configuration_specific.dimension_sizes[1]);
res.push_back(sizeof(feature_map_config) * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
}
return res;
}
// Precomputes the device-side task lists consumed by the kernels:
//   [0] forward (y, x-block-start) pairs
//   [1] forward (input-fm-group-start, output-fm-block-start) pairs
//   [2] weight-update (output_y, weight_y[, weight_x]) tuples
//   [3] weight-update (input-fm, output-fm-block-start) pairs (tiling pattern)
//   [4] backward (y, x-block-end) pairs
//   [5] backward (input-fm-block-start, output-fm-group-start) pairs
void convolution_2d_layer_updater_cuda_fermi::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
    {
        std::vector<xy_config> task_list;
        for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
            for(int x = 0; x < forward_x_block_count; ++x)
                task_list.push_back(xy_config(y, x * forward_x_block_size));
        cuda_safe_call(cudaMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(xy_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    {
        std::vector<feature_map_config> task_list;
        for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
            for(int output_feature_map_id = 0; output_feature_map_id < forward_output_feature_map_block_count; ++output_feature_map_id)
                task_list.push_back(feature_map_config(input_feature_map_group_id * forward_input_feature_map_group_size, output_feature_map_id * FEATURE_MAP_BLOCK_SIZE));
        cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    if (updater_window_x_block_count == 1)
    {
        std::vector<output_y_weight_y_config> task_list;
        for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
            for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
                task_list.push_back(output_y_weight_y_config(output_y, weight_y));
        // Bug fix: size the copy by the element type actually stored here
        // (was sizeof(xy_config), which only worked while the two structs
        // happened to have the same layout).
        cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(output_y_weight_y_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    else
    {
        std::vector<output_y_weight_y_weight_x_config> task_list;
        for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
            for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
                for(int weight_x = 0; weight_x < updater_window_x_block_count; ++weight_x)
                    // NOTE(review): the x offset is scaled by FEATURE_MAP_BLOCK_SIZE,
                    // but updater_window_x_block_count is derived from
                    // WINDOW_WIDTH_LOCAL in updater_configured() — confirm which
                    // stride the generic updater kernel expects.
                    task_list.push_back(output_y_weight_y_weight_x_config(output_y, weight_y, weight_x * FEATURE_MAP_BLOCK_SIZE));
        // Bug fix: was sizeof(xy_config); this struct carries three fields, so
        // the old size under-copied the task list to the device.
        cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(output_y_weight_y_weight_x_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    {
        std::vector<std::pair<int, int> > pair_list;
        cuda_util::fill_tiling_pattern(input_configuration_specific.feature_map_count, updater_output_feature_map_block_count, pair_list);
        std::vector<feature_map_config> task_list;
        for(std::vector<std::pair<int, int> >::const_iterator it = pair_list.begin(); it != pair_list.end(); ++it)
            task_list.push_back(feature_map_config(it->first, it->second * FEATURE_MAP_BLOCK_SIZE));
        cuda_safe_call(cudaMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    if (backprop_required)
    {
        {
            std::vector<xy_config> task_list;
            for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
                for(int x = 0; x < backward_x_block_count; ++x)
                    // Backward tasks record the last x inside each block.
                    task_list.push_back(xy_config(y, x * backward_x_block_size + (backward_x_block_size - 1)));
            cuda_safe_call(cudaMemcpy(*additional_buffers[4], &(*task_list.begin()), sizeof(xy_config) * task_list.size(), cudaMemcpyHostToDevice));
        }
        {
            std::vector<feature_map_config> task_list;
            for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
                for(int input_feature_map_id = 0; input_feature_map_id < backward_input_feature_map_block_count; ++input_feature_map_id)
                    task_list.push_back(feature_map_config(input_feature_map_id * FEATURE_MAP_BLOCK_SIZE, output_feature_map_group_id * backward_output_feature_map_group_size));
            cuda_safe_call(cudaMemcpy(*additional_buffers[5], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
        }
    }
}
// Derives how finely each pass splits its "group" dimension given the maximum
// batch size; cuda_util::get_group_count presumably trades extra passes for
// enough total tasks to fill the device.  Group sizes are the ceil-divided
// counterparts of the group counts.
void convolution_2d_layer_updater_cuda_fermi::set_max_entry_count(unsigned int max_entry_count)
{
forward_input_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
forward_x_block_count * output_configuration_specific.dimension_sizes[1] * forward_output_feature_map_block_count * max_entry_count,
input_configuration_specific.feature_map_count);
forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
updater_output_y_group_count = cuda_util::get_group_count(
*cuda_config,
updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * window_sizes[1] * max_entry_count * updater_window_x_block_count,
output_configuration_specific.dimension_sizes[1]);
updater_output_y_group_size = (output_configuration_specific.dimension_sizes[1] + updater_output_y_group_count - 1) / updater_output_y_group_count;
if (backprop_required)
{
backward_output_feature_map_group_count = cuda_util::get_group_count(
*cuda_config,
backward_x_block_count * input_configuration_specific.dimension_sizes[1] * backward_input_feature_map_block_count * max_entry_count,
output_configuration_specific.feature_map_count);
backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
}
}
}
}
|
9940d8b2bbdf2f5b8781e4a287911e091cca21d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "NIN.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../layers/BranchLayer.h"
/*
 * Forward declarations of the NIN device kernels; definitions appear later in
 * this file.  The launch geometry each kernel expects is noted above its
 * declaration.
 */
/*
 * dim3 block = dim3(batch, outputAmount);
 * dim3 thread= dim3(outputDim * outputDim);
 */
__global__ void g_NIN_feedforward(
float* inputs,
float** ws,
float** bs,
float* outputs,
int inputDim,
int outputDim,
int inputAmount,
int outputAmount,
int inputArea,
int outputArea);
/*
 * dim3 block = dim3(batch, outputAmount);
 * dim3 thread= dim3(THREADS, inputAmount);
 * note: "curDeltaAea" is a pre-existing typo for curDeltaArea, kept to match
 * the definition.
 */
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
 * dim3 block = dim3(batch, outputAmount);
 * dim3 thread= dim3(inputAmount);
 */
__global__ void g_NIN_wgrad(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
 * dim3 block = dim3(batch, inputAmount);
 * dim3 thread= min(inputDim * inputDim, 512);
 */
__global__ void g_NIN_backpropagation(
float* _curDelta,
float**ws,
float* _preDelta,
int curDim,
int preDim,
int preAmount,
int curAmount,
int curArea,
int preArea);
/*
 * block = dim3(outputAmount, inputAmount);
 * thread= dim3(batch);
 */
__global__ void g_NIN_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
int batch,
float lambda);
/*
 *blocks : dim3(kernelAmount2)
 *threads : dim3(256)
 *shared : sizeof(float) * 256
 */
__global__ void g_NIN_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea);
// Clears the cost accumulator and launches g_getCost_3 over all weight
// matrices (one block per output map, 32 threads, 32 floats of shared
// memory) — presumably the lambda-weighted regularisation term.
void NIN::calCost()
{
cost->gpuClear();
hipLaunchKernelGGL(( g_getCost_3), dim3(dim3(w.size())), dim3(dim3(32)), sizeof(float) * 32, 0, cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
// Synchronize so kernel-execution errors surface here, not at a later call.
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN:getCost");
}
// Forward pass of the 1x1 (network-in-network) convolution:
//   outputs(bt, y*dim+x, ok) = sum_ik inputs(bt, y*dim+x, ik) * w[ok][ik] + b[ok]
// (formula confirmed by the CPU reference under TEST_CUDA_CODE below),
// followed by an optional elementwise non-linearity.
void NIN::feedforward()
{
if((inputs == NULL))
{
printf("NIN init error\n");
exit(0);
}
// One block per (batch entry, output map); threads cover output pixels.
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 1024));
hipLaunchKernelGGL(( g_NIN_feedforward), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
outputs->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_NIN_feedforward");
#ifdef TEST_CUDA_CODE
// One-shot CPU reference check of the kernel output.
static bool init = false;
if(init == false){
cuMatrix<float>* cpp_outputs = new cuMatrix<float>(outputs->rows, outputs->cols, outputs->channels);
for(int ok = 0; ok < outputAmount; ok++){
w[ok]->toCpu();
b[ok]->toCpu();
}
inputs->toCpu();
outputs->toCpu();
for(int bt = 0; bt < batch; bt++){
for(int ok = 0; ok < outputAmount; ok++){
for(int i = 0; i < outputDim; i++){
for(int j = 0; j < outputDim; j++){
float value = 0.0;
for(int ik = 0; ik < inputAmount; ik++){
value += inputs->get(bt, i * inputDim + j, ik) * w[ok]->get(0,0,ik);
}
value += b[ok]->get(0,0,0);
cpp_outputs->set(bt, i * outputDim + j, ok, value);
}
}
}
}
checkMatrixIsSame(outputs, cpp_outputs);
init = true;
}
#endif
// Optional activation applied in place over the whole output buffer.
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_nonLinearity), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_nonLinearity");
}
}
// Backward pass: first folds the activation derivative into curDelta, then
// propagates deltas through the 1x1 weights:
//   preDelta(bt, p, ik) = sum_ok curDelta(bt, p, ok) * w[ok][ik]
// (formula confirmed by the CPU reference under TEST_CUDA_CODE below).
// Skipped entirely when this layer reads directly from the data layer.
void NIN::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_dnonLinearity), dim3(block), dim3(thread), 0, 0, curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_dnonLinearity");
}
// No previous-layer delta to fill when the input is the raw data layer.
if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data"))
return;
// One block per (batch entry, input map); shared memory holds one float per
// output map for the kernel's reduction.
dim3 block = dim3(batch, inputAmount);
dim3 thread= dim3(min(inputDim * inputDim, 1024));
hipLaunchKernelGGL(( g_NIN_backpropagation), dim3(block), dim3(thread), sizeof(float) * outputAmount, 0,
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
curDelta->getArea(),
preDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_NIN_backpropagation");
#ifdef TEST_CUDA_CODE
// One-shot CPU reference check of the propagated deltas.
static bool init = false;
if(init == false){
curDelta->toCpu();
preDelta->toCpu();
for(int ok = 0; ok < outputAmount; ok++){
w[ok]->toCpu();
}
cuMatrix<float>*cpp_preDelta = new cuMatrix<float>(preDelta->rows, preDelta->cols, preDelta->channels);
for(int bt = 0; bt < batch; bt++){
for(int ik = 0; ik < inputAmount; ik++){
for(int i = 0; i < inputDim; i++){
for(int j = 0; j < inputDim; j++){
float value = 0.0;
for(int ok = 0; ok < outputAmount; ok++){
value += curDelta->get(bt, i * outputDim + j, ok) * w[ok]->get(0,0,ik);
}
cpp_preDelta->set(bt, i * inputDim + j, ik, value);
}
}
}
}
checkMatrixIsSame(preDelta, cpp_preDelta);
init = true;
}
#endif
}
/*
 * block = dim3(outputAmount, inputAmount);
 * thread= dim3(batch);
 * shared: blockDim.x floats (one partial sum per thread).
 *
 * Averages the per-batch weight-gradient samples for weight (ok, ik) and adds
 * the L2 term:  Wgrad[ok][ik] = mean_b(wgradTmp[ok][b][ik]) + lambda * w[ok][ik].
 *
 * Fix: the original tree reduction let losing threads `return` and then had
 * the survivors call __syncthreads(), a divergent-barrier pattern that is
 * undefined behaviour (flagged by compute-sanitizer synccheck).  The loop
 * below performs the identical sequence of additions with every thread
 * reaching every barrier.
 */
__global__ void g_NIN_wgradAdd(
    float** _WgradTmp,
    float** Wgrad,
    float** w,
    int batch,
    float lambda)
{
    extern __shared__ float _sum[];
    int ok = blockIdx.x;
    int ik = blockIdx.y;
    int tid = threadIdx.x;
    _sum[tid] = 0;
    int inputAmount = gridDim.y;
    __syncthreads();
    /* Grid-stride-style accumulation over the batch into this thread's slot. */
    int tlen = batch;
    float* wgradTmp = _WgradTmp[ok];
    for(int i = 0; i < tlen; i += blockDim.x)
    {
        int b = i + threadIdx.x;
        if(b < tlen)
            _sum[threadIdx.x] += wgradTmp[ik + b * inputAmount];
    }
    __syncthreads();
    /* Pairwise tree reduction; handles odd lengths via skip = ceil(len / 2). */
    for(int len = blockDim.x; len > 1; )
    {
        int skip = (len + 1) >> 1;
        if(tid < (len >> 1))
            _sum[tid] += _sum[tid + skip];
        len = skip;
        __syncthreads();
    }
    if(tid == 0)
    {
        Wgrad[ok][ik] = _sum[0] / batch + w[ok][ik] * lambda;
    }
}
// Computes parameter gradients:
//   1. per-batch weight-gradient samples
//        wgradTmp[ok](bt, ik) = sum_xy inputs(bt, xy, ik) * curDelta(bt, xy, ok)
//      (formula confirmed by the CPU reference under TEST_CUDA_CODE below),
//      using a tuned kernel variant for large maps with 32/64/128 input maps;
//   2. g_NIN_wgradAdd averages the samples over the batch and adds the
//      lambda * w regularisation term into wgrad;
//   3. g_NIN_Bgrad reduces curDelta into the bias gradients.
void NIN::getGrad()
{
if(outputDim >= 8 && inputAmount == 32){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(32, inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad_1<32, 32>), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else if(outputDim >= 8 && inputAmount == 64){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(16, inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad_1<64, 16>), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else if(outputDim >=8 && inputAmount == 128){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad_1<128, 8>), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else{
// Generic fallback: one thread per input map.
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad");
}
#ifdef TEST_CUDA_CODE
// One-shot CPU reference check of the per-batch gradient samples.
static bool init = false;
if(init == false){
inputs->toCpu();
curDelta->toCpu();
for(size_t i = 0; i < wgradTmp.size(); i++){
wgradTmp[i]->toCpu();
}
cuMatrixVector<float>cpp_wgradTmp;
for(int ok = 0; ok < outputAmount; ok++){
cpp_wgradTmp.push_back(new cuMatrix<float>(wgradTmp[ok]->rows, wgradTmp[ok]->cols, wgradTmp[ok]->channels));
}
for(int bt = 0; bt < batch; bt++){
for(int ik = 0; ik < inputAmount; ik++){
for(int ok = 0; ok < outputAmount; ok++){
float value = 0.0;
for(int i = 0; i < inputDim; i++){
for(int j = 0; j < inputDim; j++){
value += inputs->get(bt, i * inputDim + j, ik) * curDelta->get(bt, i * inputDim + j, ok);
}
}
cpp_wgradTmp[ok]->set(bt, ik, 0, value);
}
}
}
for(size_t i = 0; i < wgradTmp.size(); i++){
checkMatrixIsSame(wgradTmp[i], cpp_wgradTmp[i]);
}
init = true;
}
#endif
// Batch-average the samples and add the weight-decay term.
dim3 block = dim3(outputAmount, inputAmount);
dim3 thread = dim3(batch);
hipLaunchKernelGGL(( g_NIN_wgradAdd), dim3(block), dim3(thread), sizeof(float) * batch, 0,
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
batch,
lambda);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgradAdd");
// Bias gradients: one block per output map, 256-thread reduction.
block = dim3(outputAmount);
thread= dim3(256);
hipLaunchKernelGGL(( g_NIN_Bgrad), dim3(block), dim3(thread), sizeof(float) * thread.x, 0, curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::getGrad::g_NIN_Bgrad");
}
// Applies one momentum-SGD step to weights and biases on the layer's stream:
// g_vecAdd updates the momentum buffers from the gradients and folds them into
// w and b using the globally configured momentum and learning rate.
void NIN::updateWeight()
{
dim3 block = outputAmount;
dim3 thread = min(256, w[0]->getLen());
hipLaunchKernelGGL(( g_vecAdd), dim3(block), dim3(thread), 0, Layers::instance()->get_stream(), momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate(), Config::instance()->getLrate());
}
// Constructs a NIN (1x1, network-in-network) layer from its config entry:
// wires inputs/preDelta to the previous layer (handling BranchLayer
// sub-outputs), allocates outputs, deltas, one 1x1xinputAmount weight matrix
// plus scalar bias per output map (with gradients, per-batch gradient
// scratch, and momentum), randomises the weights and registers the layer.
NIN::NIN(std::string name)
{
m_name = name;
ConfigNIN* config = (ConfigNIN*)Config::instance()->getLayerByName(m_name);
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
if(inputs == NULL){
/*inputs = NULL the type must be BranchLayers*/
Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer());
Assert(config->m_subInput != std::string("NULL"));
BranchLayer* bl = static_cast<BranchLayer*>(preLayer);
inputs = bl->getSubOutput(config->m_subInput);
preDelta = bl->getSubCurDelta(config->m_subInput);
}else{
preDelta = preLayer->getCurDelta();
}
inputAmount = preLayer->outputAmount;
outputAmount = config->m_amount;
inputDim = preLayer->outputDim;
// 1x1 convolution preserves the spatial size.
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
outputs = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount; i++){
w.push_back(new cuMatrix<float>(1, 1, inputAmount));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(1, 1, inputAmount));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, inputAmount, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount; i++){
momentum_w.push_back(new cuMatrix<float>(1, 1, inputAmount));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
void NIN::save(FILE* file)
{
    /* Persist every weight and bias matrix as space-separated "%f" values,
       channel-major, in the order initFromCheckpoint expects: for each
       output map, all weights first, then the bias. */
    for(int idx = 0; idx < (int)w.size(); idx++){
        w[idx]->toCpu();
        b[idx]->toCpu();
        cuMatrix<float>* mats[2] = { w[idx], b[idx] };
        for(int m = 0; m < 2; m++){
            cuMatrix<float>* mat = mats[m];
            for(int ch = 0; ch < mat->channels; ch++){
                for(int r = 0; r < mat->rows; r++){
                    for(int col = 0; col < mat->cols; col++){
                        fprintf(file, "%f ", mat->get(r, col, ch));
                    }
                }
            }
        }
    }
}
void NIN::clearMomentum()
{
    /* Zero the GPU-side momentum buffers so the next updates start fresh. */
    const int biasCount = (int)momentum_b.size();
    for(int idx = 0; idx < biasCount; ++idx)
        momentum_b[idx]->gpuClear();
    const int weightCount = (int)momentum_w.size();
    for(int idx = 0; idx < weightCount; ++idx)
        momentum_w[idx]->gpuClear();
}
void NIN::initRandom()
{
    //srand(clock());
    /* Initialize the 1x1 weights either from a Gaussian profile (via
       createGaussian) or uniformly in [-initW, initW]; biases are not
       touched here. The rand() call order matters for reproducibility. */
    float initW = Config::instance()->getLayerByName(m_name)->m_initW;
    if(Config::instance()->getLayerByName(m_name)->isGaussian()){
        for(int i = 0; i < (int)w.size(); i++){
            float epsilon = initW;
            for(int c = 0; c < w[i]->channels; c++)
            {
                /* random Gaussian shape parameters in [0.5, 4.5] */
                float r1 = 0.5f + 4.0f * (rand()) / RAND_MAX;
                float r2 = 0.5f + 4.0f * (rand()) / RAND_MAX;
                createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
                    1, 1, w[i]->channels,
                    epsilon);
            }
            w[i]->toGpu();
        }
    }
    else{
        for(int i = 0; i < (int)w.size(); i++){
            for(int j = 0; j < (int)w[i]->getLen(); j++){
                /* uniform in [-initW, initW] */
                w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
                //printf("%f ", w[i]->hostData[j]);
            }//printf("\n");
            w[i]->toGpu();
        }
    }
}
void NIN::initFromCheckpoint(FILE* file)
{
    /* Restore weights and biases in the exact order NIN::save wrote them:
       for each output map, all weight values (channel-major), then the bias.
       Values are pushed back to the device after each matrix is filled. */
    float val = 0;
    for(int a = 0; a < (int)w.size(); a++){
        for(int c = 0; c < w[a]->channels; c++){
            for(int i = 0; i < w[a]->rows; i++){
                for(int j = 0; j < w[a]->cols; j++){
                    /* fscanf returns the number of items converted; the old
                       "== EOF" test missed conversion failures (return value
                       0), silently reusing the previous value. */
                    if(fscanf(file, "%f", &val) != 1)
                    {
                        LOG("scanf fail", "result/log.txt");
                    }
                    w[a]->set(i, j, c, val);
                }
            }
        }
        for(int c = 0; c < b[a]->channels; c++){
            for(int i = 0; i < b[a]->rows; i++){
                for(int j = 0; j < b[a]->cols; j++){
                    if(fscanf(file, "%f", &val) != 1)
                    {
                        LOG("scanf fail", "result/log.txt");
                    }
                    b[a]->set(i, j, c, val);
                }
            }
        }
        /* push the restored host values to the device */
        w[a]->toGpu();
        b[a]->toGpu();
    }
}
/*
 * dim3 block = dim3(batch, outputAmount);
 * dim3 thread= dim3(outputDim * outputDim);
 */
__global__ void g_NIN_feedforward(
    float* inputs,
    float** ws,
    float** bs,
    float* outputs,
    int inputDim,
    int outputDim,
    int inputAmount,
    int outputAmount,
    int inputArea,
    int outputArea)
{
    /* 1x1 convolution forward pass.
       Launch: block = dim3(batch, outputAmount); threads stride over the
       output pixels of one (sample, output map) pair.
       out[sp][ok][pixel] = bs[ok][0] + sum_ik inputs[sp][ik][pixel] * ws[ok][ik].
       Note: the same pixel index is used to read the input maps and write
       the output map, so inputDim must equal outputDim (the layer sets
       outputDim = inputDim). */
    int sp = blockIdx.x;    /* sample within the batch */
    int ok = blockIdx.y;    /* output feature map */
    int outputSize2 = outputDim * outputDim;
    int inputSize2 = inputDim* inputDim;
    float b = bs[ok][0];    /* scalar bias of this output map */
    float *w = ws[ok];      /* inputAmount weights of this output map */
    float* curOutput = outputs + ok * outputArea + sp * outputSize2;
    /*convolution*/
    for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
    {
        int idx = tidx + threadIdx.x;
        if(idx < outputSize2)
        {
            float val = 0.0;
            int skip_add = sp * inputSize2;
            /* accumulate across all input feature maps at this pixel */
            for(int ik = 0; ik < inputAmount; ik++){
                float* curInput = inputs + skip_add;
                val += curInput[idx] * w[ik];
                skip_add += inputArea;
            }
            curOutput[idx] = val + b;
        }
    }
}
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
__global__ void g_NIN_backpropagation(
    float* _curDelta,
    float**ws,
    float* _preDelta,
    int curDim,
    int preDim,
    int preAmount,
    int curAmount,
    int curArea,
    int preArea)
{
    /* Back-propagate deltas through the 1x1 convolution:
       preDelta[ik][pixel] = sum_ok curDelta[ok][pixel] * ws[ok][ik].
       Launch: block = dim3(batch, inputAmount); requires dynamic shared
       memory of sizeof(float) * curAmount (one weight per output map). */
    extern __shared__ float wShared[];
    int sp = blockIdx.x;    /* sample within the batch */
    int ik = blockIdx.y;    /* input (previous-layer) feature map */
    /* stage this input map's weight column into shared memory */
    for(int id = 0; id < curAmount; id += blockDim.x){
        int idx = id + threadIdx.x;
        if(idx < curAmount){
            wShared[idx] = ws[idx][ik];
        }
    }
    __syncthreads();
    int curSize2 = curDim * curDim;
    int preSize2 = preDim * preDim;
    float *preDelta = _preDelta + ik * preArea + sp * preSize2;
    /* threads stride over the pixels of the previous layer's delta map;
       the same pixel index is used for the current deltas, so curDim is
       expected to equal preDim for this layer. */
    for (int tidx = 0; tidx < preSize2; tidx += blockDim.x) {
        int idx = tidx + threadIdx.x;
        if (idx < preSize2) {
            float val = 0.0;
            int skip_add = sp * curSize2;
            for(int ok = 0; ok < curAmount; ok++){
                float *curDelta = _curDelta + skip_add;
                val += curDelta[idx] * wShared[ok];
                skip_add += curArea;
            }
            preDelta[idx] = val;
        }
    }
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
    float* _curDelta,
    float** wgradTmp,
    int inputDim,
    int curDeltaDim,
    int inputAmount,
    int outputAmount,
    int inputArea,
    int curDeltaAea)
{
    /* Per-sample weight gradient for the 1x1 convolution.
       Launch: block = dim3(batch, outputAmount), thread = dim3(inputAmount);
       each thread computes the dot product of one input feature map with
       one current-delta map for one sample. */
    const int outIdx = blockIdx.y;     /* output feature map */
    const int inIdx  = threadIdx.x;    /* input feature map  */
    const int sample = blockIdx.x;     /* sample in the batch */
    const int inPixels    = inputDim * inputDim;
    const int deltaPixels = curDeltaDim * curDeltaDim;
    const float* inMap    = _inputs   + inIdx  * inputArea   + sample * inPixels;
    const float* deltaMap = _curDelta + outIdx * curDeltaAea + sample * deltaPixels;
    float acc = 0.0;
    for(int p = 0; p < inPixels; ++p){
        acc += inMap[p] * deltaMap[p];
    }
    wgradTmp[outIdx][inIdx + sample * inputAmount] = acc;
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
    float* _curDelta,
    float** wgradTmp,
    int inputDim,
    int curDeltaDim,
    int inputAmount,
    int outputAmount,
    int inputArea,
    int curDeltaAea)
{
    /* Per-sample weight gradient, parallel-reduction variant for larger maps.
       Launch: block = dim3(batch, outputAmount), thread = dim3(THREADS, inputAmount).
       Requires blockDim.x == THREADS and blockDim.y == inputAmount == INPUTAMOUNT
       so the static shared array matches the thread layout.
       Result: wgradTmp[ok][ik + b*inputAmount] = dot(input map ik, delta map ok)
       for sample b. */
    __shared__ float __sum[INPUTAMOUNT][THREADS];
    int ok = blockIdx.y;     /* output feature map */
    int ik = threadIdx.y;    /* input feature map */
    int b = blockIdx.x;      /* sample in the batch */
    float* _sum = __sum[ik]; /* this input map's reduction row */
    int inputSize2 = inputDim * inputDim;
    int curDeltaSize2 = curDeltaDim * curDeltaDim;
    float* input = _inputs + ik * inputArea + b * inputSize2;
    float* curDelta = _curDelta + ok * curDeltaAea + b * curDeltaSize2;
    float val = 0.0;
    /* each thread accumulates a strided partial dot product */
    for(int x = 0; x < inputSize2; x += blockDim.x){
        int idx = x + threadIdx.x;
        if(idx < inputSize2){
            val += input[idx] * curDelta[idx];
        }
    }
    _sum[threadIdx.x] = val;
    __syncthreads();
    /* tree reduction over the THREADS partials of this row; all threads
       stay in the loop so the barriers are non-divergent */
    int len = THREADS;
    while(len != 1)
    {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
        {
            _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
    }
    if(threadIdx.x == 0){
        wgradTmp[ok][ik + b * inputAmount] = _sum[0];
    }
}
/*
* block = dim3(outputAmount);
* thread= dim3(256);
* shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
    float** bgrad,
    int deltaSize,
    int kernelAmount2,
    int batch,
    int deltaArea)
{
    /* Bias gradient: bgrad[k][0] = (sum of delta over all pixels and all
       samples of output map k) / batch.
       Launch: one block per output map; dynamic shared memory of
       sizeof(float) * blockDim.x. */
    extern __shared__ float _sum[];
    int k2 = blockIdx.x;    /* output feature map */
    _sum[threadIdx.x] = 0.0;
    __syncthreads();
    int deltaSize2 = deltaSize * deltaSize;
    int tlen = deltaSize2 * batch;  /* total elements to accumulate */
    /* strided accumulation of per-thread partial sums */
    for(int i = 0; i < tlen; i += blockDim.x)
    {
        int idx = i + threadIdx.x;
        if(idx < tlen)
        {
            int s = idx / (deltaSize2);//s
            int t2 = idx % (deltaSize2);//x,y
            int id =
                deltaArea * k2 + s * deltaSize2 + t2;
            _sum[threadIdx.x] += delta[id];
        }
    }
    __syncthreads();
    /* tree reduction of the per-thread partials */
    int len = blockDim.x;
    while(len != 1)
    {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
        {
            _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
    }
    __syncthreads();
    if(threadIdx.x == 0)
    {
        bgrad[k2][0] = _sum[0] / batch;
    }
}
| 9940d8b2bbdf2f5b8781e4a287911e091cca21d4.cu | #include "NIN.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../layers/BranchLayer.h"
/*
* dim3 block = dim3(batch, outputAmpunt);
* dim3 thread= dim3(outputDim * outputDim);
*/
__global__ void g_NIN_feedforward(
float* inputs,
float** ws,
float** bs,
float* outputs,
int inputDim,
int outputDim,
int inputAmount,
int outputAmount,
int inputArea,
int outputArea);
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
__global__ void g_NIN_backpropagation(
float* _curDelta,
float**ws,
float* _preDelta,
int curDim,
int preDim,
int preAmount,
int curAmount,
int curArea,
int preArea);
/*
* block = dim3(outputAmount, inputAmount);
* thread= dim3(batch);
*/
__global__ void g_NIN_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
int batch,
float lambda);
/*
*blocks : dim3(kernelAmount2)
*threads : dim3(256)
*shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea);
void NIN::calCost()
{
    /* Accumulate this layer's weight-decay cost into `cost` on the GPU.
       NOTE(review): g_getCost_3 is defined elsewhere — presumably it sums
       the lambda-weighted squared weights (one 32-thread block per weight
       matrix); confirm against its definition. */
    cost->gpuClear();
    g_getCost_3<<<dim3(w.size()), dim3(32), sizeof(float) * 32>>>(cost->getDev(),
        w.m_devPoint,
        lambda,
        w[0]->getLen());
    checkCudaErrors(cudaStreamSynchronize(0));
    getLastCudaError("NIN:getCost");
}
void NIN::feedforward()
{
    /* Forward pass: launch the 1x1-convolution kernel over (batch x
       outputAmount) blocks, optionally verify against a CPU reference
       (TEST_CUDA_CODE), then apply the nonlinearity in place. */
    if((inputs == NULL))
    {
        printf("NIN init error\n");
        exit(0);
    }
    dim3 block = dim3(batch, outputAmount);
    dim3 thread= dim3(min(outputDim * outputDim, 1024));
    g_NIN_feedforward<<<block, thread>>>(
        inputs->getDev(),
        w.m_devPoint,
        b.m_devPoint,
        outputs->getDev(),
        inputDim,
        outputDim,
        inputAmount,
        outputAmount,
        inputs->getArea(),
        outputs->getArea());
    checkCudaErrors(cudaStreamSynchronize(0));
    getLastCudaError("NIN::g_NIN_feedforward");
#ifdef TEST_CUDA_CODE
    /* one-shot CPU reference computation compared against the GPU result */
    static bool init = false;
    if(init == false){
        cuMatrix<float>* cpp_outputs = new cuMatrix<float>(outputs->rows, outputs->cols, outputs->channels);
        for(int ok = 0; ok < outputAmount; ok++){
            w[ok]->toCpu();
            b[ok]->toCpu();
        }
        inputs->toCpu();
        outputs->toCpu();
        for(int bt = 0; bt < batch; bt++){
            for(int ok = 0; ok < outputAmount; ok++){
                for(int i = 0; i < outputDim; i++){
                    for(int j = 0; j < outputDim; j++){
                        float value = 0.0;
                        for(int ik = 0; ik < inputAmount; ik++){
                            value += inputs->get(bt, i * inputDim + j, ik) * w[ok]->get(0,0,ik);
                        }
                        value += b[ok]->get(0,0,0);
                        cpp_outputs->set(bt, i * outputDim + j, ok, value);
                    }
                }
            }
        }
        checkMatrixIsSame(outputs, cpp_outputs);
        init = true;
    }
#endif
    if(NON_LINEARITY >= 0){
        /* apply the activation function element-wise, in place */
        dim3 thread = dim3(min(256, outputs->getLen()));
        dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
        g_nonLinearity<<<block, thread>>>(
            outputs->getDev(),
            outputs->getLen(),
            NON_LINEARITY);
        checkCudaErrors(cudaStreamSynchronize(0));
        getLastCudaError("NIN::g_nonLinearity");
    }
}
void NIN::backpropagation()
{
    /* Backward pass: fold the activation derivative into curDelta, then
       propagate curDelta into the previous layer's delta buffer. */
    if(NON_LINEARITY >= 0){
        dim3 thread = dim3(min(256, outputs->getLen()));
        dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
        g_dnonLinearity<<<block, thread>>>(curDelta->getDev(),
            outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
        checkCudaErrors(cudaStreamSynchronize(0));
        getLastCudaError("NIN::g_dnonLinearity");
    }
    /* the data layer has no delta buffer to propagate into */
    if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data"))
        return;
    dim3 block = dim3(batch, inputAmount);
    dim3 thread= dim3(min(inputDim * inputDim, 1024));
    /* dynamic shared memory holds one weight per output map (see kernel) */
    g_NIN_backpropagation<<<block, thread, sizeof(float) * outputAmount>>>(
        curDelta->getDev(),
        w.m_devPoint,
        preDelta->getDev(),
        outputDim,
        inputDim,
        inputAmount,
        outputAmount,
        curDelta->getArea(),
        preDelta->getArea());
    checkCudaErrors(cudaStreamSynchronize(0));
    getLastCudaError("NIN::g_NIN_backpropagation");
#ifdef TEST_CUDA_CODE
    /* one-shot CPU reference computation compared against the GPU result */
    static bool init = false;
    if(init == false){
        curDelta->toCpu();
        preDelta->toCpu();
        for(int ok = 0; ok < outputAmount; ok++){
            w[ok]->toCpu();
        }
        cuMatrix<float>*cpp_preDelta = new cuMatrix<float>(preDelta->rows, preDelta->cols, preDelta->channels);
        for(int bt = 0; bt < batch; bt++){
            for(int ik = 0; ik < inputAmount; ik++){
                for(int i = 0; i < inputDim; i++){
                    for(int j = 0; j < inputDim; j++){
                        float value = 0.0;
                        for(int ok = 0; ok < outputAmount; ok++){
                            value += curDelta->get(bt, i * outputDim + j, ok) * w[ok]->get(0,0,ik);
                        }
                        cpp_preDelta->set(bt, i * inputDim + j, ik, value);
                    }
                }
            }
        }
        checkMatrixIsSame(preDelta, cpp_preDelta);
        init = true;
    }
#endif
}
/*
* block = dim3(outputAmount, inputAmount);
* thread= dim3(batch);
*/
/*
 * block = dim3(outputAmount, inputAmount);
 * thread= dim3(batch);
 * shared: sizeof(float) * batch
 *
 * Averages the per-sample weight gradients in wgradTmp over the batch and
 * adds the weight-decay term:
 *   Wgrad[ok][ik] = mean_b(wgradTmp[ok][ik + b*inputAmount]) + lambda*w[ok][ik]
 */
__global__ void g_NIN_wgradAdd(
    float** _WgradTmp,
    float** Wgrad,
    float** w,
    int batch,
    float lambda)
{
    extern __shared__ float _sum[];
    int ok = blockIdx.x;
    int ik = blockIdx.y;
    int tid = threadIdx.x;
    _sum[tid] = 0;
    int inputAmount = gridDim.y;
    __syncthreads();
    int tlen = batch;
    float* wgradTmp = _WgradTmp[ok];
    /* strided accumulation of per-thread partial sums over the batch */
    for(int i = 0; i < tlen; i += blockDim.x)
    {
        int b = i + threadIdx.x;
        if(b < tlen)
        {
            _sum[threadIdx.x] += wgradTmp[ik + b * inputAmount];
        }
    }
    __syncthreads();
    /* tree reduction. FIX: the original made non-participating threads
       `return` inside the loop, so the __syncthreads() below was executed
       under divergent control flow — undefined behavior that can deadlock
       or corrupt the reduction (especially with independent thread
       scheduling on Volta+). All threads must stay in the loop. */
    int len = blockDim.x;
    while(len != 1)
    {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(tid < (len >> 1))
        {
            _sum[tid] += _sum[tid + skip];
        }
        len = (len + 1) >> 1;
    }
    if(tid == 0)
    {
        Wgrad[ok][ik] = _sum[0] / batch + w[ok][ik] * lambda;
    }
}
void NIN::getGrad()
{
    /* Gradient computation, three steps:
       1) per-sample weight gradients into wgradTmp — picks a template
          specialization (reduction width) by inputAmount when the maps are
          large enough (outputDim >= 8), otherwise the simple
          one-thread-per-dot-product kernel;
       2) average wgradTmp over the batch + weight decay (g_NIN_wgradAdd);
       3) bias gradients from curDelta (g_NIN_Bgrad). */
    if(outputDim >= 8 && inputAmount == 32){
        dim3 block = dim3(batch, outputAmount);
        dim3 thread= dim3(32, inputAmount);
        g_NIN_wgrad_1<32, 32><<<block, thread>>>(
            inputs->getDev(),
            curDelta->getDev(),
            wgradTmp.m_devPoint,
            inputDim,
            outputDim,
            inputAmount,
            outputAmount,
            inputs->getArea(),
            curDelta->getArea());
        checkCudaErrors(cudaStreamSynchronize(0));
        getLastCudaError("g_NIN_wgrad_1");
    }else if(outputDim >= 8 && inputAmount == 64){
        dim3 block = dim3(batch, outputAmount);
        dim3 thread= dim3(16, inputAmount);
        g_NIN_wgrad_1<64, 16><<<block, thread>>>(
            inputs->getDev(),
            curDelta->getDev(),
            wgradTmp.m_devPoint,
            inputDim,
            outputDim,
            inputAmount,
            outputAmount,
            inputs->getArea(),
            curDelta->getArea());
        checkCudaErrors(cudaStreamSynchronize(0));
        getLastCudaError("g_NIN_wgrad_1");
    }else if(outputDim >=8 && inputAmount == 128){
        dim3 block = dim3(batch, outputAmount);
        dim3 thread= dim3(8, inputAmount);
        g_NIN_wgrad_1<128, 8><<<block, thread>>>(
            inputs->getDev(),
            curDelta->getDev(),
            wgradTmp.m_devPoint,
            inputDim,
            outputDim,
            inputAmount,
            outputAmount,
            inputs->getArea(),
            curDelta->getArea());
        checkCudaErrors(cudaStreamSynchronize(0));
        getLastCudaError("g_NIN_wgrad_1");
    }else{
        /* fallback: one thread per (sample, output map, input map) triple */
        dim3 block = dim3(batch, outputAmount);
        dim3 thread= dim3(inputAmount);
        g_NIN_wgrad<<<block, thread>>>(
            inputs->getDev(),
            curDelta->getDev(),
            wgradTmp.m_devPoint,
            inputDim,
            outputDim,
            inputAmount,
            outputAmount,
            inputs->getArea(),
            curDelta->getArea());
        checkCudaErrors(cudaStreamSynchronize(0));
        getLastCudaError("g_NIN_wgrad");
    }
#ifdef TEST_CUDA_CODE
    /* one-shot CPU reference computation compared against the GPU result */
    static bool init = false;
    if(init == false){
        inputs->toCpu();
        curDelta->toCpu();
        for(size_t i = 0; i < wgradTmp.size(); i++){
            wgradTmp[i]->toCpu();
        }
        cuMatrixVector<float>cpp_wgradTmp;
        for(int ok = 0; ok < outputAmount; ok++){
            cpp_wgradTmp.push_back(new cuMatrix<float>(wgradTmp[ok]->rows, wgradTmp[ok]->cols, wgradTmp[ok]->channels));
        }
        for(int bt = 0; bt < batch; bt++){
            for(int ik = 0; ik < inputAmount; ik++){
                for(int ok = 0; ok < outputAmount; ok++){
                    float value = 0.0;
                    for(int i = 0; i < inputDim; i++){
                        for(int j = 0; j < inputDim; j++){
                            value += inputs->get(bt, i * inputDim + j, ik) * curDelta->get(bt, i * inputDim + j, ok);
                        }
                    }
                    cpp_wgradTmp[ok]->set(bt, ik, 0, value);
                }
            }
        }
        for(size_t i = 0; i < wgradTmp.size(); i++){
            checkMatrixIsSame(wgradTmp[i], cpp_wgradTmp[i]);
        }
        init = true;
    }
#endif
    /* average over the batch and add the weight-decay term */
    dim3 block = dim3(outputAmount, inputAmount);
    dim3 thread = dim3(batch);
    g_NIN_wgradAdd<<<block, thread, sizeof(float) * batch>>>(
        wgradTmp.m_devPoint,
        wgrad.m_devPoint,
        w.m_devPoint,
        batch,
        lambda);
    checkCudaErrors(cudaStreamSynchronize(0));
    getLastCudaError("g_NIN_wgradAdd");
    /* bias gradients: mean of curDelta per output map */
    block = dim3(outputAmount);
    thread= dim3(256);
    g_NIN_Bgrad<<<block, thread, sizeof(float) * thread.x>>>(curDelta->getDev(),
        bgrad.m_devPoint,
        outputDim,
        outputAmount,
        batch,
        curDelta->getArea());
    checkCudaErrors(cudaStreamSynchronize(0));
    getLastCudaError("NIN::getGrad::g_NIN_Bgrad");
}
void NIN::updateWeight()
{
    /* Apply one optimizer step to all weights and biases on the layer's
       stream, one block per output feature map.
       NOTE(review): g_vecAdd is defined elsewhere — presumably a momentum
       SGD update using the momentum buffers, gradients, momentum factor and
       learning rates passed below; confirm against its definition. */
    dim3 block = outputAmount;
    dim3 thread = min(256, w[0]->getLen());
    g_vecAdd<<<block, thread, 0, Layers::instance()->get_stream()>>>(momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
        momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
        w[0]->getLen(), b[0]->getLen(),
        Config::instance()->getMomentum(),
        Config::instance()->getLrate(), Config::instance()->getLrate());
}
NIN::NIN(std::string name)
{
    /* Network-in-Network (1x1 convolution) layer constructor: wires the layer
       to the previous layer's output/delta buffers, allocates per-output-map
       1x1 weights, biases, gradient and momentum buffers, initializes the
       weights, and registers the layer under its name. */
    m_name = name;
    ConfigNIN* config = (ConfigNIN*)Config::instance()->getLayerByName(m_name);
    ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
    inputs = preLayer->getOutputs();
    if(inputs == NULL){
        /*inputs = NULL the type must be BranchLayers*/
        Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer());
        Assert(config->m_subInput != std::string("NULL"));
        BranchLayer* bl = static_cast<BranchLayer*>(preLayer);
        inputs = bl->getSubOutput(config->m_subInput);
        preDelta = bl->getSubCurDelta(config->m_subInput);
    }else{
        preDelta = preLayer->getCurDelta();
    }
    inputAmount = preLayer->outputAmount;
    outputAmount = config->m_amount;
    inputDim = preLayer->outputDim;
    /* 1x1 convolution preserves the spatial size */
    outputDim = inputDim;
    batch = Config::instance()->getBatchSize();
    lambda = config->m_weightDecay;
    NON_LINEARITY = config->m_nonLinearity;
    outputs = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
    curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
    for(int i = 0; i < outputAmount; i++){
        /* one 1x1 kernel (inputAmount channels) and one scalar bias per
           output map; wgradTmp holds per-sample gradients before averaging */
        w.push_back(new cuMatrix<float>(1, 1, inputAmount));
        b.push_back(new cuMatrix<float>(1, 1, 1));
        wgrad.push_back(new cuMatrix<float>(1, 1, inputAmount));
        bgrad.push_back(new cuMatrix<float>(1, 1, 1));
        wgradTmp.push_back(new cuMatrix<float>(batch, inputAmount, 1));
    }
    /* publish the host-side pointer arrays to the device so kernels can
       receive them as float** (m_devPoint) */
    w.toGpu();
    b.toGpu();
    wgrad.toGpu();
    bgrad.toGpu();
    wgradTmp.toGpu();
    for(int i = 0; i < outputAmount; i++){
        momentum_w.push_back(new cuMatrix<float>(1, 1, inputAmount));
        momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
    }
    momentum_w.toGpu();
    momentum_b.toGpu();
    this->initRandom();
    Layers::instance()->set(m_name, this);
}
void NIN::save(FILE* file)
{
    /* Persist every weight and bias matrix as space-separated "%f" values,
       channel-major, in the order initFromCheckpoint expects: for each
       output map, all weights first, then the bias. */
    for(int idx = 0; idx < (int)w.size(); idx++){
        w[idx]->toCpu();
        b[idx]->toCpu();
        cuMatrix<float>* mats[2] = { w[idx], b[idx] };
        for(int m = 0; m < 2; m++){
            cuMatrix<float>* mat = mats[m];
            for(int ch = 0; ch < mat->channels; ch++){
                for(int r = 0; r < mat->rows; r++){
                    for(int col = 0; col < mat->cols; col++){
                        fprintf(file, "%f ", mat->get(r, col, ch));
                    }
                }
            }
        }
    }
}
void NIN::clearMomentum()
{
    /* Zero the GPU-side momentum buffers so the next updates start fresh. */
    const int biasCount = (int)momentum_b.size();
    for(int idx = 0; idx < biasCount; ++idx)
        momentum_b[idx]->gpuClear();
    const int weightCount = (int)momentum_w.size();
    for(int idx = 0; idx < weightCount; ++idx)
        momentum_w[idx]->gpuClear();
}
void NIN::initRandom()
{
    //srand(clock());
    /* Initialize the 1x1 weights either from a Gaussian profile (via
       createGaussian) or uniformly in [-initW, initW]; biases are not
       touched here. The rand() call order matters for reproducibility. */
    float initW = Config::instance()->getLayerByName(m_name)->m_initW;
    if(Config::instance()->getLayerByName(m_name)->isGaussian()){
        for(int i = 0; i < (int)w.size(); i++){
            float epsilon = initW;
            for(int c = 0; c < w[i]->channels; c++)
            {
                /* random Gaussian shape parameters in [0.5, 4.5] */
                float r1 = 0.5f + 4.0f * (rand()) / RAND_MAX;
                float r2 = 0.5f + 4.0f * (rand()) / RAND_MAX;
                createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
                    1, 1, w[i]->channels,
                    epsilon);
            }
            w[i]->toGpu();
        }
    }
    else{
        for(int i = 0; i < (int)w.size(); i++){
            for(int j = 0; j < (int)w[i]->getLen(); j++){
                /* uniform in [-initW, initW] */
                w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
                //printf("%f ", w[i]->hostData[j]);
            }//printf("\n");
            w[i]->toGpu();
        }
    }
}
void NIN::initFromCheckpoint(FILE* file)
{
    /* Restore weights and biases in the exact order NIN::save wrote them:
       for each output map, all weight values (channel-major), then the bias.
       Values are pushed back to the device after each matrix is filled. */
    float val = 0;
    for(int a = 0; a < (int)w.size(); a++){
        for(int c = 0; c < w[a]->channels; c++){
            for(int i = 0; i < w[a]->rows; i++){
                for(int j = 0; j < w[a]->cols; j++){
                    /* fscanf returns the number of items converted; the old
                       "== EOF" test missed conversion failures (return value
                       0), silently reusing the previous value. */
                    if(fscanf(file, "%f", &val) != 1)
                    {
                        LOG("scanf fail", "result/log.txt");
                    }
                    w[a]->set(i, j, c, val);
                }
            }
        }
        for(int c = 0; c < b[a]->channels; c++){
            for(int i = 0; i < b[a]->rows; i++){
                for(int j = 0; j < b[a]->cols; j++){
                    if(fscanf(file, "%f", &val) != 1)
                    {
                        LOG("scanf fail", "result/log.txt");
                    }
                    b[a]->set(i, j, c, val);
                }
            }
        }
        /* push the restored host values to the device */
        w[a]->toGpu();
        b[a]->toGpu();
    }
}
/*
 * dim3 block = dim3(batch, outputAmount);
 * dim3 thread= dim3(outputDim * outputDim);
 */
__global__ void g_NIN_feedforward(
    float* inputs,
    float** ws,
    float** bs,
    float* outputs,
    int inputDim,
    int outputDim,
    int inputAmount,
    int outputAmount,
    int inputArea,
    int outputArea)
{
    /* 1x1 convolution forward pass.
       Launch: block = dim3(batch, outputAmount); threads stride over the
       output pixels of one (sample, output map) pair.
       out[sp][ok][pixel] = bs[ok][0] + sum_ik inputs[sp][ik][pixel] * ws[ok][ik].
       Note: the same pixel index is used to read the input maps and write
       the output map, so inputDim must equal outputDim (the layer sets
       outputDim = inputDim). */
    int sp = blockIdx.x;    /* sample within the batch */
    int ok = blockIdx.y;    /* output feature map */
    int outputSize2 = outputDim * outputDim;
    int inputSize2 = inputDim* inputDim;
    float b = bs[ok][0];    /* scalar bias of this output map */
    float *w = ws[ok];      /* inputAmount weights of this output map */
    float* curOutput = outputs + ok * outputArea + sp * outputSize2;
    /*convolution*/
    for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
    {
        int idx = tidx + threadIdx.x;
        if(idx < outputSize2)
        {
            float val = 0.0;
            int skip_add = sp * inputSize2;
            /* accumulate across all input feature maps at this pixel */
            for(int ik = 0; ik < inputAmount; ik++){
                float* curInput = inputs + skip_add;
                val += curInput[idx] * w[ik];
                skip_add += inputArea;
            }
            curOutput[idx] = val + b;
        }
    }
}
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
__global__ void g_NIN_backpropagation(
    float* _curDelta,
    float**ws,
    float* _preDelta,
    int curDim,
    int preDim,
    int preAmount,
    int curAmount,
    int curArea,
    int preArea)
{
    /* Back-propagate deltas through the 1x1 convolution:
       preDelta[ik][pixel] = sum_ok curDelta[ok][pixel] * ws[ok][ik].
       Launch: block = dim3(batch, inputAmount); requires dynamic shared
       memory of sizeof(float) * curAmount (one weight per output map). */
    extern __shared__ float wShared[];
    int sp = blockIdx.x;    /* sample within the batch */
    int ik = blockIdx.y;    /* input (previous-layer) feature map */
    /* stage this input map's weight column into shared memory */
    for(int id = 0; id < curAmount; id += blockDim.x){
        int idx = id + threadIdx.x;
        if(idx < curAmount){
            wShared[idx] = ws[idx][ik];
        }
    }
    __syncthreads();
    int curSize2 = curDim * curDim;
    int preSize2 = preDim * preDim;
    float *preDelta = _preDelta + ik * preArea + sp * preSize2;
    /* threads stride over the pixels of the previous layer's delta map;
       the same pixel index is used for the current deltas, so curDim is
       expected to equal preDim for this layer. */
    for (int tidx = 0; tidx < preSize2; tidx += blockDim.x) {
        int idx = tidx + threadIdx.x;
        if (idx < preSize2) {
            float val = 0.0;
            int skip_add = sp * curSize2;
            for(int ok = 0; ok < curAmount; ok++){
                float *curDelta = _curDelta + skip_add;
                val += curDelta[idx] * wShared[ok];
                skip_add += curArea;
            }
            preDelta[idx] = val;
        }
    }
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
    float* _curDelta,
    float** wgradTmp,
    int inputDim,
    int curDeltaDim,
    int inputAmount,
    int outputAmount,
    int inputArea,
    int curDeltaAea)
{
    /* Per-sample weight gradient for the 1x1 convolution.
       Launch: block = dim3(batch, outputAmount), thread = dim3(inputAmount);
       each thread computes the dot product of one input feature map with
       one current-delta map for one sample. */
    const int outIdx = blockIdx.y;     /* output feature map */
    const int inIdx  = threadIdx.x;    /* input feature map  */
    const int sample = blockIdx.x;     /* sample in the batch */
    const int inPixels    = inputDim * inputDim;
    const int deltaPixels = curDeltaDim * curDeltaDim;
    const float* inMap    = _inputs   + inIdx  * inputArea   + sample * inPixels;
    const float* deltaMap = _curDelta + outIdx * curDeltaAea + sample * deltaPixels;
    float acc = 0.0;
    for(int p = 0; p < inPixels; ++p){
        acc += inMap[p] * deltaMap[p];
    }
    wgradTmp[outIdx][inIdx + sample * inputAmount] = acc;
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
    float* _curDelta,
    float** wgradTmp,
    int inputDim,
    int curDeltaDim,
    int inputAmount,
    int outputAmount,
    int inputArea,
    int curDeltaAea)
{
    /* Per-sample weight gradient, parallel-reduction variant for larger maps.
       Launch: block = dim3(batch, outputAmount), thread = dim3(THREADS, inputAmount).
       Requires blockDim.x == THREADS and blockDim.y == inputAmount == INPUTAMOUNT
       so the static shared array matches the thread layout.
       Result: wgradTmp[ok][ik + b*inputAmount] = dot(input map ik, delta map ok)
       for sample b. */
    __shared__ float __sum[INPUTAMOUNT][THREADS];
    int ok = blockIdx.y;     /* output feature map */
    int ik = threadIdx.y;    /* input feature map */
    int b = blockIdx.x;      /* sample in the batch */
    float* _sum = __sum[ik]; /* this input map's reduction row */
    int inputSize2 = inputDim * inputDim;
    int curDeltaSize2 = curDeltaDim * curDeltaDim;
    float* input = _inputs + ik * inputArea + b * inputSize2;
    float* curDelta = _curDelta + ok * curDeltaAea + b * curDeltaSize2;
    float val = 0.0;
    /* each thread accumulates a strided partial dot product */
    for(int x = 0; x < inputSize2; x += blockDim.x){
        int idx = x + threadIdx.x;
        if(idx < inputSize2){
            val += input[idx] * curDelta[idx];
        }
    }
    _sum[threadIdx.x] = val;
    __syncthreads();
    /* tree reduction over the THREADS partials of this row; all threads
       stay in the loop so the barriers are non-divergent */
    int len = THREADS;
    while(len != 1)
    {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
        {
            _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
    }
    if(threadIdx.x == 0){
        wgradTmp[ok][ik + b * inputAmount] = _sum[0];
    }
}
/*
* block = dim3(outputAmount);
* thread= dim3(256);
* shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
    float** bgrad,
    int deltaSize,
    int kernelAmount2,
    int batch,
    int deltaArea)
{
    /* Bias gradient: bgrad[k][0] = (sum of delta over all pixels and all
       samples of output map k) / batch.
       Launch: one block per output map; dynamic shared memory of
       sizeof(float) * blockDim.x. */
    extern __shared__ float _sum[];
    int k2 = blockIdx.x;    /* output feature map */
    _sum[threadIdx.x] = 0.0;
    __syncthreads();
    int deltaSize2 = deltaSize * deltaSize;
    int tlen = deltaSize2 * batch;  /* total elements to accumulate */
    /* strided accumulation of per-thread partial sums */
    for(int i = 0; i < tlen; i += blockDim.x)
    {
        int idx = i + threadIdx.x;
        if(idx < tlen)
        {
            int s = idx / (deltaSize2);//s
            int t2 = idx % (deltaSize2);//x,y
            int id =
                deltaArea * k2 + s * deltaSize2 + t2;
            _sum[threadIdx.x] += delta[id];
        }
    }
    __syncthreads();
    /* tree reduction of the per-thread partials */
    int len = blockDim.x;
    while(len != 1)
    {
        __syncthreads();
        int skip = (len + 1) >> 1;
        if(threadIdx.x < (len >> 1))
        {
            _sum[threadIdx.x] += _sum[threadIdx.x + skip];
        }
        len = (len + 1) >> 1;
    }
    __syncthreads();
    if(threadIdx.x == 0)
    {
        bgrad[k2][0] = _sum[0] / batch;
    }
}
|
df35dd25a174537e74c2d98fcf24549740e1d193.hip | // !!! This is a file automatically generated by hipify!!!
// Main file of our project
/*
* TU Eindhoven
* Eindhoven, The Netherlands
*
* Name : haar.cpp
*
* Author : Francesco Comaschi (f.comaschi@tue.nl)
*
* Date : November 12, 2012
*
* Function : Haar features evaluation for face detection
*
* History :
* 12-11-12 : Initial version.
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>
*
* In other words, you are welcome to use, share and improve this program.
* You are forbidden to forbid anyone else to use, share and improve
* what you give them. Happy coding!
*/
#include "haar.h"
#include "image.h"
#include <stdio.h>
#include "stdio-wrapper.h"
#include "cuda_util.h"
/* include the gpu functions */
//#include "gpu_functions_hip.cuh"
#include "nearestNeighbor.cuh"
////DEBUG Varibales
//#ifdef LOG
// const bool PRINT_LOG = true;
//#else
// const bool PRINT_LOG = false;
//#endif
//
//#ifdef DEVICE
// const bool PRINT_GPU = true;
//#else
// const bool PRINT_GPU = false;
//#endif
/* TODO: use matrices */
/* classifier parameters */
/************************************
* Notes:
* To paralleism the filter,
* these monolithic arrays may
* need to be splitted or duplicated
***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
//////////////////////////////////////////////////////
////////////////////////////////////////////////////
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImages( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighborOnHost(MyImage *src, MyImage *dst);
void nearestNeighborOnDevice(MyImage *src, MyImage *dst);
/* rounding function */
/* Round to the nearest integer, halves away from zero: bias by +/-0.5
   (in double, matching the original's promotion) and truncate. */
inline int myRound( float value )
{
    const double biased = (value >= 0) ? (value + 0.5) : (value - 0.5);
    return (int)biased;
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
float scaleFactor, int minNeighbors, std::fstream& ofs)
{
/* group overlaping windows */
const float GROUP_EPS = 0.4f;
/* pointer to input image */
MyImage *img = _img;
/***********************************
* create structs for images
* see haar.h for details
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: square integral image (int)
**********************************/
MyImage image1Obj;
MyImage imageDeviceObj;
MyIntImage sum1Obj;
MyIntImage sqsum1Obj;
/* pointers for the created structs */
MyImage *img1 = &image1Obj;
MyImage *deviceimg = &imageDeviceObj;
MyIntImage *sum1 = &sum1Obj;
MyIntImage *sqsum1 = &sqsum1Obj;
/**************************************/
//Timing related
hipError_t error;
hipEvent_t cpu_start;
hipEvent_t cpu_stop;
float msecTotal;
//CUDA Events
error = hipEventCreate(&cpu_start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventCreate(&cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
/**************************************/
/********************************************************
* allCandidates is the preliminaray face candidate,
* which will be refined later.
*
* std::vector is a sequential container
* http://en.wikipedia.org/wiki/Sequence_container_(C++)
*
* Each element of the std::vector is a "MyRect" struct
* MyRect struct keeps the info of a rectangle (see haar.h)
* The rectangle contains one face candidate
*****************************************************/
std::vector<MyRect> allCandidates;
/* scaling factor */
float factor;
/* maxSize */
if( maxSize.height == 0 || maxSize.width == 0 )
{
maxSize.height = img->height;
maxSize.width = img->width;
}
/* window size of the training set */
MySize winSize0 = cascade->orig_window_size;
/* malloc for img1: unsigned char */
createImage(img->width, img->height, img1);
createImage(img->width, img->height, deviceimg);
/* malloc for sum1: unsigned char */
createSumImage(img->width, img->height, sum1);
/* malloc for sqsum1: unsigned char */
createSumImage(img->width, img->height, sqsum1);
/* initial scaling factor */
factor = 1;
/* iterate over the image pyramid */
for( factor = 1; ; factor *= scaleFactor )
{
/* iteration counter */
iter_counter++;
/* size of the image scaled up */
MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
/* size of the image scaled down (from bigger to smaller) */
MySize sz = { ( img->width/factor ), ( img->height/factor ) };
/* difference between sizes of the scaled image and the original detection window */
MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
/* if the actual scaled image is smaller than the original detection window, break */
if( sz1.width < 0 || sz1.height < 0 )
break;
/* if a minSize different from the original detection window is specified, continue to the next scaling */
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
/*************************************
* Set the width and height of
* img1: normal image (unsigned char)
* sum1: integral image (int)
* sqsum1: squared integral image (int)
* see image.c for details
************************************/
setImage(sz.width, sz.height, img1);
setImage(sz.width, sz.height, deviceimg);
setSumImage(sz.width, sz.height, sum1);
setSumImage(sz.width, sz.height, sqsum1);
printf("\n\tIteration:= %d\n \tDownsampling--> New Image Size: Width: %d, Height: %d\n",
iter_counter, sz.width, sz.height);
/***************************************
* Compute-intensive step:
* building image pyramid by downsampling
* downsampling using nearest neighbor
**************************************/
error = hipEventRecord(cpu_start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
nearestNeighborOnHost(img, img1);
nearestNeighborOnDevice(img, deviceimg);
if(PRINT_LOG){
//Compare the host and device results
if(!CompareResults(img1->data, deviceimg->data, img1->width * img1->height)){
printf("\tNN GPU and Host Image doesn't match!! -- Printing Image Log\n");
ofs<<"\n";
ofs<<"\nHost Image Log: ";
ofs<<"Width: "<<img1->width<<" x "<<"Height: "<<img1->height<<"\n";
WriteFile(img1->data, img1->width * img1->height, ofs);
ofs<<"\n";
ofs<<"\nDevice Image Log: ";
ofs<<"Width: "<<deviceimg->width<<" x "<<"Height: "<<deviceimg->height<<"\n";
WriteFile(deviceimg->data, deviceimg->width * deviceimg->height, ofs);
}
}
// Record the stop event
error = hipEventRecord(cpu_stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventElapsedTime(&msecTotal, cpu_start, cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("\tNearestNeighbor computation complete--> Execution time: %f ms\n", msecTotal);
/***************************************************
* Compute-intensive step:
* At each scale of the image pyramid,
* compute a new integral and squared integral image
***************************************************/
integralImages(img1, sum1, sqsum1);
printf("\tIntegral Image Sum Calculation Done\n");
/* sets images for haar classifier cascade */
/**************************************************
* Note:
* Summing pixels within a haar window is done by
* using four corners of the integral image:
* http://en.wikipedia.org/wiki/Summed_area_table
*
* This function loads the four corners,
* but does not do compuation based on four coners.
* The computation is done next in ScaleImage_Invoker
*************************************************/
setImageForCascadeClassifier( cascade, sum1, sqsum1);
/* print out for each scale of the image pyramid */
//printf("detecting faces, iter := %d\n", iter_counter);
/****************************************************
* Process the current scale with the cascaded fitler.
* The main computations are invoked by this function.
* Optimization oppurtunity:
* the same cascade filter is invoked each time
***************************************************/
// Record the start event
error = hipEventRecord(cpu_start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
allCandidates);
// Record the stop event
error = hipEventRecord(cpu_stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
error = hipEventElapsedTime(&msecTotal, cpu_start, cpu_stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("\tScaleImage_Invoker computation complete--> Execution time: %f ms\n", msecTotal);
/*********************************************
* For the 5kk73 assignment,
* here is a skeleton
********************************************/
/* malloc cascade filter on GPU memory*/
/*
int filter_count = 0;
for(int i = 0; i < cascade->n_stages; i++ ){
filter_count += stages_array[i];
}
int size_per_filter = 18;
int* gpu_cascade;
hipMalloc((void**) &gpu_cascade, filter_count*size_per_filter*sizeof(int));
dim3 threads = dim3(64, 1);
dim3 grid = dim3(filter_count/64, 1);
gpu_function_1<<< grid, threads >>>();
gpu_function_2<<< grid, threads >>>();
hipFree(gpu_cascade);
*/
/*********************************************
* End of the GPU skeleton
********************************************/
} /* end of the factor loop, finish all scales in pyramid*/
if( minNeighbors != 0)
{
groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
}
freeImage(img1);
freeImage(deviceimg);
freeSumImage(sum1);
freeSumImage(sqsum1);
return allCandidates;
}
/***********************************************
 * Note:
 * The int_sqrt is a software integer square root.
 * GPU has hardware for floating-point square root (sqrtf).
 * In GPU, it is wise to convert an int variable
 * into floating point, and use the HW sqrtf function.
 * More info:
 * http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
 **********************************************/
/*****************************************************
 * The int_sqrt is only used in runCascadeClassifier.
 * If you want to replace int_sqrt with HW sqrtf on the GPU,
 * simply look into the runCascadeClassifier function.
 *****************************************************/
/* Software integer square root: returns floor(sqrt(value)).
 * Classic restoring algorithm that consumes the input two bits at a
 * time, most-significant pair first (16 pairs for a 32-bit input). */
unsigned int int_sqrt (unsigned int value)
{
  unsigned int root = 0;       /* partial root built one bit per pair */
  unsigned int remainder = 0;  /* running remainder */
  int pair;
  for (pair = 0; pair < 16; pair++)
  {
    /* Shift the next two input bits into the remainder. */
    remainder = (remainder << 2) + (value >> 30);
    value <<= 2;
    /* Try to set the next root bit: subtract (2*root + 1) if it fits. */
    root <<= 1;
    unsigned int trial = (root << 1) | 1u;
    if (remainder >= trial)
    {
      remainder -= trial;
      root++;
    }
  }
  return root;
}
/* Attach the integral images to the cascade and precompute the four
 * summed-area-table corner pointers for the detection window and for
 * every Haar filter rectangle (consumed later by evalWeakClassifier).
 * Must be called after integralImages() fills sum/sqsum at each scale. */
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
int i, j, k;
MyRect equRect;
/* Write cursors into the flat parameter arrays:
 * 12 rectangle ints and 3 weights per Haar filter. */
int r_index = 0;
int w_index = 0;
MyRect tr;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
/* The detection window at the training (unscaled) size. */
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
/* NOTE(review): despite the name, this stores the window AREA, not its
 * inverse -- runCascadeClassifier multiplies by it accordingly; confirm. */
cascade->inv_window_area = equRect.width*equRect.height;
/* Four corners of the whole detection window in the integral image
 * (summed-area-table lookup: p0 - p1 - p2 + p3). */
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
/* Same four corners in the squared integral image (for the variance). */
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
/* NOTE(review): offsets are printed relative to p0, so the first value
 * (p0 - p0) is always 0 -- presumably intentional; confirm. */
printf("\tSetting Image for Classifier--> Detection Window Corners:\n \t\tp0: %d, p1: %d, p2: %d, p3: %d\n",
(cascade->p0) - (cascade->p0),
(cascade->p1) - (cascade->p0),
(cascade->p2) - (cascade->p0),
(cascade->p3) - (cascade->p0));
/****************************************
 * Load the index of the four corners
 * of the filter rectangle
 **************************************/
/* loop over the number of stages */
for( i = 0; i < cascade->n_stages; i++ )
{
/* loop over the number of haar features */
for( j = 0; j < stages_array[i]; j++ )
{
int nr = 3;
/* loop over the number of rectangles */
for( k = 0; k < nr; k++ )
{
/* rectangles_array layout per rectangle: x, y, width, height */
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
/* First two rectangles are always present: store the four
 * integral-image corner pointers (TL, TR, BL, BR). */
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else //for 3rd rect
{
/* An all-zero third rectangle means "unused"; mark it with NULL
 * sentinels so evalWeakClassifier can skip it. */
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
r_index+=12; /* 3 rectangles x 4 entries per filter */
w_index+=3;  /* 3 weights per filter */
} /* end of j loop */
} /* end i loop */
printf("\tFour Corners of all the Haar Filter Rectangles Loaded\n");
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
/****************************************************
 * evalWeakClassifier:
 * Evaluate one Haar filter at the current window position.
 * Each filter has up to three rectangles; the pixel sum inside each
 * rectangle is read from the integral image with four corner lookups
 * (summed-area table: TL - TR - BL + BR) and weighted. The weighted
 * total is compared against the filter threshold, scaled by the
 * window's standard deviation, to pick alpha1 or alpha2.
 * More info: http://en.wikipedia.org/wiki/Haar-like_features
 ***************************************************/
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
  /* The node threshold is multiplied by the standard deviation of the image. */
  const int threshold = tree_thresh_array[tree_index] * variance_norm_factor;
  int total = 0;
  for (int rect = 0; rect < 3; rect++)
  {
    const int base = r_index + rect * 4;
    /* The optional third rectangle is marked with NULL corner pointers. */
    if (rect == 2 && scaled_rectangles_array[base] == NULL)
      break;
    total += (*(scaled_rectangles_array[base] + p_offset)
            - *(scaled_rectangles_array[base + 1] + p_offset)
            - *(scaled_rectangles_array[base + 2] + p_offset)
            + *(scaled_rectangles_array[base + 3] + p_offset))
           * weights_array[w_index + rect];
  }
  return (total >= threshold) ? alpha2_array[tree_index] : alpha1_array[tree_index];
}
/* Run the full cascade at window position pt, starting from start_stage.
 * Returns 1 when every stage passes (a face candidate at pt), or -i when
 * the window is rejected by stage i. */
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
/* Running cursors into the flat parameter arrays: one threshold/alpha
 * per filter, 3 weights per filter, 12 rectangle entries per filter. */
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
/* Linear-memory offset of the window's top-left corner in the
 * (squared) integral image. */
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
 * Image normalization:
 * mean is the pixel sum over the detection window (via the integral image);
 * variance_norm_factor starts as the squared-pixel sum (via the squared
 * integral image). inv_window_area holds the window's pixel count (set in
 * setImageForCascadeClassifier), so the product below computes
 * N * sum(x^2), and subtracting mean^2 yields N^2 * variance.
 *************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor * cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/***********************************************
 * Note:
 * The int_sqrt is a software integer square root.
 * GPU has hardware for floating-point square root (sqrtf);
 * on the GPU it is wise to convert to float and use sqrtf.
 * NOTE(review): variance_norm_factor is unsigned, so "> 0" only
 * excludes exact zero; if the subtraction above underflows it wraps
 * to a huge positive value rather than taking the else branch -- confirm
 * this cannot happen for valid integral images.
 **********************************************/
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
 * The major computation happens here.
 * The shifted window is sent through the cascade filter.
 *
 * Stages in the cascade are independent, but a window can be
 * rejected by any stage; running stages in parallel delays the
 * rejection, which induces unnecessary computation.
 *
 * Filters in the same stage are also independent, except that
 * filter results need to be merged and compared with a per-stage
 * threshold.
 *************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
 * stage_sum is a shared accumulator that induces false dependency.
 * To avoid limiting parallelism it can be duplicated, e.g. using
 * stage_sum_array[number_of_threads], syncing only at the end.
 ***************************************************/
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/* Evaluate one Haar filter at this window position. */
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++;
haar_counter++;
w_index+=3;
r_index+=12;
} /* end of j loop */
/**************************************************************
 * Per-stage threshold: if the accumulated sum is below it,
 * the window is rejected and the search is abandoned at the
 * i-th stage (return -i). The factor "0.4" is empirically
 * chosen for 5kk73 (int-vs-double comparison is intentional).
 **************************************************************/
if( stage_sum < 0.4 * stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
/* Slide the detection window over one level of the image pyramid and
 * collect face candidates into _vec.
 * _factor is the pyramid scale; sum_row/sum_col are the integral-image
 * dimensions at this level. Detected rectangles are reported in
 * original-image coordinates (positions and window size scaled back up
 * by _factor). */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
  myCascade* cascade = _cascade;
  const float scale = _factor;
  /* Detection-window size reported for hits at this pyramid level. */
  MySize baseWin = cascade->orig_window_size;
  MySize scaledWin;
  scaledWin.width  = myRound(baseWin.width * scale);
  scaledWin.height = myRound(baseWin.height * scale);
  /* Keep a margin so the filter window never crosses the image border. */
  const int maxY = sum_row - baseWin.height;
  const int maxX = sum_col - baseWin.width;
  /* Window shift per step. A smaller step is faster to reduce but
   * lowers detection quality; kept constant at 1 pixel for 5kk73. */
  const int step = 1;
  /* Each shift is independent; shared data structures (like _vec here)
   * are what limits parallelism when porting this loop to the GPU. */
  printf("\tRunning Cascade Classifier for %d times--> x2: %d, y2: %d\n", maxX * maxY, maxX, maxY);
  for (int col = 0; col <= maxX; col += step)
  {
    for (int row = 0; row <= maxY; row += step)
    {
      MyPoint origin;
      origin.x = col;
      origin.y = row;
      /* Optimization opportunity: the same cascade is re-run each time. */
      const int verdict = runCascadeClassifier(cascade, origin, 0);
      /* A positive verdict means every stage passed: record the window,
       * mapped back to original-image coordinates.
       * Note: push_back is host-only; a GPU port would write hits into a
       * plain array and copy it back before collecting them. */
      if (verdict > 0)
      {
        MyRect face = {myRound(col*scale), myRound(row*scale), scaledWin.width, scaledWin.height};
        _vec.push_back(face);
      }
    }
  }
}
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
/*****************************************************
 * Compute the integral image and the squared integral image of src.
 * Each output cell holds the sum (resp. sum of squares) of all source
 * pixels above and to the left, inclusive -- a summed-area table:
 * http://en.wikipedia.org/wiki/Summed_area_table
 * sum and sqsum must already be sized to match src.
 ****************************************************/
void integralImages( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
  const int h = src->height;
  const int w = src->width;
  unsigned char *pix = src->data;
  int *out = sum->data;
  int *outSq = sqsum->data;
  for (int row = 0; row < h; row++)
  {
    int rowSum = 0;    /* running sum of this row's pixels so far */
    int rowSqSum = 0;  /* running sum of this row's squared pixels */
    for (int col = 0; col < w; col++)
    {
      const int v = pix[row * w + col];
      rowSum += v;
      rowSqSum += v * v;
      int total = rowSum;
      int totalSq = rowSqSum;
      /* Add the column totals accumulated through the previous row. */
      if (row != 0)
      {
        total += out[(row - 1) * w + col];
        totalSq += outSq[(row - 1) * w + col];
      }
      out[row * w + col] = total;
      outSq[row * w + col] = totalSq;
    }
  }
}
/***********************************************************
* This function downsample an image using nearest neighbor
* It is used to build the image pyramid
**********************************************************/
/***********************************************************
 * Downsample src into dst with nearest-neighbor interpolation
 * (used to build the image pyramid). Source coordinates are
 * tracked in 16.16 fixed point so the inner loop is a single
 * add per output pixel. dst must already be sized.
 **********************************************************/
void nearestNeighborOnHost(MyImage *src, MyImage *dst)
{
  const int srcW = src->width;
  const int srcH = src->height;
  const int dstW = dst->width;
  const int dstH = dst->height;
  unsigned char *srcPix = src->data;
  unsigned char *dstPix = dst->data;
  /* 16.16 fixed-point step per output pixel; the +1 nudges sampling so
   * the last output pixel stays inside the source image. */
  const int xRatio = (int)((srcW << 16) / dstW) + 1;
  const int yRatio = (int)((srcH << 16) / dstH) + 1;
  for (int row = 0; row < dstH; row++)
  {
    /* Source row that this output row samples from. */
    const int srcRow = (row * yRatio) >> 16;
    unsigned char *srcLine = srcPix + srcRow * srcW;
    unsigned char *dstLine = dstPix + row * dstW;
    int acc = 0;  /* fixed-point horizontal source position */
    for (int col = 0; col < dstW; col++)
    {
      dstLine[col] = srcLine[acc >> 16];
      acc += xRatio;
    }
  }
}
/* Load the cascade description into the module-level arrays:
 * info.txt supplies the stage count and the number of filters per stage;
 * class.txt supplies, per filter, 18 parameters (3 rectangles of 4 ints,
 * 3 weights, threshold, alpha1, alpha2) plus one threshold per stage.
 * Exits with a message if either file cannot be opened (the original
 * code dereferenced a NULL FILE* and crashed). */
void readTextClassifier()//(myCascade * cascade)
{
  /* number of stages of the cascade classifier
   * (initialized so a malformed info.txt cannot leave it undefined) */
  int stages = 0;
  /* total number of weak classifiers (one node each) */
  int total_nodes = 0;
  int i, j, k, l;
  char mystring [12];
  int r_index = 0;     /* write cursor: rectangles_array (12 ints/filter) */
  int w_index = 0;     /* write cursor: weights_array (3 ints/filter) */
  int tree_index = 0;  /* write cursor: per-filter threshold/alpha arrays */
  FILE *finfo = fopen("info.txt", "r");
  if (finfo == NULL)
  {
    fprintf(stderr, "readTextClassifier: cannot open info.txt\n");
    exit(EXIT_FAILURE);
  }
  /**************************************************
   * The first line of info.txt is the number of stages
   * of the cascaded filter (25 in the 5kk73 example).
   **************************************************/
  if ( fgets (mystring , 12 , finfo) != NULL )
  {
    stages = atoi(mystring);
  }
  i = 0;
  stages_array = (int *)malloc(sizeof(int)*stages);
  /**************************************************
   * How many filters in each stage? Specified in info.txt
   * starting from the second line (lines 2..26 in the 5kk73
   * example). The i < stages guard keeps a malformed file
   * from overrunning stages_array.
   *************************************************/
  while ( i < stages && fgets (mystring , 12 , finfo) != NULL )
  {
    stages_array[i] = atoi(mystring);
    total_nodes += stages_array[i];
    i++;
  }
  fclose(finfo);
  /* TODO: use matrices where appropriate */
  /***********************************************
   * Allocate the flat parameter arrays. Note that, to increase
   * parallelism, some arrays may need to be split or duplicated.
   **********************************************/
  rectangles_array = (int *)malloc(sizeof(int)*total_nodes*12);
  scaled_rectangles_array = (int **)malloc(sizeof(int*)*total_nodes*12);
  weights_array = (int *)malloc(sizeof(int)*total_nodes*3);
  alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
  alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
  tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
  stages_thresh_array = (int*)malloc(sizeof(int)*stages);
  FILE *fp = fopen("class.txt", "r");
  if (fp == NULL)
  {
    fprintf(stderr, "readTextClassifier: cannot open class.txt\n");
    exit(EXIT_FAILURE);
  }
  /******************************************
   * Read the filter parameters in class.txt, one value per line.
   * Each stage contributes 18 parameters per filter plus one stage
   * threshold (e.g. the first 5kk73 stage has 9 filters, so it is
   * specified by 18 * 9 + 1 = 163 lines).
   * The 18 parameters for each filter are:
   *  1- 4: coordinates of rectangle 1        5: weight of rectangle 1
   *  6- 9: coordinates of rectangle 2       10: weight of rectangle 2
   * 11-14: coordinates of rectangle 3       15: weight of rectangle 3
   *    16: threshold   17: alpha 1   18: alpha 2
   ******************************************/
  /* loop over n of stages */
  for (i = 0; i < stages; i++)
  { /* loop over n of trees */
    for (j = 0; j < stages_array[i]; j++)
    { /* loop over n of rectangular features */
      for(k = 0; k < 3; k++)
      { /* loop over the n of vertices */
        for (l = 0; l <4; l++)
        {
          if (fgets (mystring , 12 , fp) != NULL)
            rectangles_array[r_index] = atoi(mystring);
          else
            break;
          r_index++;
        } /* end of l loop */
        if (fgets (mystring , 12 , fp) != NULL)
        {
          weights_array[w_index] = atoi(mystring);
          /* Shift value to avoid overflow in the haar evaluation */
          /*TODO: make more general */
          /*weights_array[w_index]>>=8; */
        }
        else
          break;
        w_index++;
      } /* end of k loop */
      if (fgets (mystring , 12 , fp) != NULL)
        tree_thresh_array[tree_index]= atoi(mystring);
      else
        break;
      if (fgets (mystring , 12 , fp) != NULL)
        alpha1_array[tree_index]= atoi(mystring);
      else
        break;
      if (fgets (mystring , 12 , fp) != NULL)
        alpha2_array[tree_index]= atoi(mystring);
      else
        break;
      tree_index++;
      /* After the last filter of a stage comes the stage threshold. */
      if (j == stages_array[i]-1)
      {
        if (fgets (mystring , 12 , fp) != NULL)
          stages_thresh_array[i] = atoi(mystring);
        else
          break;
      }
    } /* end of j loop */
  } /* end of i loop */
  fclose(fp);
}
/* Free every array allocated by readTextClassifier.
 * Each pointer is reset to NULL afterwards so a stray second call is a
 * harmless no-op (free(NULL) is defined) instead of a double-free. */
void releaseTextClassifier()
{
  free(stages_array);          stages_array = NULL;
  free(rectangles_array);      rectangles_array = NULL;
  free(scaled_rectangles_array); scaled_rectangles_array = NULL;
  free(weights_array);         weights_array = NULL;
  free(tree_thresh_array);     tree_thresh_array = NULL;
  free(alpha1_array);          alpha1_array = NULL;
  free(alpha2_array);          alpha2_array = NULL;
  free(stages_thresh_array);   stages_thresh_array = NULL;
}
/* End of file. */
// ===== df35dd25a174537e74c2d98fcf24549740e1d193.cu =====
// Main file of our project
/*
* TU Eindhoven
* Eindhoven, The Netherlands
*
* Name : haar.cpp
*
* Author : Francesco Comaschi (f.comaschi@tue.nl)
*
* Date : November 12, 2012
*
* Function : Haar features evaluation for face detection
*
* History :
* 12-11-12 : Initial version.
*
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>
*
* In other words, you are welcome to use, share and improve this program.
* You are forbidden to forbid anyone else to use, share and improve
* what you give them. Happy coding!
*/
#include "haar.h"
#include "image.h"
#include <stdio.h>
#include "stdio-wrapper.h"
#include "cuda_util.h"
/* include the gpu functions */
//#include "gpu_functions.cuh"
#include "nearestNeighbor.cuh"
////DEBUG Varibales
//#ifdef LOG
// const bool PRINT_LOG = true;
//#else
// const bool PRINT_LOG = false;
//#endif
//
//#ifdef DEVICE
// const bool PRINT_GPU = true;
//#else
// const bool PRINT_GPU = false;
//#endif
/* TODO: use matrices */
/* classifier parameters */
/************************************
 * Notes:
 * To parallelize the filter,
 * these monolithic arrays may
 * need to be split or duplicated
 ***********************************/
static int *stages_array;
static int *rectangles_array;
static int *weights_array;
static int *alpha1_array;
static int *alpha2_array;
static int *tree_thresh_array;
static int *stages_thresh_array;
static int **scaled_rectangles_array;
//////////////////////////////////////////////////////
////////////////////////////////////////////////////
int clock_counter = 0;
float n_features = 0;
int iter_counter = 0;
/* compute integral images */
void integralImages( MyImage *src, MyIntImage *sum, MyIntImage *sqsum );
/* scale down the image */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec);
/* compute scaled image */
void nearestNeighborOnHost(MyImage *src, MyImage *dst);
void nearestNeighborOnDevice(MyImage *src, MyImage *dst);
/* rounding function */
/* Round to the nearest integer, with halves rounded away from zero
 * (e.g. 2.5 -> 3, -2.5 -> -3). */
inline int myRound( float value )
{
  const double shift = (value < 0.0f) ? -0.5 : 0.5;
  return (int)(value + shift);
}
/*******************************************************
* Function: detectObjects
* Description: It calls all the major steps
******************************************************/
/* Abort with a readable message when a CUDA runtime call fails.
 * 'action' names the attempted operation, e.g. "record start event";
 * factored out of detectObjects, which previously repeated this
 * check-and-exit block verbatim eight times. */
static void checkCudaCall(cudaError_t error, const char *action)
{
  if (error != cudaSuccess)
  {
    fprintf(stderr, "Failed to %s (error code %s)!\n", action, cudaGetErrorString(error));
    exit(EXIT_FAILURE);
  }
}

/*******************************************************
 * Function: detectObjects
 * Description: Runs the full detection pipeline and returns the
 * preliminary face candidates.
 * For each level of the image pyramid (scales by scaleFactor until the
 * image is smaller than the training window):
 *   1. downsample the input (host + device, results compared when
 *      PRINT_LOG is set, mismatches dumped to ofs),
 *   2. build the integral and squared integral images,
 *   3. bind them to the cascade and slide the cascade over the image.
 * CUDA events time the downsampling and classification phases.
 * If minNeighbors != 0, overlapping hits are merged at the end.
 ******************************************************/
std::vector<MyRect> detectObjects( MyImage* _img, MySize minSize, MySize maxSize, myCascade* cascade,
    float scaleFactor, int minNeighbors, std::fstream& ofs)
{
  /* group overlaping windows */
  const float GROUP_EPS = 0.4f;
  /* pointer to input image */
  MyImage *img = _img;
  /***********************************
   * img1:      downscaled image, host result (unsigned char)
   * deviceimg: downscaled image, device result (unsigned char)
   * sum1:      integral image (int)
   * sqsum1:    squared integral image (int)
   **********************************/
  MyImage image1Obj;
  MyImage imageDeviceObj;
  MyIntImage sum1Obj;
  MyIntImage sqsum1Obj;
  MyImage *img1 = &image1Obj;
  MyImage *deviceimg = &imageDeviceObj;
  MyIntImage *sum1 = &sum1Obj;
  MyIntImage *sqsum1 = &sqsum1Obj;
  /* CUDA events used to time the host/device phases. */
  cudaEvent_t cpu_start;
  cudaEvent_t cpu_stop;
  float msecTotal;
  checkCudaCall(cudaEventCreate(&cpu_start), "create start event");
  checkCudaCall(cudaEventCreate(&cpu_stop), "create stop event");
  /********************************************************
   * allCandidates holds the preliminary face candidates,
   * one MyRect per hit, refined later by groupRectangles.
   *****************************************************/
  std::vector<MyRect> allCandidates;
  /* pyramid scaling factor */
  float factor;
  /* maxSize defaults to the whole input image */
  if( maxSize.height == 0 || maxSize.width == 0 )
  {
    maxSize.height = img->height;
    maxSize.width = img->width;
  }
  /* window size of the training set */
  MySize winSize0 = cascade->orig_window_size;
  /* scratch buffers sized for the largest (unscaled) level */
  createImage(img->width, img->height, img1);
  createImage(img->width, img->height, deviceimg);
  createSumImage(img->width, img->height, sum1);
  createSumImage(img->width, img->height, sqsum1);
  /* iterate over the image pyramid */
  for( factor = 1; ; factor *= scaleFactor )
  {
    iter_counter++;
    /* detection window scaled up; image scaled down */
    MySize winSize = { myRound(winSize0.width*factor), myRound(winSize0.height*factor) };
    MySize sz = { ( img->width/factor ), ( img->height/factor ) };
    MySize sz1 = { sz.width - winSize0.width, sz.height - winSize0.height };
    /* stop when the scaled image is smaller than the detection window */
    if( sz1.width < 0 || sz1.height < 0 )
      break;
    /* honor a caller-specified minimum window size */
    if( winSize.width < minSize.width || winSize.height < minSize.height )
      continue;
    setImage(sz.width, sz.height, img1);
    setImage(sz.width, sz.height, deviceimg);
    setSumImage(sz.width, sz.height, sum1);
    setSumImage(sz.width, sz.height, sqsum1);
    printf("\n\tIteration:= %d\n \tDownsampling--> New Image Size: Width: %d, Height: %d\n",
           iter_counter, sz.width, sz.height);
    /***************************************
     * Compute-intensive step: build this pyramid level by
     * nearest-neighbor downsampling (host and device).
     **************************************/
    checkCudaCall(cudaEventRecord(cpu_start, NULL), "record start event");
    nearestNeighborOnHost(img, img1);
    nearestNeighborOnDevice(img, deviceimg);
    if(PRINT_LOG){
      /* Compare the host and device results; dump both on mismatch. */
      if(!CompareResults(img1->data, deviceimg->data, img1->width * img1->height)){
        printf("\tNN GPU and Host Image doesn't match!! -- Printing Image Log\n");
        ofs<<"\n";
        ofs<<"\nHost Image Log: ";
        ofs<<"Width: "<<img1->width<<" x "<<"Height: "<<img1->height<<"\n";
        WriteFile(img1->data, img1->width * img1->height, ofs);
        ofs<<"\n";
        ofs<<"\nDevice Image Log: ";
        ofs<<"Width: "<<deviceimg->width<<" x "<<"Height: "<<deviceimg->height<<"\n";
        WriteFile(deviceimg->data, deviceimg->width * deviceimg->height, ofs);
      }
    }
    checkCudaCall(cudaEventRecord(cpu_stop, NULL), "record stop event");
    checkCudaCall(cudaEventSynchronize(cpu_stop), "synchronize on the stop event");
    checkCudaCall(cudaEventElapsedTime(&msecTotal, cpu_start, cpu_stop), "get time elapsed between events");
    printf("\tNearestNeighbor computation complete--> Execution time: %f ms\n", msecTotal);
    /***************************************************
     * Compute-intensive step: at each scale, compute a new
     * integral and squared integral image.
     ***************************************************/
    integralImages(img1, sum1, sqsum1);
    printf("\tIntegral Image Sum Calculation Done\n");
    /**************************************************
     * Bind the integral images to the cascade: loads the four
     * corners of each filter rectangle (summed-area table,
     * http://en.wikipedia.org/wiki/Summed_area_table); the actual
     * computation happens next in ScaleImage_Invoker.
     *************************************************/
    setImageForCascadeClassifier( cascade, sum1, sqsum1);
    /****************************************************
     * Process the current scale with the cascaded filter.
     * Optimization opportunity: the same cascade filter is
     * invoked at every scale.
     ***************************************************/
    checkCudaCall(cudaEventRecord(cpu_start, NULL), "record start event");
    ScaleImage_Invoker(cascade, factor, sum1->height, sum1->width,
                       allCandidates);
    checkCudaCall(cudaEventRecord(cpu_stop, NULL), "record stop event");
    checkCudaCall(cudaEventSynchronize(cpu_stop), "synchronize on the stop event");
    checkCudaCall(cudaEventElapsedTime(&msecTotal, cpu_start, cpu_stop), "get time elapsed between events");
    printf("\tScaleImage_Invoker computation complete--> Execution time: %f ms\n", msecTotal);
    /* 5kk73 assignment skeleton: a GPU port would cudaMalloc the
     * cascade (stage/filter parameter arrays), launch the filter
     * kernels here, and cudaFree afterwards. */
  } /* end of the factor loop, finish all scales in pyramid*/
  if( minNeighbors != 0)
  {
    groupRectangles(allCandidates, minNeighbors, GROUP_EPS);
  }
  /* The original code leaked the timing events on every call. */
  checkCudaCall(cudaEventDestroy(cpu_start), "destroy start event");
  checkCudaCall(cudaEventDestroy(cpu_stop), "destroy stop event");
  freeImage(img1);
  freeImage(deviceimg);
  freeSumImage(sum1);
  freeSumImage(sqsum1);
  return allCandidates;
}
/***********************************************
 * Note:
 * The int_sqrt is a software integer square root.
 * GPU has hardware for floating-point square root (sqrtf).
 * In GPU, it is wise to convert an int variable
 * into floating point, and use the HW sqrtf function.
 * More info:
 * http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
 **********************************************/
/*****************************************************
 * The int_sqrt is only used in runCascadeClassifier.
 * If you want to replace int_sqrt with HW sqrtf on the GPU,
 * simply look into the runCascadeClassifier function.
 *****************************************************/
/* Software integer square root: returns floor(sqrt(value)).
 * Restoring digit-by-digit method in base 4: the input is consumed two
 * bits at a time (16 iterations for 32 bits), most-significant first. */
unsigned int int_sqrt (unsigned int value)
{
  unsigned int root = 0;       /* partial result, one bit per iteration */
  unsigned int remainder = 0;  /* running remainder */
  for (int step = 0; step < 16; step++)
  {
    /* Pull the next two high bits of the input into the remainder. */
    remainder = (remainder << 2) | (value >> 30);
    value <<= 2;
    /* If (2*root + 1) fits in the remainder, the next root bit is 1. */
    root <<= 1;
    const unsigned int candidate = (root << 1) | 1u;
    if (remainder >= candidate)
    {
      remainder -= candidate;
      root += 1;
    }
  }
  return root;
}
/*
 * setImageForCascadeClassifier: bind the integral image (sum) and the
 * squared integral image (sqsum) to the cascade, precompute the four
 * corner pointers of the detection window in both images, and resolve
 * the four corner addresses of every filter rectangle into
 * scaled_rectangles_array (globals filled by readTextClassifier).
 */
void setImageForCascadeClassifier( myCascade* _cascade, MyIntImage* _sum, MyIntImage* _sqsum)
{
MyIntImage *sum = _sum;
MyIntImage *sqsum = _sqsum;
myCascade* cascade = _cascade;
int i, j, k;
MyRect equRect;
int r_index = 0;
int w_index = 0;
MyRect tr;
cascade->sum = *sum;
cascade->sqsum = *sqsum;
equRect.x = equRect.y = 0;
equRect.width = cascade->orig_window_size.width;
equRect.height = cascade->orig_window_size.height;
/* NOTE: despite its name, this field stores the window AREA
   (pixel count), not its inverse -- see its use in
   runCascadeClassifier. */
cascade->inv_window_area = equRect.width*equRect.height;
/* Four corners of the detection window inside the integral image
   (top-left, top-right, bottom-left, bottom-right)... */
cascade->p0 = (sum->data) ;
cascade->p1 = (sum->data + equRect.width - 1) ;
cascade->p2 = (sum->data + sum->width*(equRect.height - 1));
cascade->p3 = (sum->data + sum->width*(equRect.height - 1) + equRect.width - 1);
/* ...and inside the squared integral image. */
cascade->pq0 = (sqsum->data);
cascade->pq1 = (sqsum->data + equRect.width - 1) ;
cascade->pq2 = (sqsum->data + sqsum->width*(equRect.height - 1));
cascade->pq3 = (sqsum->data + sqsum->width*(equRect.height - 1) + equRect.width - 1);
/* NOTE(review): these pointer differences are ptrdiff_t; printing them
   with %d is not strictly portable on LP64 (the values are small here). */
printf("\tSetting Image for Classifier--> Detection Window Corners:\n \t\tp0: %d, p1: %d, p2: %d, p3: %d\n",
(cascade->p0) - (cascade->p0),
(cascade->p1) - (cascade->p0),
(cascade->p2) - (cascade->p0),
(cascade->p3) - (cascade->p0));
/****************************************
 * Resolve the index of the four corners
 * of each filter rectangle into absolute
 * integral-image addresses.
 **************************************/
/* loop over the number of stages */
for( i = 0; i < cascade->n_stages; i++ )
{
/* loop over the number of haar features */
for( j = 0; j < stages_array[i]; j++ )
{
int nr = 3;
/* loop over the number of rectangles */
for( k = 0; k < nr; k++ )
{
/* Rectangle k of this filter: (x, y, width, height) packed as
   4 consecutive ints in rectangles_array. */
tr.x = rectangles_array[r_index + k*4];
tr.width = rectangles_array[r_index + 2 + k*4];
tr.y = rectangles_array[r_index + 1 + k*4];
tr.height = rectangles_array[r_index + 3 + k*4];
if (k < 2)
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
else //for 3rd rect
{
/* An all-zero third rectangle means the filter only has two;
   NULL corner pointers flag that for evalWeakClassifier. */
if ((tr.x == 0)&& (tr.y == 0) &&(tr.width == 0) &&(tr.height == 0))
{
scaled_rectangles_array[r_index + k*4] = NULL ;
scaled_rectangles_array[r_index + k*4 + 1] = NULL ;
scaled_rectangles_array[r_index + k*4 + 2] = NULL;
scaled_rectangles_array[r_index + k*4 + 3] = NULL;
}
else
{
scaled_rectangles_array[r_index + k*4] = (sum->data + sum->width*(tr.y ) + (tr.x )) ;
scaled_rectangles_array[r_index + k*4 + 1] = (sum->data + sum->width*(tr.y ) + (tr.x + tr.width)) ;
scaled_rectangles_array[r_index + k*4 + 2] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x ));
scaled_rectangles_array[r_index + k*4 + 3] = (sum->data + sum->width*(tr.y + tr.height) + (tr.x + tr.width));
}
} /* end of branch if(k<2) */
} /* end of k loop*/
/* 12 coordinates (3 rects x 4 corners) and 3 weights per filter. */
r_index+=12;
w_index+=3;
} /* end of j loop */
} /* end i loop */
printf("\tFour Corners of all the Haar Filter Rectangles Loaded\n");
}
/****************************************************
* evalWeakClassifier:
* the actual computation of a haar filter.
* More info:
* http://en.wikipedia.org/wiki/Haar-like_features
***************************************************/
/*
 * evalWeakClassifier: evaluate one Haar filter (weak classifier) on the
 * detection window at integral-image offset p_offset.
 * Accumulates the weighted box integrals of the filter's rectangles
 * (the optional third rectangle is flagged by a NULL corner pointer)
 * and compares the total with the variance-scaled node threshold.
 * Returns alpha2 when the filter fires (sum >= threshold), else alpha1.
 */
inline int evalWeakClassifier(int variance_norm_factor, int p_offset, int tree_index, int w_index, int r_index )
{
    /* The node threshold is scaled by the window's standard deviation. */
    int threshold = tree_thresh_array[tree_index] * variance_norm_factor;
    int weighted_sum = 0;
    int rect;
    for (rect = 0; rect < 3; rect++)
    {
        int corner = r_index + rect * 4;
        /* A NULL corner pointer marks an absent third rectangle. */
        if (rect == 2 && scaled_rectangles_array[corner] == NULL)
            break;
        /* Box sum over the rectangle from its four integral-image corners. */
        weighted_sum += (*(scaled_rectangles_array[corner] + p_offset)
                       - *(scaled_rectangles_array[corner + 1] + p_offset)
                       - *(scaled_rectangles_array[corner + 2] + p_offset)
                       + *(scaled_rectangles_array[corner + 3] + p_offset))
                      * weights_array[w_index + rect];
    }
    return (weighted_sum >= threshold) ? alpha2_array[tree_index] : alpha1_array[tree_index];
}
/*
 * runCascadeClassifier: evaluate the whole cascade (from start_stage on)
 * on the detection window whose top-left corner is pt.
 * Returns 1 when every stage accepts the window (face candidate), and
 * -i when stage i rejects it (note: a rejection at stage 0 returns 0).
 */
int runCascadeClassifier( myCascade* _cascade, MyPoint pt, int start_stage )
{
int p_offset, pq_offset;
int i, j;
unsigned int mean;
unsigned int variance_norm_factor;
int haar_counter = 0;
int w_index = 0;
int r_index = 0;
int stage_sum;
myCascade* cascade;
cascade = _cascade;
/* Linear-array offsets of the window's top-left corner inside the
   integral image and the squared integral image. */
p_offset = pt.y * (cascade->sum.width) + pt.x;
pq_offset = pt.y * (cascade->sqsum.width) + pt.x;
/**************************************************************************
 * Image normalization.
 * Despite the names: `mean` holds the raw pixel SUM of the window (it is
 * never divided), and cascade->inv_window_area is assigned the window
 * AREA in setImageForCascadeClassifier. The variance is therefore
 * computed in the area-scaled form  area * sum(x^2) - (sum(x))^2.
 *************************************************************************/
variance_norm_factor = (cascade->pq0[pq_offset] - cascade->pq1[pq_offset] - cascade->pq2[pq_offset] + cascade->pq3[pq_offset]);
mean = (cascade->p0[p_offset] - cascade->p1[p_offset] - cascade->p2[p_offset] + cascade->p3[p_offset]);
variance_norm_factor = (variance_norm_factor * cascade->inv_window_area);
variance_norm_factor = variance_norm_factor - mean*mean;
/* NOTE(review): the arithmetic above is unsigned, so a numerically
   negative variance wraps to a large value instead of failing the
   `> 0` test below -- confirm this is the intended behavior. */
/***********************************************
 * Note:
 * The int_sqrt is a software integer square root.
 * GPU has hardware for floating-point square root (sqrtf).
 * In GPU, it is wise to convert the variance norm
 * into floating point, and use HW sqrtf function.
 * More info:
 * http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#standard-functions
 **********************************************/
if( variance_norm_factor > 0 )
variance_norm_factor = int_sqrt(variance_norm_factor);
else
variance_norm_factor = 1;
/**************************************************
 * The major computation happens here.
 * For each scale in the image pyramid,
 * and for each shifted step of the filter,
 * send the shifted window through cascade filter.
 *
 * Note:
 *
 * Stages in the cascade filter are independent.
 * However, a face can be rejected by any stage.
 * Running stages in parallel delays the rejection,
 * which induces unnecessary computation.
 *
 * Filters in the same stage are also independent,
 * except that filter results need to be merged,
 * and compared with a per-stage threshold.
 *************************************************/
for( i = start_stage; i < cascade->n_stages; i++ )
{
/****************************************************
 * A shared variable that induces false dependency
 *
 * To avoid it from limiting parallelism,
 * we can duplicate it multiple times,
 * e.g., using stage_sum_array[number_of_threads].
 * Then threads only need to sync at the end
 ***************************************************/
stage_sum = 0;
for( j = 0; j < stages_array[i]; j++ )
{
/**************************************************
 * Send the shifted window to a haar filter.
 **************************************************/
stage_sum += evalWeakClassifier(variance_norm_factor, p_offset, haar_counter, w_index, r_index);
n_features++; /* presumably a global profiling counter -- defined elsewhere in this file */
haar_counter++;
w_index+=3; /* 3 weights per filter */
r_index+=12; /* 12 rectangle coordinates per filter */
} /* end of j loop */
/**************************************************************
 * Per-stage thresholding: if the accumulated stage sum is below
 * the (empirically scaled) stage threshold, the window cannot be
 * a face and the search is abandoned at the i-th stage (-i).
 * Otherwise continue; surviving all stages means a face (1).
 * Note the comparison mixes int with a double expression.
 **************************************************************/
/* the number "0.4" is empirically chosen for 5kk73 */
if( stage_sum < 0.4 * stages_thresh_array[i] ){
return -i;
} /* end of the per-stage thresholding */
} /* end of i loop */
return 1;
}
/*
 * ScaleImage_Invoker: slide the detection window over one pyramid level
 * (whose integral image has sum_row x sum_col entries) and append every
 * accepted window -- scaled back to original-image coordinates by
 * _factor -- to the result vector.
 */
void ScaleImage_Invoker( myCascade* _cascade, float _factor, int sum_row, int sum_col, std::vector<MyRect>& _vec)
{
    myCascade* cascade = _cascade;
    float factor = _factor;
    /* Detection-window size mapped back to the original image. */
    MySize baseWin = cascade->orig_window_size;
    MySize scaledWin;
    scaledWin.width = myRound(baseWin.width*factor);
    scaledWin.height = myRound(baseWin.height*factor);
    /* Keep a margin so the window never crosses the image border. */
    int xEnd = sum_col - baseWin.width;
    int yEnd = sum_row - baseWin.height;
    /* Shift the window by 1 pixel per step: slowest setting, but the
       best detection quality (kept constant for the 5kk73 input). */
    int step = 1;
    printf("\tRunning Cascade Classifier for %d times--> x2: %d, y2: %d\n", xEnd * yEnd, xEnd, yEnd);
    int x, y;
    for (x = 0; x <= xEnd; x += step)
    {
        for (y = 0; y <= yEnd; y += step)
        {
            MyPoint p;
            p.x = x;
            p.y = y;
            /* The same cascade filter is reused at every position. */
            if (runCascadeClassifier(cascade, p, 0) > 0)
            {
                /* Record the accepted window in original-image coordinates. */
                MyRect r = {myRound(x*factor), myRound(y*factor), scaledWin.width, scaledWin.height};
                _vec.push_back(r);
            }
        }
    }
}
/*****************************************************
* Compute the integral image (and squared integral)
* Integral image helps quickly sum up an area.
* More info:
* http://en.wikipedia.org/wiki/Summed_area_table
****************************************************/
/*
 * integralImages: build the integral image (sum) and the squared
 * integral image (sqsum) of src, so that any rectangular region's sum
 * of pixels / squared pixels can later be read from four corners.
 * More info: http://en.wikipedia.org/wiki/Summed_area_table
 */
void integralImages( MyImage *src, MyIntImage *sum, MyIntImage *sqsum )
{
    const int h = src->height;
    const int w = src->width;
    unsigned char *pix = src->data;
    int *sumOut = sum->data;
    int *sqsumOut = sqsum->data;
    int row, col;
    for (row = 0; row < h; row++)
    {
        int rowSum = 0;   /* running sum of this row's pixels */
        int rowSqSum = 0; /* running sum of this row's squared pixels */
        for (col = 0; col < w; col++)
        {
            const int v = pix[row*w + col];
            rowSum += v;
            rowSqSum += v * v;
            if (row == 0)
            {
                /* First row: no row above to accumulate from. */
                sumOut[col] = rowSum;
                sqsumOut[col] = rowSqSum;
            }
            else
            {
                /* Add the integral value directly above. */
                sumOut[row*w + col] = rowSum + sumOut[(row-1)*w + col];
                sqsumOut[row*w + col] = rowSqSum + sqsumOut[(row-1)*w + col];
            }
        }
    }
}
/***********************************************************
* This function downsample an image using nearest neighbor
* It is used to build the image pyramid
**********************************************************/
/*
 * nearestNeighborOnHost: downsample src into dst with nearest-neighbor
 * sampling, using 16.16 fixed-point coordinate stepping. Used to build
 * the image pyramid on the host.
 */
void nearestNeighborOnHost(MyImage *src, MyImage *dst)
{
    unsigned char *srcData = src->data;
    unsigned char *dstData = dst->data;
    const int srcW = src->width;
    const int srcH = src->height;
    const int dstW = dst->width;
    const int dstH = dst->height;
    /* Source-to-destination ratios in 16.16 fixed point; the +1 keeps
       sampled coordinates from landing past the last source pixel. */
    const int xRatio = (int)((srcW << 16) / dstW) + 1;
    const int yRatio = (int)((srcH << 16) / dstH) + 1;
    int row, col;
    for (row = 0; row < dstH; row++)
    {
        unsigned char *dstRow = dstData + row * dstW;
        /* Source row chosen for this destination row. */
        unsigned char *srcRow = srcData + ((row * yRatio) >> 16) * srcW;
        int xAcc = 0; /* fixed-point source x position */
        for (col = 0; col < dstW; col++)
        {
            dstRow[col] = srcRow[xAcc >> 16];
            xAcc += xRatio;
        }
    }
}
/*
 * readTextClassifier: load the cascade description from two text files
 * into the global parameter arrays used by the detector:
 *   info.txt  - line 1: number of stages; following lines: number of
 *               filters in each stage.
 *   class.txt - 18 values per filter (3 rectangles x (4 coords + 1
 *               weight), then filter threshold, alpha1, alpha2), plus
 *               one stage threshold after each stage's filters.
 * Allocates stages_array, rectangles_array, scaled_rectangles_array,
 * weights_array, alpha1_array, alpha2_array, tree_thresh_array and
 * stages_thresh_array; releaseTextClassifier frees them.
 * If either input file cannot be opened, the error is reported on
 * stderr and the function returns early.
 */
void readTextClassifier()//(myCascade * cascade)
{
  /* number of stages of the cascade classifier */
  int stages = 0;
  /* total number of weak classifiers (one node each) */
  int total_nodes = 0;
  int i, j, k, l;
  char mystring [12];   /* one decimal value per line of the input files */
  int r_index = 0;      /* write cursor: 12 rectangle coordinates per filter */
  int w_index = 0;      /* write cursor: 3 rectangle weights per filter */
  int tree_index = 0;   /* running filter (weak classifier) index */
  /*
   * The first line of info.txt is the number of stages
   * (in the 5kk73 example, there are 25 stages).
   */
  FILE *finfo = fopen("info.txt", "r");
  if (finfo == NULL)
  {
    fprintf(stderr, "readTextClassifier: unable to open info.txt\n");
    return;
  }
  if ( fgets (mystring , 12 , finfo) != NULL )
  {
    stages = atoi(mystring);
  }
  i = 0;
  stages_array = (int *)malloc(sizeof(int)*stages);
  /*
   * Read the filter count of every stage, starting from the second
   * line of info.txt (lines 2..26 in the 5kk73 example). The bound on
   * i protects stages_array against a malformed file with extra lines.
   */
  while ( i < stages && fgets (mystring , 12 , finfo) != NULL )
  {
    stages_array[i] = atoi(mystring);
    total_nodes += stages_array[i];
    i++;
  }
  fclose(finfo);
  /* TODO: use matrices where appropriate */
  /*
   * Allocate the flat parameter arrays.
   * Note that, to increase parallelism, some arrays may need to be
   * split or duplicated later.
   */
  rectangles_array = (int *)malloc(sizeof(int)*total_nodes*12);
  scaled_rectangles_array = (int **)malloc(sizeof(int*)*total_nodes*12);
  weights_array = (int *)malloc(sizeof(int)*total_nodes*3);
  alpha1_array = (int*)malloc(sizeof(int)*total_nodes);
  alpha2_array = (int*)malloc(sizeof(int)*total_nodes);
  tree_thresh_array = (int*)malloc(sizeof(int)*total_nodes);
  stages_thresh_array = (int*)malloc(sizeof(int)*stages);
  FILE *fp = fopen("class.txt", "r");
  if (fp == NULL)
  {
    fprintf(stderr, "readTextClassifier: unable to open class.txt\n");
    return;
  }
  /*
   * Read the filter parameters from class.txt.
   * Each stage contributes 18 parameters per filter plus 1 stage
   * threshold. For example, the first stage of 5kk73 has 9 filters and
   * is therefore specified by 18 * 9 + 1 = 163 lines.
   * The 18 parameters of one filter are:
   *   1-4  : coordinates of rectangle 1
   *   5    : weight of rectangle 1
   *   6-9  : coordinates of rectangle 2
   *   10   : weight of rectangle 2
   *   11-14: coordinates of rectangle 3
   *   15   : weight of rectangle 3
   *   16   : threshold of the filter
   *   17   : alpha 1 of the filter
   *   18   : alpha 2 of the filter
   */
  for (i = 0; i < stages; i++)
  { /* loop over the filters (trees) of stage i */
    for (j = 0; j < stages_array[i]; j++)
    { /* loop over the 3 rectangles of the filter */
      for (k = 0; k < 3; k++)
      { /* loop over the 4 coordinates of the rectangle */
        for (l = 0; l < 4; l++)
        {
          if (fgets (mystring , 12 , fp) != NULL)
            rectangles_array[r_index] = atoi(mystring);
          else
            break;
          r_index++;
        } /* end of l loop */
        if (fgets (mystring , 12 , fp) != NULL)
        {
          weights_array[w_index] = atoi(mystring);
          /* Shift value to avoid overflow in the haar evaluation */
          /* TODO: make more general */
          /* weights_array[w_index]>>=8; */
        }
        else
          break;
        w_index++;
      } /* end of k loop */
      if (fgets (mystring , 12 , fp) != NULL)
        tree_thresh_array[tree_index]= atoi(mystring);
      else
        break;
      if (fgets (mystring , 12 , fp) != NULL)
        alpha1_array[tree_index]= atoi(mystring);
      else
        break;
      if (fgets (mystring , 12 , fp) != NULL)
        alpha2_array[tree_index]= atoi(mystring);
      else
        break;
      tree_index++;
      /* The stage threshold follows the last filter of the stage. */
      if (j == stages_array[i]-1)
      {
        if (fgets (mystring , 12 , fp) != NULL)
          stages_thresh_array[i] = atoi(mystring);
        else
          break;
      }
    } /* end of j loop */
  } /* end of i loop */
  fclose(fp);
}
/*
 * releaseTextClassifier: free every global parameter table allocated by
 * readTextClassifier. The pointers are left dangling (not reset to
 * NULL), so call this at most once after readTextClassifier.
 */
void releaseTextClassifier()
{
free(stages_array);
free(rectangles_array);
free(scaled_rectangles_array);
free(weights_array);
free(tree_thresh_array);
free(alpha1_array);
free(alpha2_array);
free(stages_thresh_array);
}
/* End of file. */
|
b6aa50ffd017c41f54ad24407d433b8e98805ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SciFiSortByX.cuh"
using namespace SciFi;
/*
 * scifi_sort_by_x: reorder the SciFi hit attribute arrays of one event
 * so that, inside each detection zone, hits are sorted by ascending x0.
 * Launch layout (from the code below): gridDim.x = number of events,
 * blockIdx.x = the event handled by this thread block.
 * NOTE(review): the indexing below assumes scifi_hit_count stores the
 * per-event zone offsets first, followed by the per-zone hit counts --
 * confirm against the producer of this buffer.
 */
__global__ void scifi_sort_by_x(
char* scifi_hits,
uint32_t* scifi_hit_count,
uint* scifi_hit_permutations
) {
// Taken from UT sorting
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
/* Per-event views into the offset/count table (see NOTE above). */
const uint* zone_offsets = scifi_hit_count + event_number * SciFi::number_of_zones;
const uint* n_hits_zones = scifi_hit_count + number_of_events * SciFi::number_of_zones + 1 + event_number * SciFi::number_of_zones;
// Two SciFiHits objects are created: one typecasts the base_pointer assuming
// the data is unsorted, the other assuming the data is sorted.
// This makes sorting more readable
SciFiHits unsorted_scifi_hits, sorted_scifi_hits;
unsorted_scifi_hits.typecast_unsorted(scifi_hits, scifi_hit_count[number_of_events * SciFi::number_of_zones]);
sorted_scifi_hits.typecast_sorted(scifi_hits, scifi_hit_count[number_of_events * SciFi::number_of_zones]);
uint total_number_of_hits = 0;
for (int i_zone=0; i_zone < SciFi::number_of_zones; ++i_zone) {
const uint zone_offset = zone_offsets[i_zone];
const uint n_hits_zone = n_hits_zones[i_zone];
total_number_of_hits += n_hits_zone;
/* Build the permutation that orders this zone's hits by ascending x0
   (three-way comparator over the x0 array). */
find_permutation(
zone_offset,
scifi_hit_permutations,
n_hits_zone,
[&unsorted_scifi_hits] (const int a, const int b) {
if (unsorted_scifi_hits.x0[a] > unsorted_scifi_hits.x0[b]) { return 1; }
if (unsorted_scifi_hits.x0[a] == unsorted_scifi_hits.x0[b]) { return 0; }
return -1;
}
);
// Skip padding
/* Padding slots between zones are mapped to themselves (identity). */
for(uint i = zone_offset + n_hits_zone; i < zone_offsets[i_zone + 1]; i++) {
scifi_hit_permutations[i] = i;
total_number_of_hits++;
}
}
// A thread may have filled in a value in scifi_hit_permutations and another
// one may be using it in the next step
__syncthreads();
// Important note: Order matters, and should be kept as is
/* NOTE(review): the sorted/unsorted views alias the same scifi_hits
   buffer, which is presumably why the attribute arrays must be permuted
   in this exact order with a barrier between steps -- confirm against
   the SciFiHits layout before changing anything here. */
apply_permutation<uint>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.hitZone, sorted_scifi_hits.hitZone );
__syncthreads();
apply_permutation<uint>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.planeCode, sorted_scifi_hits.planeCode );
__syncthreads();
apply_permutation<uint>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.LHCbID, sorted_scifi_hits.LHCbID );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.yMax, sorted_scifi_hits.yMax );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.yMin, sorted_scifi_hits.yMin );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.dzdy, sorted_scifi_hits.dzdy );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.dxdy, sorted_scifi_hits.dxdy );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.w, sorted_scifi_hits.w );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.z0, sorted_scifi_hits.z0 );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.x0, sorted_scifi_hits.x0 );
}
| b6aa50ffd017c41f54ad24407d433b8e98805ce0.cu | #include "SciFiSortByX.cuh"
using namespace SciFi;
/*
 * scifi_sort_by_x: reorder the SciFi hit attribute arrays of one event
 * so that, inside each detection zone, hits are sorted by ascending x0.
 * Launch layout (from the code below): gridDim.x = number of events,
 * blockIdx.x = the event handled by this thread block.
 * NOTE(review): the indexing below assumes scifi_hit_count stores the
 * per-event zone offsets first, followed by the per-zone hit counts --
 * confirm against the producer of this buffer.
 */
__global__ void scifi_sort_by_x(
char* scifi_hits,
uint32_t* scifi_hit_count,
uint* scifi_hit_permutations
) {
// Taken from UT sorting
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
/* Per-event views into the offset/count table (see NOTE above). */
const uint* zone_offsets = scifi_hit_count + event_number * SciFi::number_of_zones;
const uint* n_hits_zones = scifi_hit_count + number_of_events * SciFi::number_of_zones + 1 + event_number * SciFi::number_of_zones;
// Two SciFiHits objects are created: one typecasts the base_pointer assuming
// the data is unsorted, the other assuming the data is sorted.
// This makes sorting more readable
SciFiHits unsorted_scifi_hits, sorted_scifi_hits;
unsorted_scifi_hits.typecast_unsorted(scifi_hits, scifi_hit_count[number_of_events * SciFi::number_of_zones]);
sorted_scifi_hits.typecast_sorted(scifi_hits, scifi_hit_count[number_of_events * SciFi::number_of_zones]);
uint total_number_of_hits = 0;
for (int i_zone=0; i_zone < SciFi::number_of_zones; ++i_zone) {
const uint zone_offset = zone_offsets[i_zone];
const uint n_hits_zone = n_hits_zones[i_zone];
total_number_of_hits += n_hits_zone;
/* Build the permutation that orders this zone's hits by ascending x0
   (three-way comparator over the x0 array). */
find_permutation(
zone_offset,
scifi_hit_permutations,
n_hits_zone,
[&unsorted_scifi_hits] (const int a, const int b) {
if (unsorted_scifi_hits.x0[a] > unsorted_scifi_hits.x0[b]) { return 1; }
if (unsorted_scifi_hits.x0[a] == unsorted_scifi_hits.x0[b]) { return 0; }
return -1;
}
);
// Skip padding
/* Padding slots between zones are mapped to themselves (identity). */
for(uint i = zone_offset + n_hits_zone; i < zone_offsets[i_zone + 1]; i++) {
scifi_hit_permutations[i] = i;
total_number_of_hits++;
}
}
// A thread may have filled in a value in scifi_hit_permutations and another
// one may be using it in the next step
__syncthreads();
// Important note: Order matters, and should be kept as is
/* NOTE(review): the sorted/unsorted views alias the same scifi_hits
   buffer, which is presumably why the attribute arrays must be permuted
   in this exact order with a barrier between steps -- confirm against
   the SciFiHits layout before changing anything here. */
apply_permutation<uint>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.hitZone, sorted_scifi_hits.hitZone );
__syncthreads();
apply_permutation<uint>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.planeCode, sorted_scifi_hits.planeCode );
__syncthreads();
apply_permutation<uint>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.LHCbID, sorted_scifi_hits.LHCbID );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.yMax, sorted_scifi_hits.yMax );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.yMin, sorted_scifi_hits.yMin );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.dzdy, sorted_scifi_hits.dzdy );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.dxdy, sorted_scifi_hits.dxdy );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.w, sorted_scifi_hits.w );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.z0, sorted_scifi_hits.z0 );
__syncthreads();
apply_permutation<float>( scifi_hit_permutations, zone_offsets[0], total_number_of_hits, unsorted_scifi_hits.x0, sorted_scifi_hits.x0 );
}
|
e0492bcbff896d1e51a56a00572a4a953722730d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "float4toUchar4.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Benchmark driver for the float4toUchar4 kernel: sweeps argv[1] matrix
 * sizes (matrices_) against 20 launch geometries (blocks_), timing 1000
 * launches per configuration after warm-up, and printing
 * "[usecs,(BX,BY),(X,Y)]" for each configuration.
 * NOTE(review): argv[1] is read without checking argc.
 */
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *inputImage = NULL;
/* NOTE(review): the allocation size is XSIZE*YSIZE bytes with no
   sizeof(float4)/sizeof(uchar4) factor, and the buffers are never
   freed inside the loop -- looks undersized and leaky; confirm. */
hipMalloc((void**)&inputImage, XSIZE*YSIZE);
uchar4 *outputImage = NULL;
hipMalloc((void**)&outputImage, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
/* Round the image dimensions up to multiples of the block shape. */
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
/* First launch + sync to absorb one-time initialization cost. */
hipFree(0);hipLaunchKernelGGL((
float4toUchar4), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,width,height);
hipDeviceSynchronize();
/* Warm-up launches. */
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
float4toUchar4), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,width,height);
}
/* Timed launches.
   NOTE(review): there is no device synchronize around the timed loop,
   so the measured span may mostly cover launch enqueue time. */
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
float4toUchar4), dim3(gridBlock),dim3(threadBlock), 0, 0, inputImage,outputImage,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e0492bcbff896d1e51a56a00572a4a953722730d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "float4toUchar4.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Benchmark driver for the float4toUchar4 kernel: sweeps argv[1] matrix
 * sizes (matrices_) against 20 launch geometries (blocks_), timing 1000
 * launches per configuration after warm-up, and printing
 * "[usecs,(BX,BY),(X,Y)]" for each configuration.
 * NOTE(review): argv[1] is read without checking argc.
 */
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *inputImage = NULL;
/* NOTE(review): the allocation size is XSIZE*YSIZE bytes with no
   sizeof(float4)/sizeof(uchar4) factor, and the buffers are never
   freed inside the loop -- looks undersized and leaky; confirm. */
cudaMalloc(&inputImage, XSIZE*YSIZE);
uchar4 *outputImage = NULL;
cudaMalloc(&outputImage, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
/* Round the image dimensions up to multiples of the block shape. */
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
/* First launch + sync to absorb one-time initialization cost. */
cudaFree(0);
float4toUchar4<<<gridBlock,threadBlock>>>(inputImage,outputImage,width,height);
cudaDeviceSynchronize();
/* Warm-up launches. */
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
float4toUchar4<<<gridBlock,threadBlock>>>(inputImage,outputImage,width,height);
}
/* Timed launches.
   NOTE(review): there is no device synchronize around the timed loop,
   so the measured span may mostly cover launch enqueue time. */
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
float4toUchar4<<<gridBlock,threadBlock>>>(inputImage,outputImage,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
5fba3dd9ced081017b16877ab0cd072187c25a29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv_batched.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define PRECISION_c
#define NB 256 //NB is the 1st level blocking in recursive blocking, BLOCK_SIZE is the 2ed level, NB=256, BLOCK_SIZE=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ctrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
/*
 * Batched wrapper for the no-transpose complex triangular solve:
 * blockIdx.z selects the batch entry, whose matrix and vectors are then
 * forwarded to the templated single-problem device routine.
 * The template parameters fix the blocking geometry (BLOCK_SIZE, DIM_X,
 * DIM_Y, TILE_SIZE) and the operation variant (flag, uplo, trans, diag)
 * at compile time.
 */
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_notrans_kernel_outplace_batched(
int n,
magmaFloatComplex **A_array, int lda,
magmaFloatComplex **b_array, int incb,
magmaFloatComplex **x_array)
{
int batchid = blockIdx.z;
ctrsv_notrans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(n, A_array[batchid], lda, b_array[batchid], incb, x_array[batchid]);
}
/******************************************************************************/
/*
 * Batched wrapper for the (conjugate-)transpose complex triangular
 * solve: blockIdx.z selects the batch entry, whose matrix and vectors
 * are then forwarded to the templated single-problem device routine.
 * The template parameters fix the blocking geometry (BLOCK_SIZE, DIM_X,
 * DIM_Y, TILE_SIZE) and the operation variant (flag, uplo, trans, diag)
 * at compile time.
 */
template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_trans_kernel_outplace_batched(
int n,
magmaFloatComplex **A_array, int lda,
magmaFloatComplex **b_array, int incb,
magmaFloatComplex **x_array)
{
int batchid = blockIdx.z;
ctrsv_trans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(n, A_array[batchid], lda, b_array[batchid], incb, x_array[batchid]);
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_outofplace_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex ** A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magmaFloatComplex **x_array,
magma_int_t batchCount, magma_queue_t queue,
magma_int_t flag)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS, 1, 1 );
dim3 blocks( 1, 1, batchCount );
size_t shmem = n * sizeof(magmaFloatComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
else {
hipLaunchKernelGGL(( ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >)
, dim3(blocks), dim3(threads), shmem, queue->cuda_stream() ,
n, A_array, lda, b_array, incb, x_array);
}
}
}
}
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_recursive_outofplace_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magmaFloatComplex **x_array,
magma_int_t batchCount, magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x_array with zero
//magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
//memory allocation takes 0.32ms
magmaFloatComplex **dW0_displ = NULL;
magmaFloatComplex **dW1_displ = NULL;
magmaFloatComplex **dW2_displ = NULL;
magma_int_t alloc = 0;
alloc += magma_malloc((void**)&dW0_displ, batchCount * sizeof(*dW0_displ));
alloc += magma_malloc((void**)&dW1_displ, batchCount * sizeof(*dW1_displ));
alloc += magma_malloc((void**)&dW2_displ, batchCount * sizeof(*dW2_displ));
if (alloc != 0)
{
magma_free( dW0_displ );
magma_free( dW1_displ );
magma_free( dW2_displ );
info = MAGMA_ERR_DEVICE_ALLOC;
return;
}
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col+jb, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
else
{
col = i;
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, 0, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
//assume x_array contains zero elements
magmablas_cgemv_batched(MagmaNoTrans, jb, i, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
magmablas_ctrsv_outofplace_batched(uplo, trans, diag,jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_cdisplace_pointers(dW0_displ, A_array, lda, col+jb, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
else
{
col = i;
magma_cdisplace_pointers(dW0_displ, A_array, lda, 0, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
//assume x_array contains zero elements
magmablas_cgemv_batched(trans, i, jb, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
magmablas_ctrsv_outofplace_batched(uplo, trans, diag, jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
}
}
magma_free(dW0_displ);
magma_free(dW1_displ);
magma_free(dW2_displ);
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_work_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magmaFloatComplex **x_array,
magma_int_t batchCount, magma_queue_t queue)
{
//magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
//magmablas_ctrsv_recursive_outofplace_batched
magmablas_ctrsv_recursive_outofplace_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
magmablas_clacpy_batched( MagmaFull, n, incb, x_array, n, b_array, n, batchCount, queue);
}
/***************************************************************************//**
Purpose
-------
ctrsv solves one of the matrix equations on gpu
op(A)*x = b, or
x*op(A) = b,
where alpha is a scalar, X and B are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n N specifies the order of the matrix A. n >= 0.
@param[in]
A_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of dimension ( lda, n ),
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
lda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
b_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv_batched
*******************************************************************************/
extern "C" void
magmablas_ctrsv_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magma_int_t batchCount,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaFloatComplex *x=NULL;
magmaFloatComplex **x_array = NULL;
magma_cmalloc( &x, size_x * batchCount);
magma_malloc((void**)&x_array, batchCount * sizeof(*x_array));
magma_cset_pointer( x_array, x, n, 0, 0, size_x, batchCount, queue );
magmablas_ctrsv_work_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
magma_free(x);
magma_free(x_array);
}
| 5fba3dd9ced081017b16877ab0cd072187c25a29.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
@generated from magmablas/ztrsv_batched.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define PRECISION_c
#define NB 256 //NB is the 1st level blocking in recursive blocking, BLOCK_SIZE is the 2ed level, NB=256, BLOCK_SIZE=64 is optimal for batched
#define NUM_THREADS 128 //64 //128
#define BLOCK_SIZE_N 128
#define DIM_X_N 128
#define DIM_Y_N 1
#define BLOCK_SIZE_T 32
#define DIM_X_T 16
#define DIM_Y_T 8
#include "ctrsv_template_device.cuh"
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
extern __shared__ magmaFloatComplex shared_data[];
/******************************************************************************/
template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_notrans_kernel_outplace_batched(
int n,
magmaFloatComplex **A_array, int lda,
magmaFloatComplex **b_array, int incb,
magmaFloatComplex **x_array)
{
int batchid = blockIdx.z;
ctrsv_notrans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(n, A_array[batchid], lda, b_array[batchid], incb, x_array[batchid]);
}
/******************************************************************************/
template<const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag>
__global__ void
ctrsv_trans_kernel_outplace_batched(
int n,
magmaFloatComplex **A_array, int lda,
magmaFloatComplex **b_array, int incb,
magmaFloatComplex **x_array)
{
int batchid = blockIdx.z;
ctrsv_trans_device<BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag>(n, A_array[batchid], lda, b_array[batchid], incb, x_array[batchid]);
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_outofplace_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex ** A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magmaFloatComplex **x_array,
magma_int_t batchCount, magma_queue_t queue,
magma_int_t flag)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
dim3 threads( NUM_THREADS, 1, 1 );
dim3 blocks( 1, 1, batchCount );
size_t shmem = n * sizeof(magmaFloatComplex);
if (trans == MagmaNoTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit)
{
if (flag == 0) {
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0) {
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
}
else //Lower
{
if (diag == MagmaNonUnit)
{
if (flag == 0)
{
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit)
{
if (flag == 0)
{
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_notrans_kernel_outplace_batched< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
}
}
else if (trans == MagmaTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
}
}
else if (trans == MagmaConjTrans)
{
if (uplo == MagmaUpper)
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
}
else
{
if (diag == MagmaNonUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
else if (diag == MagmaUnit) {
if (flag == 0)
{
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
else {
ctrsv_trans_kernel_outplace_batched< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >
<<< blocks, threads, shmem, queue->cuda_stream() >>>
(n, A_array, lda, b_array, incb, x_array);
}
}
}
}
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_recursive_outofplace_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magmaFloatComplex **x_array,
magma_int_t batchCount, magma_queue_t queue)
{
/* Check arguments */
magma_int_t info = 0;
if ( uplo != MagmaUpper && uplo != MagmaLower ) {
info = -1;
} else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) {
info = -2;
} else if ( diag != MagmaUnit && diag != MagmaNonUnit ) {
info = -3;
} else if (n < 0) {
info = -5;
} else if (lda < max(1,n)) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return;
}
// quick return if possible.
if (n == 0)
return;
//Init x_array with zero
//magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
//memory allocation takes 0.32ms
magmaFloatComplex **dW0_displ = NULL;
magmaFloatComplex **dW1_displ = NULL;
magmaFloatComplex **dW2_displ = NULL;
magma_int_t alloc = 0;
alloc += magma_malloc((void**)&dW0_displ, batchCount * sizeof(*dW0_displ));
alloc += magma_malloc((void**)&dW1_displ, batchCount * sizeof(*dW1_displ));
alloc += magma_malloc((void**)&dW2_displ, batchCount * sizeof(*dW2_displ));
if (alloc != 0)
{
magma_free( dW0_displ );
magma_free( dW1_displ );
magma_free( dW2_displ );
info = MAGMA_ERR_DEVICE_ALLOC;
return;
}
magma_int_t col = n;
if (trans == MagmaNoTrans)
{
for (magma_int_t i=0; i < n; i+= NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaUpper)
{
col -= jb;
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col+jb, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
else
{
col = i;
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, 0, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
//assume x_array contains zero elements
magmablas_cgemv_batched(MagmaNoTrans, jb, i, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
magmablas_ctrsv_outofplace_batched(uplo, trans, diag,jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
}
}
else
{
for (magma_int_t i=0; i < n; i += NB)
{
magma_int_t jb = min(NB, n-i);
if (uplo == MagmaLower)
{
col -= jb;
magma_cdisplace_pointers(dW0_displ, A_array, lda, col+jb, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, col+jb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
else
{
col = i;
magma_cdisplace_pointers(dW0_displ, A_array, lda, 0, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, x_array, 1, 0, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
}
//assume x_array contains zero elements
magmablas_cgemv_batched(trans, i, jb, MAGMA_C_ONE, dW0_displ, lda, dW1_displ, 1, MAGMA_C_ONE, dW2_displ, 1, batchCount, queue);
magma_cdisplace_pointers(dW0_displ, A_array, lda, col, col, batchCount, queue);
magma_cdisplace_pointers(dW1_displ, b_array, 1, col*incb, 0, batchCount, queue);
magma_cdisplace_pointers(dW2_displ, x_array, 1, col, 0, batchCount, queue);
magmablas_ctrsv_outofplace_batched(uplo, trans, diag, jb, dW0_displ, lda, dW1_displ, incb, dW2_displ, batchCount, queue, i);
}
}
magma_free(dW0_displ);
magma_free(dW1_displ);
magma_free(dW2_displ);
}
/******************************************************************************/
extern "C" void
magmablas_ctrsv_work_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magmaFloatComplex **x_array,
magma_int_t batchCount, magma_queue_t queue)
{
//magmablas_claset_batched(MagmaFull, n, incb, MAGMA_C_ZERO, MAGMA_C_ZERO, x_array, n, batchCount, queue);
//magmablas_ctrsv_recursive_outofplace_batched
magmablas_ctrsv_recursive_outofplace_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
magmablas_clacpy_batched( MagmaFull, n, incb, x_array, n, b_array, n, batchCount, queue);
}
/***************************************************************************//**
Purpose
-------
ctrsv solves one of the matrix equations on gpu
op(A)*x = b, or
x*op(A) = b,
where alpha is a scalar, X and B are vectors, A is a unit, or
non-unit, upper or lower triangular matrix and op(A) is one of
op(A) = A, or
op(A) = A^T, or
op(A) = A^H.
The vector x is overwritten on b.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
trans magma_trans_t.
On entry, trans specifies the form of op(A) to be used in
the matrix multiplication as follows:
- = MagmaNoTrans: op(A) = A.
- = MagmaTrans: op(A) = A^T.
- = MagmaConjTrans: op(A) = A^H.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n N specifies the order of the matrix A. n >= 0.
@param[in]
A_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array A of dimension ( lda, n ),
Before entry with uplo = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = MagmaUnit, the diagonal elements of
A are not referenced either, but are assumed to be unity.
@param[in]
lda INTEGER.
On entry, lda specifies the first dimension of A.
lda >= max( 1, n ).
@param[in]
b_array Array of pointers, dimension (batchCount).
Each is a COMPLEX array of dimension n
On exit, b is overwritten with the solution vector X.
@param[in]
incb INTEGER.
On entry, incb specifies the increment for the elements of
b. incb must not be zero.
Unchanged on exit.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trsv_batched
*******************************************************************************/
extern "C" void
magmablas_ctrsv_batched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag,
magma_int_t n,
magmaFloatComplex **A_array, magma_int_t lda,
magmaFloatComplex **b_array, magma_int_t incb,
magma_int_t batchCount,
magma_queue_t queue)
{
magma_int_t size_x = n * incb;
magmaFloatComplex *x=NULL;
magmaFloatComplex **x_array = NULL;
magma_cmalloc( &x, size_x * batchCount);
magma_malloc((void**)&x_array, batchCount * sizeof(*x_array));
magma_cset_pointer( x_array, x, n, 0, 0, size_x, batchCount, queue );
magmablas_ctrsv_work_batched(uplo, trans, diag, n, A_array, lda, b_array, incb, x_array, batchCount, queue);
magma_free(x);
magma_free(x_array);
}
|
4fa3b1e049851cc3d9f14802ada0392d9d089ecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
#include <fstream>
#include <array>
#include <chrono>
#include <string>
#include <algorithm>
using namespace std::chrono;
using namespace std;
#define BIN_SIZE 256 // Number of bins in the histogram
#define gridSize 128 // size of the grid for grid size of 16 we are working with 16 X 16 grid
#define width 1920 // frame width
#define height 1200 // frame height
#define rowBlocks ((height + gridSize -1 ) / gridSize) // number of blocks in the frame height ((height + gridSize -1 ) / gridSize)
#define colBlocks ((width + gridSize -1 ) / gridSize) // number of blocks in the frame width ((width + gridSize -1 ) / gridSize)
#define threshold (gridSize * gridSize)// threshold of pixels to clip. Also depends on gridSize. e.g 16 grid size gives a total of 256 pizels in each tile.
//the max value should not exceed 256. 100 is considered high.
// Report (without aborting) a HIP API error, tagged with the call site line.
// The argument is evaluated exactly once (the original macro re-evaluated ERR
// inside hipGetErrorString, re-executing the API call on the error path), and
// the do/while(0) wrapper makes the macro a single statement that composes
// safely with if/else and requires a trailing semicolon.
#define CUDA_CHECK(ERR) \
do { \
hipError_t err_ = (ERR); \
if (err_ != hipSuccess) { \
cout << "Cuda error at line:" << __LINE__ << endl; \
cout << hipGetErrorString(err_) << endl; \
} \
} while (0)
// Build one 256-bin histogram per gridSize x gridSize tile of the luminance
// image L (values expected in [0,1]).
// Expected launch: <<<colBlocks, rowBlocks>>> -- threadIdx.x selects the tile
// row, blockIdx.x the tile column; each thread owns exactly one tile.
__global__ void localHistograms(float* L , int* Hists){
    int idx = threadIdx.x;
    int idy = blockIdx.x;
    int rowStart = idx * gridSize;
    int colStart = idy * gridSize;
    // Clamp the last (partial) tile so it ends exactly at the image edge.
    // The previous "- 1" clamp skipped the final row/column entirely and
    // double-counted one extra interior line.
    if (rowStart + gridSize > height){
        rowStart = height - gridSize;
    }
    if (colStart + gridSize > width){
        colStart = width - gridSize;
    }
    int pixel;
    for (int row = rowStart; row < rowStart + gridSize; row++) {
        for (int col = colStart; col < colStart + gridSize; col++) {
            // Scale [0,1] luminance to an integer bin index in [0,255].
            pixel = (int)(L[col + row * width] * 255);
            // atomicAdd: clamped edge tiles can overlap, so different threads
            // may touch the same histogram slot ordering-free.
            atomicAdd(&Hists[((idx + (idy * rowBlocks)) * BIN_SIZE) + pixel], 1);
        }
    }
}
// CLAHE contrast limiting: cap every bin of each tile histogram at
// `threshold` and hand the clipped mass back uniformly across all 256 bins
// (the first excess%BIN_SIZE bins receive one extra count each).
// Expected launch: <<<colBlocks, rowBlocks>>> -- one thread per tile.
__global__ void clipHistograms(int* dHist){
    int tile = threadIdx.x + blockIdx.x * blockDim.x;
    int* hist = dHist + tile * BIN_SIZE;
    // Pass 1: clip each bin and accumulate the excess counts.
    int excess = 0;
    for (int bin = 0; bin < BIN_SIZE; bin++){
        int over = hist[bin] - threshold;
        if (over > 0){
            excess += over;
            hist[bin] = threshold;
        }
    }
    // Pass 2: redistribute the excess evenly, remainder to the lowest bins.
    int share = excess / BIN_SIZE;
    int remainder = excess % BIN_SIZE;
    for (int bin = 0; bin < BIN_SIZE; bin++){
        hist[bin] += share + (bin < remainder);
    }
}
// Build the (unnormalized) cumulative distribution of each tile histogram:
// cdf[i] = hist[0] + ... + hist[i], stored as float.
// Expected launch: <<<colBlocks, rowBlocks>>> -- one thread per tile,
// each doing a sequential 256-element prefix sum.
__global__ void generateCDFs(float* dCdf, int* dHist){
    int idx = threadIdx.x;
    int idy = blockIdx.x;
    // Flat tile index; blockDim.x is rowBlocks under the expected launch.
    int index = idx + idy * blockDim.x;
    int* hist = dHist + (index * BIN_SIZE);
    float* cdf = dCdf + (index * BIN_SIZE);
    cdf[0] = (float)hist[0];
    for (int i = 1 ; i < BIN_SIZE ; i++){
        cdf[i] = (cdf[i - 1] + hist[i]);
    }
}
// Remap each pixel through its tile's CDF (histogram equalization step):
// L[p] = cdf[bin(L[p])] / 255. Tile indexing mirrors localHistograms, so
// the same launch shape <<<colBlocks, rowBlocks>>> must be used.
__global__ void transform(float* L , float* dCdf){
    int idx = threadIdx.x;
    int idy = blockIdx.x;
    int rowStart = idx * gridSize;
    int colStart = idy * gridSize;
    // Clamp the last (partial) tile to end exactly at the image edge,
    // consistent with the fix in localHistograms (the old "- 1" clamp left
    // the final row/column untouched).
    if (rowStart + gridSize > height){
        rowStart = height - gridSize;
    }
    if (colStart + gridSize > width){
        colStart = width - gridSize;
    }
    int pixel;
    float newVal;
    for (int row = rowStart; row < rowStart + gridSize; row++) {
        for (int col = colStart; col < colStart + gridSize; col++) {
            pixel = (int)(L[col + row * width] * 255);
            // Look up this tile's CDF value for the pixel's bin.
            newVal = dCdf[((idx + (idy * rowBlocks)) * BIN_SIZE)+ pixel];
            L[col + row * width] = (float) newVal/255.f;
        }
    }
}
// CLAHE-style contrast enhancement of one raw frame: normalize the 16-bit
// input to [0,1], build/clip per-tile histograms, form per-tile CDFs, remap
// the pixels, then dump all intermediate buffers to text files for plotting.
int main()
{
    /*
     Load frame pointer here
     uint16_t* rawFrame =
    */
    long N = width * height;
    int num_block = rowBlocks * colBlocks;
    // Per-frame dynamic range used for normalization.
    int frameMin = *min_element(rawFrame, rawFrame + N);
    int frameMax = *max_element(rawFrame, rawFrame + N);
    cout<<frameMax<<" "<<frameMin<<endl;
    float* dL;
    CUDA_CHECK(hipMallocManaged((void **) &dL, N * sizeof(float)));
    // hipMemset takes an int byte value, not a double (0.0 was misleading).
    CUDA_CHECK(hipMemset(dL, 0, N * sizeof(float)));
    // Normalize pixels to [0,1]. The division must happen in floating point:
    // the original integer division collapsed every pixel to 0 or 1.
    float range = (float)(frameMax - frameMin);
    if (range == 0.0f) range = 1.0f;  // flat frame: avoid divide-by-zero
    for (long pixel = 0; pixel < N; pixel++)
    {
        dL[pixel] = (float)(rawFrame[pixel] - frameMin) / range;
        // cout << dL[pixel] << endl;  // debug only: one line per pixel (~2.3M lines)
    }
    hipEvent_t start;
    hipEvent_t stop;
    CUDA_CHECK(hipEventCreate(&start));
    CUDA_CHECK(hipEventCreate(&stop));
    CUDA_CHECK(hipEventRecord(start));
    int* dHist;
    CUDA_CHECK(hipMallocManaged(&dHist, num_block * BIN_SIZE * sizeof(int)));
    CUDA_CHECK(hipMemset(dHist, 0, num_block * BIN_SIZE * sizeof(int)));
    float* dCdf;
    CUDA_CHECK(hipMallocManaged(&dCdf, num_block * BIN_SIZE * sizeof(float)));
    CUDA_CHECK(hipMemset(dCdf, 0, num_block * BIN_SIZE * sizeof(float)));
    // Pipeline launches: one thread per tile; <<<colBlocks, rowBlocks>>>
    // matches the kernels' indexing (threadIdx.x = tile row, blockIdx.x =
    // tile column). Launch errors are surfaced via hipGetLastError.
    hipLaunchKernelGGL(( localHistograms), dim3(colBlocks) , dim3(rowBlocks), 0, 0, dL, dHist); // generate local histograms
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    hipLaunchKernelGGL(( clipHistograms), dim3(colBlocks) , dim3(rowBlocks), 0, 0, dHist); // clip histograms, redistribute clipped pixels uniformly
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    hipLaunchKernelGGL(( generateCDFs), dim3(colBlocks),dim3(rowBlocks), 0, 0, dCdf, dHist); // generate local cdfs
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    hipLaunchKernelGGL(( transform), dim3(colBlocks), dim3(rowBlocks), 0, 0, dL, dCdf); // use cdfs to transform pixel values
    CUDA_CHECK(hipGetLastError());
    CUDA_CHECK(hipDeviceSynchronize());
    CUDA_CHECK(hipEventRecord(stop));
    CUDA_CHECK(hipEventSynchronize(stop));
    // Elapsed GPU time in milliseconds.
    float ms;
    CUDA_CHECK(hipEventElapsedTime(&ms, start, stop));
    cout << ms << endl;
    // Dump intermediates for offline inspection / plotting.
    ofstream myfile0 ("dHist.txt");
    for(long counter = 0; counter < num_block * BIN_SIZE; counter++){
        myfile0<< dHist[counter]<<endl;
    }
    myfile0.close();
    ofstream myfile2 ("CDFs.txt");
    for(long counter = 0; counter < num_block * BIN_SIZE; counter++){
        myfile2<< dCdf[counter]<<endl;
    }
    myfile2.close();
    ofstream myfile1 ("dL_processed-1.txt");
    for(long counter = 0; counter < width*height; counter++){
        myfile1<< dL[counter]<<endl;
    }
    myfile1.close();
    std::string command = "python plotImage.py "; //+ std::to_string(frameNum);
    cout<< command<<endl;
    system(command.c_str());
    CUDA_CHECK(hipEventDestroy(start));
    CUDA_CHECK(hipEventDestroy(stop));
    hipFree(dL);
    hipFree(dHist);
    hipFree(dCdf);
    return 0;
}
| 4fa3b1e049851cc3d9f14802ada0392d9d089ecc.cu | #include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
#include <fstream>
#include <array>
#include <chrono>
#include <string>
#include <algorithm>
using namespace std::chrono;
using namespace std;
#define BIN_SIZE 256 // Number of bins in the histogram
#define gridSize 128 // size of the grid for grid size of 16 we are working with 16 X 16 grid
#define width 1920 // frame width
#define height 1200 // frame height
#define rowBlocks ((height + gridSize -1 ) / gridSize) // number of blocks in the frame height ((height + gridSize -1 ) / gridSize)
#define colBlocks ((width + gridSize -1 ) / gridSize) // number of blocks in the frame width ((width + gridSize -1 ) / gridSize)
#define threshold (gridSize * gridSize)// threshold of pixels to clip. Also depends on gridSize. e.g 16 grid size gives a total of 256 pizels in each tile.
//the max value should not exceed 256. 100 is considered high.
// Report (without aborting) a CUDA API error, tagged with the call site line.
// The argument is evaluated exactly once (the original macro re-evaluated ERR
// inside cudaGetErrorString, re-executing the API call on the error path), and
// the do/while(0) wrapper makes the macro a single statement that composes
// safely with if/else and requires a trailing semicolon.
#define CUDA_CHECK(ERR) \
do { \
cudaError_t err_ = (ERR); \
if (err_ != cudaSuccess) { \
cout << "Cuda error at line:" << __LINE__ << endl; \
cout << cudaGetErrorString(err_) << endl; \
} \
} while (0)
// Build one 256-bin histogram per gridSize x gridSize tile of the luminance
// image L (values expected in [0,1]).
// Expected launch: <<<colBlocks, rowBlocks>>> -- threadIdx.x selects the tile
// row, blockIdx.x the tile column; each thread owns exactly one tile.
__global__ void localHistograms(float* L , int* Hists){
    int idx = threadIdx.x;
    int idy = blockIdx.x;
    int rowStart = idx * gridSize;
    int colStart = idy * gridSize;
    // Clamp the last (partial) tile so it ends exactly at the image edge.
    // The previous "- 1" clamp skipped the final row/column entirely and
    // double-counted one extra interior line.
    if (rowStart + gridSize > height){
        rowStart = height - gridSize;
    }
    if (colStart + gridSize > width){
        colStart = width - gridSize;
    }
    int pixel;
    for (int row = rowStart; row < rowStart + gridSize; row++) {
        for (int col = colStart; col < colStart + gridSize; col++) {
            // Scale [0,1] luminance to an integer bin index in [0,255].
            pixel = (int)(L[col + row * width] * 255);
            // atomicAdd: clamped edge tiles can overlap, so different threads
            // may touch the same histogram slot ordering-free.
            atomicAdd(&Hists[((idx + (idy * rowBlocks)) * BIN_SIZE) + pixel], 1);
        }
    }
}
// CLAHE contrast limiting: cap every bin of each tile histogram at
// `threshold` and redistribute the clipped mass uniformly over all 256 bins
// (the first counter%BIN_SIZE bins receive one extra count each).
// Expected launch: <<<colBlocks, rowBlocks>>> -- one thread per tile.
__global__ void clipHistograms(int* dHist){
    int idx = threadIdx.x;
    int idy = blockIdx.x;
    // Flat tile index; blockDim.x is rowBlocks under the expected launch.
    int index = idx + idy * blockDim.x;
    int counter = 0;
    // if(index == 0) printf("checkpoint");
    int* hist = dHist + (index * BIN_SIZE);
    // Pass 1: clip each bin and accumulate the excess in `counter`.
    for(int i = 0; i < BIN_SIZE; i++){
        if (hist[i] > threshold ){
            counter = counter + hist[i] - threshold;
            hist[i] = threshold;
        }
    }
    // Pass 2: redistribute the excess evenly, remainder to the lowest bins.
    for(int j = 0; j < BIN_SIZE; j++){
        hist[j] = hist[j] + counter/BIN_SIZE + (j < counter%BIN_SIZE);
    }
}
// Build the (unnormalized) cumulative distribution of each tile histogram:
// cdf[i] = hist[0] + ... + hist[i], stored as float.
// Expected launch: <<<colBlocks, rowBlocks>>> -- one thread per tile,
// each doing a sequential 256-element prefix sum.
__global__ void generateCDFs(float* dCdf, int* dHist){
    int tile = threadIdx.x + blockIdx.x * blockDim.x;
    int* hist = dHist + tile * BIN_SIZE;
    float* cdf = dCdf + tile * BIN_SIZE;
    // Running float sum reproduces the original cdf[i-1] + hist[i] chain.
    float running = 0.0f;
    for (int i = 0; i < BIN_SIZE; i++){
        running += hist[i];
        cdf[i] = running;
    }
}
// Remap each pixel through its tile's CDF (histogram equalization step):
// L[p] = cdf[bin(L[p])] / 255. Tile indexing mirrors localHistograms, so
// the same launch shape <<<colBlocks, rowBlocks>>> must be used.
__global__ void transform(float* L , float* dCdf){
    int idx = threadIdx.x;
    int idy = blockIdx.x;
    int rowStart = idx * gridSize;
    int colStart = idy * gridSize;
    // Clamp the last (partial) tile to end exactly at the image edge,
    // consistent with the fix in localHistograms (the old "- 1" clamp left
    // the final row/column untouched).
    if (rowStart + gridSize > height){
        rowStart = height - gridSize;
    }
    if (colStart + gridSize > width){
        colStart = width - gridSize;
    }
    int pixel;
    float newVal;
    for (int row = rowStart; row < rowStart + gridSize; row++) {
        for (int col = colStart; col < colStart + gridSize; col++) {
            pixel = (int)(L[col + row * width] * 255);
            // Look up this tile's CDF value for the pixel's bin.
            newVal = dCdf[((idx + (idy * rowBlocks)) * BIN_SIZE)+ pixel];
            L[col + row * width] = (float) newVal/255.f;
        }
    }
}
// CLAHE-style contrast enhancement of one raw frame: normalize the 16-bit
// input to [0,1], build/clip per-tile histograms, form per-tile CDFs, remap
// the pixels, then dump all intermediate buffers to text files for plotting.
int main()
{
    /*
     Load frame pointer here
     uint16_t* rawFrame =
    */
    long N = width * height;
    int num_block = rowBlocks * colBlocks;
    // Per-frame dynamic range used for normalization.
    int frameMin = *min_element(rawFrame, rawFrame + N);
    int frameMax = *max_element(rawFrame, rawFrame + N);
    cout<<frameMax<<" "<<frameMin<<endl;
    float* dL;
    CUDA_CHECK(cudaMallocManaged((void **) &dL, N * sizeof(float)));
    // cudaMemset takes an int byte value, not a double (0.0 was misleading).
    CUDA_CHECK(cudaMemset(dL, 0, N * sizeof(float)));
    // Normalize pixels to [0,1]. The division must happen in floating point:
    // the original integer division collapsed every pixel to 0 or 1.
    float range = (float)(frameMax - frameMin);
    if (range == 0.0f) range = 1.0f;  // flat frame: avoid divide-by-zero
    for (long pixel = 0; pixel < N; pixel++)
    {
        dL[pixel] = (float)(rawFrame[pixel] - frameMin) / range;
        // cout << dL[pixel] << endl;  // debug only: one line per pixel (~2.3M lines)
    }
    cudaEvent_t start;
    cudaEvent_t stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start));
    int* dHist;
    CUDA_CHECK(cudaMallocManaged(&dHist, num_block * BIN_SIZE * sizeof(int)));
    CUDA_CHECK(cudaMemset(dHist, 0, num_block * BIN_SIZE * sizeof(int)));
    float* dCdf;
    CUDA_CHECK(cudaMallocManaged(&dCdf, num_block * BIN_SIZE * sizeof(float)));
    CUDA_CHECK(cudaMemset(dCdf, 0, num_block * BIN_SIZE * sizeof(float)));
    // Pipeline launches: one thread per tile; <<<colBlocks, rowBlocks>>>
    // matches the kernels' indexing (threadIdx.x = tile row, blockIdx.x =
    // tile column). Launch errors are surfaced via cudaGetLastError.
    localHistograms<<<colBlocks , rowBlocks>>>(dL, dHist); // generate local histograms
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    clipHistograms<<<colBlocks , rowBlocks>>>(dHist); // clip histograms, redistribute clipped pixels uniformly
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    generateCDFs<<<colBlocks,rowBlocks>>>(dCdf, dHist); // generate local cdfs
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    transform<<<colBlocks, rowBlocks>>>(dL, dCdf); // use cdfs to transform pixel values
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());
    CUDA_CHECK(cudaEventRecord(stop));
    CUDA_CHECK(cudaEventSynchronize(stop));
    // Elapsed GPU time in milliseconds.
    float ms;
    CUDA_CHECK(cudaEventElapsedTime(&ms, start, stop));
    cout << ms << endl;
    // Dump intermediates for offline inspection / plotting.
    ofstream myfile0 ("dHist.txt");
    for(long counter = 0; counter < num_block * BIN_SIZE; counter++){
        myfile0<< dHist[counter]<<endl;
    }
    myfile0.close();
    ofstream myfile2 ("CDFs.txt");
    for(long counter = 0; counter < num_block * BIN_SIZE; counter++){
        myfile2<< dCdf[counter]<<endl;
    }
    myfile2.close();
    ofstream myfile1 ("dL_processed-1.txt");
    for(long counter = 0; counter < width*height; counter++){
        myfile1<< dL[counter]<<endl;
    }
    myfile1.close();
    std::string command = "python plotImage.py "; //+ std::to_string(frameNum);
    cout<< command<<endl;
    system(command.c_str());
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    cudaFree(dL);
    cudaFree(dHist);
    cudaFree(dCdf);
    return 0;
}
|
95b1e98597f10907ed869d815a4317fae0884edb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
const char inputName256[] = "data/input_14_1_256.bin";
const char weight_winograd_Name256[] = "data/weight_winograd_256_256.bin";
const char bnBias_winograd_Name256[] = "data/bnBias_winograd_256.bin";
const char bnScale_winograd_Name256[] = "data/bnScale_winograd_256.bin";
#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] )
// Winograd input transform: computes B^T * d * B for every 6x6 input tile
// (the 4/-5/1 and +-4/+-2/1 coefficient rows match the F(4x4,3x3) B^T
// matrix -- presumably that scheme; confirm against the weight transform).
// Launch (see kernel_256): grid (4,4,2) = tile-x, tile-y, channel half;
// block (128,6) = channel within half, tile row; dynamic shared memory must
// hold one 6x6x128 tile slab (6*6*128 floats).
__global__ void kernel_256_winograd_BtdB(
    const float *__restrict__ pInputs,
    float *__restrict__ pOutputs)
{
    // Inx/Iny0: top-left corner of this block's tile in the 16x16 activation;
    // Part selects the lower/upper 128 of the 256 channels.
    int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Part = blockIdx.z,
        Iny1 = threadIdx.y, Inz = threadIdx.x;
    int Iny = Iny0+Iny1, stride_r = 4096, stride_c = 256; // 4096 = 16*256
    int c_glb_start = Inx*stride_r + Iny*stride_c + Inz + (Part<<7), c_input = Iny1*128 + Inz;
    extern __shared__ float input[];
    int stride_768[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128
    // Stage the 6x6 tile into shared memory: each thread copies one column
    // of its (row Iny1, channel Inz) slot.
    for (int i = 0; i < 6; i++) {
        input[c_input + stride_768[i]] = pInputs[c_glb_start + i*stride_r];
    }
    __syncthreads();
    // Row pass: BTd = B^T * d; threadIdx.y picks which row of B^T to apply.
    float BTd[6];
    switch(Iny1) {
    case 0:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz);
        }
        break;
    case 1:
        for (int j = 0; j < 6; j++) {
            BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz);
        }
        break;
    case 2:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz);
        }
        break;
    case 3:
        for (int j = 0; j < 6; j++) {
            BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
        }
        break;
    case 4:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
        }
        break;
    case 5:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz);
        }
        break;
    }
    __syncthreads();
    // Write the row-transformed tile back to shared memory before the
    // column pass reads it with transposed indexing.
    int tmp_offset = Iny1*768+Inz;
    for (int i = 0; i < 6; i++) {
        input[tmp_offset + i*128] = BTd[i];
    }
    __syncthreads();
    // Column pass: (B^T d) * B, same coefficient rows applied along columns.
    float BTdB[6];
    switch(Iny1) {
    case 0:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 1:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 2:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 3:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 4:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 5:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz);
        }
        break;
    }
    __syncthreads();
    // Scatter to the (36, 16, 256) layout consumed by the outer-product GEMM:
    // Winograd position major, then tile, then channel.
    for (int i = 0; i < 6; i++) {
        pOutputs[(Iny1 + i*6)*4096 + (blockIdx.x*4+blockIdx.y)*256 + Inz + (Part<<7)] = BTdB[i];
    }
}
// Winograd inverse transform A^T * I * A for one 6x6 per-tile result, fused
// with an affine scale/bias (batch-norm style) and ReLU (o > 0 ? o : 0).
// Launch (see kernel_256): grid (4,4,256) = tile-x, tile-y, channel;
// block (6,6) = one thread per tile element; dynamic shared mem 6*6 floats.
// Output is written into the 14x14 interior (offset by 1); edge tiles
// (Tilex == 3 / Tiley == 3) emit fewer rows/columns.
__global__ void kernel_256_winograd_AtIA(
    const float *__restrict__ pInputs,
    const float *__restrict__ pBiases,
    const float *__restrict__ pScales,
    float *__restrict__ pOutputs)
{
    int Tilex = blockIdx.x, Inx = threadIdx.x;
    int Tiley = blockIdx.y, Iny = threadIdx.y;
    int kz = blockIdx.z;
    int c_input = Inx*6 + Iny;
    __shared__ float bias, scale;
    extern __shared__ float input[];
    // Each thread loads its element of the 6x6 tile for channel kz.
    input[c_input] = pInputs[c_input*16*256 + (Tilex*4+Tiley)*256 + kz];
    // All 36 threads store the same value -- a benign same-value race,
    // presumably intentional; confirm if porting to stricter memory models.
    bias = pBiases[kz];
    scale = pScales[kz];
    __syncthreads();
    // Row pass of A^T (rows 4 and 5 of the thread grid produce nothing).
    float tmp = 0;
    switch(Inx) {
    case 0:
        tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny];
        break;
    case 1:
        tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny];
        break;
    case 2:
        tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny];
        break;
    case 3:
        tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny];
        break;
    }
    __syncthreads();
    input[c_input] = tmp;
    __syncthreads();
    // Column pass of A, fused with scale/bias and ReLU; boundary tiles skip
    // the rows/columns that fall outside the 14x14 interior.
    if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;
    int x;
    float o;
    switch(Iny) {
    case 0:
        x = Inx*6;
        o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*256 + kz] = o > 0 ? o : 0;
        break;
    case 1:
        x = Inx*6;
        o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*256 + kz] = o > 0 ? o : 0;
        break;
    case 2:
        if (Tiley == 3) break;
        x = Inx*6;
        o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*256 + kz] = o > 0 ? o : 0;
        break;
    case 3:
        if (Tiley == 3) break;
        x = Inx*6;
        o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*256 + kz] = o > 0 ? o : 0;
        break;
    }
}
// Per-Winograd-position batched matrix multiply: for each of the 36 tile
// positions (blockIdx.x) and each half of the 16 tiles (blockIdx.y), compute
// an 8x256 slice of A times the 256x256 transformed-weight matrix B, in 8
// k-chunks of 32, accumulating in shared memory.
// Launch (see kernel_256): grid (36,2), block (256,4); dynamic shared memory
// = 2048 (A slab) + 8192 (B slab) + 2048 (accumulator) floats.
__global__ void kernel_256_OuterProduct_256(
    const float *__restrict__ A,
    const float *__restrict__ B,
    float *__restrict__ C)
{
    int Tile = blockIdx.x, Part = blockIdx.y,
        tX = threadIdx.x, tY = threadIdx.y;
    int c_input = tY*256 + tX,
        c_kernel = c_input,
        T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
    extern __shared__ float input[];
    // Shared memory is split into three consecutive regions.
    float *kernel = input + 2048, *out = kernel + 8192;
    // Precomputed row offsets into the 32x256 B slab (row i at i*256).
    int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792,
                        2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840,
                        4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888,
                        6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
    out[c_input] = 0.0f;
    out[c_input+1024] = 0;
    // Stage this block's 8x256 slice of A (each thread loads two elements).
    input[c_input] = A[T_offset];
    input[c_input+1024] = A[T_offset+1024];
    for (int k = 0; k < 8; k++) {
        // Load the next 32x256 chunk of B, 8 elements per thread.
        int B_start = B_offset + (k<<13); // 32*64
        kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
        kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
        kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
        kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
        __syncthreads();
        // Each thread accumulates output columns tX of rows tY and tY+4.
        float sum = 0, sum1 = 0;
        int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
        for (int j = 0; j < 32; j++) {
            sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
            sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
        }
        out[c_input] += sum;
        out[c_input+1024] += sum1;
        // Barrier before the next k-chunk overwrites the B slab.
        __syncthreads();
    }
    C[T_offset] = out[c_input];
    C[T_offset+1024] = out[c_input+1024];
}
// Host driver for one 256-channel Winograd convolution of a 16x16x256
// activation: input transform -> per-position GEMMs -> inverse transform
// fused with scale/bias and ReLU.
// `time` receives the wall time of the whole routine including transfers,
// `ktime` just the three kernel launches (both in nanoseconds).
void kernel_256(double &time, double &ktime) {
    float *input_ = get_parameter(inputName256, 16*16*256);
    float *input, *output, *l_weights;
    float *kernel = get_parameter(weight_winograd_Name256, 36*256*256), *t_input, *ip;
    // const so `result` below is a proper fixed-size array rather than a
    // variable-length array, which standard C++ does not allow.
    const int nInput = 16*16*256, nOutput = 16*16*256, nWeights = 36*256*256, nBias = 256,
        nTransInput = 16*6*6*256, nInnerProd = 16*6*6*256;
    float *l_bnBias, *l_bnScale, *bnBias, *bnScale;
    float result[nOutput];  // 256 KB on the stack -- large but fixed size
    bnBias = get_parameter(bnBias_winograd_Name256, 256);
    bnScale = get_parameter(bnScale_winograd_Name256, 256);
    auto start = std::chrono::steady_clock::now();
    // Device buffers (sizes shifted by 2 == multiplied by sizeof(float)).
    hipMalloc((void **) &input, nInput<<2);
    hipMalloc((void **) &output, nOutput<<2);
    hipMalloc((void **) &l_weights, nWeights<<2);
    hipMalloc((void **) &t_input, nTransInput<<2);
    hipMalloc((void **) &ip, nInnerProd<<2);
    hipMalloc((void **) &l_bnBias, nBias<<2);
    hipMalloc((void **) &l_bnScale, nBias<<2);
    hipMemset((void *) output, 0, nOutput<<2);
    hipMemset((void *) t_input, 0, nTransInput<<2);
    hipMemset((void *) ip, 0, nInnerProd<<2);
    hipMemcpy(input, input_, nInput<<2, hipMemcpyHostToDevice);
    hipMemcpy(l_weights, kernel, nWeights<<2, hipMemcpyHostToDevice);
    hipMemcpy(l_bnBias, bnBias, nBias<<2, hipMemcpyHostToDevice);
    hipMemcpy(l_bnScale, bnScale, nBias<<2, hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    auto kstart = std::chrono::steady_clock::now();
    // Three-stage Winograd pipeline; shared-memory sizes are in bytes (<<2).
    hipLaunchKernelGGL(( kernel_256_winograd_BtdB) , dim3(dim3(4, 4, 2)), dim3(dim3(128, 6)), (6*6*128)<<2 , 0, 0, input, t_input);
    hipLaunchKernelGGL(( kernel_256_OuterProduct_256), dim3(dim3(36, 2)), dim3(dim3(256, 4)), (8*256 + 32*256 + 8*256)<<2 , 0, 0, t_input, l_weights, ip);
    hipLaunchKernelGGL(( kernel_256_winograd_AtIA) , dim3(dim3(4, 4, 256)), dim3(dim3(6, 6)), ((6*6)<<2), 0, 0, ip, l_bnBias, l_bnScale, output);
    hipDeviceSynchronize();
    auto kend = std::chrono::steady_clock::now();
    ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count();
    hipMemcpy(result, output, nOutput<<2, hipMemcpyDeviceToHost);
    hipFree(t_input);
    hipFree(ip);
    hipFree(input);
    hipFree(output);
    hipFree(l_weights);
    hipFree(l_bnScale);
    hipFree(l_bnBias);
    auto end = std::chrono::steady_clock::now();
    time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
#ifdef DEBUG
    // Simple checksum of the output for regression spotting.
    double s = 0;
    for (int i = 0; i < nOutput; i++) {
        s += result[i];
    }
    printf("Check sum: %lf\n", s);
#endif
    free(kernel);
    free(bnScale);
    free(bnBias);
    free(input_);
}
| 95b1e98597f10907ed869d815a4317fae0884edb.cu | #include "util.h"
const char inputName256[] = "data/input_14_1_256.bin";
const char weight_winograd_Name256[] = "data/weight_winograd_256_256.bin";
const char bnBias_winograd_Name256[] = "data/bnBias_winograd_256.bin";
const char bnScale_winograd_Name256[] = "data/bnScale_winograd_256.bin";
#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] )
// Winograd input transform: computes B^T * d * B for every 6x6 input tile
// (coefficients match the F(4x4,3x3) B^T matrix -- presumably that scheme).
// Launch (see kernel_256): grid (4,4,2) = tile-x, tile-y, channel half;
// block (128,6) = channel within half, tile row; dynamic shared memory must
// hold one 6x6x128 tile slab (6*6*128 floats).
__global__ void kernel_256_winograd_BtdB(
    const float *__restrict__ pInputs,
    float *__restrict__ pOutputs)
{
    // Inx/Iny0: top-left corner of this block's tile; Part picks the
    // lower/upper 128 of the 256 channels.
    int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Part = blockIdx.z,
        Iny1 = threadIdx.y, Inz = threadIdx.x;
    int Iny = Iny0+Iny1, stride_r = 4096, stride_c = 256; // 4096 = 16*256
    int c_glb_start = Inx*stride_r + Iny*stride_c + Inz + (Part<<7), c_input = Iny1*128 + Inz;
    extern __shared__ float input[];
    int stride_768[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128
    // Stage the 6x6 tile into shared memory.
    for (int i = 0; i < 6; i++) {
        input[c_input + stride_768[i]] = pInputs[c_glb_start + i*stride_r];
    }
    __syncthreads();
    // Row pass: B^T * d; threadIdx.y picks which row of B^T to apply.
    float BTd[6];
    switch(Iny1) {
    case 0:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz);
        }
        break;
    case 1:
        for (int j = 0; j < 6; j++) {
            BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz);
        }
        break;
    case 2:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz);
        }
        break;
    case 3:
        for (int j = 0; j < 6; j++) {
            BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
        }
        break;
    case 4:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz);
        }
        break;
    case 5:
        for (int j = 0; j < 6; j++) {
            BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz);
        }
        break;
    }
    __syncthreads();
    // Store the row-transformed tile before the column pass re-reads it.
    int tmp_offset = Iny1*768+Inz;
    for (int i = 0; i < 6; i++) {
        input[tmp_offset + i*128] = BTd[i];
    }
    __syncthreads();
    // Column pass: (B^T d) * B with the same coefficient rows.
    float BTdB[6];
    switch(Iny1) {
    case 0:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 1:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 2:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 3:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 4:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz);
        }
        break;
    case 5:
        for (int i = 0; i < 6; i++) {
            BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz);
        }
        break;
    }
    __syncthreads();
    // Scatter to the (36, 16, 256) layout consumed by the outer-product GEMM.
    for (int i = 0; i < 6; i++) {
        pOutputs[(Iny1 + i*6)*4096 + (blockIdx.x*4+blockIdx.y)*256 + Inz + (Part<<7)] = BTdB[i];
    }
}
// Winograd inverse transform A^T * I * A for one 6x6 per-tile result, fused
// with an affine scale/bias (batch-norm style) and ReLU (o > 0 ? o : 0).
// Launch (see kernel_256): grid (4,4,256) = tile-x, tile-y, channel;
// block (6,6) = one thread per tile element; dynamic shared mem 6*6 floats.
// Output lands in the 14x14 interior (offset by 1); edge tiles
// (Tilex == 3 / Tiley == 3) emit fewer rows/columns.
__global__ void kernel_256_winograd_AtIA(
    const float *__restrict__ pInputs,
    const float *__restrict__ pBiases,
    const float *__restrict__ pScales,
    float *__restrict__ pOutputs)
{
    int Tilex = blockIdx.x, Inx = threadIdx.x;
    int Tiley = blockIdx.y, Iny = threadIdx.y;
    int kz = blockIdx.z;
    int c_input = Inx*6 + Iny;
    __shared__ float bias, scale;
    extern __shared__ float input[];
    // Each thread loads its element of the 6x6 tile for channel kz.
    input[c_input] = pInputs[c_input*16*256 + (Tilex*4+Tiley)*256 + kz];
    // All 36 threads store the same value -- a benign same-value race.
    bias = pBiases[kz];
    scale = pScales[kz];
    __syncthreads();
    // Row pass of A^T (thread rows 4 and 5 contribute nothing).
    float tmp = 0;
    switch(Inx) {
    case 0:
        tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny];
        break;
    case 1:
        tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny];
        break;
    case 2:
        tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny];
        break;
    case 3:
        tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny];
        break;
    }
    __syncthreads();
    input[c_input] = tmp;
    __syncthreads();
    // Column pass of A, fused with scale/bias and ReLU; boundary tiles skip
    // rows/columns outside the 14x14 interior.
    if (Inx > 3 || (Tilex == 3 && Inx > 1)) return;
    int x;
    float o;
    switch(Iny) {
    case 0:
        x = Inx*6;
        o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*256 + kz] = o > 0 ? o : 0;
        break;
    case 1:
        x = Inx*6;
        o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*256 + kz] = o > 0 ? o : 0;
        break;
    case 2:
        if (Tiley == 3) break;
        x = Inx*6;
        o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*256 + kz] = o > 0 ? o : 0;
        break;
    case 3:
        if (Tiley == 3) break;
        x = Inx*6;
        o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias;
        pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*256 + kz] = o > 0 ? o : 0;
        break;
    }
}
// Per-Winograd-position batched matrix multiply: for each of the 36 tile
// positions (blockIdx.x) and each half of the 16 tiles (blockIdx.y), compute
// an 8x256 slice of A times the 256x256 transformed-weight matrix B, in 8
// k-chunks of 32, accumulating in shared memory.
// Launch (see kernel_256): grid (36,2), block (256,4); dynamic shared memory
// = 2048 (A slab) + 8192 (B slab) + 2048 (accumulator) floats.
__global__ void kernel_256_OuterProduct_256(
    const float *__restrict__ A,
    const float *__restrict__ B,
    float *__restrict__ C)
{
    int Tile = blockIdx.x, Part = blockIdx.y,
        tX = threadIdx.x, tY = threadIdx.y;
    int c_input = tY*256 + tX,
        c_kernel = c_input,
        T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
    extern __shared__ float input[];
    // Shared memory split into three consecutive regions.
    float *kernel = input + 2048, *out = kernel + 8192;
    // Precomputed row offsets into the 32x256 B slab (row i at i*256).
    int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792,
                        2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840,
                        4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888,
                        6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
    out[c_input] = 0.0f;
    out[c_input+1024] = 0;
    // Stage this block's 8x256 slice of A (two elements per thread).
    input[c_input] = A[T_offset];
    input[c_input+1024] = A[T_offset+1024];
    for (int k = 0; k < 8; k++) {
        // Load the next 32x256 chunk of B, 8 elements per thread.
        int B_start = B_offset + (k<<13); // 32*64
        kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
        kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
        kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
        kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
        __syncthreads();
        // Each thread accumulates output columns tX of rows tY and tY+4.
        float sum = 0, sum1 = 0;
        int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
        for (int j = 0; j < 32; j++) {
            sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
            sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
        }
        out[c_input] += sum;
        out[c_input+1024] += sum1;
        // Barrier before the next k-chunk overwrites the B slab.
        __syncthreads();
    }
    C[T_offset] = out[c_input];
    C[T_offset+1024] = out[c_input+1024];
}
// Host driver for one 256-channel Winograd convolution of a 16x16x256
// activation: input transform -> per-position GEMMs -> inverse transform
// fused with scale/bias and ReLU.
// `time` receives the wall time of the whole routine including transfers,
// `ktime` just the three kernel launches (both in nanoseconds).
void kernel_256(double &time, double &ktime) {
    float *input_ = get_parameter(inputName256, 16*16*256);
    float *input, *output, *l_weights;
    float *kernel = get_parameter(weight_winograd_Name256, 36*256*256), *t_input, *ip;
    // const so `result` below is a proper fixed-size array rather than a
    // variable-length array, which standard C++ does not allow.
    const int nInput = 16*16*256, nOutput = 16*16*256, nWeights = 36*256*256, nBias = 256,
        nTransInput = 16*6*6*256, nInnerProd = 16*6*6*256;
    float *l_bnBias, *l_bnScale, *bnBias, *bnScale;
    float result[nOutput];  // 256 KB on the stack -- large but fixed size
    bnBias = get_parameter(bnBias_winograd_Name256, 256);
    bnScale = get_parameter(bnScale_winograd_Name256, 256);
    auto start = std::chrono::steady_clock::now();
    // Device buffers (sizes shifted by 2 == multiplied by sizeof(float)).
    hipMalloc((void **) &input, nInput<<2);
    hipMalloc((void **) &output, nOutput<<2);
    hipMalloc((void **) &l_weights, nWeights<<2);
    hipMalloc((void **) &t_input, nTransInput<<2);
    hipMalloc((void **) &ip, nInnerProd<<2);
    hipMalloc((void **) &l_bnBias, nBias<<2);
    hipMalloc((void **) &l_bnScale, nBias<<2);
    hipMemset((void *) output, 0, nOutput<<2);
    hipMemset((void *) t_input, 0, nTransInput<<2);
    hipMemset((void *) ip, 0, nInnerProd<<2);
    hipMemcpy(input, input_, nInput<<2, hipMemcpyHostToDevice);
    hipMemcpy(l_weights, kernel, nWeights<<2, hipMemcpyHostToDevice);
    hipMemcpy(l_bnBias, bnBias, nBias<<2, hipMemcpyHostToDevice);
    hipMemcpy(l_bnScale, bnScale, nBias<<2, hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    auto kstart = std::chrono::steady_clock::now();
    // Three-stage Winograd pipeline; shared-memory sizes are in bytes (<<2).
    kernel_256_winograd_BtdB <<<dim3(4, 4, 2), dim3(128, 6), (6*6*128)<<2 >>> (input, t_input);
    kernel_256_OuterProduct_256<<<dim3(36, 2), dim3(256, 4), (8*256 + 32*256 + 8*256)<<2 >>> (t_input, l_weights, ip);
    kernel_256_winograd_AtIA <<<dim3(4, 4, 256), dim3(6, 6), ((6*6)<<2)>>> (ip, l_bnBias, l_bnScale, output);
    hipDeviceSynchronize();
    auto kend = std::chrono::steady_clock::now();
    ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count();
    hipMemcpy(result, output, nOutput<<2, hipMemcpyDeviceToHost);
    hipFree(t_input);
    hipFree(ip);
    hipFree(input);
    hipFree(output);
    hipFree(l_weights);
    hipFree(l_bnScale);
    hipFree(l_bnBias);
    auto end = std::chrono::steady_clock::now();
    time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
#ifdef DEBUG
    // Simple checksum of the output for regression spotting.
    double s = 0;
    for (int i = 0; i < nOutput; i++) {
        s += result[i];
    }
    printf("Check sum: %lf\n", s);
#endif
    free(kernel);
    free(bnScale);
    free(bnBias);
    free(input_);
}
|
2f19377fde77c02f03812a0915b6bed4a0f80413.hip | // !!! This is a file automatically generated by hipify!!!
#include <libgeodecomp/geometry/coord.h>
#include <libgeodecomp/misc/apitraits.h>
#include <libgeodecomp/misc/chronometer.h>
#include <libgeodecomp/misc/cudautil.h>
#include <libgeodecomp/storage/fixedneighborhood.h>
#include <libgeodecomp/storage/soagrid.h>
#include <libflatarray/testbed/gpu_benchmark.hpp>
#include <libflatarray/testbed/evaluate.hpp>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdexcept>
using namespace LibGeoDecomp;
// Adapts LibFlatArray's gpu_benchmark interface (which passes the problem
// size as a std::vector<int>) to a Coord<3>-based performance2() that
// concrete benchmarks override.
class GPUBenchmark : public LibFlatArray::gpu_benchmark
{
public:
    // Converts the first three entries of dim into a Coord<3> and delegates.
    double performance(std::vector<int> dim)
    {
        Coord<3> c(dim[0], dim[1], dim[2]);
        return performance2(c);
    }

    // Implemented by subclasses: run the benchmark for the given grid size
    // and return its performance figure.
    virtual double performance2(const Coord<3>& dim) = 0;
};
// Minimal mixed-type payload used to exercise SoA grid storage; the members
// carry no visible domain meaning in this file.
class Cell
{
public:
    double c;
    int a;
    char b;
};

// Generate libflatarray's struct-of-arrays accessors for Cell.
LIBFLATARRAY_REGISTER_SOA(Cell, ((double)(c))((int)(a))((char)(b)))
// Lattice-Boltzmann cell with 19 double-precision distribution values --
// center plus 6 axis and 12 edge directions, i.e. a D3Q19-style layout
// (presumably N/S/E/W/T/B = north/south/east/west/top/bottom; confirm
// against the update kernels). Declares a 3D Moore stencil of radius 1 and
// cube topology for LibGeoDecomp.
class CellLBM
{
public:
    class API :
        public APITraits::HasStencil<Stencils::Moore<3, 1> >,
        public APITraits::HasCubeTopology<3>
    {};

    // Rest particle.
    double C;
    // Axis-aligned directions.
    double N;
    double E;
    double W;
    double S;
    double T;
    double B;
    // Edge (diagonal) directions.
    double NW;
    double NE;
    double SW;
    double SE;
    double TW;
    double BW;
    double TE;
    double BE;
    double TN;
    double BN;
    double TS;
    double BS;
};

// Generate libflatarray's struct-of-arrays accessors for CellLBM.
LIBFLATARRAY_REGISTER_SOA(CellLBM, ((double)(C))((double)(N))((double)(E))((double)(W))((double)(S))((double)(T))((double)(B))((double)(NW))((double)(SW))((double)(NE))((double)(SE))((double)(TW))((double)(BW))((double)(TE))((double)(BE))((double)(TN))((double)(BN))((double)(TS))((double)(BS)))
// hoody(X, Y, Z): read gridOld at offset (X, Y, Z) from this thread's (x, y, z).
#define hoody(X, Y, Z) \
    gridOld[z * dimX * dimY + y * dimX + x + X + Y * dimX + Z * dimX * dimY]

// RTM-style 13-point stencil benchmark kernel: each thread owns one (x, y)
// column and marches along z, keeping the four most recent center-column
// values in registers (c0..c3) so each step loads only one new z-plane value.
// Template parameters DIM_X/DIM_Y/DIM_Z are unused in this variant.
// NOTE(review): the sum uses c4 (z+2) but never c3 (z+1), and the weights
// exceed 1 in total -- this looks like a synthetic benchmark stencil rather
// than a physically meaningful scheme; confirm before reusing the weights.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void updateRTMClassic(int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
{
    // +2 offsets skip the 2-cell ghost halo on each face.
    int x = blockIdx.x * blockDim.x + threadIdx.x + 2;
    int y = blockIdx.y * blockDim.y + threadIdx.y + 2;
    int z = 2;
    double c0 = hoody(0, 0, -2);
    double c1 = hoody(0, 0, -1);
    double c2 = hoody(0, 0, 0);
    double c3 = hoody(0, 0, 1);
#pragma unroll 10
    for (; z < (dimZ - 2); ++z) {
        double c4 = hoody(0, 0, 2);
        gridNew[z * dimX * dimY + y * dimX + x] =
            0.10 * c0 +
            0.15 * c1 +
            0.20 * c2 +
            0.25 * c4 +
            0.30 * hoody( 0, -2, 0) +
            0.35 * hoody( 0, -1, 0) +
            0.40 * hoody( 0, 1, 0) +
            0.45 * hoody( 0, 2, 0) +
            0.50 * hoody(-2, 0, 0) +
            0.55 * hoody(-1, 0, 0) +
            0.60 * hoody( 1, 0, 0) +
            0.65 * hoody( 2, 0, 0);
        // Shift the register pipeline one z-plane forward.
        c0 = c1;
        c1 = c2;
        c2 = c3;
        c3 = c4;
    }
}

#undef hoody
// Direction indices for the D3Q19 lattice. In the classic layout each
// direction occupies its own dimX*dimY*dimZ slab inside gridOld/gridNew.
#define C 0
#define N 1
#define E 2
#define W 3
#define S 4
#define T 5
#define B 6
#define NW 7
#define SW 8
#define NE 9
#define SE 10
#define TW 11
#define BW 12
#define TE 13
#define BE 14
#define TN 15
#define BN 16
#define TS 17
#define BS 18
// Reads population DIR of the old grid at absolute cell (X, Y, Z).
#define GET_COMP(X, Y, Z, DIR) \
gridOld[(Z) * dimX * dimY + (Y) * dimX + (X) + (DIR) * dimX * dimY * dimZ]
// Writes population DIR of the new grid at the thread's current cell (x, y, z).
#define SET_COMP(DIR) \
gridNew[z * dimX * dimY + y * dimX + x + (DIR) * dimX * dimY * dimZ]
// Classic-layout D3Q19 lattice Boltzmann (BGK) step: stream the 19 populations
// in from the neighbors, accumulate the density (rho) and velocity moments,
// then relax each population toward its equilibrium with omega = 1/1.7.
// One thread per (x, y) column (2-cell halo), marching along z. No bounds
// check: the launch grid must match the interior -- TODO confirm with the
// host-side launch configuration.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void updateLBMClassic(int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
{
int x = blockIdx.x * blockDim.x + threadIdx.x + 2;
int y = blockIdx.y * blockDim.y + threadIdx.y + 2;
int z = 2;
#pragma unroll 10
for (; z < (dimZ - 2); z += 1) {
#define SQR(X) ((X)*(X))
// Relaxation parameter and lattice weights (w0 = 1/3, w1 = 1/18, w2 = 1/36).
const double omega = 1.0/1.7;
const double omega_trm = 1.0 - omega;
const double omega_w0 = 3.0 * 1.0 / 3.0 * omega;
const double omega_w1 = 3.0*1.0/18.0*omega;
const double omega_w2 = 3.0*1.0/36.0*omega;
const double one_third = 1.0 / 3.0;
double velX, velY, velZ;
// Partial velocity moments: populations streaming in along +x, +y, +z.
velX =
GET_COMP(x-1,y,z,E) + GET_COMP(x-1,y-1,z,NE) +
GET_COMP(x-1,y+1,z,SE) + GET_COMP(x-1,y,z-1,TE) +
GET_COMP(x-1,y,z+1,BE);
velY = GET_COMP(x,y-1,z,N) + GET_COMP(x+1,y-1,z,NW) +
GET_COMP(x,y-1,z-1,TN) + GET_COMP(x,y-1,z+1,BN);
velZ = GET_COMP(x,y,z-1,T) + GET_COMP(x,y+1,z-1,TS) +
GET_COMP(x+1,y,z-1,TW);
// Local density: sum of all 19 incoming populations.
const double rho =
GET_COMP(x,y,z,C) + GET_COMP(x,y+1,z,S) +
GET_COMP(x+1,y,z,W) + GET_COMP(x,y,z+1,B) +
GET_COMP(x+1,y+1,z,SW) + GET_COMP(x,y+1,z+1,BS) +
GET_COMP(x+1,y,z+1,BW) + velX + velY + velZ;
// Complete the velocity moments by subtracting opposite-direction populations.
velX = velX
- GET_COMP(x+1,y,z,W) - GET_COMP(x+1,y-1,z,NW)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x+1,y,z-1,TW)
- GET_COMP(x+1,y,z+1,BW);
velY = velY
+ GET_COMP(x-1,y-1,z,NE) - GET_COMP(x,y+1,z,S)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x-1,y+1,z,SE)
- GET_COMP(x,y+1,z-1,TS) - GET_COMP(x,y+1,z+1,BS);
velZ = velZ+GET_COMP(x,y-1,z-1,TN) + GET_COMP(x-1,y,z-1,TE) - GET_COMP(x,y,z+1,B) - GET_COMP(x,y-1,z+1,BN) - GET_COMP(x,y+1,z+1,BS) - GET_COMP(x+1,y,z+1,BW) - GET_COMP(x-1,y,z+1,BE);
// density = rho;
// velocityX = velX;
// velocityY = velY;
// velocityZ = velZ;
// Direction-independent part of the equilibrium distribution.
const double dir_indep_trm = one_third*rho - 0.5*( velX*velX + velY*velY + velZ*velZ );
// BGK collision: new = (1 - omega) * streamed + omega * equilibrium.
SET_COMP(C)=omega_trm * GET_COMP(x,y,z,C) + omega_w0*( dir_indep_trm );
SET_COMP(NW)=omega_trm * GET_COMP(x+1,y-1,z,NW) +
omega_w2*( dir_indep_trm - ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(SE)=omega_trm * GET_COMP(x-1,y+1,z,SE) +
omega_w2*( dir_indep_trm + ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(NE)=omega_trm * GET_COMP(x-1,y-1,z,NE) +
omega_w2*( dir_indep_trm + ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(SW)=omega_trm * GET_COMP(x+1,y+1,z,SW) +
omega_w2*( dir_indep_trm - ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(TW)=omega_trm * GET_COMP(x+1,y,z-1,TW) + omega_w2*( dir_indep_trm - ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(BE)=omega_trm * GET_COMP(x-1,y,z+1,BE) + omega_w2*( dir_indep_trm + ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(TE)=omega_trm * GET_COMP(x-1,y,z-1,TE) + omega_w2*( dir_indep_trm + ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(BW)=omega_trm * GET_COMP(x+1,y,z+1,BW) + omega_w2*( dir_indep_trm - ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(TS)=omega_trm * GET_COMP(x,y+1,z-1,TS) + omega_w2*( dir_indep_trm - ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(BN)=omega_trm * GET_COMP(x,y-1,z+1,BN) + omega_w2*( dir_indep_trm + ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(TN)=omega_trm * GET_COMP(x,y-1,z-1,TN) + omega_w2*( dir_indep_trm + ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(BS)=omega_trm * GET_COMP(x,y+1,z+1,BS) + omega_w2*( dir_indep_trm - ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(N)=omega_trm * GET_COMP(x,y-1,z,N) + omega_w1*( dir_indep_trm + velY + 1.5*SQR(velY));
SET_COMP(S)=omega_trm * GET_COMP(x,y+1,z,S) + omega_w1*( dir_indep_trm - velY + 1.5*SQR(velY));
SET_COMP(E)=omega_trm * GET_COMP(x-1,y,z,E) + omega_w1*( dir_indep_trm + velX + 1.5*SQR(velX));
SET_COMP(W)=omega_trm * GET_COMP(x+1,y,z,W) + omega_w1*( dir_indep_trm - velX + 1.5*SQR(velX));
SET_COMP(T)=omega_trm * GET_COMP(x,y,z-1,T) + omega_w1*( dir_indep_trm + velZ + 1.5*SQR(velZ));
SET_COMP(B)=omega_trm * GET_COMP(x,y,z+1,B) + omega_w1*( dir_indep_trm - velZ + 1.5*SQR(velZ));
}
}
#undef GET_COMP
#undef SET_COMP
#undef C
#undef N
#undef E
#undef W
#undef S
#undef T
#undef B
#undef NW
#undef SW
#undef NE
#undef SE
#undef TW
#undef BW
#undef TE
#undef BE
#undef TN
#undef BN
#undef TS
#undef BS
// Accesses the old grid via the SoA accessor at compile-time offset (X, Y, Z)
// relative to the accessor's current index.
#define hoody(X, Y, Z) \
hoodOld[LibFlatArray::coord<X, Y, Z>()]
// SoA-layout variant of the RTM stencil sweep (same weights as
// updateRTMClassic). All strides come from the compile-time DIM_X/DIM_Y
// template dims; NOTE(review): the runtime dimX/dimY parameters are unused, so
// the instantiation dims must equal the actual grid dims.
// As in the classic variant, c3 (the z+1 plane) is carried through the
// register window but never added to the sum -- confirm intentional.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void updateRTMSoA(long dimX, long dimY, long dimZ, double *gridOld, double *gridNew)
{
long x = blockIdx.x * blockDim.x + threadIdx.x + 2;
long y = blockIdx.y * blockDim.y + threadIdx.y + 2;
long z = 2;
long index = z * DIM_X * DIM_Y + y * DIM_X + x;
long offset = DIM_X * DIM_Y;
long end = DIM_X * DIM_Y * (dimZ - 2);
// The accessors are bound to `index`; advancing it below appears to move
// both windows one z-plane forward -- per soa_accessor_light semantics,
// TODO confirm.
LibFlatArray::soa_accessor_light<Cell, DIM_X, DIM_Y, DIM_Z, 0> hoodNew((char*)gridNew, index);
LibFlatArray::soa_accessor_light<Cell, DIM_X, DIM_Y, DIM_Z, 0> hoodOld((char*)gridOld, index);
double c0 = hoody(0, 0, -2).c();
double c1 = hoody(0, 0, -1).c();
double c2 = hoody(0, 0, 0).c();
double c3 = hoody(0, 0, 1).c();
#pragma unroll 10
for (; index < end; index += offset) {
double c4 = hoody(0, 0, 2).c();
hoodNew[LibFlatArray::coord<0, 0, 0>()].c() =
0.10 * c0 +
0.15 * c1 +
0.20 * c2 +
0.25 * c4 +
0.30 * hoody( 0, -2, 0).c() +
0.35 * hoody( 0, -1, 0).c() +
0.40 * hoody( 0, 1, 0).c() +
0.45 * hoody( 0, 2, 0).c() +
0.50 * hoody(-2, 0, 0).c() +
0.55 * hoody(-1, 0, 0).c() +
0.60 * hoody( 1, 0, 0).c() +
0.65 * hoody( 2, 0, 0).c();
// Shift the register window one plane forward in z.
c0 = c1;
c1 = c2;
c2 = c3;
c3 = c4;
}
}
#undef hoody
// Reads population DIR of the old grid at relative coordinate (X, Y, Z) via
// the fixed-size neighborhood.
#define GET_COMP(X, Y, Z, DIR) \
hoodOld[FixedCoord<X, Y, Z>()].DIR()
// Writes population DIR at the current cell through the new-grid accessor.
#define SET_COMP(DIR) \
hoodNew.DIR()
// SoA-layout D3Q19 lattice Boltzmann (BGK) step; same math as
// updateLBMClassic, but addressed through LibFlatArray accessors. Strides come
// from the compile-time DIM_X/DIM_Y template dims; NOTE(review): the runtime
// dimX/dimY parameters are unused, so instantiation dims must match the grid.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void benchmarkLBMSoA(int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
{
long myX = blockIdx.x * blockDim.x + threadIdx.x + 2;
long myY = blockIdx.y * blockDim.y + threadIdx.y + 2;
long myZ = 2;
long index = myZ * DIM_X * DIM_Y + myY * DIM_X + myX;
long offset = DIM_X * DIM_Y;
long end = DIM_X * DIM_Y * (dimZ - 2);
long tempIndex = 0;
LibFlatArray::soa_accessor_light<CellLBM, DIM_X, DIM_Y, DIM_Z, 0> hoodNew((char*)gridNew, index);
LibFlatArray::soa_accessor_light<CellLBM, DIM_X, DIM_Y, DIM_Z, 0> hoodOldInternal((char*)gridOld, index);
FixedNeighborhood<
CellLBM,
DIM_X, DIM_Y, DIM_Z, 0,
LibFlatArray::soa_accessor_light, LibFlatArray::soa_accessor_light> hoodOld(
hoodOldInternal, tempIndex, -1, 1, -1, 1, -1, 1);
#pragma unroll 10
for (; index < end; index += offset) {
#define SQR(X) ((X)*(X))
// Relaxation parameter and lattice weights (w0 = 1/3, w1 = 1/18, w2 = 1/36).
const double omega = 1.0/1.7;
const double omega_trm = 1.0 - omega;
const double omega_w0 = 3.0 * 1.0 / 3.0 * omega;
const double omega_w1 = 3.0*1.0/18.0*omega;
const double omega_w2 = 3.0*1.0/36.0*omega;
const double one_third = 1.0 / 3.0;
// x, y, z are fixed at 0: all GET_COMP coordinates are offsets relative to
// the accessor's current cell.
const long x = 0;
const long y = 0;
const long z = 0;
double velX, velY, velZ;
// Partial velocity moments: populations streaming in along +x, +y, +z.
velX =
GET_COMP(x-1,y,z,E) + GET_COMP(x-1,y-1,z,NE) +
GET_COMP(x-1,y+1,z,SE) + GET_COMP(x-1,y,z-1,TE) +
GET_COMP(x-1,y,z+1,BE);
velY = GET_COMP(x,y-1,z,N) + GET_COMP(x+1,y-1,z,NW) +
GET_COMP(x,y-1,z-1,TN) + GET_COMP(x,y-1,z+1,BN);
velZ = GET_COMP(x,y,z-1,T) + GET_COMP(x,y+1,z-1,TS) +
GET_COMP(x+1,y,z-1,TW);
// Local density: sum of all 19 incoming populations.
const double rho =
GET_COMP(x,y,z,C) + GET_COMP(x,y+1,z,S) +
GET_COMP(x+1,y,z,W) + GET_COMP(x,y,z+1,B) +
GET_COMP(x+1,y+1,z,SW) + GET_COMP(x,y+1,z+1,BS) +
GET_COMP(x+1,y,z+1,BW) + velX + velY + velZ;
// Complete the velocity moments by subtracting opposite-direction populations.
velX = velX
- GET_COMP(x+1,y,z,W) - GET_COMP(x+1,y-1,z,NW)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x+1,y,z-1,TW)
- GET_COMP(x+1,y,z+1,BW);
velY = velY
+ GET_COMP(x-1,y-1,z,NE) - GET_COMP(x,y+1,z,S)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x-1,y+1,z,SE)
- GET_COMP(x,y+1,z-1,TS) - GET_COMP(x,y+1,z+1,BS);
velZ = velZ+GET_COMP(x,y-1,z-1,TN) + GET_COMP(x-1,y,z-1,TE) - GET_COMP(x,y,z+1,B) - GET_COMP(x,y-1,z+1,BN) - GET_COMP(x,y+1,z+1,BS) - GET_COMP(x+1,y,z+1,BW) - GET_COMP(x-1,y,z+1,BE);
// density = rho;
// velocityX = velX;
// velocityY = velY;
// velocityZ = velZ;
// Direction-independent part of the equilibrium distribution.
const double dir_indep_trm = one_third*rho - 0.5*( velX*velX + velY*velY + velZ*velZ );
// BGK collision: new = (1 - omega) * streamed + omega * equilibrium.
SET_COMP(C)=omega_trm * GET_COMP(x,y,z,C) + omega_w0*( dir_indep_trm );
SET_COMP(NW)=omega_trm * GET_COMP(x+1,y-1,z,NW) +
omega_w2*( dir_indep_trm - ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(SE)=omega_trm * GET_COMP(x-1,y+1,z,SE) +
omega_w2*( dir_indep_trm + ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(NE)=omega_trm * GET_COMP(x-1,y-1,z,NE) +
omega_w2*( dir_indep_trm + ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(SW)=omega_trm * GET_COMP(x+1,y+1,z,SW) +
omega_w2*( dir_indep_trm - ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(TW)=omega_trm * GET_COMP(x+1,y,z-1,TW) + omega_w2*( dir_indep_trm - ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(BE)=omega_trm * GET_COMP(x-1,y,z+1,BE) + omega_w2*( dir_indep_trm + ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(TE)=omega_trm * GET_COMP(x-1,y,z-1,TE) + omega_w2*( dir_indep_trm + ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(BW)=omega_trm * GET_COMP(x+1,y,z+1,BW) + omega_w2*( dir_indep_trm - ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(TS)=omega_trm * GET_COMP(x,y+1,z-1,TS) + omega_w2*( dir_indep_trm - ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(BN)=omega_trm * GET_COMP(x,y-1,z+1,BN) + omega_w2*( dir_indep_trm + ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(TN)=omega_trm * GET_COMP(x,y-1,z-1,TN) + omega_w2*( dir_indep_trm + ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(BS)=omega_trm * GET_COMP(x,y+1,z+1,BS) + omega_w2*( dir_indep_trm - ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(N)=omega_trm * GET_COMP(x,y-1,z,N) + omega_w1*( dir_indep_trm + velY + 1.5*SQR(velY));
SET_COMP(S)=omega_trm * GET_COMP(x,y+1,z,S) + omega_w1*( dir_indep_trm - velY + 1.5*SQR(velY));
SET_COMP(E)=omega_trm * GET_COMP(x-1,y,z,E) + omega_w1*( dir_indep_trm + velX + 1.5*SQR(velX));
SET_COMP(W)=omega_trm * GET_COMP(x+1,y,z,W) + omega_w1*( dir_indep_trm - velX + 1.5*SQR(velX));
SET_COMP(T)=omega_trm * GET_COMP(x,y,z-1,T) + omega_w1*( dir_indep_trm + velZ + 1.5*SQR(velZ));
SET_COMP(B)=omega_trm * GET_COMP(x,y,z+1,B) + omega_w1*( dir_indep_trm - velZ + 1.5*SQR(velZ));
}
}
#undef GET_COMP
#undef SET_COMP
template<int DIM_X, int DIM_Y, int DIM_Z>
class LBMSoA
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "LBM"; }

    // "gold" tags the SoA (structure-of-arrays) variant.
    static std::string species() { return "gold"; }

    // Doubles stored per lattice cell: 19 D3Q19 populations, rounded up to
    // 20 -- TODO confirm the extra slot is deliberate padding.
    static int size() { return 20; }

    // Launches the SoA LBM kernel with the given configuration.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        hipLaunchKernelGGL(( benchmarkLBMSoA<DIM_X, DIM_Y, DIM_Z>), dim3(dimGrid), dim3(dimBlock), 0, 0, dimX, dimY, dimZ, gridOld, gridNew);
    }
};
template<int DIM_X, int DIM_Y, int DIM_Z>
class LBMClassic
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "LBM"; }

    // "pepper" tags the classic (one slab per direction) layout variant.
    static std::string species() { return "pepper"; }

    // Doubles stored per lattice cell: 19 D3Q19 populations, rounded up to
    // 20 -- TODO confirm the extra slot is deliberate padding.
    static int size() { return 20; }

    // Launches the classic-layout LBM kernel.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        hipLaunchKernelGGL(( updateLBMClassic<DIM_X, DIM_Y, DIM_Z>), dim3(dimGrid), dim3(dimBlock), 0, 0, dimX, dimY, dimZ, gridOld, gridNew);
    }
};
template<int DIM_X, int DIM_Y, int DIM_Z>
class RTMSoA
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "RTM"; }

    // "gold" tags the SoA (structure-of-arrays) variant.
    static std::string species() { return "gold"; }

    // One double component per cell.
    static int size() { return 1; }

    // Launches the SoA RTM stencil kernel.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        hipLaunchKernelGGL(( updateRTMSoA<DIM_X, DIM_Y, DIM_Z>), dim3(dimGrid), dim3(dimBlock), 0, 0, dimX, dimY, dimZ, gridOld, gridNew);
    }
};
template<int DIM_X, int DIM_Y, int DIM_Z>
class RTMClassic
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "RTM"; }

    // "pepper" tags the classic flat-array layout variant.
    static std::string species() { return "pepper"; }

    // One double component per cell.
    static int size() { return 1; }

    // Launches the classic-layout RTM stencil kernel.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        hipLaunchKernelGGL(( updateRTMClassic<DIM_X, DIM_Y, DIM_Z>), dim3(dimGrid), dim3(dimBlock), 0, 0, dimX, dimY, dimZ, gridOld, gridNew);
    }
};
// Times `repeats` iterations of KERNEL on a dimX x dimY x dimZ grid and
// returns throughput in GLUPS (giga lattice updates per second).
// The grid buffers are allocated from the compile-time dims plus 4 extra
// z-planes of slack; KERNEL::size() is the number of double components per
// cell. The runtime dims must not exceed the compile-time dims.
template<template<int A, int B, int C> class KERNEL, int DIM_X, int DIM_Y, int DIM_Z>
double benchmarkCUDA(int dimX, int dimY, int dimZ, int repeats)
{
    using std::swap;
    std::size_t size = DIM_X * DIM_Y * (DIM_Z + 4) * KERNEL<0, 0, 0>::size();
    std::size_t bytesize = size * sizeof(double);
    std::vector<double> grid(size, 4711);
    double *devGridOld;
    double *devGridNew;
    hipMalloc(&devGridOld, bytesize);
    hipMalloc(&devGridNew, bytesize);
    CUDAUtil::checkForError();
    hipMemcpy(devGridOld, &grid[0], bytesize, hipMemcpyHostToDevice);
    hipMemcpy(devGridNew, &grid[0], bytesize, hipMemcpyHostToDevice);
    // Pick the largest power-of-two block width that fits dimX, capped at 256
    // threads; blocks are blockWidth x 2 x 1.
    int blockWidth = 1;
    for (; blockWidth <= dimX; blockWidth *= 2) {
    }
    blockWidth /= 2;
    blockWidth = (std::min)(256, blockWidth);
    dim3 dimBlock(blockWidth, 2, 1);
    dim3 dimGrid(dimX / dimBlock.x, dimY / dimBlock.y, 1);
    hipDeviceSynchronize();
    double seconds = 0;
    {
        ScopedTimer timer(&seconds);
        // Fix: the loop counter used to be named 't', shadowing the
        // ScopedTimer declared just above.
        for (int rep = 0; rep < repeats; ++rep) {
            KERNEL<DIM_X, DIM_Y, DIM_Z>::run(dimGrid, dimBlock, dimX, dimY, dimZ, devGridOld, devGridNew);
            // Ping-pong the buffers so each step reads the previous result.
            swap(devGridOld, devGridNew);
        }
        hipDeviceSynchronize();
    }
    CUDAUtil::checkForError();
    hipMemcpy(&grid[0], devGridNew, bytesize, hipMemcpyDeviceToHost);
    hipFree(devGridOld);
    hipFree(devGridNew);
    // Updates actually performed = threads launched * z-depth * repeats.
    double updates = 1.0 * dimGrid.x * dimBlock.x * dimGrid.y * dimBlock.y * dimZ * repeats;
    double glups = 1e-9 * updates / seconds;
    return glups;
}
// Adapter that plugs a kernel wrapper (LBMSoA, RTMClassic, ...) into the
// benchmark framework. Labels are forwarded from the wrapper; performance2()
// dispatches to a benchmarkCUDA instantiation whose compile-time dims are the
// smallest supported size that fits `dim`.
template<template<int A, int B, int C> class KERNEL>
class BenchmarkCUDA : public GPUBenchmark
{
public:
std::string family()
{
return KERNEL<0, 0, 0>::family();
}
std::string species()
{
return KERNEL<0, 0, 0>::species();
}
std::string unit()
{
return "GLUPS";
}
double performance2(const Coord<3>& dim)
{
// Maps the runtime extent onto a fixed ladder of compile-time sizes; the
// trailing 20 is the number of benchmark repetitions.
// NOTE(review): the ladder jumps from 192 to 256 (no 224 rung), so dims in
// (192, 256] all use the 256 instantiation -- confirm intentional.
#define CASE(DIM, ADD) \
if ((max)(dim) <= DIM) { \
return benchmarkCUDA<KERNEL, DIM + ADD, DIM, DIM>( \
dim.x(), dim.y(), dim.z(), 20); \
}
CASE(32, 0);
CASE(64, 0);
CASE(96, 0);
CASE(128, 0);
CASE(160, 0);
CASE(192, 0);
CASE(256, 0);
CASE(288, 0);
CASE(320, 0);
CASE(352, 0);
CASE(384, 0);
CASE(416, 0);
CASE(448, 0);
CASE(480, 0);
CASE(512, 0);
CASE(544, 0);
#undef CASE
throw std::range_error("dim too large");
}
// Largest component of the coordinate; name parenthesized to dodge any
// min/max macros.
int (max)(const Coord<3>& coord) const
{
return (std::max)(coord.x(), (std::max)(coord.y(), coord.z()));
}
};
// Runs the full benchmark sweep on the given device: RTM variants on cubes
// from 32^3 to 544^3, LBM variants on cubes from 32^3 to 160^3, in steps of 4.
void cudaTests(std::string name, std::string revision, int cudaDevice)
{
hipSetDevice(cudaDevice);
LibFlatArray::evaluate eval(name, revision);
int increment = 4;
for (int d = 32; d <= 544; d += increment) {
eval(BenchmarkCUDA<RTMClassic>(), toVector(Coord<3>::diagonal(d)));
}
for (int d = 32; d <= 544; d += increment) {
eval(BenchmarkCUDA<RTMSoA>(), toVector(Coord<3>::diagonal(d)));
}
for (int d = 32; d <= 160; d += increment) {
// NOTE(review): `dim` is computed but never used -- the call benchmarks the
// d^3 cube instead of the elongated (d, d, 284) domain. Looks like the
// intent was eval(..., toVector(dim)); confirm before changing.
Coord<3> dim(d, d, 256 + 32 - 4);
eval(BenchmarkCUDA<LBMClassic>(), toVector(Coord<3>::diagonal(d)));
}
for (int d = 32; d <= 160; d += increment) {
// NOTE(review): same unused `dim` as above.
Coord<3> dim(d, d, 256 + 32 - 4);
eval(BenchmarkCUDA<LBMSoA>(), toVector(Coord<3>::diagonal(d)));
}
}
| 2f19377fde77c02f03812a0915b6bed4a0f80413.cu | #include <libgeodecomp/geometry/coord.h>
#include <libgeodecomp/misc/apitraits.h>
#include <libgeodecomp/misc/chronometer.h>
#include <libgeodecomp/misc/cudautil.h>
#include <libgeodecomp/storage/fixedneighborhood.h>
#include <libgeodecomp/storage/soagrid.h>
#include <libflatarray/testbed/gpu_benchmark.hpp>
#include <libflatarray/testbed/evaluate.hpp>
#include <cuda.h>
#include <iostream>
#include <stdexcept>
using namespace LibGeoDecomp;
class GPUBenchmark : public LibFlatArray::gpu_benchmark
{
public:
    // Bridges the framework's flat dimension vector to a Coord<3> and
    // forwards to performance2().
    double performance(std::vector<int> dim)
    {
        const Coord<3> extent(dim[0], dim[1], dim[2]);
        return performance2(extent);
    }

    // Implemented by concrete benchmarks: throughput for the given extent.
    virtual double performance2(const Coord<3>& dim) = 0;
};
// Minimal cell type for the RTM SoA benchmark. The kernels shown here only
// touch the generated c() accessor; a and b are unused by them.
class Cell
{
public:
double c;
int a;
char b;
};
// Generates the SoA accessor machinery for Cell; member order here matches
// the declaration order above.
LIBFLATARRAY_REGISTER_SOA(Cell, ((double)(c))((int)(a))((char)(b)))
// One D3Q19 lattice Boltzmann cell: 19 particle populations named after the
// direction they travel in (C = rest, N/E/S/W horizontal, T/B up/down,
// two-letter members are the diagonals).
class CellLBM
{
public:
// Declares a Moore-neighborhood radius-1 stencil on a 3D cube topology for
// the LibGeoDecomp API traits machinery.
class API :
public APITraits::HasStencil<Stencils::Moore<3, 1> >,
public APITraits::HasCubeTopology<3>
{};
double C;
double N;
double E;
double W;
double S;
double T;
double B;
double NW;
double NE;
double SW;
double SE;
double TW;
double BW;
double TE;
double BE;
double TN;
double BN;
double TS;
double BS;
};
// Generates the SoA accessor machinery for CellLBM. NOTE(review): the member
// order here (... NW, SW, NE, SE ...) differs from the declaration order
// above (... NW, NE, SW, SE ...); harmless for the named accessors, but
// confirm nothing relies on declaration-order layout.
LIBFLATARRAY_REGISTER_SOA(CellLBM, ((double)(C))((double)(N))((double)(E))((double)(W))((double)(S))((double)(T))((double)(B))((double)(NW))((double)(SW))((double)(NE))((double)(SE))((double)(TW))((double)(BW))((double)(TE))((double)(BE))((double)(TN))((double)(BN))((double)(TS))((double)(BS)))
// Indexes the old grid relative to the current cell (x, y, z); X/Y/Z are the
// stencil offsets. NOTE(review): the macro arguments are not parenthesized in
// the expansion, so only pass plain literals.
#define hoody(X, Y, Z) \
gridOld[z * dimX * dimY + y * dimX + x + X + Y * dimX + Z * dimX * dimY]
// Classic-layout (one double per cell) RTM-style 3D stencil sweep. Each thread
// owns one (x, y) column (2-cell halo) and marches along z, keeping the last
// four center values c0..c3 in registers so each iteration loads only one new
// z-plane value (c4). The template dims are unused; all indexing uses the
// runtime dims. There is no bounds check -- the launch grid must match the
// domain exactly (TODO confirm against the host launcher).
// NOTE(review): c3 (the z+1 plane) is carried through the register window but
// never added into the weighted sum -- confirm this is intentional.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void updateRTMClassic(int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
{
int x = blockIdx.x * blockDim.x + threadIdx.x + 2;
int y = blockIdx.y * blockDim.y + threadIdx.y + 2;
int z = 2;
double c0 = hoody(0, 0, -2);
double c1 = hoody(0, 0, -1);
double c2 = hoody(0, 0, 0);
double c3 = hoody(0, 0, 1);
#pragma unroll 10
for (; z < (dimZ - 2); ++z) {
double c4 = hoody(0, 0, 2);
// Weighted sum over the z-register window plus the +/-1 and +/-2
// neighbors in y and x.
gridNew[z * dimX * dimY + y * dimX + x] =
0.10 * c0 +
0.15 * c1 +
0.20 * c2 +
0.25 * c4 +
0.30 * hoody( 0, -2, 0) +
0.35 * hoody( 0, -1, 0) +
0.40 * hoody( 0, 1, 0) +
0.45 * hoody( 0, 2, 0) +
0.50 * hoody(-2, 0, 0) +
0.55 * hoody(-1, 0, 0) +
0.60 * hoody( 1, 0, 0) +
0.65 * hoody( 2, 0, 0);
// Shift the register window one plane forward in z.
c0 = c1;
c1 = c2;
c2 = c3;
c3 = c4;
}
}
#undef hoody
// Direction indices for the D3Q19 lattice. In the classic layout each
// direction occupies its own dimX*dimY*dimZ slab inside gridOld/gridNew.
#define C 0
#define N 1
#define E 2
#define W 3
#define S 4
#define T 5
#define B 6
#define NW 7
#define SW 8
#define NE 9
#define SE 10
#define TW 11
#define BW 12
#define TE 13
#define BE 14
#define TN 15
#define BN 16
#define TS 17
#define BS 18
// Reads population DIR of the old grid at absolute cell (X, Y, Z).
#define GET_COMP(X, Y, Z, DIR) \
gridOld[(Z) * dimX * dimY + (Y) * dimX + (X) + (DIR) * dimX * dimY * dimZ]
// Writes population DIR of the new grid at the thread's current cell (x, y, z).
#define SET_COMP(DIR) \
gridNew[z * dimX * dimY + y * dimX + x + (DIR) * dimX * dimY * dimZ]
// Classic-layout D3Q19 lattice Boltzmann (BGK) step: stream the 19 populations
// in from the neighbors, accumulate the density (rho) and velocity moments,
// then relax each population toward its equilibrium with omega = 1/1.7.
// One thread per (x, y) column (2-cell halo), marching along z. No bounds
// check: the launch grid must match the interior -- TODO confirm with the
// host-side launch configuration.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void updateLBMClassic(int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
{
int x = blockIdx.x * blockDim.x + threadIdx.x + 2;
int y = blockIdx.y * blockDim.y + threadIdx.y + 2;
int z = 2;
#pragma unroll 10
for (; z < (dimZ - 2); z += 1) {
#define SQR(X) ((X)*(X))
// Relaxation parameter and lattice weights (w0 = 1/3, w1 = 1/18, w2 = 1/36).
const double omega = 1.0/1.7;
const double omega_trm = 1.0 - omega;
const double omega_w0 = 3.0 * 1.0 / 3.0 * omega;
const double omega_w1 = 3.0*1.0/18.0*omega;
const double omega_w2 = 3.0*1.0/36.0*omega;
const double one_third = 1.0 / 3.0;
double velX, velY, velZ;
// Partial velocity moments: populations streaming in along +x, +y, +z.
velX =
GET_COMP(x-1,y,z,E) + GET_COMP(x-1,y-1,z,NE) +
GET_COMP(x-1,y+1,z,SE) + GET_COMP(x-1,y,z-1,TE) +
GET_COMP(x-1,y,z+1,BE);
velY = GET_COMP(x,y-1,z,N) + GET_COMP(x+1,y-1,z,NW) +
GET_COMP(x,y-1,z-1,TN) + GET_COMP(x,y-1,z+1,BN);
velZ = GET_COMP(x,y,z-1,T) + GET_COMP(x,y+1,z-1,TS) +
GET_COMP(x+1,y,z-1,TW);
// Local density: sum of all 19 incoming populations.
const double rho =
GET_COMP(x,y,z,C) + GET_COMP(x,y+1,z,S) +
GET_COMP(x+1,y,z,W) + GET_COMP(x,y,z+1,B) +
GET_COMP(x+1,y+1,z,SW) + GET_COMP(x,y+1,z+1,BS) +
GET_COMP(x+1,y,z+1,BW) + velX + velY + velZ;
// Complete the velocity moments by subtracting opposite-direction populations.
velX = velX
- GET_COMP(x+1,y,z,W) - GET_COMP(x+1,y-1,z,NW)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x+1,y,z-1,TW)
- GET_COMP(x+1,y,z+1,BW);
velY = velY
+ GET_COMP(x-1,y-1,z,NE) - GET_COMP(x,y+1,z,S)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x-1,y+1,z,SE)
- GET_COMP(x,y+1,z-1,TS) - GET_COMP(x,y+1,z+1,BS);
velZ = velZ+GET_COMP(x,y-1,z-1,TN) + GET_COMP(x-1,y,z-1,TE) - GET_COMP(x,y,z+1,B) - GET_COMP(x,y-1,z+1,BN) - GET_COMP(x,y+1,z+1,BS) - GET_COMP(x+1,y,z+1,BW) - GET_COMP(x-1,y,z+1,BE);
// density = rho;
// velocityX = velX;
// velocityY = velY;
// velocityZ = velZ;
// Direction-independent part of the equilibrium distribution.
const double dir_indep_trm = one_third*rho - 0.5*( velX*velX + velY*velY + velZ*velZ );
// BGK collision: new = (1 - omega) * streamed + omega * equilibrium.
SET_COMP(C)=omega_trm * GET_COMP(x,y,z,C) + omega_w0*( dir_indep_trm );
SET_COMP(NW)=omega_trm * GET_COMP(x+1,y-1,z,NW) +
omega_w2*( dir_indep_trm - ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(SE)=omega_trm * GET_COMP(x-1,y+1,z,SE) +
omega_w2*( dir_indep_trm + ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(NE)=omega_trm * GET_COMP(x-1,y-1,z,NE) +
omega_w2*( dir_indep_trm + ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(SW)=omega_trm * GET_COMP(x+1,y+1,z,SW) +
omega_w2*( dir_indep_trm - ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(TW)=omega_trm * GET_COMP(x+1,y,z-1,TW) + omega_w2*( dir_indep_trm - ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(BE)=omega_trm * GET_COMP(x-1,y,z+1,BE) + omega_w2*( dir_indep_trm + ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(TE)=omega_trm * GET_COMP(x-1,y,z-1,TE) + omega_w2*( dir_indep_trm + ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(BW)=omega_trm * GET_COMP(x+1,y,z+1,BW) + omega_w2*( dir_indep_trm - ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(TS)=omega_trm * GET_COMP(x,y+1,z-1,TS) + omega_w2*( dir_indep_trm - ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(BN)=omega_trm * GET_COMP(x,y-1,z+1,BN) + omega_w2*( dir_indep_trm + ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(TN)=omega_trm * GET_COMP(x,y-1,z-1,TN) + omega_w2*( dir_indep_trm + ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(BS)=omega_trm * GET_COMP(x,y+1,z+1,BS) + omega_w2*( dir_indep_trm - ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(N)=omega_trm * GET_COMP(x,y-1,z,N) + omega_w1*( dir_indep_trm + velY + 1.5*SQR(velY));
SET_COMP(S)=omega_trm * GET_COMP(x,y+1,z,S) + omega_w1*( dir_indep_trm - velY + 1.5*SQR(velY));
SET_COMP(E)=omega_trm * GET_COMP(x-1,y,z,E) + omega_w1*( dir_indep_trm + velX + 1.5*SQR(velX));
SET_COMP(W)=omega_trm * GET_COMP(x+1,y,z,W) + omega_w1*( dir_indep_trm - velX + 1.5*SQR(velX));
SET_COMP(T)=omega_trm * GET_COMP(x,y,z-1,T) + omega_w1*( dir_indep_trm + velZ + 1.5*SQR(velZ));
SET_COMP(B)=omega_trm * GET_COMP(x,y,z+1,B) + omega_w1*( dir_indep_trm - velZ + 1.5*SQR(velZ));
}
}
#undef GET_COMP
#undef SET_COMP
#undef C
#undef N
#undef E
#undef W
#undef S
#undef T
#undef B
#undef NW
#undef SW
#undef NE
#undef SE
#undef TW
#undef BW
#undef TE
#undef BE
#undef TN
#undef BN
#undef TS
#undef BS
// Accesses the old grid via the SoA accessor at compile-time offset (X, Y, Z)
// relative to the accessor's current index.
#define hoody(X, Y, Z) \
hoodOld[LibFlatArray::coord<X, Y, Z>()]
// SoA-layout variant of the RTM stencil sweep (same weights as
// updateRTMClassic). All strides come from the compile-time DIM_X/DIM_Y
// template dims; NOTE(review): the runtime dimX/dimY parameters are unused, so
// the instantiation dims must equal the actual grid dims.
// As in the classic variant, c3 (the z+1 plane) is carried through the
// register window but never added to the sum -- confirm intentional.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void updateRTMSoA(long dimX, long dimY, long dimZ, double *gridOld, double *gridNew)
{
long x = blockIdx.x * blockDim.x + threadIdx.x + 2;
long y = blockIdx.y * blockDim.y + threadIdx.y + 2;
long z = 2;
long index = z * DIM_X * DIM_Y + y * DIM_X + x;
long offset = DIM_X * DIM_Y;
long end = DIM_X * DIM_Y * (dimZ - 2);
// The accessors are bound to `index`; advancing it below appears to move
// both windows one z-plane forward -- per soa_accessor_light semantics,
// TODO confirm.
LibFlatArray::soa_accessor_light<Cell, DIM_X, DIM_Y, DIM_Z, 0> hoodNew((char*)gridNew, index);
LibFlatArray::soa_accessor_light<Cell, DIM_X, DIM_Y, DIM_Z, 0> hoodOld((char*)gridOld, index);
double c0 = hoody(0, 0, -2).c();
double c1 = hoody(0, 0, -1).c();
double c2 = hoody(0, 0, 0).c();
double c3 = hoody(0, 0, 1).c();
#pragma unroll 10
for (; index < end; index += offset) {
double c4 = hoody(0, 0, 2).c();
hoodNew[LibFlatArray::coord<0, 0, 0>()].c() =
0.10 * c0 +
0.15 * c1 +
0.20 * c2 +
0.25 * c4 +
0.30 * hoody( 0, -2, 0).c() +
0.35 * hoody( 0, -1, 0).c() +
0.40 * hoody( 0, 1, 0).c() +
0.45 * hoody( 0, 2, 0).c() +
0.50 * hoody(-2, 0, 0).c() +
0.55 * hoody(-1, 0, 0).c() +
0.60 * hoody( 1, 0, 0).c() +
0.65 * hoody( 2, 0, 0).c();
// Shift the register window one plane forward in z.
c0 = c1;
c1 = c2;
c2 = c3;
c3 = c4;
}
}
#undef hoody
// Reads population DIR of the old grid at relative coordinate (X, Y, Z) via
// the fixed-size neighborhood.
#define GET_COMP(X, Y, Z, DIR) \
hoodOld[FixedCoord<X, Y, Z>()].DIR()
// Writes population DIR at the current cell through the new-grid accessor.
#define SET_COMP(DIR) \
hoodNew.DIR()
// SoA-layout D3Q19 lattice Boltzmann (BGK) step; same math as
// updateLBMClassic, but addressed through LibFlatArray accessors. Strides come
// from the compile-time DIM_X/DIM_Y template dims; NOTE(review): the runtime
// dimX/dimY parameters are unused, so instantiation dims must match the grid.
template<int DIM_X, int DIM_Y, int DIM_Z>
__global__ void benchmarkLBMSoA(int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
{
long myX = blockIdx.x * blockDim.x + threadIdx.x + 2;
long myY = blockIdx.y * blockDim.y + threadIdx.y + 2;
long myZ = 2;
long index = myZ * DIM_X * DIM_Y + myY * DIM_X + myX;
long offset = DIM_X * DIM_Y;
long end = DIM_X * DIM_Y * (dimZ - 2);
long tempIndex = 0;
LibFlatArray::soa_accessor_light<CellLBM, DIM_X, DIM_Y, DIM_Z, 0> hoodNew((char*)gridNew, index);
LibFlatArray::soa_accessor_light<CellLBM, DIM_X, DIM_Y, DIM_Z, 0> hoodOldInternal((char*)gridOld, index);
FixedNeighborhood<
CellLBM,
DIM_X, DIM_Y, DIM_Z, 0,
LibFlatArray::soa_accessor_light, LibFlatArray::soa_accessor_light> hoodOld(
hoodOldInternal, tempIndex, -1, 1, -1, 1, -1, 1);
#pragma unroll 10
for (; index < end; index += offset) {
#define SQR(X) ((X)*(X))
// Relaxation parameter and lattice weights (w0 = 1/3, w1 = 1/18, w2 = 1/36).
const double omega = 1.0/1.7;
const double omega_trm = 1.0 - omega;
const double omega_w0 = 3.0 * 1.0 / 3.0 * omega;
const double omega_w1 = 3.0*1.0/18.0*omega;
const double omega_w2 = 3.0*1.0/36.0*omega;
const double one_third = 1.0 / 3.0;
// x, y, z are fixed at 0: all GET_COMP coordinates are offsets relative to
// the accessor's current cell.
const long x = 0;
const long y = 0;
const long z = 0;
double velX, velY, velZ;
// Partial velocity moments: populations streaming in along +x, +y, +z.
velX =
GET_COMP(x-1,y,z,E) + GET_COMP(x-1,y-1,z,NE) +
GET_COMP(x-1,y+1,z,SE) + GET_COMP(x-1,y,z-1,TE) +
GET_COMP(x-1,y,z+1,BE);
velY = GET_COMP(x,y-1,z,N) + GET_COMP(x+1,y-1,z,NW) +
GET_COMP(x,y-1,z-1,TN) + GET_COMP(x,y-1,z+1,BN);
velZ = GET_COMP(x,y,z-1,T) + GET_COMP(x,y+1,z-1,TS) +
GET_COMP(x+1,y,z-1,TW);
// Local density: sum of all 19 incoming populations.
const double rho =
GET_COMP(x,y,z,C) + GET_COMP(x,y+1,z,S) +
GET_COMP(x+1,y,z,W) + GET_COMP(x,y,z+1,B) +
GET_COMP(x+1,y+1,z,SW) + GET_COMP(x,y+1,z+1,BS) +
GET_COMP(x+1,y,z+1,BW) + velX + velY + velZ;
// Complete the velocity moments by subtracting opposite-direction populations.
velX = velX
- GET_COMP(x+1,y,z,W) - GET_COMP(x+1,y-1,z,NW)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x+1,y,z-1,TW)
- GET_COMP(x+1,y,z+1,BW);
velY = velY
+ GET_COMP(x-1,y-1,z,NE) - GET_COMP(x,y+1,z,S)
- GET_COMP(x+1,y+1,z,SW) - GET_COMP(x-1,y+1,z,SE)
- GET_COMP(x,y+1,z-1,TS) - GET_COMP(x,y+1,z+1,BS);
velZ = velZ+GET_COMP(x,y-1,z-1,TN) + GET_COMP(x-1,y,z-1,TE) - GET_COMP(x,y,z+1,B) - GET_COMP(x,y-1,z+1,BN) - GET_COMP(x,y+1,z+1,BS) - GET_COMP(x+1,y,z+1,BW) - GET_COMP(x-1,y,z+1,BE);
// density = rho;
// velocityX = velX;
// velocityY = velY;
// velocityZ = velZ;
// Direction-independent part of the equilibrium distribution.
const double dir_indep_trm = one_third*rho - 0.5*( velX*velX + velY*velY + velZ*velZ );
// BGK collision: new = (1 - omega) * streamed + omega * equilibrium.
SET_COMP(C)=omega_trm * GET_COMP(x,y,z,C) + omega_w0*( dir_indep_trm );
SET_COMP(NW)=omega_trm * GET_COMP(x+1,y-1,z,NW) +
omega_w2*( dir_indep_trm - ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(SE)=omega_trm * GET_COMP(x-1,y+1,z,SE) +
omega_w2*( dir_indep_trm + ( velX-velY ) + 1.5*SQR( velX-velY ) );
SET_COMP(NE)=omega_trm * GET_COMP(x-1,y-1,z,NE) +
omega_w2*( dir_indep_trm + ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(SW)=omega_trm * GET_COMP(x+1,y+1,z,SW) +
omega_w2*( dir_indep_trm - ( velX+velY ) + 1.5*SQR( velX+velY ) );
SET_COMP(TW)=omega_trm * GET_COMP(x+1,y,z-1,TW) + omega_w2*( dir_indep_trm - ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(BE)=omega_trm * GET_COMP(x-1,y,z+1,BE) + omega_w2*( dir_indep_trm + ( velX-velZ ) + 1.5*SQR( velX-velZ ) );
SET_COMP(TE)=omega_trm * GET_COMP(x-1,y,z-1,TE) + omega_w2*( dir_indep_trm + ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(BW)=omega_trm * GET_COMP(x+1,y,z+1,BW) + omega_w2*( dir_indep_trm - ( velX+velZ ) + 1.5*SQR( velX+velZ ) );
SET_COMP(TS)=omega_trm * GET_COMP(x,y+1,z-1,TS) + omega_w2*( dir_indep_trm - ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(BN)=omega_trm * GET_COMP(x,y-1,z+1,BN) + omega_w2*( dir_indep_trm + ( velY-velZ ) + 1.5*SQR( velY-velZ ) );
SET_COMP(TN)=omega_trm * GET_COMP(x,y-1,z-1,TN) + omega_w2*( dir_indep_trm + ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(BS)=omega_trm * GET_COMP(x,y+1,z+1,BS) + omega_w2*( dir_indep_trm - ( velY+velZ ) + 1.5*SQR( velY+velZ ) );
SET_COMP(N)=omega_trm * GET_COMP(x,y-1,z,N) + omega_w1*( dir_indep_trm + velY + 1.5*SQR(velY));
SET_COMP(S)=omega_trm * GET_COMP(x,y+1,z,S) + omega_w1*( dir_indep_trm - velY + 1.5*SQR(velY));
SET_COMP(E)=omega_trm * GET_COMP(x-1,y,z,E) + omega_w1*( dir_indep_trm + velX + 1.5*SQR(velX));
SET_COMP(W)=omega_trm * GET_COMP(x+1,y,z,W) + omega_w1*( dir_indep_trm - velX + 1.5*SQR(velX));
SET_COMP(T)=omega_trm * GET_COMP(x,y,z-1,T) + omega_w1*( dir_indep_trm + velZ + 1.5*SQR(velZ));
SET_COMP(B)=omega_trm * GET_COMP(x,y,z+1,B) + omega_w1*( dir_indep_trm - velZ + 1.5*SQR(velZ));
}
}
#undef GET_COMP
#undef SET_COMP
template<int DIM_X, int DIM_Y, int DIM_Z>
class LBMSoA
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "LBM"; }

    // "gold" tags the SoA (structure-of-arrays) variant.
    static std::string species() { return "gold"; }

    // Doubles stored per lattice cell: 19 D3Q19 populations, rounded up to
    // 20 -- TODO confirm the extra slot is deliberate padding.
    static int size() { return 20; }

    // Launches the SoA LBM kernel with the given configuration.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        benchmarkLBMSoA<DIM_X, DIM_Y, DIM_Z><<<dimGrid, dimBlock>>>(dimX, dimY, dimZ, gridOld, gridNew);
    }
};
template<int DIM_X, int DIM_Y, int DIM_Z>
class LBMClassic
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "LBM"; }

    // "pepper" tags the classic (one slab per direction) layout variant.
    static std::string species() { return "pepper"; }

    // Doubles stored per lattice cell: 19 D3Q19 populations, rounded up to
    // 20 -- TODO confirm the extra slot is deliberate padding.
    static int size() { return 20; }

    // Launches the classic-layout LBM kernel.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        updateLBMClassic<DIM_X, DIM_Y, DIM_Z><<<dimGrid, dimBlock>>>(dimX, dimY, dimZ, gridOld, gridNew);
    }
};
template<int DIM_X, int DIM_Y, int DIM_Z>
class RTMSoA
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "RTM"; }

    // "gold" tags the SoA (structure-of-arrays) variant.
    static std::string species() { return "gold"; }

    // One double component per cell.
    static int size() { return 1; }

    // Launches the SoA RTM stencil kernel.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        updateRTMSoA<DIM_X, DIM_Y, DIM_Z><<<dimGrid, dimBlock>>>(dimX, dimY, dimZ, gridOld, gridNew);
    }
};
template<int DIM_X, int DIM_Y, int DIM_Z>
class RTMClassic
{
public:
    // Benchmark family label used for reporting.
    static std::string family() { return "RTM"; }

    // "pepper" tags the classic flat-array layout variant.
    static std::string species() { return "pepper"; }

    // One double component per cell.
    static int size() { return 1; }

    // Launches the classic-layout RTM stencil kernel.
    static void run(dim3 dimGrid, dim3 dimBlock, int dimX, int dimY, int dimZ, double *gridOld, double *gridNew)
    {
        updateRTMClassic<DIM_X, DIM_Y, DIM_Z><<<dimGrid, dimBlock>>>(dimX, dimY, dimZ, gridOld, gridNew);
    }
};
// Times `repeats` iterations of KERNEL on a dimX x dimY x dimZ grid and
// returns throughput in GLUPS (giga lattice updates per second).
// The grid buffers are allocated from the compile-time dims plus 4 extra
// z-planes of slack; KERNEL::size() is the number of double components per
// cell. The runtime dims must not exceed the compile-time dims.
template<template<int A, int B, int C> class KERNEL, int DIM_X, int DIM_Y, int DIM_Z>
double benchmarkCUDA(int dimX, int dimY, int dimZ, int repeats)
{
    using std::swap;
    std::size_t size = DIM_X * DIM_Y * (DIM_Z + 4) * KERNEL<0, 0, 0>::size();
    std::size_t bytesize = size * sizeof(double);
    std::vector<double> grid(size, 4711);
    double *devGridOld;
    double *devGridNew;
    cudaMalloc(&devGridOld, bytesize);
    cudaMalloc(&devGridNew, bytesize);
    CUDAUtil::checkForError();
    cudaMemcpy(devGridOld, &grid[0], bytesize, cudaMemcpyHostToDevice);
    cudaMemcpy(devGridNew, &grid[0], bytesize, cudaMemcpyHostToDevice);
    // Pick the largest power-of-two block width that fits dimX, capped at 256
    // threads; blocks are blockWidth x 2 x 1.
    int blockWidth = 1;
    for (; blockWidth <= dimX; blockWidth *= 2) {
    }
    blockWidth /= 2;
    blockWidth = (std::min)(256, blockWidth);
    dim3 dimBlock(blockWidth, 2, 1);
    dim3 dimGrid(dimX / dimBlock.x, dimY / dimBlock.y, 1);
    cudaDeviceSynchronize();
    double seconds = 0;
    {
        ScopedTimer timer(&seconds);
        // Fix: the loop counter used to be named 't', shadowing the
        // ScopedTimer declared just above.
        for (int rep = 0; rep < repeats; ++rep) {
            KERNEL<DIM_X, DIM_Y, DIM_Z>::run(dimGrid, dimBlock, dimX, dimY, dimZ, devGridOld, devGridNew);
            // Ping-pong the buffers so each step reads the previous result.
            swap(devGridOld, devGridNew);
        }
        cudaDeviceSynchronize();
    }
    CUDAUtil::checkForError();
    cudaMemcpy(&grid[0], devGridNew, bytesize, cudaMemcpyDeviceToHost);
    cudaFree(devGridOld);
    cudaFree(devGridNew);
    // Updates actually performed = threads launched * z-depth * repeats.
    double updates = 1.0 * dimGrid.x * dimBlock.x * dimGrid.y * dimBlock.y * dimZ * repeats;
    double glups = 1e-9 * updates / seconds;
    return glups;
}
// Adapter that plugs a kernel wrapper (LBMSoA, RTMClassic, ...) into the
// benchmark framework. Labels are forwarded from the wrapper; performance2()
// dispatches to a benchmarkCUDA instantiation whose compile-time dims are the
// smallest supported size that fits `dim`.
template<template<int A, int B, int C> class KERNEL>
class BenchmarkCUDA : public GPUBenchmark
{
public:
std::string family()
{
return KERNEL<0, 0, 0>::family();
}
std::string species()
{
return KERNEL<0, 0, 0>::species();
}
std::string unit()
{
return "GLUPS";
}
double performance2(const Coord<3>& dim)
{
// Maps the runtime extent onto a fixed ladder of compile-time sizes; the
// trailing 20 is the number of benchmark repetitions.
// NOTE(review): the ladder jumps from 192 to 256 (no 224 rung), so dims in
// (192, 256] all use the 256 instantiation -- confirm intentional.
#define CASE(DIM, ADD) \
if ((max)(dim) <= DIM) { \
return benchmarkCUDA<KERNEL, DIM + ADD, DIM, DIM>( \
dim.x(), dim.y(), dim.z(), 20); \
}
CASE(32, 0);
CASE(64, 0);
CASE(96, 0);
CASE(128, 0);
CASE(160, 0);
CASE(192, 0);
CASE(256, 0);
CASE(288, 0);
CASE(320, 0);
CASE(352, 0);
CASE(384, 0);
CASE(416, 0);
CASE(448, 0);
CASE(480, 0);
CASE(512, 0);
CASE(544, 0);
#undef CASE
throw std::range_error("dim too large");
}
// Largest component of the coordinate; name parenthesized to dodge any
// min/max macros.
int (max)(const Coord<3>& coord) const
{
return (std::max)(coord.x(), (std::max)(coord.y(), coord.z()));
}
};
// Runs the full benchmark sweep on the given device: RTM variants on cubes
// from 32^3 to 544^3, LBM variants on cubes from 32^3 to 160^3, in steps of 4.
void cudaTests(std::string name, std::string revision, int cudaDevice)
{
cudaSetDevice(cudaDevice);
LibFlatArray::evaluate eval(name, revision);
int increment = 4;
for (int d = 32; d <= 544; d += increment) {
eval(BenchmarkCUDA<RTMClassic>(), toVector(Coord<3>::diagonal(d)));
}
for (int d = 32; d <= 544; d += increment) {
eval(BenchmarkCUDA<RTMSoA>(), toVector(Coord<3>::diagonal(d)));
}
for (int d = 32; d <= 160; d += increment) {
// NOTE(review): `dim` is computed but never used -- the call benchmarks the
// d^3 cube instead of the elongated (d, d, 284) domain. Looks like the
// intent was eval(..., toVector(dim)); confirm before changing.
Coord<3> dim(d, d, 256 + 32 - 4);
eval(BenchmarkCUDA<LBMClassic>(), toVector(Coord<3>::diagonal(d)));
}
for (int d = 32; d <= 160; d += increment) {
// NOTE(review): same unused `dim` as above.
Coord<3> dim(d, d, 256 + 32 - 4);
eval(BenchmarkCUDA<LBMSoA>(), toVector(Coord<3>::diagonal(d)));
}
}
|
26b921cd7ccb70f40196a9b2ce28132b974cccdf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include "kernels/cuda_helpers.h"
#include "kernels/tensor_operators.h"
#include "tensors/tensor.h"
namespace marian {
// Fills d_in[0 .. size) with val using a grid-stride loop, so any launch
// configuration covers the whole array.
__global__ void gFill(float *d_in, int size, float val) {
  const int stride = blockDim.x * gridDim.x;
  for(int index = blockIdx.x * blockDim.x + threadIdx.x; index < size;
      index += stride) {
    d_in[index] = val;
  }
}
// Returns element i of the tensor, copied synchronously from device memory.
float TensorBase::get(size_t i) {
hipSetDevice(device_);
float temp;
CUDA_CHECK(
hipMemcpy(&temp, data() + i, sizeof(float), hipMemcpyDeviceToHost));
// NOTE(review): the memcpy above is already a synchronous call; this extra
// null-stream sync looks redundant -- confirm before removing.
hipStreamSynchronize(0);
return temp;
}
// Upload a single scalar into element `i`. Blocking.
void TensorBase::set(size_t i, float value) {
  // was unchecked; wrapped in CUDA_CHECK for consistency with set(vector)
  CUDA_CHECK(hipSetDevice(device_));
  CUDA_CHECK(
      hipMemcpy(data() + i, &value, sizeof(float), hipMemcpyHostToDevice));
  hipStreamSynchronize(0);
}
// Download the whole tensor into `v` (resized to match size()). Blocking.
void TensorBase::get(std::vector<float> &v) {
  CUDA_CHECK(hipSetDevice(device_));
  v.resize(size());
  CUDA_CHECK(hipMemcpy(
      v.data(), data(), size() * sizeof(float), hipMemcpyDeviceToHost));
  hipStreamSynchronize(0);
}
// Fill the whole tensor with a scalar value via the gFill kernel. Blocking.
void TensorBase::set(float value) {
  hipSetDevice(device_);
  // guard: an empty tensor would make threads == 0 and divide by zero below
  if(size() == 0)
    return;
  int threads = ::min(512, (int)size());
  int blocks = (size() / threads) + (size() % threads != 0);  // ceil-div
  hipLaunchKernelGGL(( gFill), dim3(blocks), dim3(threads), 0, 0, data(), size(), value);
  hipStreamSynchronize(0);
}
// Upload `v` into the tensor. Blocking.
// NOTE(review): copies v.size() elements without checking v.size() <= size();
// an oversized vector would overrun the device buffer — confirm callers
// guarantee matching sizes.
void TensorBase::set(const std::vector<float> &v) {
  CUDA_CHECK(hipSetDevice(device_));
  CUDA_CHECK(hipMemcpy(
      data(), v.data(), v.size() * sizeof(float), hipMemcpyHostToDevice));
  hipStreamSynchronize(0);
}
// Scatter values `v` into the tensor at linear indices `k` (delegates to the
// SetSparse helper from tensor_operators). Blocking.
void TensorBase::setSparse(const std::vector<size_t> &k,
                           const std::vector<float> &v) {
  hipSetDevice(device_);
  SetSparse(data(), k, v);
  hipStreamSynchronize(0);
}
// Copy the contents of `in` into this tensor. hipMemcpyDefault lets the
// runtime infer the transfer direction, so `in` may live on any device.
// NOTE(review): copies in->size() elements; assumes this tensor is at least
// that large — confirm at call sites.
void TensorBase::copyFrom(Tensor in) {
  hipSetDevice(device_);
  CUDA_CHECK(hipMemcpy(data(),
                       (float *)in->data(),
                       in->size() * sizeof(float),
                       hipMemcpyDefault));
  hipStreamSynchronize(0);
}
// Render a human-readable summary of the tensor: shape, device, pointer and
// byte size, then a truncated table of values. For each (l, k) plane it
// prints a per-row sum followed by at most `dispCols` leading and trailing
// columns; rows are abbreviated (first/last dispCols) when there are more
// than 10 of them.
// NOTE(review): when dispCols < shape()[1] < 2*dispCols the leading and
// trailing column loops overlap, so some columns print twice; when
// shape()[1] <= dispCols the trailing loop's size_t start underflows and the
// loop simply does not run. Display quirk only — confirm whether worth fixing.
std::string TensorBase::debug() {
  hipSetDevice(device_);
  std::stringstream strm;
  assert(shape_.size());
  strm << shape_;
  strm << " device=" << device_;
  strm << " ptr=" << (size_t)memory_->data();
  strm << " bytes=" << memory_->size();
  strm << std::endl;
  // values
  size_t totSize = shape_.elements();
  std::vector<float> values(totSize);
  get(values);
  size_t dispCols = 5;
  strm << std::fixed << std::setprecision(8) << std::setfill(' ');
  for(size_t l = 0; l < shape()[3]; ++l) {
    for(size_t k = 0; k < shape()[2]; ++k) {
      strm << "[ ";
      if(shape()[0] > 10) {
        // abbreviated rows: first dispCols rows ...
        for(size_t i = 0; i < shape()[0] && i < dispCols; ++i) {
          if(i > 0)
            strm << std::endl << " ";
          float sum = 0;
          for(size_t j = 0; j < shape()[1]; ++j)
            sum += values[i * shape().stride(0) + j * shape().stride(1)
                          + k * shape().stride(2)
                          + l * shape().stride(3)];
          strm << std::setw(12) << sum << " | ";
          for(size_t j = 0; j < shape()[1] && j < dispCols; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
          if(shape()[1] > dispCols)
            strm << "... ";
          for(size_t j = shape()[1] - dispCols; j < shape()[1]; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
        }
        strm << std::endl << " ...";
        // ... then last dispCols rows
        for(size_t i = shape()[0] - dispCols; i < shape()[0]; ++i) {
          if(i > 0)
            strm << std::endl << " ";
          float sum = 0;
          for(size_t j = 0; j < shape()[1]; ++j)
            sum += values[i * shape().stride(0) + j * shape().stride(1)
                          + k * shape().stride(2)
                          + l * shape().stride(3)];
          strm << std::setw(12) << sum << " | ";
          for(size_t j = 0; j < shape()[1] && j < dispCols; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
          if(shape()[1] > dispCols)
            strm << "... ";
          for(size_t j = shape()[1] - dispCols; j < shape()[1]; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
        }
      } else {
        // few rows: print them all
        for(size_t i = 0; i < shape()[0] && i < 10; ++i) {
          if(i > 0)
            strm << std::endl << " ";
          float sum = 0;
          for(size_t j = 0; j < shape()[1]; ++j)
            sum += values[i * shape().stride(0) + j * shape().stride(1)
                          + k * shape().stride(2)
                          + l * shape().stride(3)];
          strm << std::setw(12) << sum << " | ";
          for(size_t j = 0; j < shape()[1] && j < dispCols; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
          if(shape()[1] > dispCols)
            strm << "... ";
          for(size_t j = shape()[1] - dispCols; j < shape()[1]; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
        }
      }
      strm << "]" << std::endl;
    }
  }
  return strm.str();
}
// Stream-style convenience: upload v into t (t << v). Returns t for chaining.
Tensor operator<<(Tensor t, const std::vector<float> &v) {
  t->set(v);
  return t;
}
// Stream-style convenience: download t into v (t >> v). Returns t for chaining.
Tensor operator>>(Tensor t, std::vector<float> &v) {
  t->get(v);
  return t;
}
}
| 26b921cd7ccb70f40196a9b2ce28132b974cccdf.cu |
#include <cuda.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include "kernels/cuda_helpers.h"
#include "kernels/tensor_operators.h"
#include "tensors/tensor.h"
namespace marian {
// Device kernel: store `val` into every element of d_in[0..size).
// Grid-stride loop: each thread covers indices spaced gridDim.x * blockDim.x
// apart, so any launch configuration fills the whole buffer.
__global__ void gFill(float *d_in, int size, float val) {
  int stride = blockDim.x * gridDim.x;
  for(int index = threadIdx.x + blockDim.x * blockIdx.x; index < size;
      index += stride) {
    d_in[index] = val;
  }
}
// Download the single element at linear index `i` and return it. Blocking.
// NOTE(review): cudaSetDevice is not wrapped in CUDA_CHECK here, unlike the
// vector overloads — consider making error handling consistent.
float TensorBase::get(size_t i) {
  cudaSetDevice(device_);
  float temp;
  CUDA_CHECK(
      cudaMemcpy(&temp, data() + i, sizeof(float), cudaMemcpyDeviceToHost));
  cudaStreamSynchronize(0);
  return temp;
}
// Upload a single scalar into element `i`. Blocking.
// NOTE(review): cudaSetDevice is not wrapped in CUDA_CHECK here, unlike the
// vector overloads — consider making error handling consistent.
void TensorBase::set(size_t i, float value) {
  cudaSetDevice(device_);
  CUDA_CHECK(
      cudaMemcpy(data() + i, &value, sizeof(float), cudaMemcpyHostToDevice));
  cudaStreamSynchronize(0);
}
// Download the whole tensor into `v` (resized to match size()). Blocking.
void TensorBase::get(std::vector<float> &v) {
  CUDA_CHECK(cudaSetDevice(device_));
  v.resize(size());
  CUDA_CHECK(cudaMemcpy(
      v.data(), data(), size() * sizeof(float), cudaMemcpyDeviceToHost));
  cudaStreamSynchronize(0);
}
// Fill the whole tensor with a scalar value via the gFill kernel. Blocking.
void TensorBase::set(float value) {
  cudaSetDevice(device_);
  // guard: an empty tensor would make threads == 0 and divide by zero below
  if(size() == 0)
    return;
  int threads = std::min(512, (int)size());
  int blocks = (size() / threads) + (size() % threads != 0);  // ceil-div
  gFill<<<blocks, threads>>>(data(), size(), value);
  cudaStreamSynchronize(0);
}
// Upload `v` into the tensor. Blocking.
// NOTE(review): copies v.size() elements without checking v.size() <= size();
// an oversized vector would overrun the device buffer — confirm callers
// guarantee matching sizes.
void TensorBase::set(const std::vector<float> &v) {
  CUDA_CHECK(cudaSetDevice(device_));
  CUDA_CHECK(cudaMemcpy(
      data(), v.data(), v.size() * sizeof(float), cudaMemcpyHostToDevice));
  cudaStreamSynchronize(0);
}
// Scatter values `v` into the tensor at linear indices `k` (delegates to the
// SetSparse helper from tensor_operators). Blocking.
void TensorBase::setSparse(const std::vector<size_t> &k,
                           const std::vector<float> &v) {
  cudaSetDevice(device_);
  SetSparse(data(), k, v);
  cudaStreamSynchronize(0);
}
// Copy the contents of `in` into this tensor. cudaMemcpyDefault lets the
// runtime infer the transfer direction, so `in` may live on any device.
// NOTE(review): copies in->size() elements; assumes this tensor is at least
// that large — confirm at call sites.
void TensorBase::copyFrom(Tensor in) {
  cudaSetDevice(device_);
  CUDA_CHECK(cudaMemcpy(data(),
                        (float *)in->data(),
                        in->size() * sizeof(float),
                        cudaMemcpyDefault));
  cudaStreamSynchronize(0);
}
// Render a human-readable summary of the tensor: shape, device, pointer and
// byte size, then a truncated table of values. For each (l, k) plane it
// prints a per-row sum followed by at most `dispCols` leading and trailing
// columns; rows are abbreviated (first/last dispCols) when there are more
// than 10 of them.
// NOTE(review): when dispCols < shape()[1] < 2*dispCols the leading and
// trailing column loops overlap, so some columns print twice; when
// shape()[1] <= dispCols the trailing loop's size_t start underflows and the
// loop simply does not run. Display quirk only — confirm whether worth fixing.
std::string TensorBase::debug() {
  cudaSetDevice(device_);
  std::stringstream strm;
  assert(shape_.size());
  strm << shape_;
  strm << " device=" << device_;
  strm << " ptr=" << (size_t)memory_->data();
  strm << " bytes=" << memory_->size();
  strm << std::endl;
  // values
  size_t totSize = shape_.elements();
  std::vector<float> values(totSize);
  get(values);
  size_t dispCols = 5;
  strm << std::fixed << std::setprecision(8) << std::setfill(' ');
  for(size_t l = 0; l < shape()[3]; ++l) {
    for(size_t k = 0; k < shape()[2]; ++k) {
      strm << "[ ";
      if(shape()[0] > 10) {
        // abbreviated rows: first dispCols rows ...
        for(size_t i = 0; i < shape()[0] && i < dispCols; ++i) {
          if(i > 0)
            strm << std::endl << " ";
          float sum = 0;
          for(size_t j = 0; j < shape()[1]; ++j)
            sum += values[i * shape().stride(0) + j * shape().stride(1)
                          + k * shape().stride(2)
                          + l * shape().stride(3)];
          strm << std::setw(12) << sum << " | ";
          for(size_t j = 0; j < shape()[1] && j < dispCols; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
          if(shape()[1] > dispCols)
            strm << "... ";
          for(size_t j = shape()[1] - dispCols; j < shape()[1]; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
        }
        strm << std::endl << " ...";
        // ... then last dispCols rows
        for(size_t i = shape()[0] - dispCols; i < shape()[0]; ++i) {
          if(i > 0)
            strm << std::endl << " ";
          float sum = 0;
          for(size_t j = 0; j < shape()[1]; ++j)
            sum += values[i * shape().stride(0) + j * shape().stride(1)
                          + k * shape().stride(2)
                          + l * shape().stride(3)];
          strm << std::setw(12) << sum << " | ";
          for(size_t j = 0; j < shape()[1] && j < dispCols; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
          if(shape()[1] > dispCols)
            strm << "... ";
          for(size_t j = shape()[1] - dispCols; j < shape()[1]; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
        }
      } else {
        // few rows: print them all
        for(size_t i = 0; i < shape()[0] && i < 10; ++i) {
          if(i > 0)
            strm << std::endl << " ";
          float sum = 0;
          for(size_t j = 0; j < shape()[1]; ++j)
            sum += values[i * shape().stride(0) + j * shape().stride(1)
                          + k * shape().stride(2)
                          + l * shape().stride(3)];
          strm << std::setw(12) << sum << " | ";
          for(size_t j = 0; j < shape()[1] && j < dispCols; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
          if(shape()[1] > dispCols)
            strm << "... ";
          for(size_t j = shape()[1] - dispCols; j < shape()[1]; ++j) {
            strm << std::setw(12)
                 << values[i * shape().stride(0) + j * shape().stride(1)
                           + k * shape().stride(2)
                           + l * shape().stride(3)]
                 << " ";
          }
        }
      }
      strm << "]" << std::endl;
    }
  }
  return strm.str();
}
// Stream-style convenience: upload v into t (t << v). Returns t for chaining.
Tensor operator<<(Tensor t, const std::vector<float> &v) {
  t->set(v);
  return t;
}
// Stream-style convenience: download t into v (t >> v). Returns t for chaining.
Tensor operator>>(Tensor t, std::vector<float> &v) {
  t->get(v);
  return t;
}
}
|
a419eb8052c60553324a6dfd6d535569d9858647.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <random>
#include <string>
#include <vector>
#include "dali/core/dev_buffer.h"
#include "dali/core/format.h"
#include "dali/core/math_util.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/kernels/imgproc/resample/separable.h"
#include "dali/kernels/test/test_data.h"
#include "dali/kernels/scratch.h"
#include "dali/kernels/imgproc/resample.h"
#include "dali/kernels/imgproc/resample_cpu.h"
#include "dali/kernels/test/resampling_test/resampling_test_params.h"
#include "dali/test/cv_mat_utils.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
using std::cout;
using std::endl;
namespace dali {
namespace kernels {
namespace resample_test {
static constexpr int kMaxChannels = 16;
// One radial test pattern: a cosine wave emanating from `centre`, attenuated
// by a Gaussian-like falloff controlled by `decay`, with a per-channel color.
struct Bubble {
  vec3 centre;
  float color[kMaxChannels];
  float frequency;   // angular frequency of the radial cosine
  float decay;       // negative; multiplies squared distance in expf
};
// Renders a superposition of radial "bubble" patterns into an interleaved
// (z, y, x, channel) volume. One thread per voxel; the launch grid must
// cover `size` (excess threads exit through the bounds check).
template <typename T>
__global__ void DrawBubblesKernel(T *data, ivec3 size, int nch,
                                  const Bubble *bubbles, int nbubbles) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x >= size.x || y >= size.y || z >= size.z)
    return;
  T *pixel = &data[nch * (x + size.x * (y + size.y * z))];
  vec3 pos(x + 0.5f, y + 0.5f, z + 0.5f);   // sample at voxel centres
  float color[kMaxChannels] = { 0 };
  for (int i = 0; i < nbubbles; i++) {
    float dsq = (bubbles[i].centre - pos).length_square();
    // d == sqrt(dsq); NOTE(review): if dsq == 0 this is 0 * inf == NaN —
    // presumably a voxel centre never coincides exactly with a bubble centre.
    float d = dsq*rsqrt(dsq);
    float magnitude = expf(bubbles[i].decay * dsq);
    float phase = bubbles[i].frequency * d;
    for (int c = 0; c < nch; c++)
      color[c] += bubbles[i].color[c] * (1 + cos(phase)) * magnitude * 0.5f;
  }
  for (int c = 0; c < nch; c++)
    pixel[c] = ConvertSatNorm<T>(color[c]);
}
// Fills GPU tensors with synthetic, smoothly varying "bubble" patterns so
// that resampling results can be compared meaningfully.
template <typename T>
struct TestDataGenerator {
  DeviceBuffer<Bubble> gpu_bubbles;   // scratch buffer, reuploaded per call
  // Rasterizes `bubbles` into a 4D (D, H, W, C) GPU tensor on `stream`.
  template <int ndim>
  void DrawBubbles(const TensorView<StorageGPU, T, ndim> &tensor, span<const Bubble> bubbles,
                   hipStream_t stream) {
    static_assert(ndim == 4 || ndim == DynamicDimensions, "Tensor must be 4D or dynamic (and 4D)");
    assert(tensor.dim() == 4 && "Tensor must be 4D");
    gpu_bubbles.from_host(bubbles.data(), bubbles.size(), stream);
    // shape is (depth, height, width, channels) -> size = (W, H, D)
    ivec3 size(tensor.shape[2], tensor.shape[1], tensor.shape[0]);
    int nch = tensor.shape[3];
    assert(tensor.shape[3] <= kMaxChannels);
    dim3 block(32, 32, 1);
    dim3 grid(div_ceil(size.x, 32), div_ceil(size.y, 32), size.z);
    hipLaunchKernelGGL(( DrawBubblesKernel), dim3(grid), dim3(block), 0, stream, tensor.data, size, nch,
                       gpu_bubbles, bubbles.size());
  }
  // Generates `num_bubbles` random bubbles (fixed seed -> reproducible runs)
  // and draws them into `tensor`.
  template <int ndim>
  void GenerateTestData(const TensorView<StorageGPU, T, ndim> &tensor, int num_bubbles = 5,
                        hipStream_t stream = 0) {
    static_assert(ndim == 4 || ndim == DynamicDimensions, "Tensor must be 4D or dynamic (and 4D)");
    assert(tensor.dim() == 4 && "Tensor must be 4D");
    std::mt19937_64 rng(1234);
    std::uniform_real_distribution<float> dist(0, 1);
    std::uniform_real_distribution<float> freq_dist(M_PI/10, M_PI/3);
    std::uniform_real_distribution<float> sigma_dist(10, 100);
    auto shape = tensor.shape;
    int nch = shape[3];
    assert(nch <= kMaxChannels);
    std::vector<Bubble> bubbles(num_bubbles);
    for (int i = 0; i < num_bubbles; i++) {
      bubbles[i].centre = { shape[2] * dist(rng), shape[1] * dist(rng), shape[0] * dist(rng) };
      for (int c = 0; c < nch; c++)
        bubbles[i].color[c] = dist(rng);
      bubbles[i].frequency = freq_dist(rng);
      bubbles[i].decay = -1/(M_SQRT2 * sigma_dist(rng));
    }
    DrawBubbles(tensor, make_span(bubbles), stream);
  }
};
// Slices - duplicate params and shapes for depth slices as if they were additional samples
// Flattens a batch of 4D (D, H, W, C) shapes into a batch of 3D (H, W, C)
// slice shapes: each sample contributes one entry per depth slice, so the
// result has sum(depth_i) samples.
template <int ndim>
TensorListShape<ndim == DynamicDimensions ? DynamicDimensions : ndim-1>
GetSliceShapes(const TensorListShape<ndim> &tls) {
  TensorListShape<ndim == DynamicDimensions ? DynamicDimensions : ndim-1> slice_tls;
  int N = tls.num_samples();
  int total_slices = 0;
  for (int i = 0; i < N; i++) {
    total_slices += tls.tensor_shape_span(i)[0];
  }
  int D = tls.sample_dim() - 1;
  slice_tls.resize(total_slices, D);
  for (int i = 0, slice = 0; i < N; i++) {
    auto ts = tls.tensor_shape_span(i);
    for (int z = 0; z < ts[0]; z++, slice++) {
      auto slice_ts = slice_tls.tensor_shape_span(slice);
      for (int d = 0; d < D; d++) {
        slice_ts[d] = ts[d+1];   // drop the leading (depth) extent
      }
    }
  }
  return slice_tls;
}
// Reinterprets a batch of volumes as a (larger) batch of 2D slices without
// copying data (reshape only changes the shape metadata).
template <typename Storage, typename T, int ndim>
auto GetSliceImages(const TensorListView<Storage, T, ndim> &volumes) {
  return reshape(volumes, GetSliceShapes(volumes.shape), true);
}
// Expands per-sample 3D resampling params into per-slice 2D params (Y and X
// components only), duplicated once per depth slice so they line up with the
// slice batch produced by GetSliceShapes.
template <int ndim>
void GetSliceParams(vector<ResamplingParams2D> &slice_params,
                    span<const ResamplingParams3D> params,
                    const TensorListShape<ndim> &in_shape) {
  slice_params.clear();
  int N = in_shape.num_samples();
  assert(static_cast<int>(params.size()) == N);
  for (int i = 0; i < N; i++) {
    int depth = in_shape.tensor_shape_span(i)[0];
    ResamplingParams2D p;
    p[0] = params[i][1];   // Y
    p[1] = params[i][2];   // X
    for (int z = 0; z < depth; z++) {
      slice_params.push_back(p);
    }
  }
}
// ZShapes, ZImages - resize Z dim, fuse XY and keep old size
// Fuses the H and W extents into one, yielding (D, H*W, C)-style shapes for
// the Z-resampling pass.
template <int ndim>
auto GetZShapes(const TensorListShape<ndim> &tls) {
  return collapse_dim(tls, 1);
}
// Reinterprets volumes with fused H*W rows (see GetZShapes) without copying.
template <typename Storage, typename T, int ndim>
auto GetZImages(const TensorListView<Storage, T, ndim> &volumes) {
  return reshape(volumes, GetZShapes(volumes.shape), true);
}
/**
* @param z_params - parameters for resizing along Z axis, keeping fused XY intact
* @param params - original parameters
* @param in_shape - input shape for _this stage_ (if Z is resized after XY, it is tmp_shape)
*
* @remarks This function cannot work with ROI in X/Y axes - it must be run as the second stage
* (after resizing all the slices).
*/
// Builds the per-sample 2D params for the Z pass: dimension 0 carries the
// original depth resampling, dimension 1 is the fused H*W row which is kept
// at its input size (identity ROI over the full row).
template <int ndim>
void GetZParams(vector<ResamplingParams2D> &z_params,
                span<const ResamplingParams3D> params,
                const TensorListShape<ndim> &in_shape) {
  z_params.clear();
  int N = in_shape.num_samples();
  assert(static_cast<int>(params.size()) == N);
  for (int i = 0; i < N; i++) {
    auto sample_shape = in_shape.tensor_shape_span(i);
    // (removed an unused local `depth` that shadowed sample_shape[0])
    ResamplingParams2D p = {};
    p[0] = params[i][0];   // Z resampling becomes the "vertical" axis
    p[1].output_size = sample_shape[1] * sample_shape[2];   // fused H*W, unchanged
    p[1].roi.start = 0;
    p[1].roi.end = p[1].output_size;
    z_params.push_back(p);
  }
}
/**
* @brief Use 2x 2D resampling to achieve 3D
*
* The first step decomposes the resampling into slices and resamples XY dimensions, fusing depth
* and batch dim.
* The second step fuses XY dimensions into generalized rows - which is OK, since we don't resize
* that dimension and ROI is already applied. The Z dimension becomes the new Y.
*
* The result may differ slightly between this and true 3D resampling, because the order of
* operations is not optimized and may be different.
*/
template <typename Out, typename In>
void Resample3Dvia2D(TestTensorList<Out> &out,
                     TestTensorList<In> &in,
                     span<const ResamplingParams3D> params,
                     hipStream_t stream) {
  TestTensorList<float> tmp;   // intermediate kept in float precision
  auto in_view = in.template gpu<4>(stream);
  const auto &in_shape = in_view.shape;
  assert(in_shape.sample_dim() == 4);
  TensorListShape<4> tmp_shape, out_shape;
  int N = in_shape.num_samples();
  tmp_shape.resize(N);
  out_shape.resize(N);
  // tmp keeps the input depth but already has the output height/width
  for (int i = 0; i < N; i++) {
    auto in_sample_shape = in_shape.tensor_shape_span(i);
    auto tmp_sample_shape = tmp_shape.tensor_shape_span(i);
    auto out_sample_shape = out_shape.tensor_shape_span(i);
    for (int d = 0; d < 3; d++) {
      out_sample_shape[d] = params[i][d].output_size;
      if (out_sample_shape[d] == KeepOriginalSize)
        out_sample_shape[d] = in_sample_shape[d];
      tmp_sample_shape[d] = d == 0 ? in_sample_shape[d] : out_sample_shape[d];
    }
    tmp_sample_shape[3] = out_sample_shape[3] = in_sample_shape[3]; // number of channels
  }
  tmp.reshape(tmp_shape);
  out.reshape(out_shape);
  auto tmp_view = tmp.gpu<4>(stream);
  auto out_view = out.template gpu<4>(stream);
  vector<ResamplingParams2D> params_xy;
  vector<ResamplingParams2D> params_z;
  GetSliceParams(params_xy, params, in_shape);
  auto in_slices = GetSliceImages(in_view);
  auto tmp_slices = GetSliceImages(tmp_view);
  assert(in_slices.num_samples() == tmp_slices.num_samples());
  ScratchpadAllocator sa;
  // stage 1: resample X/Y on every depth slice independently
  {
    ResampleGPU<float, In, 2> res_xy;
    KernelContext ctx;
    ctx.gpu.stream = stream;
    auto req = res_xy.Setup(ctx, in_slices, make_span(params_xy));
    sa.Reserve(req.scratch_sizes);
    auto scratch = sa.GetScratchpad();
    ctx.scratchpad = &scratch;
    assert(req.output_shapes[0] == tmp_slices.shape);
    res_xy.Run(ctx, tmp_slices, in_slices, make_span(params_xy));
  }
  GetZParams(params_z, params, tmp_shape);
  auto tmp_z = GetZImages(tmp_view);
  auto out_z = GetZImages(out_view);
  // stage 2: fuse X/Y into generalized rows and resample along Z
  {
    ResampleGPU<Out, float, 2> res_z;
    KernelContext ctx;
    ctx.gpu.stream = stream;
    auto req = res_z.Setup(ctx, tmp_z, make_span(params_z));
    sa.Reserve(req.scratch_sizes);
    auto scratch = sa.GetScratchpad();
    ctx.scratchpad = &scratch;
    assert(req.output_shapes[0] == out_z.shape);
    res_z.Run(ctx, out_z, tmp_z, make_span(params_z));
  }
}
// Primary template is only declared; tests use the specialization below.
template <typename TestParams>
class Resample3DTest;
// Bundles the output/input element types and the interpolation filter for a
// single typed-test instantiation.
template <typename Out, typename In, ResamplingFilterType interp>
struct ResamplingTestParams {
  using OutputType = Out;
  using InputType = In;
  static constexpr ResamplingFilterType interp_type() { return interp; }
};
// Tests 3D resampling (GPU and CPU kernels) against a reference built from
// two chained 2D resampling passes (Resample3Dvia2D).
template <typename Out, typename In, ResamplingFilterType interp>
class Resample3DTest<ResamplingTestParams<Out, In, interp>>
: public ::testing::Test {
 public:
  Resample3DTest() {
    InitShapes();
  }
 protected:
  // Defines three input/output shape batches exercising 3 channels, a
  // variable channel count, and many (11) channels.
  void InitShapes() {
    in_shapes.resize(3);
    out_shapes.resize(3);
    // NOTE: The shapes are chosen as to avoid source pixel centers exactly halfway
    // between original pixels, because it can lead to rounding discrepancies between
    // cpu and gpu variants (and we're using two-pass GPU as a reference here).
    // 3 channels
    in_shapes[0] = {{
      { 40, 60, 50, 3 },
      { 32, 80, 120, 3 },
    }};
    out_shapes[0] = {{
      { 51, 40, 70, 3 },
      { 73, 87, 29, 3 },
    }};
    // variable number of channels
    in_shapes[1] = {{
      { 10, 200, 120, 1 },
      { 100, 10, 10, 3 },
      { 70, 80, 90, 6 },
    }};
    out_shapes[1] = {{
      { 31, 200, 120, 1 },
      { 51, 27, 33, 3 },
      { 73, 181, 43, 6 },
    }};
    // many channels
    in_shapes[2] = {{
      { 40, 40, 40, 11 },
    }};
    out_shapes[2] = {{
      { 51, 51, 51, 11 },
    }};
  }
  // Builds per-sample 3D params: the requested output sizes plus a random
  // ROI (possibly flipped) along the X axis only.
  vector<ResamplingParams3D> GenerateParams(const TensorListShape<4> &out_shape,
                                            const TensorListShape<4> &in_shape) {
    vector<ResamplingParams3D> params;
    params.resize(in_shape.num_samples());
    std::bernoulli_distribution dist;
    std::uniform_real_distribution<float> start_dist(0.05, 0.3);
    std::uniform_real_distribution<float> end_dist(0.7, 0.95);
    for (int i = 0; i < in_shape.num_samples(); i++) {
      auto in_sample_shape = in_shape.tensor_shape_span(i);
      for (int d = 0; d < 3; d++) {
        params[i][d].min_filter = interp;
        params[i][d].mag_filter = interp;
        params[i][d].output_size = out_shape.tensor_shape_span(i)[d];
        if (d == 2) {
          // retry until the ROI is NN-safe (see CheckNN)
          do {
            params[i][d].roi.use_roi = true;
            params[i][d].roi.start = start_dist(rng) * in_sample_shape[d];
            params[i][d].roi.end = end_dist(rng) * in_sample_shape[d];
            if (dist(rng))
              std::swap(params[i][d].roi.start, params[i][d].roi.end);
          } while (interp == ResamplingFilterType::Nearest &&
                   !CheckNN(params[i][d].output_size, params[i][d].roi.start, params[i][d].roi.end));
        }
      }
    }
    return params;
  }
  // Checks for possible rounding problems leading to selecting different source pixel
  // when running NN resampling.
  static bool CheckNN(int size, float start, float end) {
    float step = (end - start) / size;
    float x = start + step * 0.5f;
    for (int i = 0; i < size; i++, x += step) {
      if (std::abs(x - ::floor(x)) < 0.01f)
        return false;
    }
    return true;
  }
  // Runs the 3D GPU kernel on every shape batch and compares against the
  // two-pass 2D GPU reference.
  void RunGPU() {
    hipStream_t stream = 0;
    ResampleGPU<Out, In, 3> kernel;
    KernelContext ctx;
    ctx.gpu.stream = stream;
    ScratchpadAllocator sa;
    TestDataGenerator<In> tdg;
    TestTensorList<In> in;
    TestTensorList<Out> out, ref;
    int niter = NumIter();
    for (int iter = 0; iter < niter; iter++) {
      const TensorListShape<4> &in_shape = in_shapes[iter];
      int N = in_shape.num_samples();
      in.reshape(in_shape);
      for (int i = 0; i < N; i++)
        tdg.GenerateTestData(in.gpu(stream)[i], 30, stream);
      const TensorListShape<4> &out_shape = out_shapes[iter];
      vector<ResamplingParams3D> params = GenerateParams(out_shape, in_shape);
      Resample3Dvia2D(ref, in, make_span(params), stream);
      auto ref_cpu = ref.cpu(stream);
      assert(ref_cpu.shape == out_shape);
      auto req = kernel.Setup(ctx, in.template gpu<4>(stream), make_span(params));
      ASSERT_EQ(req.output_shapes.size(), 1u) << "Expected only 1 output";
      ASSERT_EQ(req.output_shapes[0], out_shape) << "Unexpected output shape";
      out.reshape(out_shape);
      // clear the output so stale data cannot mask missing writes
      CUDA_CALL(
        hipMemsetAsync(out.gpu(stream).data[0], 0, sizeof(Out)*out_shape.num_elements(), stream));
      sa.Reserve(req.scratch_sizes);
      auto scratchpad = sa.GetScratchpad();
      ctx.scratchpad = &scratchpad;
      auto out_gpu = out.template gpu<4>(stream);
      kernel.Run(ctx, out_gpu, in.template gpu<4>(stream), make_span(params));
      auto out_cpu = out.cpu(stream);
      if (interp == ResamplingFilterType::Nearest) {
        Check(out_cpu, ref_cpu);
      } else {
        // Epsilons are quite big because, processing order in the reference is forced to be XYZ
        // or YXZ, whereas the tested implementation can use any order.
        double eps = std::is_integral<Out>::value ? 1 : 1e-3;
        Check(out_cpu, ref_cpu, EqualEpsRel(eps, 1e-4));
      }
    }
  }
  // Runs the 3D CPU kernel per sample and compares against the two-pass 2D
  // GPU reference.
  void RunCPU() {
    hipStream_t stream = 0;
    ResampleCPU<Out, In, 3> kernel;
    KernelContext ctx;
    ScratchpadAllocator sa;
    TestDataGenerator<In> tdg;
    TestTensorList<In> in;
    TestTensorList<Out> out, ref;
    int niter = NumIter();
    for (int iter = 0; iter < niter; iter++) {
      const TensorListShape<4> &in_shape = in_shapes[iter];
      int N = in_shape.num_samples();
      in.reshape(in_shape);
      for (int i = 0; i < N; i++)
        tdg.GenerateTestData(in.gpu(stream)[i], 10, stream);
      const TensorListShape<4> &out_shape = out_shapes[iter];
      out.reshape(out_shape);
      memset(out.cpu(stream).data[0], 0, sizeof(Out)*out_shape.num_elements());
      vector<ResamplingParams3D> params = GenerateParams(out_shape, in_shape);
      // NOTE(review): only iteration 1 (the variable-channel batch) is ever
      // checked on CPU; this looks like leftover debugging — confirm whether
      // the other batches were skipped intentionally.
      if (iter != 1)
        continue;
      Resample3Dvia2D(ref, in, make_span(params), stream);
      auto ref_cpu = ref.cpu(stream);
      ref.invalidate_gpu();
      assert(ref_cpu.shape == out_shape);
      auto in_cpu = in.template cpu<4>(stream);
      auto out_cpu = out.template cpu<4>(stream);
      for (int i = 0; i < N; i++) {
        auto req = kernel.Setup(ctx, in_cpu[i], params[i]);
        ASSERT_EQ(req.output_shapes.size(), 1u) << "Expected only 1 output";
        ASSERT_EQ(req.output_shapes[0][0], out_shape[i]) << "Unexpected output shape";
        sa.Reserve(req.scratch_sizes);
        auto scratchpad = sa.GetScratchpad();
        ctx.scratchpad = &scratchpad;
        kernel.Run(ctx, out_cpu[i], in_cpu[i], params[i]);
        if (interp == ResamplingFilterType::Nearest) {
          Check(out_cpu[i], ref_cpu[i]);
        } else {
          // Epsilons are quite big because:
          // - GPU uses fma
          // - GPU uses different rounding
          // - processing order in the reference is forced to be XYZ or YXZ, whereas
          //   the tested implementation can use any order.
          double eps = std::is_integral<Out>::value ? 1 :
                       std::is_integral<In>::value ? max_value<In>()*1e-6 : 1e-5;
          Check(out_cpu[i], ref_cpu[i], EqualEpsRel(eps, 2e-3));
        }
      }
    }
  }
  vector<TensorListShape<4>> in_shapes, out_shapes;
  // number of shape batches defined by InitShapes()
  int NumIter() const { return in_shapes.size(); }
  std::mt19937_64 rng{1234};   // fixed seed for reproducible ROIs
};
// One (output type, input type) pair per interpolation filter keeps the
// suite small while covering all filters.
using Resample3DTestTypes = ::testing::Types<
  ResamplingTestParams<uint8_t, uint8_t, ResamplingFilterType::Nearest>,
  ResamplingTestParams<float, uint8_t, ResamplingFilterType::Linear>,
  ResamplingTestParams<int16_t, int16_t, ResamplingFilterType::Cubic>,
  ResamplingTestParams<float, uint16_t, ResamplingFilterType::Lanczos3>
>;
TYPED_TEST_SUITE(Resample3DTest, Resample3DTestTypes);
TYPED_TEST(Resample3DTest, TestGPU) {
  this->RunGPU();
}
TYPED_TEST(Resample3DTest, TestCPU) {
  this->RunCPU();
}
} // namespace resample_test
} // namespace kernels
} // namespace dali
| a419eb8052c60553324a6dfd6d535569d9858647.cu | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <random>
#include <string>
#include <vector>
#include "dali/core/dev_buffer.h"
#include "dali/core/format.h"
#include "dali/core/math_util.h"
#include "dali/core/tensor_shape_print.h"
#include "dali/kernels/imgproc/resample/separable.h"
#include "dali/kernels/test/test_data.h"
#include "dali/kernels/scratch.h"
#include "dali/kernels/imgproc/resample.h"
#include "dali/kernels/imgproc/resample_cpu.h"
#include "dali/kernels/test/resampling_test/resampling_test_params.h"
#include "dali/test/cv_mat_utils.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
using std::cout;
using std::endl;
namespace dali {
namespace kernels {
namespace resample_test {
static constexpr int kMaxChannels = 16;
// One radial test pattern: a cosine wave emanating from `centre`, attenuated
// by a Gaussian-like falloff controlled by `decay`, with a per-channel color.
struct Bubble {
  vec3 centre;
  float color[kMaxChannels];
  float frequency;   // angular frequency of the radial cosine
  float decay;       // negative; multiplies squared distance in expf
};
// Renders a superposition of radial "bubble" patterns into an interleaved
// (z, y, x, channel) volume. One thread per voxel; the launch grid must
// cover `size` (excess threads exit through the bounds check).
template <typename T>
__global__ void DrawBubblesKernel(T *data, ivec3 size, int nch,
                                  const Bubble *bubbles, int nbubbles) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x >= size.x || y >= size.y || z >= size.z)
    return;
  T *pixel = &data[nch * (x + size.x * (y + size.y * z))];
  vec3 pos(x + 0.5f, y + 0.5f, z + 0.5f);   // sample at voxel centres
  float color[kMaxChannels] = { 0 };
  for (int i = 0; i < nbubbles; i++) {
    float dsq = (bubbles[i].centre - pos).length_square();
    // d == sqrt(dsq); NOTE(review): if dsq == 0 this is 0 * inf == NaN —
    // presumably a voxel centre never coincides exactly with a bubble centre.
    float d = dsq*rsqrt(dsq);
    float magnitude = expf(bubbles[i].decay * dsq);
    float phase = bubbles[i].frequency * d;
    for (int c = 0; c < nch; c++)
      color[c] += bubbles[i].color[c] * (1 + cos(phase)) * magnitude * 0.5f;
  }
  for (int c = 0; c < nch; c++)
    pixel[c] = ConvertSatNorm<T>(color[c]);
}
// Fills GPU tensors with synthetic, smoothly varying "bubble" patterns so
// that resampling results can be compared meaningfully.
template <typename T>
struct TestDataGenerator {
  DeviceBuffer<Bubble> gpu_bubbles;   // scratch buffer, reuploaded per call
  // Rasterizes `bubbles` into a 4D (D, H, W, C) GPU tensor on `stream`.
  template <int ndim>
  void DrawBubbles(const TensorView<StorageGPU, T, ndim> &tensor, span<const Bubble> bubbles,
                   cudaStream_t stream) {
    static_assert(ndim == 4 || ndim == DynamicDimensions, "Tensor must be 4D or dynamic (and 4D)");
    assert(tensor.dim() == 4 && "Tensor must be 4D");
    gpu_bubbles.from_host(bubbles.data(), bubbles.size(), stream);
    // shape is (depth, height, width, channels) -> size = (W, H, D)
    ivec3 size(tensor.shape[2], tensor.shape[1], tensor.shape[0]);
    int nch = tensor.shape[3];
    assert(tensor.shape[3] <= kMaxChannels);
    dim3 block(32, 32, 1);
    dim3 grid(div_ceil(size.x, 32), div_ceil(size.y, 32), size.z);
    DrawBubblesKernel<<<grid, block, 0, stream>>>(tensor.data, size, nch,
                                                  gpu_bubbles, bubbles.size());
  }
  // Generates `num_bubbles` random bubbles (fixed seed -> reproducible runs)
  // and draws them into `tensor`.
  template <int ndim>
  void GenerateTestData(const TensorView<StorageGPU, T, ndim> &tensor, int num_bubbles = 5,
                        cudaStream_t stream = 0) {
    static_assert(ndim == 4 || ndim == DynamicDimensions, "Tensor must be 4D or dynamic (and 4D)");
    assert(tensor.dim() == 4 && "Tensor must be 4D");
    std::mt19937_64 rng(1234);
    std::uniform_real_distribution<float> dist(0, 1);
    std::uniform_real_distribution<float> freq_dist(M_PI/10, M_PI/3);
    std::uniform_real_distribution<float> sigma_dist(10, 100);
    auto shape = tensor.shape;
    int nch = shape[3];
    assert(nch <= kMaxChannels);
    std::vector<Bubble> bubbles(num_bubbles);
    for (int i = 0; i < num_bubbles; i++) {
      bubbles[i].centre = { shape[2] * dist(rng), shape[1] * dist(rng), shape[0] * dist(rng) };
      for (int c = 0; c < nch; c++)
        bubbles[i].color[c] = dist(rng);
      bubbles[i].frequency = freq_dist(rng);
      bubbles[i].decay = -1/(M_SQRT2 * sigma_dist(rng));
    }
    DrawBubbles(tensor, make_span(bubbles), stream);
  }
};
// Slices - duplicate params and shapes for depth slices as if they were additional samples
// Flattens a batch of 4D (D, H, W, C) shapes into a batch of 3D (H, W, C)
// slice shapes: each sample contributes one entry per depth slice, so the
// result has sum(depth_i) samples.
template <int ndim>
TensorListShape<ndim == DynamicDimensions ? DynamicDimensions : ndim-1>
GetSliceShapes(const TensorListShape<ndim> &tls) {
  TensorListShape<ndim == DynamicDimensions ? DynamicDimensions : ndim-1> slice_tls;
  int N = tls.num_samples();
  int total_slices = 0;
  for (int i = 0; i < N; i++) {
    total_slices += tls.tensor_shape_span(i)[0];
  }
  int D = tls.sample_dim() - 1;
  slice_tls.resize(total_slices, D);
  for (int i = 0, slice = 0; i < N; i++) {
    auto ts = tls.tensor_shape_span(i);
    for (int z = 0; z < ts[0]; z++, slice++) {
      auto slice_ts = slice_tls.tensor_shape_span(slice);
      for (int d = 0; d < D; d++) {
        slice_ts[d] = ts[d+1];   // drop the leading (depth) extent
      }
    }
  }
  return slice_tls;
}
// Reinterprets a batch of volumes as a (larger) batch of 2D slices without
// copying data (reshape only changes the shape metadata).
template <typename Storage, typename T, int ndim>
auto GetSliceImages(const TensorListView<Storage, T, ndim> &volumes) {
  return reshape(volumes, GetSliceShapes(volumes.shape), true);
}
// Expands per-sample 3D resampling params into per-slice 2D params (Y and X
// components only), duplicated once per depth slice so they line up with the
// slice batch produced by GetSliceShapes.
template <int ndim>
void GetSliceParams(vector<ResamplingParams2D> &slice_params,
                    span<const ResamplingParams3D> params,
                    const TensorListShape<ndim> &in_shape) {
  slice_params.clear();
  int N = in_shape.num_samples();
  assert(static_cast<int>(params.size()) == N);
  for (int i = 0; i < N; i++) {
    int depth = in_shape.tensor_shape_span(i)[0];
    ResamplingParams2D p;
    p[0] = params[i][1];   // Y
    p[1] = params[i][2];   // X
    for (int z = 0; z < depth; z++) {
      slice_params.push_back(p);
    }
  }
}
// ZShapes, ZImages - resize Z dim, fuse XY and keep old size
// Fuses the H and W extents into one, yielding (D, H*W, C)-style shapes for
// the Z-resampling pass.
template <int ndim>
auto GetZShapes(const TensorListShape<ndim> &tls) {
  return collapse_dim(tls, 1);
}
// Reinterprets volumes with fused H*W rows (see GetZShapes) without copying.
template <typename Storage, typename T, int ndim>
auto GetZImages(const TensorListView<Storage, T, ndim> &volumes) {
  return reshape(volumes, GetZShapes(volumes.shape), true);
}
/**
* @param z_params - parameters for resizing along Z axis, keeping fused XY intact
* @param params - original parameters
* @param in_shape - input shape for _this stage_ (if Z is resized after XY, it is tmp_shape)
*
* @remarks This function cannot work with ROI in X/Y axes - it must be run as the second stage
* (after resizing all the slices).
*/
// Builds the per-sample 2D params for the Z pass: dimension 0 carries the
// original depth resampling, dimension 1 is the fused H*W row which is kept
// at its input size (identity ROI over the full row).
template <int ndim>
void GetZParams(vector<ResamplingParams2D> &z_params,
                span<const ResamplingParams3D> params,
                const TensorListShape<ndim> &in_shape) {
  z_params.clear();
  int N = in_shape.num_samples();
  assert(static_cast<int>(params.size()) == N);
  for (int i = 0; i < N; i++) {
    auto sample_shape = in_shape.tensor_shape_span(i);
    // (removed an unused local `depth` that shadowed sample_shape[0])
    ResamplingParams2D p = {};
    p[0] = params[i][0];   // Z resampling becomes the "vertical" axis
    p[1].output_size = sample_shape[1] * sample_shape[2];   // fused H*W, unchanged
    p[1].roi.start = 0;
    p[1].roi.end = p[1].output_size;
    z_params.push_back(p);
  }
}
/**
* @brief Use 2x 2D resampling to achieve 3D
*
* The first step decomposes the resampling into slices and resamples XY dimensions, fusing depth
* and batch dim.
* The second step fuses XY dimensions into generalized rows - which is OK, since we don't resize
* that dimension and ROI is already applied. The Z dimension becomes the new Y.
*
* The result may differ slightly between this and true 3D resampling, because the order of
* operations is not optimized and may be different.
*/
template <typename Out, typename In>
void Resample3Dvia2D(TestTensorList<Out> &out,
TestTensorList<In> &in,
span<const ResamplingParams3D> params,
cudaStream_t stream) {
TestTensorList<float> tmp;
auto in_view = in.template gpu<4>(stream);
const auto &in_shape = in_view.shape;
assert(in_shape.sample_dim() == 4);
TensorListShape<4> tmp_shape, out_shape;
int N = in_shape.num_samples();
tmp_shape.resize(N);
out_shape.resize(N);
for (int i = 0; i < N; i++) {
auto in_sample_shape = in_shape.tensor_shape_span(i);
auto tmp_sample_shape = tmp_shape.tensor_shape_span(i);
auto out_sample_shape = out_shape.tensor_shape_span(i);
for (int d = 0; d < 3; d++) {
out_sample_shape[d] = params[i][d].output_size;
if (out_sample_shape[d] == KeepOriginalSize)
out_sample_shape[d] = in_sample_shape[d];
tmp_sample_shape[d] = d == 0 ? in_sample_shape[d] : out_sample_shape[d];
}
tmp_sample_shape[3] = out_sample_shape[3] = in_sample_shape[3]; // number of channels
}
tmp.reshape(tmp_shape);
out.reshape(out_shape);
auto tmp_view = tmp.gpu<4>(stream);
auto out_view = out.template gpu<4>(stream);
vector<ResamplingParams2D> params_xy;
vector<ResamplingParams2D> params_z;
GetSliceParams(params_xy, params, in_shape);
auto in_slices = GetSliceImages(in_view);
auto tmp_slices = GetSliceImages(tmp_view);
assert(in_slices.num_samples() == tmp_slices.num_samples());
ScratchpadAllocator sa;
{
ResampleGPU<float, In, 2> res_xy;
KernelContext ctx;
ctx.gpu.stream = stream;
auto req = res_xy.Setup(ctx, in_slices, make_span(params_xy));
sa.Reserve(req.scratch_sizes);
auto scratch = sa.GetScratchpad();
ctx.scratchpad = &scratch;
assert(req.output_shapes[0] == tmp_slices.shape);
res_xy.Run(ctx, tmp_slices, in_slices, make_span(params_xy));
}
GetZParams(params_z, params, tmp_shape);
auto tmp_z = GetZImages(tmp_view);
auto out_z = GetZImages(out_view);
{
ResampleGPU<Out, float, 2> res_z;
KernelContext ctx;
ctx.gpu.stream = stream;
auto req = res_z.Setup(ctx, tmp_z, make_span(params_z));
sa.Reserve(req.scratch_sizes);
auto scratch = sa.GetScratchpad();
ctx.scratchpad = &scratch;
assert(req.output_shapes[0] == out_z.shape);
res_z.Run(ctx, out_z, tmp_z, make_span(params_z));
}
}
template <typename TestParams>
class Resample3DTest;
template <typename Out, typename In, ResamplingFilterType interp>
struct ResamplingTestParams {
using OutputType = Out;
using InputType = In;
static constexpr ResamplingFilterType interp_type() { return interp; }
};
template <typename Out, typename In, ResamplingFilterType interp>
class Resample3DTest<ResamplingTestParams<Out, In, interp>>
: public ::testing::Test {
public:
Resample3DTest() {
InitShapes();
}
protected:
void InitShapes() {
in_shapes.resize(3);
out_shapes.resize(3);
// NOTE: The shapes are chosen as to avoid source pixel centers exactly halfway
// between original pixels, because it can lead to rounding discrepancies between
// cpu and gpu variants (and we're using two-pass GPU as a reference here).
// 3 channels
in_shapes[0] = {{
{ 40, 60, 50, 3 },
{ 32, 80, 120, 3 },
}};
out_shapes[0] = {{
{ 51, 40, 70, 3 },
{ 73, 87, 29, 3 },
}};
// variable number of channels
in_shapes[1] = {{
{ 10, 200, 120, 1 },
{ 100, 10, 10, 3 },
{ 70, 80, 90, 6 },
}};
out_shapes[1] = {{
{ 31, 200, 120, 1 },
{ 51, 27, 33, 3 },
{ 73, 181, 43, 6 },
}};
// many channels
in_shapes[2] = {{
{ 40, 40, 40, 11 },
}};
out_shapes[2] = {{
{ 51, 51, 51, 11 },
}};
}
vector<ResamplingParams3D> GenerateParams(const TensorListShape<4> &out_shape,
const TensorListShape<4> &in_shape) {
vector<ResamplingParams3D> params;
params.resize(in_shape.num_samples());
std::bernoulli_distribution dist;
std::uniform_real_distribution<float> start_dist(0.05, 0.3);
std::uniform_real_distribution<float> end_dist(0.7, 0.95);
for (int i = 0; i < in_shape.num_samples(); i++) {
auto in_sample_shape = in_shape.tensor_shape_span(i);
for (int d = 0; d < 3; d++) {
params[i][d].min_filter = interp;
params[i][d].mag_filter = interp;
params[i][d].output_size = out_shape.tensor_shape_span(i)[d];
if (d == 2) {
do {
params[i][d].roi.use_roi = true;
params[i][d].roi.start = start_dist(rng) * in_sample_shape[d];
params[i][d].roi.end = end_dist(rng) * in_sample_shape[d];
if (dist(rng))
std::swap(params[i][d].roi.start, params[i][d].roi.end);
} while (interp == ResamplingFilterType::Nearest &&
!CheckNN(params[i][d].output_size, params[i][d].roi.start, params[i][d].roi.end));
}
}
}
return params;
}
// Checks for possible rounding problems leading to selecting different source pixel
// when running NN resampling.
static bool CheckNN(int size, float start, float end) {
float step = (end - start) / size;
float x = start + step * 0.5f;
for (int i = 0; i < size; i++, x += step) {
if (std::abs(x - std::floor(x)) < 0.01f)
return false;
}
return true;
}
void RunGPU() {
cudaStream_t stream = 0;
ResampleGPU<Out, In, 3> kernel;
KernelContext ctx;
ctx.gpu.stream = stream;
ScratchpadAllocator sa;
TestDataGenerator<In> tdg;
TestTensorList<In> in;
TestTensorList<Out> out, ref;
int niter = NumIter();
for (int iter = 0; iter < niter; iter++) {
const TensorListShape<4> &in_shape = in_shapes[iter];
int N = in_shape.num_samples();
in.reshape(in_shape);
for (int i = 0; i < N; i++)
tdg.GenerateTestData(in.gpu(stream)[i], 30, stream);
const TensorListShape<4> &out_shape = out_shapes[iter];
vector<ResamplingParams3D> params = GenerateParams(out_shape, in_shape);
Resample3Dvia2D(ref, in, make_span(params), stream);
auto ref_cpu = ref.cpu(stream);
assert(ref_cpu.shape == out_shape);
auto req = kernel.Setup(ctx, in.template gpu<4>(stream), make_span(params));
ASSERT_EQ(req.output_shapes.size(), 1u) << "Expected only 1 output";
ASSERT_EQ(req.output_shapes[0], out_shape) << "Unexpected output shape";
out.reshape(out_shape);
CUDA_CALL(
cudaMemsetAsync(out.gpu(stream).data[0], 0, sizeof(Out)*out_shape.num_elements(), stream));
sa.Reserve(req.scratch_sizes);
auto scratchpad = sa.GetScratchpad();
ctx.scratchpad = &scratchpad;
auto out_gpu = out.template gpu<4>(stream);
kernel.Run(ctx, out_gpu, in.template gpu<4>(stream), make_span(params));
auto out_cpu = out.cpu(stream);
if (interp == ResamplingFilterType::Nearest) {
Check(out_cpu, ref_cpu);
} else {
// Epsilons are quite big because, processing order in the reference is forced to be XYZ
// or YXZ, whereas the tested implementation can use any order.
double eps = std::is_integral<Out>::value ? 1 : 1e-3;
Check(out_cpu, ref_cpu, EqualEpsRel(eps, 1e-4));
}
}
}
void RunCPU() {
cudaStream_t stream = 0;
ResampleCPU<Out, In, 3> kernel;
KernelContext ctx;
ScratchpadAllocator sa;
TestDataGenerator<In> tdg;
TestTensorList<In> in;
TestTensorList<Out> out, ref;
int niter = NumIter();
for (int iter = 0; iter < niter; iter++) {
const TensorListShape<4> &in_shape = in_shapes[iter];
int N = in_shape.num_samples();
in.reshape(in_shape);
for (int i = 0; i < N; i++)
tdg.GenerateTestData(in.gpu(stream)[i], 10, stream);
const TensorListShape<4> &out_shape = out_shapes[iter];
out.reshape(out_shape);
memset(out.cpu(stream).data[0], 0, sizeof(Out)*out_shape.num_elements());
vector<ResamplingParams3D> params = GenerateParams(out_shape, in_shape);
if (iter != 1)
continue;
Resample3Dvia2D(ref, in, make_span(params), stream);
auto ref_cpu = ref.cpu(stream);
ref.invalidate_gpu();
assert(ref_cpu.shape == out_shape);
auto in_cpu = in.template cpu<4>(stream);
auto out_cpu = out.template cpu<4>(stream);
for (int i = 0; i < N; i++) {
auto req = kernel.Setup(ctx, in_cpu[i], params[i]);
ASSERT_EQ(req.output_shapes.size(), 1u) << "Expected only 1 output";
ASSERT_EQ(req.output_shapes[0][0], out_shape[i]) << "Unexpected output shape";
sa.Reserve(req.scratch_sizes);
auto scratchpad = sa.GetScratchpad();
ctx.scratchpad = &scratchpad;
kernel.Run(ctx, out_cpu[i], in_cpu[i], params[i]);
if (interp == ResamplingFilterType::Nearest) {
Check(out_cpu[i], ref_cpu[i]);
} else {
// Epsilons are quite big because:
// - GPU uses fma
// - GPU uses different rounding
// - processing order in the reference is forced to be XYZ or YXZ, whereas
// the tested implementation can use any order.
double eps = std::is_integral<Out>::value ? 1 :
std::is_integral<In>::value ? max_value<In>()*1e-6 : 1e-5;
Check(out_cpu[i], ref_cpu[i], EqualEpsRel(eps, 2e-3));
}
}
}
}
vector<TensorListShape<4>> in_shapes, out_shapes;
int NumIter() const { return in_shapes.size(); }
std::mt19937_64 rng{1234};
};
using Resample3DTestTypes = ::testing::Types<
ResamplingTestParams<uint8_t, uint8_t, ResamplingFilterType::Nearest>,
ResamplingTestParams<float, uint8_t, ResamplingFilterType::Linear>,
ResamplingTestParams<int16_t, int16_t, ResamplingFilterType::Cubic>,
ResamplingTestParams<float, uint16_t, ResamplingFilterType::Lanczos3>
>;
TYPED_TEST_SUITE(Resample3DTest, Resample3DTestTypes);
TYPED_TEST(Resample3DTest, TestGPU) {
this->RunGPU();
}
TYPED_TEST(Resample3DTest, TestCPU) {
this->RunCPU();
}
} // namespace resample_test
} // namespace kernels
} // namespace dali
|
5fd4c0fb74c5cc081a6cbb3a426f7fb290f83710.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <stdio.h>
# include <stdlib.h>
# include <hip/hip_runtime.h>
# define N (2048)
# define THREADS_PER_BLOCK 512
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
int main(void)
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N*sizeof(int); // we need space for N integers
// allocate device copies of a, b, c
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for(int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i+1;
}
// copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// launch add() kernel with blocks and threads
hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, d_b, d_c);
// copy device result back to host copy of c
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%5d + %5d = %5d\n", a[i], b[i], c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 5fd4c0fb74c5cc081a6cbb3a426f7fb290f83710.cu |
# include <stdio.h>
# include <stdlib.h>
# include <cuda.h>
# define N (2048)
# define THREADS_PER_BLOCK 512
__global__ void add(int *a, int *b, int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
int main(void)
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N*sizeof(int); // we need space for N integers
// allocate device copies of a, b, c
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
for(int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i+1;
}
// copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel with blocks and threads
add<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(d_a, d_b, d_c);
// copy device result back to host copy of c
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
printf("%5d + %5d = %5d\n", a[i], b[i], c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
cccb68657e6f8073e5cedb758764f6205df2b40d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void softmax_kernel(int nrow, int ncol, const float *input, float *output) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
for (int thread_id = id; thread_id < nrow; thread_id += blockDim.x * gridDim.x)
{
float maxval = input[thread_id * ncol];
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input[thread_id * ncol + x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input[thread_id * ncol + x] - maxval);
}
for (int x = 0; x < ncol; ++x) {
output[thread_id * ncol + x] = exp(input[thread_id * ncol + x] - maxval) / sum;
}
}
}
int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
/* TODO: Your code here */
assert (input->ndim == 2);
assert (output->ndim == 2);
assert (input->shape[0] == output->shape[0] && input->shape[1] == output->shape[1]);
int nrow = input->shape[0];
int ncol = input->shape[1];
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
if (stream_handle)
hipLaunchKernelGGL(( softmax_kernel), dim3(1), dim3(THREADS_PER_BLOCK), 0, *(hipStream_t*)stream_handle->handle, nrow, ncol, input_data, output_data);
else
hipLaunchKernelGGL(( softmax_kernel), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, nrow, ncol, input_data, output_data);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < input -> ndim; i++)
size_input *= input -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_output *= output -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
| cccb68657e6f8073e5cedb758764f6205df2b40d.cu | #include "gpu_runtime.h"
__global__ void softmax_kernel(int nrow, int ncol, const float *input, float *output) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
for (int thread_id = id; thread_id < nrow; thread_id += blockDim.x * gridDim.x)
{
float maxval = input[thread_id * ncol];
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input[thread_id * ncol + x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input[thread_id * ncol + x] - maxval);
}
for (int x = 0; x < ncol; ++x) {
output[thread_id * ncol + x] = exp(input[thread_id * ncol + x] - maxval) / sum;
}
}
}
int DLGpuSoftmax(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
/* TODO: Your code here */
assert (input->ndim == 2);
assert (output->ndim == 2);
assert (input->shape[0] == output->shape[0] && input->shape[1] == output->shape[1]);
int nrow = input->shape[0];
int ncol = input->shape[1];
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
if (stream_handle)
softmax_kernel<<<1, THREADS_PER_BLOCK, 0, *(cudaStream_t*)stream_handle->handle>>>(nrow, ncol, input_data, output_data);
else
softmax_kernel<<<1, THREADS_PER_BLOCK>>>(nrow, ncol, input_data, output_data);
if(p != NULL){
int size_input = 1, size_output = 1;
for(int i = 0; i < input -> ndim; i++)
size_input *= input -> shape[i];
for(int i = 0; i < output -> ndim; i++)
size_output *= output -> shape[i];
p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
p -> workspace_memory = 0;
}
return 0;
}
|
68c435e57b771e7068f1f2918a11de9f7bd2949f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMeanLinearKernel(void* input, sd::LongType const* inputShape, int* starts, int* lengths,
sd::LongType numOfClasses, void* output,
sd::LongType const* outputShape) {
__shared__ T* val;
__shared__ sd::LongType xLen, zLen, segment, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
// extern __shared__ unsigned char shmem[];
// val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
//[zIndex] =
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
// val[segment] = ;
z[zIndex] = T(x[shape::getIndexOffset(start, inputShape)] / lengths[segment]);
// val[segment] = z[zIndex];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
if (lengths[segment]) sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void unsortedSegmentMeanLinearKernel(void* input, sd::LongType const* inputShape, void* indices,
sd::LongType const* indicesShape, int* starts, int* lengths,
sd::LongType numOfClasses, void* output,
sd::LongType const* outputShape) {
__shared__ T* val;
__shared__ sd::LongType xLen, zLen, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ I* y; // int threadsPerSegment, start, finish;
auto segment = blockIdx.x; // /
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
y = reinterpret_cast<I*>(indices);
// extern __shared__ unsigned char shmem[];
// val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
// if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
// start = starts[segment];
// finish = start + lengths[segment];
if (lengths[segment] > 0)
z[zIndex] = T(x[shape::getIndexOffset(starts[segment], inputShape)] / T(lengths[segment]));
else
z[zIndex] = 0; // DataTypeUtils::max<T>();
// val[segment] = z[zIndex];
// }
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / T(lengths[segment])));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentMean kernel
template <typename T, typename I>
static SD_KERNEL void segmentMeanTadKernel(void* inputBuf, sd::LongType const* inputShape,
sd::LongType const* inputTads, sd::LongType const* inputTadOffsets,
I* indices, int* starts, int* lengths, sd::LongType numOfClasses,
void* outputBuf, sd::LongType const* outputShape,
sd::LongType const* outputTads, sd::LongType const* outputTadOffsets) {
__shared__ T* val;
__shared__ sd::LongType len, zIndex, total;
__shared__ T* z;
__shared__ int threadsPerSegment, start, finish;
auto segment = indices[blockIdx.x]; // / threadsPerSegment;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
if (blockIdx.x <= total) {
auto x = reinterpret_cast<T*>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
}
} else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[segment]) sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// segmen mean
template <typename T, typename I>
static void segmentMeanFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
NDArray::prepareSpecialUse({output}, {input, indices});
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
if (input->isVector()) {
hipLaunchKernelGGL(( segmentMeanLinearKernel<T, I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream,
input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(),
output->specialShapeInfo());
} else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
hipLaunchKernelGGL(( segmentMeanTadKernel<T, I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream,
input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(),
output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentMeanFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentMeanFunctor_, (context, input, indices, output),
SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentMeanFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
sd::LongType numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
hipLaunchKernelGGL(( unsortedSegmentMeanLinearKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream,
input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
} else {
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
hipLaunchKernelGGL(( segmentMeanTadKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream,
input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(),
output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentMeanFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses,
NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMeanFunctor_,
(context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMeanBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* eps,
sd::LongType const* epsShape, void* indicesBuf,
sd::LongType const* indicesShape, int* lengths, void* outputBuf,
sd::LongType const* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ sd::LongType xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
z[zOffset] = T(gradOut[gradOffsetO] / float(lengths[classIndex]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static SD_KERNEL void segmentMeanBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* eps,
sd::LongType const* epsShape, void* indicesBuf,
sd::LongType const* indicesShape, int* lengths, void* outputBuf,
sd::LongType const* outputShape, sd::LongType const* inputTad,
sd::LongType const* inputOffsets, sd::LongType const* gradOutTad,
sd::LongType const* gradOutOffsets, sd::LongType const* outTad,
sd::LongType const* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ sd::LongType xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
// auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[i]; // yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
auto zIndex = shape::getIndexOffset(e, outTad);
auto gradIndex = shape::getIndexOffset(e, gradOutTad);
if (lengths[segment] > 0) currentOut[zIndex] = T(outGrad[gradIndex] / float(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// backrop for mean
// Backprop for segment mean over sorted indices (HIP path). The gradient of every input
// element is gradOut[its segment] / segmentLength, so the kernels broadcast the scaled
// upstream gradient back over each segment. T = float data type, I = index type.
// Reads input/indices/gradOut, writes dL/dInput into output; returns sd::Status::OK.
template <typename T, typename I>
sd::Status segmentMeanFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// Segment count is inferred from the last index value — assumes indices are sorted ascending (TODO confirm).
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
// Per-segment start offsets and lengths, filled on device by fillUpSegments below.
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// NOTE(review): `dims` and `begins` are computed but never used in this function.
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
sd::LongType loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf();  // indices->e<sd::LongType>(loop_size - 1);
// NOTE(review): block dim is input->lengthOf() — exceeds the per-block thread limit for long
// vectors; confirm callers bound the input length.
hipLaunchKernelGGL(( segmentMeanBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream,
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo());
} else {
// Multi-dimensional input: work on TADs (sub-arrays) taken along all dims except 0.
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(),
// dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
sd::LongType const* gradOutTads = packGradOut.specialShapeInfo();
sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets();
// One block per index entry; each block copies the scaled gradient TAD for its row.
hipLaunchKernelGGL(( segmentMeanBPTadKernel<T, I>), dim3(indices->lengthOf()), dim3(input->lengthOf()), 256, *stream,
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads,
outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// segment mean bp main
// Entry point for segment-mean backprop: dispatches to the <T, I> instantiation of
// segmentMeanFunctorBP_ chosen from the runtime dtypes of output (float types) and indices.
sd::Status segmentMeanFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// NOTE(review): the selector embeds `return ...`, so on a matched type pair control leaves here and
// registerSpecialUse below is presumably never reached; if no pair matches, the function appears to end
// without a return value — TODO confirm BUILD_DOUBLE_SELECTOR semantics.
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMeanFunctorBP_,
(context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop for unsorted-segment mean (HIP path): same math as segmentMeanFunctorBP_ —
// each input element receives gradOut[its segment] / segmentLength.
// NOTE(review): the numOfClasses parameter is ignored; the segment count is re-derived from the
// last index value below, which assumes sorted indices — TODO confirm for the unsorted op.
template <typename T, typename I>
static sd::Status unsortedSegmentMeanFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
// Per-segment start offsets and lengths, filled on device by fillUpSegments below.
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// NOTE(review): `dims` and `begins` are computed but never used in this function.
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
sd::LongType loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf();  // indices->e<sd::LongType>(loop_size - 1);
// NOTE(review): block dim is input->lengthOf() — may exceed the per-block thread limit.
hipLaunchKernelGGL(( segmentMeanBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream,
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo());
} else {
// Multi-dimensional input: work on TADs taken along all dims except 0.
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(),
// dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
sd::LongType const* gradOutTads = packGradOut.specialShapeInfo();
sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets();
// One block per index entry; each block copies the scaled gradient TAD for its row.
hipLaunchKernelGGL(( segmentMeanBPTadKernel<T, I>), dim3(indices->lengthOf()), dim3(input->lengthOf()), 256, *stream,
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads,
outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// Entry point for unsorted-segment-mean backprop: dispatches to the <T, I> instantiation of
// unsortedSegmentMeanFunctorBP_ chosen from the runtime dtypes of output and indices.
sd::Status unsortedSegmentMeanFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
sd::LongType numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// NOTE(review): the selector embeds `return ...`; registerSpecialUse below is presumably unreachable
// for matched type pairs — TODO confirm BUILD_DOUBLE_SELECTOR semantics.
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMeanFunctorBP_,
(context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
} // namespace helpers
} // namespace ops
} // namespace sd
| 68c435e57b771e7068f1f2918a11de9f7bd2949f.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
// Computes per-segment means of a 1-D input (sorted-segment forward pass).
// Each block handles one segment: thread 0 seeds the output slot with the segment's first
// element divided by the segment length; the remaining elements are accumulated with atomicAdd.
// starts/lengths: per-segment begin offset and element count (device memory).
template <typename T, typename I>
static SD_KERNEL void segmentMeanLinearKernel(void* input, sd::LongType const* inputShape, int* starts, int* lengths,
sd::LongType numOfClasses, void* output,
sd::LongType const* outputShape) {
// `val` is never used (leftover from a shared-memory variant kept in the comments below).
__shared__ T* val;
// Shared scalars written by thread 0 and consumed by the whole block after __syncthreads().
__shared__ sd::LongType xLen, zLen, segment, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
// extern __shared__ unsigned char shmem[];
// val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
//[zIndex] =
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
// val[segment] = ;
// NOTE(review): unlike the loop below, this seed write has no zero-length guard —
// an empty segment would divide by zero here; confirm callers guarantee non-empty segments.
z[zIndex] = T(x[shape::getIndexOffset(start, inputShape)] / lengths[segment]);
// val[segment] = z[zIndex];
}
}
__syncthreads();
// Accumulate the rest of the segment (elements after `start`), each pre-divided by the length.
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
if (lengths[segment]) sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Computes per-segment means of a 1-D input when indices are NOT sorted.
// One block per segment (segment == blockIdx.x): the whole input is scanned and every element
// whose index matches the segment is accumulated (pre-divided by the segment length).
// Empty segments produce 0 in the output.
template <typename T, typename I>
static SD_KERNEL void unsortedSegmentMeanLinearKernel(void* input, sd::LongType const* inputShape, void* indices,
sd::LongType const* indicesShape, int* starts, int* lengths,
sd::LongType numOfClasses, void* output,
sd::LongType const* outputShape) {
// `val` is never used (leftover from a shared-memory variant).
__shared__ T* val;
// Shared scalars written by thread 0, consumed after __syncthreads().
__shared__ sd::LongType xLen, zLen, zIndex;
__shared__ T* x;
__shared__ T* z;
__shared__ I* y;  // int threadsPerSegment, start, finish;
auto segment = blockIdx.x;  // /
if (threadIdx.x == 0) {
// threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
// threadsPerSegment;
x = reinterpret_cast<T*>(input);
z = reinterpret_cast<T*>(output);
y = reinterpret_cast<I*>(indices);
// extern __shared__ unsigned char shmem[];
// val = reinterpret_cast<T*>(shmem);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
// if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
// start = starts[segment];
// finish = start + lengths[segment];
// Seed the output with the segment's first element / length; empty segments get 0.
if (lengths[segment] > 0)
z[zIndex] = T(x[shape::getIndexOffset(starts[segment], inputShape)] / T(lengths[segment]));
else
z[zIndex] = 0;  // DataTypeUtils::max<T>();
// val[segment] = z[zIndex];
// }
}
__syncthreads();
// Scan the whole input; accumulate elements belonging to this segment (skipping the seed element).
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / T(lengths[segment])));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentMean kernel
// Segment-mean forward pass for multi-dimensional input: one block per outer row (TAD).
// Each block adds its row, element-wise pre-divided by the segment length, into the output TAD
// of the row's segment via atomicAdd.
// NOTE(review): because both branches below only atomicAdd, the output is assumed to be
// zero-initialized before launch — TODO confirm (the unsorted host path zeroes it explicitly).
template <typename T, typename I>
static SD_KERNEL void segmentMeanTadKernel(void* inputBuf, sd::LongType const* inputShape,
sd::LongType const* inputTads, sd::LongType const* inputTadOffsets,
I* indices, int* starts, int* lengths, sd::LongType numOfClasses,
void* outputBuf, sd::LongType const* outputShape,
sd::LongType const* outputTads, sd::LongType const* outputTadOffsets) {
// `val` is never used.
__shared__ T* val;
// Shared scalars written by thread 0, consumed after __syncthreads().
__shared__ sd::LongType len, zIndex, total;
__shared__ T* z;
__shared__ int threadsPerSegment, start, finish;
auto segment = indices[blockIdx.x];  // / threadsPerSegment;
if (threadIdx.x == 0) {
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
// NOTE(review): `<= total` looks like an off-by-one (`< total` expected) — TODO confirm;
// with one block per row the grid size should already bound blockIdx.x.
if (blockIdx.x <= total) {
auto x = reinterpret_cast<T*>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
// First row of the segment: unguarded accumulate (length assumed > 0 here).
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
}
} else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[segment]) sd::math::atomics::sd_atomicAdd(&z[zIndex], T(x[xIndex] / lengths[segment]));
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// segment mean
// Host-side driver for the sorted-segment-mean forward pass: builds per-segment start/length
// tables on device, then launches the linear kernel (1-D input) or the TAD kernel (ND input).
template <typename T, typename I>
static void segmentMeanFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
// Segment count inferred from the last index — assumes indices sorted ascending (TODO confirm).
sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
NDArray::prepareSpecialUse({output}, {input, indices});
// NOTE(review): `dims` is computed but never used.
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
if (input->isVector()) {
// NOTE(review): block dim is input->lengthOf() — may exceed the per-block thread limit.
segmentMeanLinearKernel<T, I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(),
output->specialShapeInfo());
} else {
// ND input: one block per outer row, accumulating into the segment's output TAD.
// NOTE(review): unlike the unsorted variant, `output` is not zeroed here although the TAD
// kernel only atomicAdds — TODO confirm callers pre-zero the output.
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
segmentMeanTadKernel<T, I><<<input->sizeAt(0), 512, 2048, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(),
output->specialShapeInfo(), outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Entry point for the sorted-segment-mean forward pass: dispatches to the <T, I> instantiation
// of segmentMeanFunctor_ chosen from the runtime dtypes of output and indices.
void segmentMeanFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentMeanFunctor_, (context, input, indices, output),
SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Host-side driver for the unsorted-segment-mean forward pass: builds per-segment start/length
// tables for numOfClasses segments, then launches the unsorted linear kernel (1-D input) or the
// TAD kernel (ND input, output zeroed first since the kernel only atomicAdds).
template <typename T, typename I>
static void unsortedSegmentMeanFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
sd::LongType numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// dims: x = one block per segment, y = threads, z = shared-memory bytes for the linear kernel.
dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
unsortedSegmentMeanLinearKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
} else {
// Zero the output first: segmentMeanTadKernel accumulates with atomicAdd only.
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
// One block per outer row of the input.
dims.x = input->sizeAt(0);
segmentMeanTadKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(),
output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Entry point for the unsorted-segment-mean forward pass: dispatches to the <T, I> instantiation
// of unsortedSegmentMeanFunctor_ chosen from the runtime dtypes of input and indices.
void unsortedSegmentMeanFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses,
NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMeanFunctor_,
(context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop kernel for 1-D segment mean: every input element e gets
// z[e] = gradOut[segment(e)] / lengths[segment(e)], using a grid-stride loop over the input.
// eps = upstream gradient (one value per segment); indicesBuf maps elements to segments.
template <typename T, typename I>
static SD_KERNEL void segmentMeanBPLinearKernel(void* inputBuf, sd::LongType const* inputShape, void* eps,
sd::LongType const* epsShape, void* indicesBuf,
sd::LongType const* indicesShape, int* lengths, void* outputBuf,
sd::LongType const* outputShape) {
__shared__ T* x;
// NOTE(review): `gradIn` is declared but never used.
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ sd::LongType xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
// Grid-stride loop over all input elements.
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
// NOTE(review): no zero-length guard here (unlike the TAD variant) — confirm segments are non-empty.
z[zOffset] = T(gradOut[gradOffsetO] / float(lengths[classIndex]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop kernel for ND segment mean: one block per index entry i. Each block copies the
// upstream gradient TAD of i's segment, divided element-wise by the segment length, into the
// output TAD for row i. Skips the write when the segment is empty.
template <typename T, typename I>
static SD_KERNEL void segmentMeanBPTadKernel(void* inputBuf, sd::LongType const* inputShape, void* eps,
sd::LongType const* epsShape, void* indicesBuf,
sd::LongType const* indicesShape, int* lengths, void* outputBuf,
sd::LongType const* outputShape, sd::LongType const* inputTad,
sd::LongType const* inputOffsets, sd::LongType const* gradOutTad,
sd::LongType const* gradOutOffsets, sd::LongType const* outTad,
sd::LongType const* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
// Shared scalars written by thread 0, consumed after __syncthreads().
__shared__ sd::LongType xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
// Grid-stride over index entries; threads within a block stride over the TAD elements.
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
// auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[i];  // yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
auto zIndex = shape::getIndexOffset(e, outTad);
auto gradIndex = shape::getIndexOffset(e, gradOutTad);
if (lengths[segment] > 0) currentOut[zIndex] = T(outGrad[gradIndex] / float(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// backprop for mean
// Backprop for segment mean over sorted indices (CUDA path): each input element's gradient is
// gradOut[its segment] / segmentLength. Reads input/indices/gradOut, writes dL/dInput into
// output; returns sd::Status::OK.
template <typename T, typename I>
sd::Status segmentMeanFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// Segment count inferred from the last index value — assumes indices sorted ascending (TODO confirm).
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
// Per-segment start offsets and lengths, filled on device by fillUpSegments below.
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// NOTE(review): `dims` and `begins` are computed but never used in this function.
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
sd::LongType loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf();  // indices->e<sd::LongType>(loop_size - 1);
// NOTE(review): block dim is input->lengthOf() — may exceed the per-block thread limit.
segmentMeanBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo());
} else {
// Multi-dimensional input: work on TADs taken along all dims except 0.
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(),
// dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
sd::LongType const* gradOutTads = packGradOut.specialShapeInfo();
sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets();
// One block per index entry; each block copies the scaled gradient TAD for its row.
segmentMeanBPTadKernel<T, I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads,
outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// segment mean bp main
// Entry point for segment-mean backprop (CUDA path): dispatches to the <T, I> instantiation of
// segmentMeanFunctorBP_ chosen from the runtime dtypes of output and indices.
sd::Status segmentMeanFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// NOTE(review): the selector embeds `return ...`, so registerSpecialUse below is presumably
// unreachable for matched type pairs — TODO confirm BUILD_DOUBLE_SELECTOR semantics.
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMeanFunctorBP_,
(context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backprop for unsorted-segment mean (CUDA path): same math as segmentMeanFunctorBP_.
// NOTE(review): the numOfClasses parameter is ignored; the segment count is re-derived from the
// last index value below, which assumes sorted indices — TODO confirm for the unsorted op.
template <typename T, typename I>
static sd::Status unsortedSegmentMeanFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
// Per-segment start offsets and lengths, filled on device by fillUpSegments below.
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// NOTE(review): `dims` and `begins` are computed but never used in this function.
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
sd::LongType loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf();  // indices->e<sd::LongType>(loop_size - 1);
// NOTE(review): block dim is input->lengthOf() — may exceed the per-block thread limit.
segmentMeanBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo());
} else {
// Multi-dimensional input: work on TADs taken along all dims except 0.
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(),
// dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
sd::LongType const* inputTads = packX.specialShapeInfo();
sd::LongType const* inputTadOffsets = packX.specialOffsets();
sd::LongType const* outputTads = packZ.specialShapeInfo();
sd::LongType const* outputTadOffsets = packZ.specialOffsets();
sd::LongType const* gradOutTads = packGradOut.specialShapeInfo();
sd::LongType const* gradOutTadOffsets = packGradOut.specialOffsets();
// One block per index entry; each block copies the scaled gradient TAD for its row.
segmentMeanBPTadKernel<T, I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>(
input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(),
output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads,
outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// Entry point for unsorted-segment-mean backprop (CUDA path): dispatches to the <T, I>
// instantiation of unsortedSegmentMeanFunctorBP_ chosen from output/indices dtypes.
sd::Status unsortedSegmentMeanFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
sd::LongType numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
// NOTE(review): the selector embeds `return ...`; registerSpecialUse below is presumably
// unreachable for matched type pairs — TODO confirm BUILD_DOUBLE_SELECTOR semantics.
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMeanFunctorBP_,
(context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
e6a948a0562cbd190ab3c47f3fe4ba4ee0e926d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Abort the process with a source-location diagnostic when a HIP runtime call failed.
// err: status returned by the runtime call; file/line: call site (supplied by the macro above).
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
    if (err == hipSuccess) return;  // success: nothing to report
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
    exit(-1);
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
// Power-measurement microbenchmark kernel: each thread repeatedly exercises the FP add/sub
// units for ITERATIONS rounds on its pair of input elements and writes one result to C.
// NOTE(review): there is no `i < N` bounds guard — the launch must use exactly N threads
// (main() sizes the arrays and grid to THREADS_PER_BLOCK * NUM_OF_BLOCKS); confirm if reused.
__global__ void PowerKernal1(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1=0;
float Value2=0;
float Value3=0;
float Value=0;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
// Store a value derived from the loop so the compiler cannot eliminate the work.
Value=Value1;
C[i]=Value+Value2;
}
// Host driver for the power microbenchmark: allocates host/device buffers, fills the inputs,
// runs PowerKernal1 under an external DAQ power-capture session, and copies the result back.
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
// NOTE(review): on malloc failure CleanupResources() is called but execution continues with a
// null pointer — presumably an exit was intended; TODO confirm.
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Launch geometry matches N exactly (kernel has no bounds guard).
// NOTE(review): dimGrid2/dimBlock2 are declared but never used.
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Start the cutil timer and the external power data-acquisition session.
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL((
PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
// Stop power capture, reporting the measured elapsed time to the DAQ.
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
// Release every device and host buffer owned by the file-scope globals; safe to call
// even when some allocations never happened (each pointer is checked before freeing).
void CleanupResources(void)
{
    // Device-side buffers.
    if (d_A) hipFree(d_A);
    if (d_B) hipFree(d_B);
    if (d_C) hipFree(d_C);
    // Host-side buffers.
    if (h_A) free(h_A);
    if (h_B) free(h_B);
    if (h_C) free(h_C);
}
// Fills data[0..n) with pseudo-random values in [0, 1].
// Bug fix: the original computed `rand() / RAND_MAX` in integer arithmetic, which truncates
// to 0 for every draw except the rare rand() == RAND_MAX, so the benchmark inputs were all
// zero. Dividing in floating point yields values spread across [0, 1] as intended.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
| e6a948a0562cbd190ab3c47f3fe4ba4ee0e926d8.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1=0;
float Value2=0;
float Value3=0;
float Value=0;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1+I2;
Value3=I1-I2;
Value1+=Value2;
Value1+=Value2;
Value2=Value3-Value1;
Value1=Value2+Value3;
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
}
|
e0ac1929439642b1d5b52c6a069fbcfdf83feaa8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
void infoDevices();
void printDevProp(hipDeviceProp_t devProp);
__global__ void matrix_sum(int* A, int* B, int* C, int numCol, int numRow)
{
uint pos = COL + ROW * numCol;
if (pos < numCol * numRow) {
C[pos] = A[pos] + B[pos];
}
}
void infoDevices()
{
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
for (int i = 0; i < devCount; ++i) {
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
}
void printDevProp(hipDeviceProp_t devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i) {
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
}
for (int i = 0; i < 3; ++i) {
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
}
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main(int argc, char const* argv[])
{
//MEDIR TIEMPOS :D
if (argc < 4) {
printf("Tamao matrix, cantidad de bloques, threads x bloque\n");
return 2;
}
// infoDevices();
int N, M;
sscanf(argv[1], "%d", &N);
M = N;
unsigned int blocks;
sscanf(argv[2], "%d", &blocks);
int threads;
sscanf(argv[3], "%d", &threads);
dim3 dimGrid(blocks, blocks , 1);
dim3 dimBlock(threads, threads, 1);
int size = N * M * sizeof(int);
int* hA = (int*) calloc(N * M, sizeof(int));
int* hB = (int*) calloc(N * M, sizeof(int));
int* hC = (int*) calloc(N * M, sizeof(int));
int* dA;
int* dB;
int* dC;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
cudaCheck(
hipMalloc((void**)&dA, size)
);
cudaCheck(
hipMalloc((void**)&dB, size)
);
cudaCheck(
hipMalloc((void**)&dC, size)
);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
hA[j + i * M] = i;
hB[j + i * M] = j;
}
}
// print_matrix_msg("A", hA, N, M);
// print_matrix_msg("B", hB, N, M);
cudaCheck(
hipMemcpy(dA, hA, N * M * sizeof(int), hipMemcpyHostToDevice)
);
cudaCheck(
hipMemcpy(dB, hB, N * M * sizeof(int), hipMemcpyHostToDevice)
);
cudaCheck(
hipMemcpy(dC, hC, N * M * sizeof(int), hipMemcpyHostToDevice)
);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( matrix_sum) , dim3(dimGrid), dim3(dimBlock), 0, 0, dA, dB, dC, N, M);
CUDA_CHECK_ERROR();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float gpu_time;
hipEventElapsedTime(&gpu_time, start, stop);
cudaCheck(
hipMemcpy(hC, dC, N * M * sizeof(int), hipMemcpyDeviceToHost)
);
// print_matrix_msg("C = A + B", hC, N, M);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
if (hC[j + i * M] != (hA[j + i * M] + hB[j + i * M]) ) {
return 1;
}
}
}
hipEventRecord(start, 0);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
hC[j + i * M] = hA[j + i * M] + hB[j + i * M];
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float cpu_time;
hipEventElapsedTime(&cpu_time, start, stop);
printf("%g %g\n", gpu_time, cpu_time);
hipEventDestroy(start);
hipEventDestroy(stop);
free(hA);
free(hB);
free(hC);
hipFree(dA);
hipFree(dB);
hipFree(dC);
return 0;
} | e0ac1929439642b1d5b52c6a069fbcfdf83feaa8.cu | #include "utils.h"
void infoDevices();
void printDevProp(cudaDeviceProp devProp);
__global__ void matrix_sum(int* A, int* B, int* C, int numCol, int numRow)
{
uint pos = COL + ROW * numCol;
if (pos < numCol * numRow) {
C[pos] = A[pos] + B[pos];
}
}
void infoDevices()
{
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
for (int i = 0; i < devCount; ++i) {
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
}
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %lu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %lu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i) {
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
}
for (int i = 0; i < 3; ++i) {
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
}
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %lu\n", devProp.totalConstMem);
printf("Texture alignment: %lu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
int main(int argc, char const* argv[])
{
//MEDIR TIEMPOS :D
if (argc < 4) {
printf("Tamaño matrix, cantidad de bloques, threads x bloque\n");
return 2;
}
// infoDevices();
int N, M;
sscanf(argv[1], "%d", &N);
M = N;
unsigned int blocks;
sscanf(argv[2], "%d", &blocks);
int threads;
sscanf(argv[3], "%d", &threads);
dim3 dimGrid(blocks, blocks , 1);
dim3 dimBlock(threads, threads, 1);
int size = N * M * sizeof(int);
int* hA = (int*) calloc(N * M, sizeof(int));
int* hB = (int*) calloc(N * M, sizeof(int));
int* hC = (int*) calloc(N * M, sizeof(int));
int* dA;
int* dB;
int* dC;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaCheck(
cudaMalloc((void**)&dA, size)
);
cudaCheck(
cudaMalloc((void**)&dB, size)
);
cudaCheck(
cudaMalloc((void**)&dC, size)
);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
hA[j + i * M] = i;
hB[j + i * M] = j;
}
}
// print_matrix_msg("A", hA, N, M);
// print_matrix_msg("B", hB, N, M);
cudaCheck(
cudaMemcpy(dA, hA, N * M * sizeof(int), cudaMemcpyHostToDevice)
);
cudaCheck(
cudaMemcpy(dB, hB, N * M * sizeof(int), cudaMemcpyHostToDevice)
);
cudaCheck(
cudaMemcpy(dC, hC, N * M * sizeof(int), cudaMemcpyHostToDevice)
);
cudaEventRecord(start, 0);
matrix_sum <<< dimGrid, dimBlock>>>(dA, dB, dC, N, M);
CUDA_CHECK_ERROR();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float gpu_time;
cudaEventElapsedTime(&gpu_time, start, stop);
cudaCheck(
cudaMemcpy(hC, dC, N * M * sizeof(int), cudaMemcpyDeviceToHost)
);
// print_matrix_msg("C = A + B", hC, N, M);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
if (hC[j + i * M] != (hA[j + i * M] + hB[j + i * M]) ) {
return 1;
}
}
}
cudaEventRecord(start, 0);
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
hC[j + i * M] = hA[j + i * M] + hB[j + i * M];
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float cpu_time;
cudaEventElapsedTime(&cpu_time, start, stop);
printf("%g %g\n", gpu_time, cpu_time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(hA);
free(hB);
free(hC);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
return 0;
} |
78183035a06ec7ebb83b2f471f78c0d2446a351f.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This code implements the interleaved and neighbor-paired approaches to
* parallel reduction in CUDA. For this example, the sum operation is used. A
* variety of optimizations on parallel reduction aimed at reducing divergence
* are also demonstrated, such as unrolling.
*/
// Recursive Implementation of Interleaved Pair Approach
int recursiveReduce(int *data, int const size)
{
// terminate check
if (size == 1) return data[0];
// renew the stride
int const stride = size / 2;
// in-place reduction
for (int i = 0; i < stride; i++)
{
data[i] += data[i + stride];
}
// call recursively
return recursiveReduce(data, stride);
}
// Neighbored Pair Implementation with divergence
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Neighbored Pair Implementation with less divergence
__global__ void reduceNeighboredLess (int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// convert tid into local array index
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Interleaved Pair Implementation with less divergence
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
// g_idata[idx] = g_idata[idx] + g_idata[idx + blockDim.x] + g_idata[idx + 2*blockDim.x] + g_idata[idx + 3*blockDim.x];
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling8New (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int *ptr = g_idata + idx;
int tmp = 0;
// Increment tmp 8 times with values strided by blockDim.x
for (int i = 0; i < 8; i++) {
tmp += *ptr; ptr += blockDim.x;
}
g_idata[idx] = tmp;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling16 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 16 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 16;
// unrolling 16
if (idx + 15 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
int c1 = g_idata[idx + 8 * blockDim.x];
int c2 = g_idata[idx + 9 * blockDim.x];
int c3 = g_idata[idx + 10 * blockDim.x];
int c4 = g_idata[idx + 11 * blockDim.x];
int d1 = g_idata[idx + 12 * blockDim.x];
int d2 = g_idata[idx + 13 * blockDim.x];
int d3 = g_idata[idx + 14 * blockDim.x];
int d4 = g_idata[idx + 15 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4 + c1 + c2 + c3 + c4
+ d1 + d2 + d3 + d4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile int *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling last warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = 512; // initial block size
if(argc > 1)
{
blocksize = atoi(argv[1]); // block size from command line argument
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
// mask off high 2 bytes to force max number to 255
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// kernel 1: reduceNeighbored
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 2: reduceNeighbored with less divergence
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceNeighboredLess), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 3: reduceInterleaved
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 4: reduceUnrolling2
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling2), dim3(grid.x / 2), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x);
// kernel 5: reduceUnrolling4
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling4), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x);
// kernel 6: reduceUnrolling8
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// kernel 6: reduceUnrolling8New
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling8New), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling8New elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// kernel 7: reduceUnrolling16
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling16), dim3(grid.x / 16), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 16 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling16 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 16, block.x);
// kernel 8: reduceUnrollWarps8
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// kernel 9: reduceCompleteUnrollWarsp8
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceCompleteUnrollWarps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// kernel 9: reduceCompleteUnroll
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
switch (blocksize)
{
case 1024:
hipLaunchKernelGGL(( reduceCompleteUnroll<1024>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata,
size);
break;
case 512:
hipLaunchKernelGGL(( reduceCompleteUnroll<512>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata,
size);
break;
case 256:
hipLaunchKernelGGL(( reduceCompleteUnroll<256>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata,
size);
break;
case 128:
hipLaunchKernelGGL(( reduceCompleteUnroll<128>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata,
size);
break;
case 64:
hipLaunchKernelGGL(( reduceCompleteUnroll<64>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size);
break;
}
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
| 78183035a06ec7ebb83b2f471f78c0d2446a351f.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This code implements the interleaved and neighbor-paired approaches to
* parallel reduction in CUDA. For this example, the sum operation is used. A
* variety of optimizations on parallel reduction aimed at reducing divergence
* are also demonstrated, such as unrolling.
*/
// Recursive Implementation of Interleaved Pair Approach
//
// CPU reference reduction (sum). Reduces `data` IN PLACE by repeatedly
// folding the upper half onto the lower half, then recursing on the
// lower half. Returns the total sum of the first `size` elements.
//
// Generalized: odd sizes are now handled by folding the unpaired last
// element into data[0] before halving (the original silently dropped it).
// Behavior for power-of-two sizes is unchanged.
int recursiveReduce(int *data, int const size)
{
    // terminate check
    if (size == 1) return data[0];

    // renew the stride
    int const stride = size / 2;

    // odd size: the element at data[size-1] has no partner; fold it in now
    if (size % 2 == 1) data[0] += data[size - 1];

    // in-place reduction: pair element i with element i + stride
    for (int i = 0; i < stride; i++)
    {
        data[i] += data[i + stride];
    }

    // call recursively on the folded lower half
    return recursiveReduce(data, stride);
}
// Neighbored-pair reduction with heavy intra-warp divergence: at each pass
// only every (2*stride)-th thread is active, scattered across all warps.
// Grid layout: one 1-D block per blockDim.x-element tile; block partials
// land in g_odata[blockIdx.x].
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x + threadIdx.x;

    // This block's tile of the input array.
    int *blockData = g_idata + blockIdx.x * blockDim.x;

    // Threads past the end of the data do nothing.
    if (globalId >= n) return;

    // In-place tree reduction over the tile, doubling the stride each pass.
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1)
    {
        if (localId % (2 * stride) == 0)
            blockData[localId] += blockData[localId + stride];

        // all partial sums of this pass must land before the next one
        __syncthreads();
    }

    // Thread 0 publishes this block's total.
    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Neighbored-pair reduction with reduced divergence: thread ids are remapped
// so the active threads of each pass are packed into the lowest warps of the
// block, keeping whole warps either fully active or fully idle.
__global__ void reduceNeighboredLess (int *g_idata, int *g_odata,
                                      unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x + threadIdx.x;

    // This block's tile of the input array.
    int *blockData = g_idata + blockIdx.x * blockDim.x;

    if (globalId >= n) return;

    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1)
    {
        // Remap: thread t works on element 2*stride*t, so active lanes
        // are contiguous at the front of the block.
        const unsigned int pos = 2 * stride * localId;

        if (pos < blockDim.x)
            blockData[pos] += blockData[pos + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Interleaved-pair reduction: the stride starts at blockDim.x/2 and halves
// each pass, so the first `stride` threads stay active and their global
// accesses remain contiguous (coalesced, low divergence).
__global__ void reduceInterleaved (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x + threadIdx.x;

    // This block's tile of the input array.
    int *blockData = g_idata + blockIdx.x * blockDim.x;

    if (globalId >= n) return;

    for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
    {
        if (localId < stride)
            blockData[localId] += blockData[localId + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Unrolling 2: each thread block reduces TWO consecutive data tiles.
// Launched with grid.x / 2 blocks; each thread first folds its element
// from the second tile into the first, then a normal interleaved
// reduction runs over the first tile.
__global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x * 2 + threadIdx.x;

    // First of the two tiles owned by this block.
    int *blockData = g_idata + blockIdx.x * blockDim.x * 2;

    // Fold the second tile onto the first (one add per thread).
    if (globalId + blockDim.x < n)
        g_idata[globalId] += g_idata[globalId + blockDim.x];
    __syncthreads();

    // Interleaved in-place tree reduction over the first tile.
    for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
    {
        if (localId < stride)
            blockData[localId] += blockData[localId + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Unrolling 4: each thread block reduces FOUR consecutive data tiles.
// Launched with grid.x / 4 blocks; the four independent loads per thread
// expose instruction-level parallelism to hide memory latency.
__global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x * 4 + threadIdx.x;

    // First of the four tiles owned by this block.
    int *blockData = g_idata + blockIdx.x * blockDim.x * 4;

    // Fold four strided elements into one before the tree reduction.
    if (globalId + 3 * blockDim.x < n)
    {
        int sum = 0;
        #pragma unroll
        for (int part = 0; part < 4; part++)
            sum += g_idata[globalId + part * blockDim.x];
        g_idata[globalId] = sum;
    }
    __syncthreads();

    // Interleaved in-place tree reduction over the first tile.
    for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
    {
        if (localId < stride)
            blockData[localId] += blockData[localId + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Unrolling 8: each thread block reduces EIGHT consecutive data tiles.
// Launched with grid.x / 8 blocks.
__global__ void reduceUnrolling8 (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // First of the eight tiles owned by this block.
    int *blockData = g_idata + blockIdx.x * blockDim.x * 8;

    // Fold eight strided elements into one before the tree reduction.
    if (globalId + 7 * blockDim.x < n)
    {
        int sum = 0;
        #pragma unroll
        for (int part = 0; part < 8; part++)
            sum += g_idata[globalId + part * blockDim.x];
        g_idata[globalId] = sum;
    }
    __syncthreads();

    // Interleaved in-place tree reduction over the first tile.
    for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
    {
        if (localId < stride)
            blockData[localId] += blockData[localId + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Unrolling 8 (pointer-walk variant): identical result to reduceUnrolling8,
// but the eight partial loads are expressed as a pointer incremented by
// blockDim.x per step instead of eight named scalars.
__global__ void reduceUnrolling8New (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // First of the eight tiles owned by this block.
    int *blockData = g_idata + blockIdx.x * blockDim.x * 8;

    // Fold eight strided elements into one before the tree reduction.
    if (globalId + 7 * blockDim.x < n)
    {
        int partial = 0;
        const int *src = g_idata + globalId;
        // walk the eight tiles, one blockDim.x apart
        for (int part = 0; part < 8; part++, src += blockDim.x)
            partial += *src;
        g_idata[globalId] = partial;
    }
    __syncthreads();

    // Interleaved in-place tree reduction over the first tile.
    for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
    {
        if (localId < stride)
            blockData[localId] += blockData[localId + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Unrolling 16: each thread block reduces SIXTEEN consecutive data tiles.
// Launched with grid.x / 16 blocks.
__global__ void reduceUnrolling16 (int *g_idata, int *g_odata, unsigned int n)
{
    const unsigned int localId  = threadIdx.x;
    const unsigned int globalId = blockIdx.x * blockDim.x * 16 + threadIdx.x;

    // First of the sixteen tiles owned by this block.
    int *blockData = g_idata + blockIdx.x * blockDim.x * 16;

    // Fold sixteen strided elements into one before the tree reduction.
    if (globalId + 15 * blockDim.x < n)
    {
        int sum = 0;
        #pragma unroll
        for (int part = 0; part < 16; part++)
            sum += g_idata[globalId + part * blockDim.x];
        g_idata[globalId] = sum;
    }
    __syncthreads();

    // Interleaved in-place tree reduction over the first tile.
    for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
    {
        if (localId < stride)
            blockData[localId] += blockData[localId + stride];

        __syncthreads();
    }

    if (localId == 0) g_odata[blockIdx.x] = blockData[0];
}
// Unrolling 8 data tiles per block, with the last six reduction passes
// (stride <= 32) replaced by explicit warp-level statements on a
// `volatile` pointer.
//
// Launch: grid.x must equal n / (8 * blockDim.x); blockDim.x must be a
// power of two >= 64.
//
// NOTE(review): the tid<32 tail relies on implicit warp-synchronous
// execution (pre-Volta). On Volta+ with independent thread scheduling this
// idiom needs __syncwarp() between steps — confirm target architecture.
__global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;

    // unrolling 8: fold eight strided elements into the first tile
    if (idx + 7 * blockDim.x < n)
    {
        int a1 = g_idata[idx];
        int a2 = g_idata[idx + blockDim.x];
        int a3 = g_idata[idx + 2 * blockDim.x];
        int a4 = g_idata[idx + 3 * blockDim.x];
        int b1 = g_idata[idx + 4 * blockDim.x];
        int b2 = g_idata[idx + 5 * blockDim.x];
        int b3 = g_idata[idx + 6 * blockDim.x];
        int b4 = g_idata[idx + 7 * blockDim.x];
        g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }

    __syncthreads();

    // in-place reduction in global memory; stops once one warp remains
    for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }

        // synchronize within threadblock
        __syncthreads();
    }

    // unrolling warp: `volatile` forces each store to memory so later
    // reads in the same warp observe it without an explicit barrier
    if (tid < 32)
    {
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32];
        vmem[tid] += vmem[tid + 16];
        vmem[tid] += vmem[tid +  8];
        vmem[tid] += vmem[tid +  4];
        vmem[tid] += vmem[tid +  2];
        vmem[tid] += vmem[tid +  1];
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Unrolling 8 data tiles per block with the ENTIRE reduction tree unrolled:
// the block-wide passes are written out as guarded statements (the
// blockDim.x checks let the compiler drop dead branches is the launch
// config were compile-time; here they are evaluated at runtime), and the
// last warp uses the volatile warp-synchronous idiom.
//
// Launch: grid.x must equal n / (8 * blockDim.x); blockDim.x must be a
// power of two in [64, 1024].
//
// NOTE(review): the tid<32 tail assumes implicit warp synchrony
// (pre-Volta); needs __syncwarp() on Volta+ — confirm target architecture.
__global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata,
        unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;

    // unrolling 8: fold eight strided elements into the first tile
    if (idx + 7 * blockDim.x < n)
    {
        int a1 = g_idata[idx];
        int a2 = g_idata[idx + blockDim.x];
        int a3 = g_idata[idx + 2 * blockDim.x];
        int a4 = g_idata[idx + 3 * blockDim.x];
        int b1 = g_idata[idx + 4 * blockDim.x];
        int b2 = g_idata[idx + 5 * blockDim.x];
        int b3 = g_idata[idx + 6 * blockDim.x];
        int b4 = g_idata[idx + 7 * blockDim.x];
        g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }

    __syncthreads();

    // in-place reduction and complete unroll: one guarded pass per
    // power-of-two stride; barriers sit outside the guards so ALL threads
    // reach them
    if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];

    __syncthreads();

    if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];

    __syncthreads();

    if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];

    __syncthreads();

    if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];

    __syncthreads();

    // unrolling warp (volatile warp-synchronous tail)
    if (tid < 32)
    {
        volatile int *vsmem = idata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid +  8];
        vsmem[tid] += vsmem[tid +  4];
        vsmem[tid] += vsmem[tid +  2];
        vsmem[tid] += vsmem[tid +  1];
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Fully-unrolled reduction with the block size supplied as a COMPILE-TIME
// template parameter: the `iBlockSize >= X` guards are constant
// expressions, so the compiler removes the untaken passes entirely.
// The host picks the instantiation in a switch over `blocksize`.
//
// Launch: grid.x must equal n / (8 * blockDim.x); blockDim.x == iBlockSize,
// a power of two in [64, 1024].
//
// NOTE(review): the tid<32 tail assumes implicit warp synchrony
// (pre-Volta); needs __syncwarp() on Volta+ — confirm target architecture.
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata,
                                     unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x * 8;

    // unrolling 8: fold eight strided elements into the first tile
    if (idx + 7 * blockDim.x < n)
    {
        int a1 = g_idata[idx];
        int a2 = g_idata[idx + blockDim.x];
        int a3 = g_idata[idx + 2 * blockDim.x];
        int a4 = g_idata[idx + 3 * blockDim.x];
        int b1 = g_idata[idx + 4 * blockDim.x];
        int b2 = g_idata[idx + 5 * blockDim.x];
        int b3 = g_idata[idx + 6 * blockDim.x];
        int b4 = g_idata[idx + 7 * blockDim.x];
        g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }

    __syncthreads();

    // in-place reduction and complete unroll; dead passes are eliminated
    // at compile time because iBlockSize is a template constant
    if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512];

    __syncthreads();

    if (iBlockSize >= 512 && tid < 256)  idata[tid] += idata[tid + 256];

    __syncthreads();

    if (iBlockSize >= 256 && tid < 128)  idata[tid] += idata[tid + 128];

    __syncthreads();

    if (iBlockSize >= 128 && tid < 64)   idata[tid] += idata[tid + 64];

    __syncthreads();

    // unrolling warp (volatile warp-synchronous tail)
    if (tid < 32)
    {
        volatile int *vsmem = idata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid +  8];
        vsmem[tid] += vsmem[tid +  4];
        vsmem[tid] += vsmem[tid +  2];
        vsmem[tid] += vsmem[tid +  1];
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Unrolling 2 data tiles per block, with the last six reduction passes
// (stride <= 32) replaced by the volatile warp-synchronous idiom.
//
// Launch: grid.x must equal n / (2 * blockDim.x); blockDim.x must be a
// power of two >= 64.
//
// NOTE(review): the tid<32 tail assumes implicit warp synchrony
// (pre-Volta); needs __syncwarp() on Volta+ — confirm target architecture.
__global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x * 2;

    // unrolling 2: fold the second tile onto the first
    if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];

    __syncthreads();

    // in-place reduction in global memory; stops once one warp remains
    for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }

        // synchronize within threadblock
        __syncthreads();
    }

    // unrolling last warp: `volatile` forces each store to memory so later
    // reads in the same warp observe it without an explicit barrier
    if (tid < 32)
    {
        volatile int *vsmem = idata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid +  8];
        vsmem[tid] += vsmem[tid +  4];
        vsmem[tid] += vsmem[tid +  2];
        vsmem[tid] += vsmem[tid +  1];
    }

    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
/*
 * Host driver: runs a CPU reference reduction, then each GPU reduction
 * kernel in turn (neighbored, interleaved, and the unrolled variants),
 * timing every run and printing its partial-sum total.
 *
 * NOTE(review): only the LAST kernel's gpu_sum is compared against cpu_sum
 * at the end; earlier kernels' sums are printed but not validated.
 *
 * Fixes vs. original: `tmp` (CPU scratch copy) is now freed; the duplicated
 * "kernel 6"/"kernel 9" labels were renumbered and a comment typo fixed.
 */
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting reduction at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    bool bResult = false;
    // initialization
    int size = 1 << 24; // total number of elements to reduce
    printf(" with array size %d ", size);
    // execution configuration
    int blocksize = 512; // initial block size
    if(argc > 1)
    {
        blocksize = atoi(argv[1]); // block size from command line argument
    }
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);
    printf("grid %d block %d\n", grid.x, block.x);
    // allocate host memory
    size_t bytes = size * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    int *h_odata = (int *) malloc(grid.x * sizeof(int));
    int *tmp = (int *) malloc(bytes); // scratch copy for the CPU reference
    // initialize the array; keep only the low byte so values stay in [0,255]
    for (int i = 0; i < size; i++)
    {
        h_idata[i] = (int)( rand() & 0xFF );
    }
    memcpy (tmp, h_idata, bytes);
    double iStart, iElaps;
    int gpu_sum = 0;
    // allocate device memory
    int *d_idata = NULL;
    int *d_odata = NULL;
    CHECK(cudaMalloc((void **) &d_idata, bytes));
    CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
    // cpu reduction (destroys tmp, which is why h_idata was copied)
    iStart = seconds();
    int cpu_sum = recursiveReduce (tmp, size);
    iElaps = seconds() - iStart;
    printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
    // kernel 1: reduceNeighbored
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
    // kernel 2: reduceNeighbored with less divergence
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
    // kernel 3: reduceInterleaved
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
    printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
    // kernel 4: reduceUnrolling2 (half the blocks: each handles 2 tiles)
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i];
    printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x);
    // kernel 5: reduceUnrolling4
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
    printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x);
    // kernel 6: reduceUnrolling8
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
    printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // kernel 7: reduceUnrolling8New (pointer-walk variant of kernel 6)
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceUnrolling8New<<<grid.x / 8, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
    printf("gpu Unrolling8New elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // kernel 8: reduceUnrolling16
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceUnrolling16<<<grid.x / 16, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 16 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i];
    printf("gpu Unrolling16 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 16, block.x);
    // kernel 9: reduceUnrollWarps8
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
    printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // kernel 10: reduceCompleteUnrollWarps8
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
    printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // kernel 11: reduceCompleteUnroll (block size chosen at compile time)
    CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    CHECK(cudaDeviceSynchronize());
    iStart = seconds();
    switch (blocksize)
    {
    case 1024:
        reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata,
                size);
        break;
    case 512:
        reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata,
                size);
        break;
    case 256:
        reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata,
                size);
        break;
    case 128:
        reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata,
                size);
        break;
    case 64:
        reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size);
        break;
    }
    CHECK(cudaDeviceSynchronize());
    iElaps = seconds() - iStart;
    CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
                     cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
    printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block "
           "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // free host memory
    free(h_idata);
    free(h_odata);
    free(tmp); // BUGFIX: was leaked in the original
    // free device memory
    CHECK(cudaFree(d_idata));
    CHECK(cudaFree(d_odata));
    // reset device
    CHECK(cudaDeviceReset());
    // check the results (last kernel only)
    bResult = (gpu_sum == cpu_sum);
    if(!bResult) printf("Test failed!\n");
    return EXIT_SUCCESS;
}
|
884db5b063982caaaef6b03bbccc74088615463b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Ahmad Abdelfattah
@author Azzam Haidar
@generated from magmablas/zgemm_batched_smallsq.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define SLDA(N) ( (N==15||N==23||N==31)? N : (N+1) )
extern __shared__ magmaFloatComplex zdata[];
// Batched small-square complex GEMM kernel: C = alpha*op(A)*op(B) + beta*C
// for N x N matrices, N a compile-time constant (the host wrapper enforces
// m == n == k <= 32 and instantiates N = m).
//
// Launch (set by the wrapper): blockDim = (N, N, ntcol); each z-slice of a
// block handles one matrix of the batch, so thread (tx, ty) owns element
// C(tx, ty). Dynamic shared memory holds one A tile and one B tile per
// z-slice, with leading dimension SLDA(N) (the +1 padding presumably
// avoids shared-memory bank conflicts for these sizes).
template<int N>
__global__ void
cgemm_batched_smallsq_kernel(
        const magma_trans_t transA, magma_trans_t transB,
        const magmaFloatComplex alpha, magmaFloatComplex const * const * dA_array, int ai, int aj, int ldda,
        magmaFloatComplex const * const * dB_array, int bi, int bj, int lddb,
        const magmaFloatComplex beta, magmaFloatComplex** dC_array, int ci, int cj, int lddc,
        const int batchCount)
{
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int tz = threadIdx.z;
    const int bx = blockIdx.x;
    // each z-slice of the block works on its own matrix of the batch
    const int batchid = bx * blockDim.z + tz;
    if(batchid >= batchCount) return;

    // per-matrix base pointers, offset to the (ai,aj)/(bi,bj)/(ci,cj) corner
    const magmaFloatComplex* __restrict__ dA = dA_array[batchid] + aj * ldda + ai;
    const magmaFloatComplex* __restrict__ dB = dB_array[batchid] + bj * lddb + bi;
    magmaFloatComplex* __restrict__ dC = dC_array[batchid] + cj * lddc + ci;

    magmaFloatComplex rC = MAGMA_C_ZERO;    // accumulator for C(tx, ty)
    magmaFloatComplex rTmp = MAGMA_C_ZERO;  // dot-product partial

    // shared-memory tiles; SLDA pads the leading dimension for some N
    const int slda = SLDA(N);
    const int sldb = SLDA(N);
    magmaFloatComplex* sA = (magmaFloatComplex*)(zdata);
    magmaFloatComplex* sB = (magmaFloatComplex*)(zdata + blockDim.z * slda * N);
    // advance to this z-slice's private tiles
    sA += tz * slda * N;
    sB += tz * sldb * N;

    // read A & B: store op(A)/op(B) into shared memory, transposing (and
    // conjugating for ConjTrans) on the fly
    if(transA == MagmaNoTrans){
        sA[ty * slda + tx] = dA[ty * ldda + tx];
    }
    else{
        sA[tx * slda + ty] = (transA == MagmaTrans) ? dA[ty * ldda + tx] : MAGMA_C_CONJ( dA[ty * ldda + tx] );
    }
    if(transB == MagmaNoTrans){
        sB[ty * sldb + tx] = dB[ty * lddb + tx];
    }
    else{
        sB[tx * sldb + ty] = (transB == MagmaTrans) ? dB[ty * lddb + tx] : MAGMA_C_CONJ( dB[ty * lddb + tx] );
    }
    __syncthreads();    // tiles must be complete before the multiply

    // skip the read of C entirely when beta == 0 (C may be uninitialized)
    if(beta != MAGMA_C_ZERO){
        rC = beta * dC[ty * lddc + tx];
    }

    // multiply: dot product of row tx of op(A) with column ty of op(B)
    rTmp = MAGMA_C_ZERO;
    #pragma unroll
    for(int j = 0; j < N; j++){
        rTmp += sA[j * slda + tx] * sB[ty * sldb + j];
    }
    rC += alpha * rTmp;

    // write from rC
    dC[ty * lddc + tx] = rC;
}
/***************************************************************************//**
    Host wrapper for batched small-square CGEMM:
        C = alpha * op(A) * op(B) + beta * C  for each matrix in the batch,
    restricted to square problems with m == n == k <= 32.

    Validates the arguments, picks ntcol (matrices per thread block) and the
    dynamic shared-memory size, then dispatches to the kernel instantiation
    matching m.

    BUGFIX vs. original: on an invalid argument the function now RETURNS
    after magma_xerbla instead of falling through and launching the kernel
    anyway (which could read out of bounds, e.g. for non-square sizes).
*******************************************************************************/
extern "C" void
magmablas_cgemm_batched_smallsq(
    magma_trans_t transA, magma_trans_t transB,
    magma_int_t m, magma_int_t n, magma_int_t k,
    magmaFloatComplex alpha,
    magmaFloatComplex const * const * dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
    magmaFloatComplex const * const * dB_array, magma_int_t bi, magma_int_t bj, magma_int_t lddb,
    magmaFloatComplex beta,
    magmaFloatComplex **dC_array, magma_int_t ci, magma_int_t cj, magma_int_t lddc,
    magma_int_t batchCount, magma_queue_t queue )
{
    // argument checking; info encodes the (negated) index of the bad argument
    magma_int_t info = 0;
    if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
        info = -1;
    else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
        info = -2;
    else if ( m < 0 )
        info = -3;
    else if ( n < 0 )
        info = -4;
    else if ( k < 0 )
        info = -5;
    else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
        info = -8;
    else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
        info = -10;
    else if ( lddc < m )
        info = -13;

    // kernel-specific restrictions: square, at most 32 x 32
    if( !(m == n && n == k) ){
        printf("Only square sizes are supported\n");
        info = -1;
    }
    if( m > 32){
        printf("Only square sizes of up to 32 are supported\n");
        info = -1;
    }

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return;    // BUGFIX: do not launch with invalid arguments
    }

    if ( m <= 0 || n <= 0 || k <= 0 ) return;    // nothing to do

    // ntcol matrices per thread block; shared memory holds ntcol pairs of
    // (padded) A and B tiles
    magma_int_t ntcol  = magma_get_cgemm_batched_ntcol( m );
    magma_int_t shmem  = ( SLDA(m)*m + SLDA(n)*n ) * sizeof(magmaFloatComplex);
    shmem *= ntcol;

    const int nblocks = magma_ceildiv(batchCount, ntcol);
    dim3 grid(nblocks, 1, 1);
    dim3 threads(m, m, ntcol);

    // dispatch to the compile-time instantiation matching m
    switch(m){
        case  1:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  2:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  3:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  4:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  5:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  6:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  7:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  8:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case  9:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel< 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 10:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 11:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 12:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 13:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 14:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 15:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 16:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 17:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 18:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 19:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 20:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 21:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 22:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 23:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 24:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 25:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 26:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 27:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 28:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 29:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 30:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<30>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 31:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        case 32:hipLaunchKernelGGL(( cgemm_batched_smallsq_kernel<32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
        default:;    // unreachable: m validated above
    }
}
| 884db5b063982caaaef6b03bbccc74088615463b.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Ahmad Abdelfattah
@author Azzam Haidar
@generated from magmablas/zgemm_batched_smallsq.cu, normal z -> c, Wed Jan 2 14:18:51 2019
*/
#include "magma_internal.h"
#include "magma_templates.h"
#include "batched_kernel_param.h"
#define SLDA(N) ( (N==15||N==23||N==31)? N : (N+1) )
extern __shared__ magmaFloatComplex zdata[];
template<int N>
__global__ void
cgemm_batched_smallsq_kernel(
const magma_trans_t transA, magma_trans_t transB,
const magmaFloatComplex alpha, magmaFloatComplex const * const * dA_array, int ai, int aj, int ldda,
magmaFloatComplex const * const * dB_array, int bi, int bj, int lddb,
const magmaFloatComplex beta, magmaFloatComplex** dC_array, int ci, int cj, int lddc,
const int batchCount)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tz = threadIdx.z;
const int bx = blockIdx.x;
const int batchid = bx * blockDim.z + tz;
if(batchid >= batchCount) return;
const magmaFloatComplex* __restrict__ dA = dA_array[batchid] + aj * ldda + ai;
const magmaFloatComplex* __restrict__ dB = dB_array[batchid] + bj * lddb + bi;
magmaFloatComplex* __restrict__ dC = dC_array[batchid] + cj * lddc + ci;
magmaFloatComplex rC = MAGMA_C_ZERO;
magmaFloatComplex rTmp = MAGMA_C_ZERO;
const int slda = SLDA(N);
const int sldb = SLDA(N);
magmaFloatComplex* sA = (magmaFloatComplex*)(zdata);
magmaFloatComplex* sB = (magmaFloatComplex*)(zdata + blockDim.z * slda * N);
sA += tz * slda * N;
sB += tz * sldb * N;
// read A & B
if(transA == MagmaNoTrans){
sA[ty * slda + tx] = dA[ty * ldda + tx];
}
else{
sA[tx * slda + ty] = (transA == MagmaTrans) ? dA[ty * ldda + tx] : MAGMA_C_CONJ( dA[ty * ldda + tx] );
}
if(transB == MagmaNoTrans){
sB[ty * sldb + tx] = dB[ty * lddb + tx];
}
else{
sB[tx * sldb + ty] = (transB == MagmaTrans) ? dB[ty * lddb + tx] : MAGMA_C_CONJ( dB[ty * lddb + tx] );
}
__syncthreads();
if(beta != MAGMA_C_ZERO){
rC = beta * dC[ty * lddc + tx];
}
// multiply
rTmp = MAGMA_C_ZERO;
#pragma unroll
for(int j = 0; j < N; j++){
rTmp += sA[j * slda + tx] * sB[ty * sldb + j];
}
rC += alpha * rTmp;
// write from rC
dC[ty * lddc + tx] = rC;
}
extern "C" void
magmablas_cgemm_batched_smallsq(
magma_trans_t transA, magma_trans_t transB,
magma_int_t m, magma_int_t n, magma_int_t k,
magmaFloatComplex alpha,
magmaFloatComplex const * const * dA_array, magma_int_t ai, magma_int_t aj, magma_int_t ldda,
magmaFloatComplex const * const * dB_array, magma_int_t bi, magma_int_t bj, magma_int_t lddb,
magmaFloatComplex beta,
magmaFloatComplex **dC_array, magma_int_t ci, magma_int_t cj, magma_int_t lddc,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans )
info = -1;
else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans )
info = -2;
else if ( m < 0 )
info = -3;
else if ( n < 0 )
info = -4;
else if ( k < 0 )
info = -5;
else if ( transA == MagmaNoTrans ? ldda < m : ldda < k )
info = -8;
else if ( transB == MagmaNoTrans ? lddb < k : lddb < n )
info = -10;
else if ( lddc < m )
info = -13;
if( !(m == n && n == k) ){
printf("Only square sizes are supported\n");
info = -1;
}
if( m > 32){
printf("Only square sizes of up to 32 are supported\n");
info = -1;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
}
if ( m <= 0 || n <= 0 || k <= 0 ) return;
magma_int_t ntcol = magma_get_cgemm_batched_ntcol( m );
magma_int_t shmem = ( SLDA(m)*m + SLDA(n)*n ) * sizeof(magmaFloatComplex);
shmem *= ntcol;
const int nblocks = magma_ceildiv(batchCount, ntcol);
dim3 grid(nblocks, 1, 1);
dim3 threads(m, m, ntcol);
switch(m){
case 1: cgemm_batched_smallsq_kernel< 1><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 2: cgemm_batched_smallsq_kernel< 2><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 3: cgemm_batched_smallsq_kernel< 3><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 4: cgemm_batched_smallsq_kernel< 4><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 5: cgemm_batched_smallsq_kernel< 5><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 6: cgemm_batched_smallsq_kernel< 6><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 7: cgemm_batched_smallsq_kernel< 7><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 8: cgemm_batched_smallsq_kernel< 8><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 9: cgemm_batched_smallsq_kernel< 9><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 10: cgemm_batched_smallsq_kernel<10><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 11: cgemm_batched_smallsq_kernel<11><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 12: cgemm_batched_smallsq_kernel<12><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 13: cgemm_batched_smallsq_kernel<13><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 14: cgemm_batched_smallsq_kernel<14><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 15: cgemm_batched_smallsq_kernel<15><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 16: cgemm_batched_smallsq_kernel<16><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 17: cgemm_batched_smallsq_kernel<17><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 18: cgemm_batched_smallsq_kernel<18><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 19: cgemm_batched_smallsq_kernel<19><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 20: cgemm_batched_smallsq_kernel<20><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 21: cgemm_batched_smallsq_kernel<21><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 22: cgemm_batched_smallsq_kernel<22><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 23: cgemm_batched_smallsq_kernel<23><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 24: cgemm_batched_smallsq_kernel<24><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 25: cgemm_batched_smallsq_kernel<25><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 26: cgemm_batched_smallsq_kernel<26><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 27: cgemm_batched_smallsq_kernel<27><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 28: cgemm_batched_smallsq_kernel<28><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 29: cgemm_batched_smallsq_kernel<29><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 30: cgemm_batched_smallsq_kernel<30><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 31: cgemm_batched_smallsq_kernel<31><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
case 32: cgemm_batched_smallsq_kernel<32><<<grid, threads, shmem, queue->cuda_stream()>>>(transA, transB, alpha, dA_array, ai, aj, ldda, dB_array, bi, bj, lddb, beta, dC_array, ci, cj, lddc, batchCount); break;
default:;
}
}
|
b600a8e7a7e3941eb1e7a7e857f2be0c8bc42d8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <stdio.h>
#define M 750000000
#define N M/100
int d;
__global__ void add( int *a, int *b, int *c, int i ) {
int tid = i*N+blockIdx.x; // vector index
if (tid < M)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int *a= new int[M], *b=new int[M], *c=new int[M];
int *dev_a, *dev_b, *dev_c;
float tiempo1, tiempo2;
hipEvent_t inicio1, fin1, inicio2, fin2; // para medir tiempos como con timestamp
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<M; i++)
a[i] = b[i] = i+1;
hipEventCreate(&inicio1); // Se inicializan
hipEventCreate(&fin1);
hipEventRecord( inicio1, 0 ); // Se toma el tiempo de inicio
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, M * sizeof(int) );
hipMalloc( (void**)&dev_b, M * sizeof(int) );
hipMalloc( (void**)&dev_c, M * sizeof(int) );
// copy the arrays 'a' and 'b' to the GPU
hipMemcpy( dev_a, a, M * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, M * sizeof(int), hipMemcpyHostToDevice );
hipEventCreate(&inicio2); // Se inicializan
hipEventCreate(&fin2);
hipEventRecord( inicio2, 0 ); // Se toma el tiempo de inicio
d = M/N;
for (int i=0; i<d; i++)
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c, i );
hipEventRecord( fin2, 0); // Se toma el tiempo final.
hipEventSynchronize( fin2 ); // Se sincroniza
hipEventElapsedTime( &tiempo2, inicio2, fin2 );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, M * sizeof(int), hipMemcpyDeviceToHost );
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
hipEventRecord( fin1, 0); // Se toma el tiempo final.
hipEventSynchronize( fin1 ); // Se sincroniza
hipEventElapsedTime( &tiempo1, inicio1, fin1 );
// display the results
//for (int i=0; i<M; i++)
//printf( "%d + %d = %d\n", a[i], b[i], c[i] );
free(a);
free(b);
free(c);
printf("%f\t%f\tms\n", tiempo2, tiempo1);
//printf("Tiempo total %f ms\n", tiempo1);
return 0;
}
| b600a8e7a7e3941eb1e7a7e857f2be0c8bc42d8b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <stdio.h>
#define M 750000000
#define N M/100
int d;
__global__ void add( int *a, int *b, int *c, int i ) {
int tid = i*N+blockIdx.x; // vector index
if (tid < M)
c[tid] = a[tid] + b[tid];
}
int main( void ) {
int *a= new int[M], *b=new int[M], *c=new int[M];
int *dev_a, *dev_b, *dev_c;
float tiempo1, tiempo2;
cudaEvent_t inicio1, fin1, inicio2, fin2; // para medir tiempos como con timestamp
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<M; i++)
a[i] = b[i] = i+1;
cudaEventCreate(&inicio1); // Se inicializan
cudaEventCreate(&fin1);
cudaEventRecord( inicio1, 0 ); // Se toma el tiempo de inicio
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, M * sizeof(int) );
cudaMalloc( (void**)&dev_b, M * sizeof(int) );
cudaMalloc( (void**)&dev_c, M * sizeof(int) );
// copy the arrays 'a' and 'b' to the GPU
cudaMemcpy( dev_a, a, M * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, M * sizeof(int), cudaMemcpyHostToDevice );
cudaEventCreate(&inicio2); // Se inicializan
cudaEventCreate(&fin2);
cudaEventRecord( inicio2, 0 ); // Se toma el tiempo de inicio
d = M/N;
for (int i=0; i<d; i++)
add<<<N,1>>>( dev_a, dev_b, dev_c, i );
cudaEventRecord( fin2, 0); // Se toma el tiempo final.
cudaEventSynchronize( fin2 ); // Se sincroniza
cudaEventElapsedTime( &tiempo2, inicio2, fin2 );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, M * sizeof(int), cudaMemcpyDeviceToHost );
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
cudaEventRecord( fin1, 0); // Se toma el tiempo final.
cudaEventSynchronize( fin1 ); // Se sincroniza
cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
// display the results
//for (int i=0; i<M; i++)
//printf( "%d + %d = %d\n", a[i], b[i], c[i] );
free(a);
free(b);
free(c);
printf("%f\t%f\tms\n", tiempo2, tiempo1);
//printf("Tiempo total %f ms\n", tiempo1);
return 0;
}
|
cea632b2e97c8a59e1f3e1bb2264e4a307a03df7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
int main() {
int nDevices;
int numSMs;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, i);
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf("Device SM count: %d\n", numSMs);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
| cea632b2e97c8a59e1f3e1bb2264e4a307a03df7.cu | #include <iostream>
#include <math.h>
int main() {
int nDevices;
int numSMs;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, i);
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf("Device SM count: %d\n", numSMs);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
|
cadff6b91f4d80391f17721da74c9856acbe9c3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
static void stdDebugOutput(const string &msg)
{
cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
void ncvDebugOutput(const string &msg)
{
debugOutputHandler(msg);
}
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
Ncv32u alignMask = alignment-1;
Ncv32u inverseAlignMask = ~alignMask;
Ncv32u res = (what + alignMask) & inverseAlignMask;
return res;
}
void NCVMemPtr::clear()
{
ptr = NULL;
memtype = NCVMemoryTypeNone;
}
void NCVMemSegment::clear()
{
begin.clear();
size = 0;
}
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
memcpy(dst, src, sz);
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
Ncv32u widthbytes, Ncv32u height, hipStream_t cuStream)
{
NCVStatus ncvStat;
switch (dstType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
for (Ncv32u i=0; i<height; i++)
{
memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
case NCVMemoryTypeDevice:
switch (srcType)
{
case NCVMemoryTypeHostPageable:
case NCVMemoryTypeHostPinned:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
case NCVMemoryTypeDevice:
if (cuStream != 0)
{
ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
}
else
{
ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR);
}
ncvStat = NCV_SUCCESS;
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
break;
default:
ncvStat = NCV_MEM_RESIDENCE_ERROR;
}
return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment_)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
begin(NULL),
end(NULL),
_memType(NCVMemoryTypeNone),
_alignment(alignment_),
bReusesMemory(false)
{
NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment_, void *reusePtr)
:
currentSize(0),
_maxSize(0),
allocBegin(NULL),
_memType(memT),
_alignment(alignment_)
{
NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
allocBegin = NULL;
if (reusePtr == NULL && capacity != 0)
{
bReusesMemory = false;
switch (memT)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&allocBegin, capacity), );
break;
case NCVMemoryTypeHostPageable:
allocBegin = (Ncv8u *)malloc(capacity);
break;
default:;
}
}
else
{
bReusesMemory = true;
allocBegin = (Ncv8u *)reusePtr;
}
if (capacity == 0)
{
allocBegin = (Ncv8u *)(0x1);
}
if (!isCounting())
{
begin = allocBegin;
end = begin + capacity;
}
}
NCVMemStackAllocator::~NCVMemStackAllocator()
{
if (allocBegin != NULL)
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
{
switch (_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(allocBegin), );
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(allocBegin), );
break;
case NCVMemoryTypeHostPageable:
free(allocBegin);
break;
default:;
}
}
allocBegin = NULL;
}
}
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->currentSize += size;
this->_maxSize = ::max(this->_maxSize, this->currentSize);
if (!isCounting())
{
size_t availSize = end - begin;
ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
}
seg.begin.ptr = begin;
seg.begin.memtype = this->_memType;
seg.size = size;
begin += size;
return NCV_SUCCESS;
}
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
currentSize -= seg.size;
begin -= seg.size;
seg.clear();
ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
return NCV_SUCCESS;
}
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL;
}
NcvBool NCVMemStackAllocator::isCounting(void) const
{
return this->_memType == NCVMemoryTypeNone;
}
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemStackAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemStackAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment_)
:
currentSize(0),
_maxSize(0),
_memType(memT),
_alignment(alignment_)
{
ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
seg.clear();
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
seg.begin.ptr = (Ncv8u *)malloc(size);
break;
default:;
}
this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
this->_maxSize = ::max(this->_maxSize, this->currentSize);
seg.begin.memtype = this->_memType;
seg.size = size;
return NCV_SUCCESS;
}
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
switch (this->_memType)
{
case NCVMemoryTypeDevice:
ncvAssertCUDAReturn(hipFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPinned:
ncvAssertCUDAReturn(hipHostFree(seg.begin.ptr), NCV_CUDA_ERROR);
break;
case NCVMemoryTypeHostPageable:
free(seg.begin.ptr);
break;
default:;
}
seg.clear();
return NCV_SUCCESS;
}
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
return (this->_alignment != 0);
}
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
return false;
}
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
return this->_memType;
}
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
return this->_alignment;
}
size_t NCVMemNativeAllocator::maxSize(void) const
{
return this->_maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
typedef struct _NcvTimeMoment
{
LONGLONG moment, freq;
} NcvTimeMoment;
static void _ncvQueryMoment(NcvTimeMoment *t)
{
QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->moment / t->freq;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
typedef struct _NcvTimeMoment
{
struct timeval tv;
struct timezone tz;
} NcvTimeMoment;
void _ncvQueryMoment(NcvTimeMoment *t)
{
gettimeofday(& t->tv, & t->tz);
}
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
struct _NcvTimer
{
NcvTimeMoment t1, t2;
};
NcvTimer ncvStartTimer(void)
{
struct _NcvTimer *t;
t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
_ncvQueryMoment(&t->t1);
return t;
}
double ncvEndQueryTimerUs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
free(t);
return res;
}
double ncvEndQueryTimerMs(NcvTimer t)
{
double res;
_ncvQueryMoment(&t->t2);
res = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
free(t);
return res;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
//Groups overlapping host-resident rectangles in place. On return,
//numHypotheses holds the number of surviving (merged) rectangles and, when
//hypothesesWeights is non-NULL, per-rectangle weights are written there.
//Both vectors must reside in host (pageable or pinned) memory.
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
                                  Ncv32u &numHypotheses,
                                  Ncv32u minNeighbors,
                                  Ncv32f intersectEps,
                                  NCVVector<Ncv32u> *hypothesesWeights)
{
    ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
                    hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
    if (hypothesesWeights != NULL)
    {
        ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
                        hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
    }
    if (numHypotheses == 0)
    {
        return NCV_SUCCESS;
    }
    std::vector<NcvRect32u> rects(numHypotheses);
    memcpy(rects.data(), hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
    std::vector<Ncv32u> weights;
    if (hypothesesWeights != NULL)
    {
        groupRectangles(rects, minNeighbors, intersectEps, &weights);
    }
    else
    {
        groupRectangles(rects, minNeighbors, intersectEps, NULL);
    }
    numHypotheses = (Ncv32u)rects.size();
    if (numHypotheses > 0)
    {
        memcpy(hypotheses.ptr(), rects.data(), numHypotheses * sizeof(NcvRect32u));
    }
    //BUGFIX: guard the weights copy too — when grouping leaves zero rects,
    //the original took &weights[0] on an empty std::vector, which is
    //undefined behavior even with a zero-byte memcpy.
    if (hypothesesWeights != NULL && numHypotheses > 0)
    {
        memcpy(hypothesesWeights->ptr(), weights.data(), numHypotheses * sizeof(Ncv32u));
    }
    return NCV_SUCCESS;
}
//Draws the 1-pixel outline of each rectangle into the host image h_dst
//(row stride of dstStride elements). Edges outside the image are clipped.
//NOTE(review): a zero-width/height rect makes rect.x+rect.width-1 wrap
//around (unsigned), which the '< dstWidth' tests then reject — confirm that
//callers never pass empty rects.
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
                                      Ncv32u dstStride,
                                      Ncv32u dstWidth,
                                      Ncv32u dstHeight,
                                      NcvRect32u *h_rects,
                                      Ncv32u numRects,
                                      T color)
{
    ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
    ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
    ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
    ncvAssertReturn(numRects != 0, NCV_SUCCESS);
    ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
    for (Ncv32u i=0; i<numRects; i++)
    {
        NcvRect32u rect = h_rects[i];
        //left edge
        if (rect.x < dstWidth)
        {
            for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
            {
                h_dst[each*dstStride+rect.x] = color;
            }
        }
        //right edge
        if (rect.x+rect.width-1 < dstWidth)
        {
            for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
            {
                h_dst[each*dstStride+rect.x+rect.width-1] = color;
            }
        }
        //top edge
        if (rect.y < dstHeight)
        {
            for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
            {
                h_dst[rect.y*dstStride+j] = color;
            }
        }
        //bottom edge
        if (rect.y + rect.height - 1 < dstHeight)
        {
            for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
            {
                h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
            }
        }
    }
    return NCV_SUCCESS;
}
//8-bit instantiation of the templated host rectangle rasterizer.
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
                               Ncv32u dstStride,
                               Ncv32u dstWidth,
                               Ncv32u dstHeight,
                               NcvRect32u *h_rects,
                               Ncv32u numRects,
                               Ncv8u color)
{
    return drawRectsWrapperHost<Ncv8u>(h_dst, dstStride, dstWidth, dstHeight,
                                       h_rects, numRects, color);
}
//32-bit instantiation of the templated host rectangle rasterizer.
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
                                Ncv32u dstStride,
                                Ncv32u dstWidth,
                                Ncv32u dstHeight,
                                NcvRect32u *h_rects,
                                Ncv32u numRects,
                                Ncv32u color)
{
    return drawRectsWrapperHost<Ncv32u>(h_dst, dstStride, dstWidth, dstHeight,
                                        h_rects, numRects, color);
}
//One block of 32 threads rasterizes each rectangle edge in 32-pixel chunks.
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;  //log2(NUMTHREADS_DRAWRECTS)
//Kernel: draws rectangle outlines into d_dst. Launch layout (set by
//drawRectsWrapperDevice): one block of NUMTHREADS_DRAWRECTS threads per
//rectangle edge, i.e. 4*numRects blocks; grids wider than 65535 blocks are
//folded into gridDim.y, so the total block count may exceed 4*numRects.
template <class T>
__global__ void drawRects(T *d_dst,
                          Ncv32u dstStride,
                          Ncv32u dstWidth,
                          Ncv32u dstHeight,
                          NcvRect32u *d_rects,
                          Ncv32u numRects,
                          T color)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    //BUGFIX: the guard must be '>=' — with a folded (2D) grid the total
    //block count can exceed numRects*4, and blockId == numRects*4 passed the
    //old '>' test, reading d_rects[numRects], one element past the end.
    if (blockId >= numRects * 4)
    {
        return;
    }
    NcvRect32u curRect = d_rects[blockId >> 2];
    NcvBool bVertical = blockId & 0x1;  //odd block ids draw vertical edges
    NcvBool bTopLeft = blockId & 0x2;   //selects left/top vs right/bottom edge
    Ncv32u pt0x, pt0y;
    if (bVertical)
    {
        //each chunk of NUMTHREADS_DRAWRECTS threads covers one slice of the edge
        Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
        pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
        pt0y = curRect.y;
        if (pt0x < dstWidth)
        {
            for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
            {
                Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
                if (ptY < pt0y + curRect.height && ptY < dstHeight)
                {
                    d_dst[ptY * dstStride + pt0x] = color;
                }
            }
        }
    }
    else
    {
        Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
        pt0x = curRect.x;
        pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
        if (pt0y < dstHeight)
        {
            for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
            {
                Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
                if (ptX < pt0x + curRect.width && ptX < dstWidth)
                {
                    d_dst[pt0y * dstStride + ptX] = color;
                }
            }
        }
    }
}
//Launches the drawRects kernel: one block per rectangle edge (4*numRects
//blocks total). Grids exceeding the 65535 per-dimension limit are folded
//into grid.y; the kernel discards the resulting overshoot blocks.
//NOTE(review): cuStream is explicitly ignored — the kernel runs on the
//default stream; confirm whether callers rely on stream ordering here.
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
                                        Ncv32u dstStride,
                                        Ncv32u dstWidth,
                                        Ncv32u dstHeight,
                                        NcvRect32u *d_rects,
                                        Ncv32u numRects,
                                        T color,
                                        hipStream_t cuStream)
{
    (void)cuStream;
    ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
    ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
    ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
    ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
    if (numRects == 0)
    {
        return NCV_SUCCESS;
    }
    dim3 grid(numRects * 4);
    dim3 block(NUMTHREADS_DRAWRECTS);
    //fold oversized 1D grids into two dimensions (65535-block per-dim limit)
    if (grid.x > 65535)
    {
        grid.y = (grid.x + 65534) / 65535;
        grid.x = 65535;
    }
    hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
    ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
    return NCV_SUCCESS;
}
//8-bit instantiation of the templated device rectangle rasterizer.
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
                                 Ncv32u dstStride,
                                 Ncv32u dstWidth,
                                 Ncv32u dstHeight,
                                 NcvRect32u *d_rects,
                                 Ncv32u numRects,
                                 Ncv8u color,
                                 hipStream_t cuStream)
{
    return drawRectsWrapperDevice<Ncv8u>(d_dst, dstStride, dstWidth, dstHeight,
                                         d_rects, numRects, color, cuStream);
}
//32-bit instantiation of the templated device rectangle rasterizer.
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
                                  Ncv32u dstStride,
                                  Ncv32u dstWidth,
                                  Ncv32u dstHeight,
                                  NcvRect32u *d_rects,
                                  Ncv32u numRects,
                                  Ncv32u color,
                                  hipStream_t cuStream)
{
    return drawRectsWrapperDevice<Ncv32u>(d_dst, dstStride, dstWidth, dstHeight,
                                          d_rects, numRects, color, cuStream);
}
| cadff6b91f4d80391f17721da74c9856acbe9c3e.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <string>
#include <vector>
#include "NCV.hpp"
using namespace std;
//==============================================================================
//
// Error handling helpers
//
//==============================================================================
//Default debug sink: forwards the message to stdout.
static void stdDebugOutput(const string &msg)
{
    std::cout << msg;
}
static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput;
//Routes a debug message through the currently installed handler.
void ncvDebugOutput(const string &msg)
{
    (*debugOutputHandler)(msg);
}
//Installs a custom debug output handler, replacing the stdout default.
void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func)
{
    debugOutputHandler = func;
}
//==============================================================================
//
// Memory wrappers and helpers
//
//==============================================================================
//Rounds 'what' up to the next multiple of 'alignment' (must be a power of 2).
Ncv32u alignUp(Ncv32u what, Ncv32u alignment)
{
    const Ncv32u mask = alignment - 1;
    return (what + mask) & ~mask;
}
//Resets the pointer descriptor to the "no memory" state.
void NCVMemPtr::clear()
{
    this->ptr = NULL;
    this->memtype = NCVMemoryTypeNone;
}
//Resets the segment descriptor: cleared base pointer and zero length.
void NCVMemSegment::clear()
{
    this->begin.clear();
    this->size = 0;
}
//Copies sz bytes between memory regions whose residence (host pageable,
//host pinned, or device) is given explicitly. Copies touching device memory
//go through the CUDA runtime; a non-zero stream makes them asynchronous.
//NOTE(review): async copies from/to pageable host memory are not truly
//asynchronous — confirm callers synchronize before reusing buffers.
//Returns NCV_MEM_RESIDENCE_ERROR for unsupported residence combinations.
NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, size_t sz, cudaStream_t cuStream)
{
    NCVStatus ncvStat;
    switch (dstType)
    {
    case NCVMemoryTypeHostPageable:
    case NCVMemoryTypeHostPinned:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            //host -> host: plain memcpy
            memcpy(dst, src, sz);
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            //device -> host
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    case NCVMemoryTypeDevice:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            //host -> device
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            //device -> device
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    default:
        ncvStat = NCV_MEM_RESIDENCE_ERROR;
    }
    return ncvStat;
}
//2D (pitched) variant of memSegCopyHelper: copies 'height' rows of
//'widthbytes' bytes each, with independent source/destination row pitches.
//Residence combinations and stream semantics mirror the 1D helper.
NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType,
                             const void *src, Ncv32u srcPitch, NCVMemoryType srcType,
                             Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream)
{
    NCVStatus ncvStat;
    switch (dstType)
    {
    case NCVMemoryTypeHostPageable:
    case NCVMemoryTypeHostPinned:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            //host -> host: copy row by row, honoring both pitches
            for (Ncv32u i=0; i<height; i++)
            {
                memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes);
            }
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            //device -> host
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    case NCVMemoryTypeDevice:
        switch (srcType)
        {
        case NCVMemoryTypeHostPageable:
        case NCVMemoryTypeHostPinned:
            //host -> device
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        case NCVMemoryTypeDevice:
            //device -> device
            if (cuStream != 0)
            {
                ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR);
            }
            else
            {
                ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR);
            }
            ncvStat = NCV_SUCCESS;
            break;
        default:
            ncvStat = NCV_MEM_RESIDENCE_ERROR;
        }
        break;
    default:
        ncvStat = NCV_MEM_RESIDENCE_ERROR;
    }
    return ncvStat;
}
//===================================================================
//
// NCVMemStackAllocator class members implementation
//
//===================================================================
//Counting-mode constructor: no backing storage is created (_memType stays
//NCVMemoryTypeNone), so alloc() only tallies sizes to compute peak usage.
NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment_)
    :
    currentSize(0),
    _maxSize(0),
    allocBegin(NULL),
    begin(NULL),
    end(NULL),
    _memType(NCVMemoryTypeNone),
    _alignment(alignment_),
    bReusesMemory(false)
{
    //alignment must be a power of two for the alignUp() bit trick to work
    NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
    ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2");
}
//Constructs a stack allocator of the given memory type and capacity.
//If reusePtr is non-NULL the caller's buffer is adopted (and not freed in
//the dtor); otherwise a buffer is allocated according to memT. A zero
//capacity installs the sentinel pointer 0x1 so isInitialized() still holds.
NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, size_t capacity, Ncv32u alignment_, void *reusePtr)
    :
    currentSize(0),
    _maxSize(0),
    allocBegin(NULL),
    _memType(memT),
    _alignment(alignment_)
{
    NcvBool bProperAlignment = (alignment_ & (alignment_ - 1)) == 0;
    ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2");
    ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type");
    allocBegin = NULL;
    if (reusePtr == NULL && capacity != 0)
    {
        //own the backing buffer; allocation method depends on residence
        bReusesMemory = false;
        switch (memT)
        {
        case NCVMemoryTypeDevice:
            ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), );
            break;
        case NCVMemoryTypeHostPinned:
            ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), );
            break;
        case NCVMemoryTypeHostPageable:
            allocBegin = (Ncv8u *)malloc(capacity);
            break;
        default:;
        }
    }
    else
    {
        //adopt the caller's buffer; dtor must not free it
        bReusesMemory = true;
        allocBegin = (Ncv8u *)reusePtr;
    }
    if (capacity == 0)
    {
        //non-NULL sentinel: allocator is "initialized" but owns no storage
        allocBegin = (Ncv8u *)(0x1);
    }
    if (!isCounting())
    {
        begin = allocBegin;
        end = begin + capacity;
    }
}
//Releases the backing buffer unless it was caller-supplied (bReusesMemory)
//or the zero-capacity sentinel 0x1. Warns if live allocations remain.
NCVMemStackAllocator::~NCVMemStackAllocator()
{
    if (allocBegin != NULL)
    {
        ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction");
        if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1)))
        {
            //free with the method matching the residence it was allocated with
            switch (_memType)
            {
            case NCVMemoryTypeDevice:
                ncvAssertCUDAReturn(cudaFree(allocBegin), );
                break;
            case NCVMemoryTypeHostPinned:
                ncvAssertCUDAReturn(cudaFreeHost(allocBegin), );
                break;
            case NCVMemoryTypeHostPageable:
                free(allocBegin);
                break;
            default:;
            }
        }
        allocBegin = NULL;
    }
}
//Carves an aligned segment off the top of the stack. In counting mode only
//the size bookkeeping runs (no capacity check, pointer stays meaningless);
//otherwise fails with NCV_ALLOCATOR_INSUFFICIENT_CAPACITY when full.
NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, size_t size)
{
    seg.clear();
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    //round the request up to the allocator's alignment
    size = alignUp(static_cast<Ncv32u>(size), this->_alignment);
    this->currentSize += size;
    this->_maxSize = std::max(this->_maxSize, this->currentSize);
    if (!isCounting())
    {
        size_t availSize = end - begin;
        ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY);
    }
    seg.begin.ptr = begin;
    seg.begin.memtype = this->_memType;
    seg.size = size;
    begin += size;
    return NCV_SUCCESS;
}
//Returns a segment to the stack. Deallocation must be strictly LIFO: the
//segment must be the most recently allocated one (its pointer must equal
//begin - seg.size), otherwise NCV_ALLOCATOR_DEALLOC_ORDER is returned.
NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg)
{
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER);
    currentSize -= seg.size;
    begin -= seg.size;
    seg.clear();
    ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC);
    return NCV_SUCCESS;
}
//Usable either in counting mode with a power-of-2 alignment, or when a
//backing buffer (or the 0x1 sentinel) has been installed.
//FIX: parenthesized to make the intended '&&'-binds-tighter-than-'||'
//grouping explicit; behavior is unchanged, but the -Wparentheses ambiguity
//that obscured the condition's intent is gone.
NcvBool NCVMemStackAllocator::isInitialized(void) const
{
    NcvBool bProperAlignment = (this->_alignment & (this->_alignment - 1)) == 0;
    return (bProperAlignment && isCounting()) || this->allocBegin != NULL;
}
//Counting allocators only measure peak usage; they own no real storage.
NcvBool NCVMemStackAllocator::isCounting(void) const
{
    return (_memType == NCVMemoryTypeNone);
}
//Residence of the memory served by this allocator.
NCVMemoryType NCVMemStackAllocator::memType(void) const
{
    return _memType;
}
//Alignment (in bytes) applied to every allocation request.
Ncv32u NCVMemStackAllocator::alignment(void) const
{
    return _alignment;
}
//Peak total size ever held by this allocator (useful after a counting pass).
size_t NCVMemStackAllocator::maxSize(void) const
{
    return _maxSize;
}
//===================================================================
//
// NCVMemNativeAllocator class members implementation
//
//===================================================================
//Pass-through allocator: each alloc()/dealloc() maps directly onto the
//native allocation API for the given residence. Counting mode is not
//supported, hence the assertion on NCVMemoryTypeNone.
NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment_)
    :
    currentSize(0),
    _maxSize(0),
    _memType(memT),
    _alignment(alignment_)
{
    ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", );
}
//Only checks for leaks; individual segments are released in dealloc().
NCVMemNativeAllocator::~NCVMemNativeAllocator()
{
    ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak");
}
//Allocates a segment directly via cudaMalloc/cudaMallocHost/malloc,
//depending on the allocator's residence. Bookkeeping uses the size rounded
//up to the allocator's alignment (the native allocators handle actual
//alignment themselves).
//NOTE(review): the pageable-host malloc result is not checked — a failed
//malloc yields a NULL segment pointer; confirm callers tolerate this.
NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, size_t size)
{
    seg.clear();
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    switch (this->_memType)
    {
    case NCVMemoryTypeDevice:
        ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPinned:
        ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPageable:
        seg.begin.ptr = (Ncv8u *)malloc(size);
        break;
    default:;
    }
    this->currentSize += alignUp(static_cast<Ncv32u>(size), this->_alignment);
    this->_maxSize = std::max(this->_maxSize, this->currentSize);
    seg.begin.memtype = this->_memType;
    seg.size = size;
    return NCV_SUCCESS;
}
//Releases a segment with the API matching the allocator's residence and
//reverses the aligned-size bookkeeping performed in alloc().
NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg)
{
    ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC);
    ncvAssertReturn(currentSize >= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC);
    currentSize -= alignUp(static_cast<Ncv32u>(seg.size), this->_alignment);
    switch (this->_memType)
    {
    case NCVMemoryTypeDevice:
        ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPinned:
        ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR);
        break;
    case NCVMemoryTypeHostPageable:
        free(seg.begin.ptr);
        break;
    default:;
    }
    seg.clear();
    return NCV_SUCCESS;
}
//A zero alignment marks a default-constructed (unusable) allocator.
NcvBool NCVMemNativeAllocator::isInitialized(void) const
{
    return (_alignment != 0);
}
//Native allocators always allocate for real; counting mode does not exist.
NcvBool NCVMemNativeAllocator::isCounting(void) const
{
    return false;
}
//Residence of the memory served by this allocator.
NCVMemoryType NCVMemNativeAllocator::memType(void) const
{
    return _memType;
}
//Alignment (in bytes) used for the size bookkeeping.
Ncv32u NCVMemNativeAllocator::alignment(void) const
{
    return _alignment;
}
//Peak total size ever held by this allocator.
size_t NCVMemNativeAllocator::maxSize(void) const
{
    return _maxSize;
}
//===================================================================
//
// Time and timer routines
//
//===================================================================
typedef struct _NcvTimeMoment NcvTimeMoment;
#if defined(_WIN32) || defined(_WIN64)
#include <Windows.h>
//Timestamp captured from the Win32 high-resolution performance counter.
typedef struct _NcvTimeMoment
{
    LONGLONG moment, freq;   //counter value and its frequency (ticks/second)
} NcvTimeMoment;
//Captures the current time from the Win32 high-resolution performance counter,
//recording both the raw counter value and its frequency (ticks per second).
static void _ncvQueryMoment(NcvTimeMoment *t)
{
    QueryPerformanceFrequency((LARGE_INTEGER *)&(t->freq));
    QueryPerformanceCounter((LARGE_INTEGER *)&(t->moment));
}
//Converts an absolute moment to microseconds: counter / (ticks-per-second) * 1e6.
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
    return 1000000.0 * t->moment / t->freq;
}
//Microseconds elapsed between t1 and t2. The two sampled frequencies are
//averaged (freq should be constant; this is just a symmetric formulation).
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return 1000000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
//Milliseconds elapsed between t1 and t2 (same averaged-frequency form as the
//microseconds variant above, scaled by 1000 instead of 1e6).
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return 1000.0 * 2 * ((t2->moment) - (t1->moment)) / (t1->freq + t2->freq);
}
#elif defined(__GNUC__)
#include <sys/time.h>
//Timestamp captured via gettimeofday (microsecond resolution).
typedef struct _NcvTimeMoment
{
    struct timeval tv;   //seconds + microseconds since the epoch
    struct timezone tz;  //obsolete field, kept only for gettimeofday's signature
} NcvTimeMoment;
//Captures the current wall-clock time via gettimeofday.
void _ncvQueryMoment(NcvTimeMoment *t)
{
    gettimeofday(& t->tv, & t->tz);
}
//Converts an absolute moment to microseconds since the epoch
//(seconds scaled to us, plus the sub-second microsecond component).
double _ncvMomentToMicroseconds(NcvTimeMoment *t)
{
    return 1000000.0 * t->tv.tv_sec + (double)t->tv.tv_usec;
}
//Microseconds elapsed between t1 and t2, combining the seconds and
//microseconds fields of both timestamps.
double _ncvMomentsDiffToMicroseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return (((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000000 + (double)t2->tv.tv_usec - (double)t1->tv.tv_usec);
}
//Milliseconds elapsed between t1 and t2.
//BUGFIX: the original ignored tv_usec entirely, so any interval shorter than
//one second reported 0 ms and all results were quantized to whole seconds.
//Include the microsecond component, matching the microseconds variant above.
double _ncvMomentsDiffToMilliseconds(NcvTimeMoment *t1, NcvTimeMoment *t2)
{
    return ((double)t2->tv.tv_sec - (double)t1->tv.tv_sec) * 1000 +
           ((double)t2->tv.tv_usec - (double)t1->tv.tv_usec) / 1000;
}
#endif //#if defined(_WIN32) || defined(_WIN64)
//Simple interval timer: t1 is recorded at start, t2 at the end query.
struct _NcvTimer
{
    NcvTimeMoment t1, t2;
};
//Allocates a timer on the heap and records the start moment.
//BUGFIX: the original dereferenced the malloc result without checking it;
//on allocation failure we now return NULL instead of writing through a
//null pointer. Callers release the timer via ncvEndQueryTimerUs/Ms.
NcvTimer ncvStartTimer(void)
{
    struct _NcvTimer *t;
    t = (struct _NcvTimer *)malloc(sizeof(struct _NcvTimer));
    if (t != NULL)
    {
        _ncvQueryMoment(&t->t1);
    }
    return t;
}
//Records the stop moment, frees the timer, and returns the elapsed time
//in microseconds since ncvStartTimer().
double ncvEndQueryTimerUs(NcvTimer t)
{
    _ncvQueryMoment(&t->t2);
    const double elapsedUs = _ncvMomentsDiffToMicroseconds(&t->t1, &t->t2);
    free(t);
    return elapsedUs;
}
//Records the stop moment, frees the timer, and returns the elapsed time
//in milliseconds since ncvStartTimer().
double ncvEndQueryTimerMs(NcvTimer t)
{
    _ncvQueryMoment(&t->t2);
    const double elapsedMs = _ncvMomentsDiffToMilliseconds(&t->t1, &t->t2);
    free(t);
    return elapsedMs;
}
//===================================================================
//
// Operations with rectangles
//
//===================================================================
//from OpenCV
void groupRectangles(std::vector<NcvRect32u> &hypotheses, int groupThreshold, double eps, std::vector<Ncv32u> *weights);
//Groups overlapping host-resident rectangles in place. On return,
//numHypotheses holds the number of surviving (merged) rectangles and, when
//hypothesesWeights is non-NULL, per-rectangle weights are written there.
//Both vectors must reside in host (pageable or pinned) memory.
NCVStatus ncvGroupRectangles_host(NCVVector<NcvRect32u> &hypotheses,
                                  Ncv32u &numHypotheses,
                                  Ncv32u minNeighbors,
                                  Ncv32f intersectEps,
                                  NCVVector<Ncv32u> *hypothesesWeights)
{
    ncvAssertReturn(hypotheses.memType() == NCVMemoryTypeHostPageable ||
                    hypotheses.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
    if (hypothesesWeights != NULL)
    {
        ncvAssertReturn(hypothesesWeights->memType() == NCVMemoryTypeHostPageable ||
                        hypothesesWeights->memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR);
    }
    if (numHypotheses == 0)
    {
        return NCV_SUCCESS;
    }
    std::vector<NcvRect32u> rects(numHypotheses);
    memcpy(rects.data(), hypotheses.ptr(), numHypotheses * sizeof(NcvRect32u));
    std::vector<Ncv32u> weights;
    if (hypothesesWeights != NULL)
    {
        groupRectangles(rects, minNeighbors, intersectEps, &weights);
    }
    else
    {
        groupRectangles(rects, minNeighbors, intersectEps, NULL);
    }
    numHypotheses = (Ncv32u)rects.size();
    if (numHypotheses > 0)
    {
        memcpy(hypotheses.ptr(), rects.data(), numHypotheses * sizeof(NcvRect32u));
    }
    //BUGFIX: guard the weights copy too — when grouping leaves zero rects,
    //the original took &weights[0] on an empty std::vector, which is
    //undefined behavior even with a zero-byte memcpy.
    if (hypothesesWeights != NULL && numHypotheses > 0)
    {
        memcpy(hypothesesWeights->ptr(), weights.data(), numHypotheses * sizeof(Ncv32u));
    }
    return NCV_SUCCESS;
}
//Draws the 1-pixel outline of each rectangle into the host image h_dst
//(row stride of dstStride elements). Edges outside the image are clipped.
//NOTE(review): a zero-width/height rect makes rect.x+rect.width-1 wrap
//around (unsigned), which the '< dstWidth' tests then reject — confirm that
//callers never pass empty rects.
template <class T>
static NCVStatus drawRectsWrapperHost(T *h_dst,
                                      Ncv32u dstStride,
                                      Ncv32u dstWidth,
                                      Ncv32u dstHeight,
                                      NcvRect32u *h_rects,
                                      Ncv32u numRects,
                                      T color)
{
    ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR);
    ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
    ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
    ncvAssertReturn(numRects != 0, NCV_SUCCESS);
    ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
    for (Ncv32u i=0; i<numRects; i++)
    {
        NcvRect32u rect = h_rects[i];
        //left edge
        if (rect.x < dstWidth)
        {
            for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
            {
                h_dst[each*dstStride+rect.x] = color;
            }
        }
        //right edge
        if (rect.x+rect.width-1 < dstWidth)
        {
            for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
            {
                h_dst[each*dstStride+rect.x+rect.width-1] = color;
            }
        }
        //top edge
        if (rect.y < dstHeight)
        {
            for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
            {
                h_dst[rect.y*dstStride+j] = color;
            }
        }
        //bottom edge
        if (rect.y + rect.height - 1 < dstHeight)
        {
            for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++)
            {
                h_dst[(rect.y+rect.height-1)*dstStride+j] = color;
            }
        }
    }
    return NCV_SUCCESS;
}
//8-bit instantiation of the templated host rectangle rasterizer.
NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst,
                               Ncv32u dstStride,
                               Ncv32u dstWidth,
                               Ncv32u dstHeight,
                               NcvRect32u *h_rects,
                               Ncv32u numRects,
                               Ncv8u color)
{
    return drawRectsWrapperHost<Ncv8u>(h_dst, dstStride, dstWidth, dstHeight,
                                       h_rects, numRects, color);
}
//32-bit instantiation of the templated host rectangle rasterizer.
NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst,
                                Ncv32u dstStride,
                                Ncv32u dstWidth,
                                Ncv32u dstHeight,
                                NcvRect32u *h_rects,
                                Ncv32u numRects,
                                Ncv32u color)
{
    return drawRectsWrapperHost<Ncv32u>(h_dst, dstStride, dstWidth, dstHeight,
                                        h_rects, numRects, color);
}
//One block of 32 threads rasterizes each rectangle edge in 32-pixel chunks.
const Ncv32u NUMTHREADS_DRAWRECTS = 32;
const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5;  //log2(NUMTHREADS_DRAWRECTS)
//Kernel: draws rectangle outlines into d_dst. Launch layout (set by
//drawRectsWrapperDevice): one block of NUMTHREADS_DRAWRECTS threads per
//rectangle edge, i.e. 4*numRects blocks; grids wider than 65535 blocks are
//folded into gridDim.y, so the total block count may exceed 4*numRects.
template <class T>
__global__ void drawRects(T *d_dst,
                          Ncv32u dstStride,
                          Ncv32u dstWidth,
                          Ncv32u dstHeight,
                          NcvRect32u *d_rects,
                          Ncv32u numRects,
                          T color)
{
    Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x;
    //BUGFIX: the guard must be '>=' — with a folded (2D) grid the total
    //block count can exceed numRects*4, and blockId == numRects*4 passed the
    //old '>' test, reading d_rects[numRects], one element past the end.
    if (blockId >= numRects * 4)
    {
        return;
    }
    NcvRect32u curRect = d_rects[blockId >> 2];
    NcvBool bVertical = blockId & 0x1;  //odd block ids draw vertical edges
    NcvBool bTopLeft = blockId & 0x2;   //selects left/top vs right/bottom edge
    Ncv32u pt0x, pt0y;
    if (bVertical)
    {
        //each chunk of NUMTHREADS_DRAWRECTS threads covers one slice of the edge
        Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
        pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1;
        pt0y = curRect.y;
        if (pt0x < dstWidth)
        {
            for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
            {
                Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
                if (ptY < pt0y + curRect.height && ptY < dstHeight)
                {
                    d_dst[ptY * dstStride + pt0x] = color;
                }
            }
        }
    }
    else
    {
        Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2;
        pt0x = curRect.x;
        pt0y = bTopLeft ? curRect.y : curRect.y + curRect.height - 1;
        if (pt0y < dstHeight)
        {
            for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++)
            {
                Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x;
                if (ptX < pt0x + curRect.width && ptX < dstWidth)
                {
                    d_dst[pt0y * dstStride + ptX] = color;
                }
            }
        }
    }
}
//Launches the drawRects kernel: one block per rectangle edge (4*numRects
//blocks total). Grids exceeding the 65535 per-dimension limit are folded
//into grid.y; the kernel discards the resulting overshoot blocks.
//NOTE(review): cuStream is explicitly ignored — the kernel runs on the
//default stream; confirm whether callers rely on stream ordering here.
template <class T>
static NCVStatus drawRectsWrapperDevice(T *d_dst,
                                        Ncv32u dstStride,
                                        Ncv32u dstWidth,
                                        Ncv32u dstHeight,
                                        NcvRect32u *d_rects,
                                        Ncv32u numRects,
                                        T color,
                                        cudaStream_t cuStream)
{
    (void)cuStream;
    ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR);
    ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID);
    ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP);
    ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID);
    if (numRects == 0)
    {
        return NCV_SUCCESS;
    }
    dim3 grid(numRects * 4);
    dim3 block(NUMTHREADS_DRAWRECTS);
    //fold oversized 1D grids into two dimensions (65535-block per-dim limit)
    if (grid.x > 65535)
    {
        grid.y = (grid.x + 65534) / 65535;
        grid.x = 65535;
    }
    drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color);
    ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);
    return NCV_SUCCESS;
}
//8-bit instantiation of the templated device rectangle rasterizer.
NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst,
                                 Ncv32u dstStride,
                                 Ncv32u dstWidth,
                                 Ncv32u dstHeight,
                                 NcvRect32u *d_rects,
                                 Ncv32u numRects,
                                 Ncv8u color,
                                 cudaStream_t cuStream)
{
    return drawRectsWrapperDevice<Ncv8u>(d_dst, dstStride, dstWidth, dstHeight,
                                         d_rects, numRects, color, cuStream);
}
//32-bit instantiation of the templated device rectangle rasterizer.
NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst,
                                  Ncv32u dstStride,
                                  Ncv32u dstWidth,
                                  Ncv32u dstHeight,
                                  NcvRect32u *d_rects,
                                  Ncv32u numRects,
                                  Ncv32u color,
                                  cudaStream_t cuStream)
{
    return drawRectsWrapperDevice<Ncv32u>(d_dst, dstStride, dstWidth, dstHeight,
                                          d_rects, numRects, color, cuStream);
}
|
63a519e8b00571beb008f0b3b0befa7ff3bb5a5c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#define NUM_THREADS 128
#define NUM_BLOCKS 256
// interpolation
//Trilinear interpolation of the 8-bit volume f at the continuous, 1-based
//coordinate (x,y,z). d holds the volume dimensions; the layout is x-fastest
//(index = x + d.x*(y + d.y*z)). The caller must guarantee the 2x2x2
//neighborhood around (x,y,z) lies inside the volume — no bounds checks here.
__host__ __device__
float interp(const int3 d, const unsigned char f[], float x, float y, float z)
{
    int ix, iy, iz;
    float dx1, dy1, dz1, dx2, dy2, dz2;
    int k111,k112,k121,k122,k211,k212,k221,k222;
    float vf;
    const unsigned char *ff;
    //integer cell coordinates and fractional weights per axis
    ix = floorf(x); dx1=x-ix; dx2=1.f-dx1;
    iy = floorf(y); dy1=y-iy; dy2=1.f-dy1;
    iz = floorf(z); dz1=z-iz; dz2=1.f-dz1;
    //pointer to the (ix-1, iy-1, iz-1) corner of the cell (1-based coords)
    ff = f + ix-1+d.x*(iy-1+d.y*(iz-1));
    //fetch the eight corner samples of the cell
    k222 = ff[ 0]; k122 = ff[ 1];
    k212 = ff[d.x]; k112 = ff[d.x+1];
    ff += d.x*d.y;
    k221 = ff[ 0]; k121 = ff[ 1];
    k211 = ff[d.x]; k111 = ff[d.x+1];
    //blend along x, then y, then z
    vf = (((k222*dx2+k122*dx1)*dy2 + (k212*dx2+k112*dx1)*dy1))*dz2 +
         (((k221*dx2+k121*dx1)*dy2 + (k211*dx2+k111*dx1)*dy1))*dz1;
    return(vf);
}
//For every voxel i of the working volume: jitter the voxel coordinate with a
//fixed table of 97 pseudo-random offsets, apply the 4x4 rigid transform M
//(column-major: xp = row 0 of M dotted with [rx,ry,rz,1]) to obtain the
//matching position in volume f, and sample both volumes with trilinear
//interpolation. Out-of-range voxels are zeroed and flagged false in
//data_threshold_d. The loop stride assumes a launch of exactly
//NUM_BLOCKS x NUM_THREADS threads (as done in main).
__global__ void spm (
    const float *__restrict__ M,
    const int data_size,
    const unsigned char *__restrict__ g_d,
    const unsigned char *__restrict__ f_d,
    const int3 dg,
    const int3 df,
    unsigned char *__restrict__ ivf_d,
    unsigned char *__restrict__ ivg_d,
    bool *__restrict__ data_threshold_d)
{
    // 97 random values
    const float ran[] = {
        0.656619,0.891183,0.488144,0.992646,0.373326,0.531378,0.181316,0.501944,0.422195,
        0.660427,0.673653,0.95733,0.191866,0.111216,0.565054,0.969166,0.0237439,0.870216,
        0.0268766,0.519529,0.192291,0.715689,0.250673,0.933865,0.137189,0.521622,0.895202,
        0.942387,0.335083,0.437364,0.471156,0.14931,0.135864,0.532498,0.725789,0.398703,
        0.358419,0.285279,0.868635,0.626413,0.241172,0.978082,0.640501,0.229849,0.681335,
        0.665823,0.134718,0.0224933,0.262199,0.116515,0.0693182,0.85293,0.180331,0.0324186,
        0.733926,0.536517,0.27603,0.368458,0.0128863,0.889206,0.866021,0.254247,0.569481,
        0.159265,0.594364,0.3311,0.658613,0.863634,0.567623,0.980481,0.791832,0.152594,
        0.833027,0.191863,0.638987,0.669,0.772088,0.379818,0.441585,0.48306,0.608106,
        0.175996,0.00202556,0.790224,0.513609,0.213229,0.10345,0.157337,0.407515,0.407757,
        0.0526927,0.941815,0.149972,0.384374,0.311059,0.168534,0.896648};
    const int idx = blockIdx.x * NUM_THREADS + threadIdx.x;
    int x_datasize=(dg.x-2);
    int y_datasize=(dg.y-2);
    for(int i = idx; i < data_size; i += NUM_THREADS*NUM_BLOCKS)
    {
        //1-based voxel coordinates reconstructed from the linear index
        float xx_temp = (i%x_datasize)+1.f;
        float yy_temp = ((int)floorf((float)i/x_datasize)%y_datasize)+1.f;
        //NOTE(review): z uses a plain float division by y_datasize instead of
        //the integer div/mod pattern of x and y; the CPU reference does the
        //same, so results match, but confirm this is the intended geometry.
        float zz_temp = (floorf((float)i/x_datasize))/y_datasize+1.f;
        // generate rx,ry,rz coordinates
        float rx = xx_temp + ran[i%97];
        float ry = yy_temp + ran[i%97];
        float rz = zz_temp + ran[i%97];
        // rigid transformation over rx,ry,rz coordinates
        float xp = M[0]*rx + M[4]*ry + M[ 8]*rz + M[12];
        float yp = M[1]*rx + M[5]*ry + M[ 9]*rz+ M[13];
        float zp = M[2]*rx + M[6]*ry + M[10]*rz+ M[14];
        if (zp>=1.f && zp<df.z && yp>=1.f && yp<df.y && xp>=1.f && xp<df.x)
        {
            // interpolation
            ivf_d[i] = floorf(interp(df, f_d, xp,yp,zp)+0.5f);
            ivg_d[i] = floorf(interp(dg, g_d, rx,ry,rz)+0.5f);
            data_threshold_d[i] = true;
        }
        else
        {
            ivf_d[i] = 0;
            ivg_d[i] = 0;
            data_threshold_d[i] = false;
        }
    }
}
//Sequential CPU reference implementation of the spm kernel: mirrors it
//statement-for-statement (same jitter table, same transform, same
//interpolation) so device results can be verified against it.
void spm_reference (
    const float *M,
    const int data_size,
    const unsigned char *g_d,
    const unsigned char *f_d,
    const int3 dg,
    const int3 df,
    unsigned char *ivf_d,
    unsigned char *ivg_d,
    bool *data_threshold_d)
{
    // 97 random values
    const float ran[] = {
        0.656619,0.891183,0.488144,0.992646,0.373326,0.531378,0.181316,0.501944,0.422195,
        0.660427,0.673653,0.95733,0.191866,0.111216,0.565054,0.969166,0.0237439,0.870216,
        0.0268766,0.519529,0.192291,0.715689,0.250673,0.933865,0.137189,0.521622,0.895202,
        0.942387,0.335083,0.437364,0.471156,0.14931,0.135864,0.532498,0.725789,0.398703,
        0.358419,0.285279,0.868635,0.626413,0.241172,0.978082,0.640501,0.229849,0.681335,
        0.665823,0.134718,0.0224933,0.262199,0.116515,0.0693182,0.85293,0.180331,0.0324186,
        0.733926,0.536517,0.27603,0.368458,0.0128863,0.889206,0.866021,0.254247,0.569481,
        0.159265,0.594364,0.3311,0.658613,0.863634,0.567623,0.980481,0.791832,0.152594,
        0.833027,0.191863,0.638987,0.669,0.772088,0.379818,0.441585,0.48306,0.608106,
        0.175996,0.00202556,0.790224,0.513609,0.213229,0.10345,0.157337,0.407515,0.407757,
        0.0526927,0.941815,0.149972,0.384374,0.311059,0.168534,0.896648};
    int x_datasize=(dg.x-2);
    int y_datasize=(dg.y-2);
    for(int i = 0; i < data_size; i++)
    {
        //1-based voxel coordinates reconstructed from the linear index
        float xx_temp = (i%x_datasize)+1.f;
        float yy_temp = ((int)floorf((float)i/x_datasize)%y_datasize)+1.f;
        //NOTE(review): z uses a plain float division, matching the kernel —
        //see the corresponding note there.
        float zz_temp = (floorf((float)i/x_datasize))/y_datasize+1.f;
        // generate rx,ry,rz coordinates
        float rx = xx_temp + ran[i%97];
        float ry = yy_temp + ran[i%97];
        float rz = zz_temp + ran[i%97];
        // rigid transformation over rx,ry,rz coordinates
        float xp = M[0]*rx + M[4]*ry + M[ 8]*rz + M[12];
        float yp = M[1]*rx + M[5]*ry + M[ 9]*rz+ M[13];
        float zp = M[2]*rx + M[6]*ry + M[10]*rz+ M[14];
        if (zp>=1.f && zp<df.z && yp>=1.f && yp<df.y && xp>=1.f && xp<df.x)
        {
            // interpolation
            ivf_d[i] = floorf(interp(df, f_d, xp,yp,zp)+0.5f);
            ivg_d[i] = floorf(interp(dg, g_d, rx,ry,rz)+0.5f);
            data_threshold_d[i] = true;
        }
        else
        {
            ivf_d[i] = 0;
            ivg_d[i] = 0;
            data_threshold_d[i] = false;
        }
    }
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <dimension> <repeat>\n", argv[0]);
return 1;
}
int v = atoi(argv[1]);
int repeat = atoi(argv[2]);
int3 g_vol = {v,v,v};
int3 f_vol = {v,v,v};
const int data_size = (g_vol.x+1) * (g_vol.y+1) * (g_vol.z+5);
const int vol_size = g_vol.x * g_vol.y * g_vol.z;
int *hist_d = (int*) malloc (65536*sizeof(int));
int *hist_h = (int*) malloc (65536*sizeof(int));
memset(hist_d, 0, sizeof(int)*65536);
memset(hist_h, 0, sizeof(int)*65536);
unsigned char *ivf_h = (unsigned char *)malloc(vol_size*sizeof(unsigned char));
unsigned char *ivg_h = (unsigned char *)malloc(vol_size*sizeof(unsigned char));
bool *data_threshold_h = (bool *)malloc(vol_size*sizeof(bool));
srand(123);
float M_h[16];
for (int i = 0; i < 16; i++) M_h[i] = (float)rand() / (float)RAND_MAX;
float *M_d;
hipMalloc((void**)&M_d,16*sizeof(float));
hipMemcpy(M_d,M_h,16*sizeof(float),hipMemcpyHostToDevice);
unsigned char* g_h = (unsigned char*) malloc (data_size * sizeof(unsigned char));
unsigned char* f_h = (unsigned char*) malloc (data_size * sizeof(unsigned char));
for (int i = 0; i < data_size; i++) {
g_h[i] = rand() % 256;
f_h[i] = rand() % 256;
}
unsigned char *g_d, *f_d;
hipMalloc((void**)&g_d, data_size * sizeof(unsigned char));
hipMalloc((void**)&f_d, data_size * sizeof(unsigned char));
hipMemcpy(g_d, g_h, data_size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(f_d, f_h, data_size*sizeof(unsigned char), hipMemcpyHostToDevice);
unsigned char *ivf_d, *ivg_d;
hipMalloc((void**)&ivf_d,vol_size*sizeof(unsigned char));
hipMalloc((void**)&ivg_d,vol_size*sizeof(unsigned char));
bool *data_threshold_d;
hipMalloc((void**)&data_threshold_d,vol_size*sizeof(bool));
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(spm, NUM_BLOCKS, NUM_THREADS, 0, 0, M_d, vol_size, g_d, f_d, g_vol, f_vol,
ivf_d,ivg_d,data_threshold_d);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
hipMemcpy(ivf_h,ivf_d,vol_size*sizeof(unsigned char),hipMemcpyDeviceToHost);
hipMemcpy(ivg_h,ivg_d,vol_size*sizeof(unsigned char),hipMemcpyDeviceToHost);
hipMemcpy(data_threshold_h,data_threshold_d,vol_size*sizeof(bool),hipMemcpyDeviceToHost);
int count = 0;
for(int i = 0; i < vol_size; i++)
{
if (data_threshold_h[i]) {
hist_d[ivf_h[i]+ivg_h[i]*256] += 1;
count++;
}
}
printf("Device count: %d\n", count);
count = 0;
spm_reference(M_h, vol_size, g_h, f_h, g_vol, f_vol, ivf_h, ivg_h, data_threshold_h);
for(int i = 0; i < vol_size; i++)
{
if (data_threshold_h[i]) {
hist_h[ivf_h[i]+ivg_h[i]*256] += 1;
count++;
}
}
printf("Host count: %d\n", count);
int max_diff = 0;
for(int i = 0; i < 65536; i++) {
if (hist_h[i] != hist_d[i]) {
max_diff = ::max(max_diff, abs(hist_h[i] - hist_d[i]));
}
}
printf("Maximum difference %d\n", max_diff);
free(hist_h);
free(hist_d);
free(ivf_h);
free(ivg_h);
free(g_h);
free(f_h);
free(data_threshold_h);
hipFree(M_d);
hipFree(g_d);
hipFree(f_d);
hipFree(ivf_d);
hipFree(ivg_d);
hipFree(data_threshold_d);
return 0;
}
| 63a519e8b00571beb008f0b3b0befa7ff3bb5a5c.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#define NUM_THREADS 128
#define NUM_BLOCKS 256
// interpolation
__host__ __device__
float interp(const int3 d, const unsigned char f[], float x, float y, float z)
{
int ix, iy, iz;
float dx1, dy1, dz1, dx2, dy2, dz2;
int k111,k112,k121,k122,k211,k212,k221,k222;
float vf;
const unsigned char *ff;
ix = floorf(x); dx1=x-ix; dx2=1.f-dx1;
iy = floorf(y); dy1=y-iy; dy2=1.f-dy1;
iz = floorf(z); dz1=z-iz; dz2=1.f-dz1;
ff = f + ix-1+d.x*(iy-1+d.y*(iz-1));
k222 = ff[ 0]; k122 = ff[ 1];
k212 = ff[d.x]; k112 = ff[d.x+1];
ff += d.x*d.y;
k221 = ff[ 0]; k121 = ff[ 1];
k211 = ff[d.x]; k111 = ff[d.x+1];
vf = (((k222*dx2+k122*dx1)*dy2 + (k212*dx2+k112*dx1)*dy1))*dz2 +
(((k221*dx2+k121*dx1)*dy2 + (k211*dx2+k111*dx1)*dy1))*dz1;
return(vf);
}
__global__ void spm (
const float *__restrict__ M,
const int data_size,
const unsigned char *__restrict__ g_d,
const unsigned char *__restrict__ f_d,
const int3 dg,
const int3 df,
unsigned char *__restrict__ ivf_d,
unsigned char *__restrict__ ivg_d,
bool *__restrict__ data_threshold_d)
{
// 97 random values
const float ran[] = {
0.656619,0.891183,0.488144,0.992646,0.373326,0.531378,0.181316,0.501944,0.422195,
0.660427,0.673653,0.95733,0.191866,0.111216,0.565054,0.969166,0.0237439,0.870216,
0.0268766,0.519529,0.192291,0.715689,0.250673,0.933865,0.137189,0.521622,0.895202,
0.942387,0.335083,0.437364,0.471156,0.14931,0.135864,0.532498,0.725789,0.398703,
0.358419,0.285279,0.868635,0.626413,0.241172,0.978082,0.640501,0.229849,0.681335,
0.665823,0.134718,0.0224933,0.262199,0.116515,0.0693182,0.85293,0.180331,0.0324186,
0.733926,0.536517,0.27603,0.368458,0.0128863,0.889206,0.866021,0.254247,0.569481,
0.159265,0.594364,0.3311,0.658613,0.863634,0.567623,0.980481,0.791832,0.152594,
0.833027,0.191863,0.638987,0.669,0.772088,0.379818,0.441585,0.48306,0.608106,
0.175996,0.00202556,0.790224,0.513609,0.213229,0.10345,0.157337,0.407515,0.407757,
0.0526927,0.941815,0.149972,0.384374,0.311059,0.168534,0.896648};
const int idx = blockIdx.x * NUM_THREADS + threadIdx.x;
int x_datasize=(dg.x-2);
int y_datasize=(dg.y-2);
for(int i = idx; i < data_size; i += NUM_THREADS*NUM_BLOCKS)
{
float xx_temp = (i%x_datasize)+1.f;
float yy_temp = ((int)floorf((float)i/x_datasize)%y_datasize)+1.f;
float zz_temp = (floorf((float)i/x_datasize))/y_datasize+1.f;
// generate rx,ry,rz coordinates
float rx = xx_temp + ran[i%97];
float ry = yy_temp + ran[i%97];
float rz = zz_temp + ran[i%97];
// rigid transformation over rx,ry,rz coordinates
float xp = M[0]*rx + M[4]*ry + M[ 8]*rz + M[12];
float yp = M[1]*rx + M[5]*ry + M[ 9]*rz+ M[13];
float zp = M[2]*rx + M[6]*ry + M[10]*rz+ M[14];
if (zp>=1.f && zp<df.z && yp>=1.f && yp<df.y && xp>=1.f && xp<df.x)
{
// interpolation
ivf_d[i] = floorf(interp(df, f_d, xp,yp,zp)+0.5f);
ivg_d[i] = floorf(interp(dg, g_d, rx,ry,rz)+0.5f);
data_threshold_d[i] = true;
}
else
{
ivf_d[i] = 0;
ivg_d[i] = 0;
data_threshold_d[i] = false;
}
}
}
void spm_reference (
const float *M,
const int data_size,
const unsigned char *g_d,
const unsigned char *f_d,
const int3 dg,
const int3 df,
unsigned char *ivf_d,
unsigned char *ivg_d,
bool *data_threshold_d)
{
// 97 random values
const float ran[] = {
0.656619,0.891183,0.488144,0.992646,0.373326,0.531378,0.181316,0.501944,0.422195,
0.660427,0.673653,0.95733,0.191866,0.111216,0.565054,0.969166,0.0237439,0.870216,
0.0268766,0.519529,0.192291,0.715689,0.250673,0.933865,0.137189,0.521622,0.895202,
0.942387,0.335083,0.437364,0.471156,0.14931,0.135864,0.532498,0.725789,0.398703,
0.358419,0.285279,0.868635,0.626413,0.241172,0.978082,0.640501,0.229849,0.681335,
0.665823,0.134718,0.0224933,0.262199,0.116515,0.0693182,0.85293,0.180331,0.0324186,
0.733926,0.536517,0.27603,0.368458,0.0128863,0.889206,0.866021,0.254247,0.569481,
0.159265,0.594364,0.3311,0.658613,0.863634,0.567623,0.980481,0.791832,0.152594,
0.833027,0.191863,0.638987,0.669,0.772088,0.379818,0.441585,0.48306,0.608106,
0.175996,0.00202556,0.790224,0.513609,0.213229,0.10345,0.157337,0.407515,0.407757,
0.0526927,0.941815,0.149972,0.384374,0.311059,0.168534,0.896648};
int x_datasize=(dg.x-2);
int y_datasize=(dg.y-2);
for(int i = 0; i < data_size; i++)
{
float xx_temp = (i%x_datasize)+1.f;
float yy_temp = ((int)floorf((float)i/x_datasize)%y_datasize)+1.f;
float zz_temp = (floorf((float)i/x_datasize))/y_datasize+1.f;
// generate rx,ry,rz coordinates
float rx = xx_temp + ran[i%97];
float ry = yy_temp + ran[i%97];
float rz = zz_temp + ran[i%97];
// rigid transformation over rx,ry,rz coordinates
float xp = M[0]*rx + M[4]*ry + M[ 8]*rz + M[12];
float yp = M[1]*rx + M[5]*ry + M[ 9]*rz+ M[13];
float zp = M[2]*rx + M[6]*ry + M[10]*rz+ M[14];
if (zp>=1.f && zp<df.z && yp>=1.f && yp<df.y && xp>=1.f && xp<df.x)
{
// interpolation
ivf_d[i] = floorf(interp(df, f_d, xp,yp,zp)+0.5f);
ivg_d[i] = floorf(interp(dg, g_d, rx,ry,rz)+0.5f);
data_threshold_d[i] = true;
}
else
{
ivf_d[i] = 0;
ivg_d[i] = 0;
data_threshold_d[i] = false;
}
}
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <dimension> <repeat>\n", argv[0]);
return 1;
}
int v = atoi(argv[1]);
int repeat = atoi(argv[2]);
int3 g_vol = {v,v,v};
int3 f_vol = {v,v,v};
const int data_size = (g_vol.x+1) * (g_vol.y+1) * (g_vol.z+5);
const int vol_size = g_vol.x * g_vol.y * g_vol.z;
int *hist_d = (int*) malloc (65536*sizeof(int));
int *hist_h = (int*) malloc (65536*sizeof(int));
memset(hist_d, 0, sizeof(int)*65536);
memset(hist_h, 0, sizeof(int)*65536);
unsigned char *ivf_h = (unsigned char *)malloc(vol_size*sizeof(unsigned char));
unsigned char *ivg_h = (unsigned char *)malloc(vol_size*sizeof(unsigned char));
bool *data_threshold_h = (bool *)malloc(vol_size*sizeof(bool));
srand(123);
float M_h[16];
for (int i = 0; i < 16; i++) M_h[i] = (float)rand() / (float)RAND_MAX;
float *M_d;
hipMalloc((void**)&M_d,16*sizeof(float));
hipMemcpy(M_d,M_h,16*sizeof(float),hipMemcpyHostToDevice);
unsigned char* g_h = (unsigned char*) malloc (data_size * sizeof(unsigned char));
unsigned char* f_h = (unsigned char*) malloc (data_size * sizeof(unsigned char));
for (int i = 0; i < data_size; i++) {
g_h[i] = rand() % 256;
f_h[i] = rand() % 256;
}
unsigned char *g_d, *f_d;
hipMalloc((void**)&g_d, data_size * sizeof(unsigned char));
hipMalloc((void**)&f_d, data_size * sizeof(unsigned char));
hipMemcpy(g_d, g_h, data_size*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(f_d, f_h, data_size*sizeof(unsigned char), hipMemcpyHostToDevice);
unsigned char *ivf_d, *ivg_d;
hipMalloc((void**)&ivf_d,vol_size*sizeof(unsigned char));
hipMalloc((void**)&ivg_d,vol_size*sizeof(unsigned char));
bool *data_threshold_d;
hipMalloc((void**)&data_threshold_d,vol_size*sizeof(bool));
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(spm, NUM_BLOCKS, NUM_THREADS, 0, 0, M_d, vol_size, g_d, f_d, g_vol, f_vol,
ivf_d,ivg_d,data_threshold_d);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
hipMemcpy(ivf_h,ivf_d,vol_size*sizeof(unsigned char),hipMemcpyDeviceToHost);
hipMemcpy(ivg_h,ivg_d,vol_size*sizeof(unsigned char),hipMemcpyDeviceToHost);
hipMemcpy(data_threshold_h,data_threshold_d,vol_size*sizeof(bool),hipMemcpyDeviceToHost);
int count = 0;
for(int i = 0; i < vol_size; i++)
{
if (data_threshold_h[i]) {
hist_d[ivf_h[i]+ivg_h[i]*256] += 1;
count++;
}
}
printf("Device count: %d\n", count);
count = 0;
spm_reference(M_h, vol_size, g_h, f_h, g_vol, f_vol, ivf_h, ivg_h, data_threshold_h);
for(int i = 0; i < vol_size; i++)
{
if (data_threshold_h[i]) {
hist_h[ivf_h[i]+ivg_h[i]*256] += 1;
count++;
}
}
printf("Host count: %d\n", count);
int max_diff = 0;
for(int i = 0; i < 65536; i++) {
if (hist_h[i] != hist_d[i]) {
max_diff = std::max(max_diff, abs(hist_h[i] - hist_d[i]));
}
}
printf("Maximum difference %d\n", max_diff);
free(hist_h);
free(hist_d);
free(ivf_h);
free(ivg_h);
free(g_h);
free(f_h);
free(data_threshold_h);
hipFree(M_d);
hipFree(g_d);
hipFree(f_d);
hipFree(ivf_d);
hipFree(ivg_d);
hipFree(data_threshold_d);
return 0;
}
|
2bcb761f79068f8083ffe0bcc8e9bc2915c6c69b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// random read but coalesced
#include<stdio.h>
#include<cstdlib>
__global__ void race(volatile int *arr, size_t size, int n, int *out) {
int gid = threadIdx.x + blockDim.x * blockIdx.x;
int ptr = gid;
if (ptr > size) ptr = 0;
for (int i = 0; i < n; i++) {
ptr = arr[ptr];
}
out[gid] = ptr;
}
int main(int argc, char *argv[]) {
int grid_size=10, block_size=128, n=100, step=100;
int warp_size = 32;
if (argc > 4) {
sscanf(argv[1], "%d", &grid_size);
sscanf(argv[2], "%d", &block_size);
sscanf(argv[3], "%d", &n);
sscanf(argv[4], "%d", &step);
}
// make sure block size is multiple of warp size
block_size -= block_size % warp_size;
n -= n % warp_size;
size_t size = n;
size_t total_size = size * sizeof(int);
printf("size = %zd KB\n", total_size / 1024);
int *arr = new int[size];
int *ra = new int[size];
int *out = new int[grid_size * block_size];
{
// create random permutation
for (int i = 0; i < n/warp_size; i++) {
ra[i] = i;
}
for (int i = 1; i < n/warp_size; i++) {
int r = rand()%(i+1);
int tmp = ra[i];
ra[i] = ra[r];
ra[r] = tmp;
}
// create "coalesced" random cycle
for (int i = 1; i < n/warp_size; i++) {
for (int j = 0; j < warp_size; j++) {
arr[ra[i-1]*warp_size + j] = ra[i]*warp_size + j;
}
}
for (int j = 0; j < warp_size; j++) {
arr[ra[n/warp_size-1]*warp_size + j] = ra[0]*warp_size + j;
}
}
int *garr, *gout;
hipMalloc(&garr, total_size);
hipMalloc(&gout, sizeof(int) * grid_size * block_size);
hipMemcpy(garr, arr, total_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( race), dim3(grid_size), dim3(block_size), 0, 0, garr, size, step, gout);
hipMemcpy(out, gout, sizeof(int) * grid_size * block_size, hipMemcpyDeviceToHost);
if (block_size * grid_size > 123) {
printf("out[123] = %d\n", out[123]);
}
return 0;
}
| 2bcb761f79068f8083ffe0bcc8e9bc2915c6c69b.cu | // random read but coalesced
#include<stdio.h>
#include<cstdlib>
__global__ void race(volatile int *arr, size_t size, int n, int *out) {
int gid = threadIdx.x + blockDim.x * blockIdx.x;
int ptr = gid;
if (ptr > size) ptr = 0;
for (int i = 0; i < n; i++) {
ptr = arr[ptr];
}
out[gid] = ptr;
}
int main(int argc, char *argv[]) {
int grid_size=10, block_size=128, n=100, step=100;
int warp_size = 32;
if (argc > 4) {
sscanf(argv[1], "%d", &grid_size);
sscanf(argv[2], "%d", &block_size);
sscanf(argv[3], "%d", &n);
sscanf(argv[4], "%d", &step);
}
// make sure block size is multiple of warp size
block_size -= block_size % warp_size;
n -= n % warp_size;
size_t size = n;
size_t total_size = size * sizeof(int);
printf("size = %zd KB\n", total_size / 1024);
int *arr = new int[size];
int *ra = new int[size];
int *out = new int[grid_size * block_size];
{
// create random permutation
for (int i = 0; i < n/warp_size; i++) {
ra[i] = i;
}
for (int i = 1; i < n/warp_size; i++) {
int r = rand()%(i+1);
int tmp = ra[i];
ra[i] = ra[r];
ra[r] = tmp;
}
// create "coalesced" random cycle
for (int i = 1; i < n/warp_size; i++) {
for (int j = 0; j < warp_size; j++) {
arr[ra[i-1]*warp_size + j] = ra[i]*warp_size + j;
}
}
for (int j = 0; j < warp_size; j++) {
arr[ra[n/warp_size-1]*warp_size + j] = ra[0]*warp_size + j;
}
}
int *garr, *gout;
cudaMalloc(&garr, total_size);
cudaMalloc(&gout, sizeof(int) * grid_size * block_size);
cudaMemcpy(garr, arr, total_size, cudaMemcpyHostToDevice);
race<<<grid_size, block_size>>>(garr, size, step, gout);
cudaMemcpy(out, gout, sizeof(int) * grid_size * block_size, cudaMemcpyDeviceToHost);
if (block_size * grid_size > 123) {
printf("out[123] = %d\n", out[123]);
}
return 0;
}
|
6a789665423c4222c22905e541e2c915ef2b19ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common_define_gpu.h"
#include "common_extern_c.h"
#include "common_data_define.h"
__device__ int device_borderInterpolate(int p, int len, int borderType)
{
int q = p;
if ((unsigned)q < (unsigned)len);
else if (BORDER_REPLICATE == borderType)
{
q = q < 0 ? 0 : len - 1;
}
else if (BORDER_REFLECT == borderType || BORDER_REFLECT_101 == borderType)
{
int delta = borderType == BORDER_REFLECT_101;
if (1 == len)
return 0;
do
{
if (q < 0)
q = -q - 1 + delta;
else
q = len - 1 - (q - len) - delta;
} while ((unsigned)q >= (unsigned)len);
}
else if (borderType == BORDER_WRAP)
{
if (q < 0)
q -= ((q - len + 1) / len)*len;
if (q >= len)
q %= len;
}
else if (borderType == BORDER_CONSTANT)
q = -1;
else
{
;
}
return q;
}
__global__ void device_copyMakeBorder_8u(const unsigned char * srcptr, size_t srcstep, int * srcdim,
unsigned char * dstptr, size_t dststep, int * dstdim,
int top, int left, int channel, int bordertype)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z;
int srcwidth(srcdim[1]), srcheight(srcdim[2]);
int dstwidth(dstdim[1]), dstheight(dstdim[2]);
if (x >= dstwidth || y >= dstheight)return;
int src_x = device_borderInterpolate(x - left, srcdim[1], bordertype);
int src_y = device_borderInterpolate(y - top, srcdim[2], bordertype);
int srcloc = src_y * srcstep + src_x * channel + z;
int dstloc = y * dststep + x * channel + z;
dstptr[dstloc] = srcptr[srcloc];
}
void CopyMakeborderGPU(unsigned char* devsrc, int srcheight, int srcwidth, int channel,
unsigned char* devdst, int dstheight, int dstwidth,
int top, int bottom, int left, int right, int bordertype)
{
bordertype &= ~BORDER_ISOLATED;
int srcDim[3] = { channel,srcwidth,srcheight };
int dstDim[3] = { channel,dstwidth,dstheight };
int srcstep = srcwidth * channel, dststep = dstwidth * channel;
int *srcDimPtr, *dstDimPtr;
int dimSize = sizeof(srcDim);
hipMalloc((&srcDimPtr), sizeof(srcDim));
hipMalloc(&dstDimPtr, dimSize);
hipMemcpy(srcDimPtr, srcDim, dimSize, hipMemcpyHostToDevice);
hipMemcpy(dstDimPtr, dstDim, dimSize, hipMemcpyHostToDevice);
#define DEVIDE 32
dim3 dimblock(DEVIDE, DEVIDE);
dim3 dimgrid((dstwidth + DEVIDE - 1) / DEVIDE, (dstheight + DEVIDE - 1) / DEVIDE, channel);//grid
//std::vector<uchar>value = { 0,0,0,0 };
if (bordertype != 0)
{
device_copyMakeBorder_8u << < dimgrid, dimblock >> > (devsrc, srcstep, srcDimPtr,
devdst, dststep, dstDimPtr, top, left, channel, bordertype);
}
else
{
//copyMakeConstBorder_8u(devsrc, srcstep, srcDimPtr, devdst, dststep, dstDimPtr, top, left, channel, value);
}
hipFree(srcDimPtr);
hipFree(dstDimPtr);
} | 6a789665423c4222c22905e541e2c915ef2b19ad.cu | #include "common_define_gpu.h"
#include "common_extern_c.h"
#include "common_data_define.h"
__device__ int device_borderInterpolate(int p, int len, int borderType)
{
int q = p;
if ((unsigned)q < (unsigned)len);
else if (BORDER_REPLICATE == borderType)
{
q = q < 0 ? 0 : len - 1;
}
else if (BORDER_REFLECT == borderType || BORDER_REFLECT_101 == borderType)
{
int delta = borderType == BORDER_REFLECT_101;
if (1 == len)
return 0;
do
{
if (q < 0)
q = -q - 1 + delta;
else
q = len - 1 - (q - len) - delta;
} while ((unsigned)q >= (unsigned)len);
}
else if (borderType == BORDER_WRAP)
{
if (q < 0)
q -= ((q - len + 1) / len)*len;
if (q >= len)
q %= len;
}
else if (borderType == BORDER_CONSTANT)
q = -1;
else
{
;
}
return q;
}
__global__ void device_copyMakeBorder_8u(const unsigned char * srcptr, size_t srcstep, int * srcdim,
unsigned char * dstptr, size_t dststep, int * dstdim,
int top, int left, int channel, int bordertype)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int z = blockIdx.z;
int srcwidth(srcdim[1]), srcheight(srcdim[2]);
int dstwidth(dstdim[1]), dstheight(dstdim[2]);
if (x >= dstwidth || y >= dstheight)return;
int src_x = device_borderInterpolate(x - left, srcdim[1], bordertype);
int src_y = device_borderInterpolate(y - top, srcdim[2], bordertype);
int srcloc = src_y * srcstep + src_x * channel + z;
int dstloc = y * dststep + x * channel + z;
dstptr[dstloc] = srcptr[srcloc];
}
void CopyMakeborderGPU(unsigned char* devsrc, int srcheight, int srcwidth, int channel,
unsigned char* devdst, int dstheight, int dstwidth,
int top, int bottom, int left, int right, int bordertype)
{
bordertype &= ~BORDER_ISOLATED;
int srcDim[3] = { channel,srcwidth,srcheight };
int dstDim[3] = { channel,dstwidth,dstheight };
int srcstep = srcwidth * channel, dststep = dstwidth * channel;
int *srcDimPtr, *dstDimPtr;
int dimSize = sizeof(srcDim);
cudaMalloc((&srcDimPtr), sizeof(srcDim));
cudaMalloc(&dstDimPtr, dimSize);
cudaMemcpy(srcDimPtr, srcDim, dimSize, cudaMemcpyHostToDevice);
cudaMemcpy(dstDimPtr, dstDim, dimSize, cudaMemcpyHostToDevice);
#define DEVIDE 32
dim3 dimblock(DEVIDE, DEVIDE);
dim3 dimgrid((dstwidth + DEVIDE - 1) / DEVIDE, (dstheight + DEVIDE - 1) / DEVIDE, channel);//gridÁ─╬ČÂ╚
//std::vector<uchar>value = { 0,0,0,0 };
if (bordertype != 0)
{
device_copyMakeBorder_8u << < dimgrid, dimblock >> > (devsrc, srcstep, srcDimPtr,
devdst, dststep, dstDimPtr, top, left, channel, bordertype);
}
else
{
//copyMakeConstBorder_8u(devsrc, srcstep, srcDimPtr, devdst, dststep, dstDimPtr, top, left, channel, value);
}
cudaFree(srcDimPtr);
cudaFree(dstDimPtr);
} |
fcd29ec2572f07062a7ff3683991ee8b958408b3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M 2048 * 6
#define N 2048 * 6
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE* data, DATA_TYPE* data_gpu)
{
int i, j;
for (i=0; i < (M+1); i++)
{
for (j=0; j< (N+1); j++)
{
data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1);
data_gpu[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1);
}
}
}
void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat)
{
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
for (j = 1; j < (M+1); j++)
{
mean[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 1; j < (M+1); j++)
{
stddev[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// Center and reduce the column vectors.
for (i = 1; i < (N+1); i++)
{
for (j = 1; j < (M+1); j++)
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ;
}
}
// Calculate the m * m correlation matrix.
for (j1 = 1; j1 < M; j1++)
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = j1+1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for (i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]);
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
symmat[M*(M+1) + M] = 1.0;
}
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=1; i < (M+1); i++)
{
for (j=1; j < (N+1); j++)
{
if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_outputFromGpu[i*N + j]);
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
mean[j] = 0.0;
int i;
for(i=1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
__global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
std[j] = 0.0;
int i;
for(i = 1; i < (N+1); i++)
{
std[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
std[j] /= (FLOAT_N);
std[j] = sqrt(std[j]);
if(std[j] <= EPS)
{
std[j] = 1.0;
}
}
}
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N) * std[j]);
}
}
__global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i, j2;
if ((j1 >= 1) && (j1 < M))
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = (j1 + 1); j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for(i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
}
void correlationCuda(DATA_TYPE* data_gpu, DATA_TYPE* mean_gpu, DATA_TYPE* stddev_gpu, DATA_TYPE* symmat_gpu)
{
double t_start, t_end;
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
dim3 grid1((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
dim3 grid2((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1);
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
dim3 grid3((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)(ceil((float)(N)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y)));
dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
dim3 grid4((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1);
t_start = rtclock();
hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1) , 0, 0, mean_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( std_kernel), dim3(grid2), dim3(block2) , 0, 0, mean_gpu,stddev_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce_kernel), dim3(grid3), dim3(block3) , 0, 0, mean_gpu,stddev_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( corr_kernel), dim3(grid4), dim3(block4) , 0, 0, symmat_gpu,data_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
symmat_gpu[(M)*(M+1) + (M)] = 1.0;
// DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0;
// hipMemcpy(&(symmat_gpu[(M)*(M+1) + (M)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), hipMemcpyHostToDevice);
}
int main()
{
double t_start, t_end;
DATA_TYPE* data;
DATA_TYPE* mean;
DATA_TYPE* stddev;
DATA_TYPE* symmat;
DATA_TYPE *data_gpu;
DATA_TYPE *stddev_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
hipMallocManaged(&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
hipMallocManaged(&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
hipMallocManaged(&stddev_gpu, sizeof(DATA_TYPE) * (M+1));
hipMallocManaged(&mean_gpu, sizeof(DATA_TYPE) * (M+1));
init_arrays(data, data_gpu);
GPU_argv_init();
correlationCuda(data_gpu, mean_gpu, stddev_gpu, symmat_gpu);
t_start = rtclock();
correlation(data, mean, stddev, symmat);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(symmat, symmat_gpu);
free(data);
free(mean);
free(stddev);
free(symmat);
hipFree(data_gpu);
hipFree(symmat_gpu);
hipFree(stddev_gpu);
hipFree(mean_gpu);
return 0;
}
| fcd29ec2572f07062a7ff3683991ee8b958408b3.cu | /**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M 2048 * 6
#define N 2048 * 6
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE* data, DATA_TYPE* data_gpu)
{
int i, j;
for (i=0; i < (M+1); i++)
{
for (j=0; j< (N+1); j++)
{
data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1);
data_gpu[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1);
}
}
}
void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat)
{
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
for (j = 1; j < (M+1); j++)
{
mean[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 1; j < (M+1); j++)
{
stddev[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// Center and reduce the column vectors.
for (i = 1; i < (N+1); i++)
{
for (j = 1; j < (M+1); j++)
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ;
}
}
// Calculate the m * m correlation matrix.
for (j1 = 1; j1 < M; j1++)
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = j1+1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for (i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]);
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
symmat[M*(M+1) + M] = 1.0;
}
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=1; i < (M+1); i++)
{
for (j=1; j < (N+1); j++)
{
if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_outputFromGpu[i*N + j]);
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
mean[j] = 0.0;
int i;
for(i=1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
__global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
std[j] = 0.0;
int i;
for(i = 1; i < (N+1); i++)
{
std[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
std[j] /= (FLOAT_N);
std[j] = sqrt(std[j]);
if(std[j] <= EPS)
{
std[j] = 1.0;
}
}
}
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N) * std[j]);
}
}
/* Correlation kernel: thread j1 fills row j1 of the upper triangle of the
   symmetric correlation matrix and mirrors each value into the lower
   triangle.  j1 runs only to M-1: for j1 == M the inner loop would be
   empty and the final diagonal element symmat[M*(M+1)+M] is written by
   the host after the launch (see correlationCuda). */
__global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i, j2;
if ((j1 >= 1) && (j1 < M))
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = (j1 + 1); j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
// Dot product of normalized columns j1 and j2 over all N samples.
for(i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
}
/* Run the four correlation stages on the GPU and time them.  The stages
 * are dependent, so each launch is followed by a device sync.  All buffers
 * are managed (cudaMallocManaged in main), so the final host store to
 * symmat_gpu is legal after the last cudaDeviceSynchronize(). */
void correlationCuda(DATA_TYPE* data_gpu, DATA_TYPE* mean_gpu, DATA_TYPE* stddev_gpu, DATA_TYPE* symmat_gpu)
{
    double t_start, t_end;
    /* Bug fix: the grids were computed as (size_t)(ceil((float)M) / BLOCK),
     * i.e. ceil() wrapped M alone and the quotient was truncated, so the
     * partial tail block was dropped whenever M (or N) is not a multiple
     * of the block size, leaving elements uncomputed.  ceil() now wraps
     * the quotient, giving the standard ceiling-division grid size. */
    dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
    dim3 grid1((size_t)ceil(((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1);
    dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
    dim3 grid2((size_t)ceil(((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1);
    dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
    dim3 grid3((size_t)ceil(((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)ceil(((float)(N)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y)));
    dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
    dim3 grid4((size_t)ceil(((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1);
    t_start = rtclock();
    mean_kernel<<< grid1, block1 >>>(mean_gpu,data_gpu);
    cudaDeviceSynchronize();
    std_kernel<<< grid2, block2 >>>(mean_gpu,stddev_gpu,data_gpu);
    cudaDeviceSynchronize();
    reduce_kernel<<< grid3, block3 >>>(mean_gpu,stddev_gpu,data_gpu);
    cudaDeviceSynchronize();
    corr_kernel<<< grid4, block4 >>>(symmat_gpu,data_gpu);
    cudaDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
    /* corr_kernel leaves the last diagonal element untouched (its thread
     * range stops at M-1); set it from the host via managed memory. */
    symmat_gpu[(M)*(M+1) + (M)] = 1.0;
}
/* Driver: allocates plain host buffers for the CPU reference and managed
   (unified) buffers for the GPU path, runs both correlation
   implementations, and compares the results.
   NOTE(review): malloc/cudaMallocManaged return codes are not checked. */
int main()
{
double t_start, t_end;
DATA_TYPE* data;
DATA_TYPE* mean;
DATA_TYPE* stddev;
DATA_TYPE* symmat;
DATA_TYPE *data_gpu;
DATA_TYPE *stddev_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
// Managed memory: host code touches symmat_gpu directly after the GPU run.
cudaMallocManaged(&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
cudaMallocManaged(&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
cudaMallocManaged(&stddev_gpu, sizeof(DATA_TYPE) * (M+1));
cudaMallocManaged(&mean_gpu, sizeof(DATA_TYPE) * (M+1));
init_arrays(data, data_gpu);
GPU_argv_init();
correlationCuda(data_gpu, mean_gpu, stddev_gpu, symmat_gpu);
t_start = rtclock();
correlation(data, mean, stddev, symmat);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(symmat, symmat_gpu);
free(data);
free(mean);
free(stddev);
free(symmat);
cudaFree(data_gpu);
cudaFree(symmat_gpu);
cudaFree(stddev_gpu);
cudaFree(mean_gpu);
return 0;
}
|
534a8b1f2299b8618c9fb2562e6b22c79d68f45e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include "headers.h"
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
// GPU macro
#define THREADS_PER_BLOCK 1024
#define N_BLOCKS 8
typedef float floatT;
/* Software grid-wide barrier across all N_BLOCKS blocks.
 * Phase 1: thread 0 of each block posts its arrival in lockIn; block 0
 * waits for every entry, then releases all blocks through lockOut.
 * Requires all N_BLOCKS blocks to be resident simultaneously, otherwise
 * the spin loops deadlock.
 * Fix: the spin loops now read the lock words through volatile pointers —
 * plain int* loads may be cached in a register by the compiler, which can
 * make the loop spin forever on a stale value.
 * NOTE(review): the lock words are never reset to 0, so only the first
 * barrier in a kernel actually synchronizes; later calls fall through
 * immediately — confirm whether that is the intended (one-shot) design. */
__device__ void __gpu_sync(int *lockIn, int *lockOut) {
    volatile int *vIn = lockIn;
    volatile int *vOut = lockOut;
    if (threadIdx.x==0) {
        vIn[blockIdx.x] = N_BLOCKS;
    }
    if (blockIdx.x==0) {
        if (threadIdx.x<N_BLOCKS) {
            while (vIn[threadIdx.x] != N_BLOCKS) {
            }
        }
        __syncthreads();
        if (threadIdx.x<N_BLOCKS) {
            vOut[threadIdx.x] = N_BLOCKS;
        }
    }
    if (threadIdx.x==0) {
        while (vOut[blockIdx.x] != N_BLOCKS) {
        }
    }
    __syncthreads();
}
/* FDTD time-stepping kernel: each block owns a contiguous chunk of columns
   (j) and its threads stride over rows (i).  The E-field update and the
   H-field updates are separated by the software grid barrier __gpu_sync,
   so all N_BLOCKS blocks must be co-resident.
   NOTE(review): the H updates here only cover i,j < size-1, while the CPU
   host_fdtd updates hy for all j in [0,size) and hx for all i in [0,size)
   — confirm the boundary row/column is meant to stay zero.
   NOTE(review): parameter x is declared int but callers pass the floatT
   grid spacing hx; this only works while hx == 1.0 — confirm. */
__global__ void gpu_eh(const int size, const int x, const floatT t, const floatT sigma,
const int idx, const int idy, const int k_beg, int k_end,
floatT *e, floatT *hx, floatT *hy,
int *lockIn, int *lockOut) {
// Large unused shared array — presumably to throttle occupancy so the
// grid-wide spin barrier cannot deadlock; confirm intent.
__shared__ char dummy[40000];
int i_base = threadIdx.x;
int chunk = (size-1)/gridDim.x+1;
int j_base = blockIdx.x*chunk;
for (int k=k_beg; k<=k_end; k+=1) {
// Update E from the discrete curl of H (interior points only).
for (int j = j_base; j<min(size-1,j_base+chunk); j+=1) {
for (int i = i_base; i<(size-1); i+=blockDim.x) {
if (i>0 && j>0) {
e[INDX(i,j,size)] += (hy[INDX(i,j,size-1)]-hy[INDX(i-1,j,size-1)])- (hx[INDX(i,j,size)]-hx[INDX(i,j-1,size)]);
// Soft source: inject the driving current at the source cell.
if (i==idx && j==idy) { e[INDX(i,j,size)] -= FJ(k, x, t, sigma); }
}
}
}
__gpu_sync(lockIn,lockOut);
// Update Hx and Hy from E differences; hy has leading dimension size-1.
for (int j = j_base; j<min(size-1,j_base+chunk); j+=1) {
for (int i = i_base; i<(size-1); i+=blockDim.x) {
hy[INDX(i,j,size-1)] += 0.5*(e[INDX(i+1,j,size)]-e[INDX(i,j,size)]);
hx[INDX(i, j, size)] -= 0.5*(e[INDX(i, j+1, size)] - e[INDX(i, j, size)]);
}
}
__gpu_sync(lockIn,lockOut);
}
}
/* CPU reference FDTD: for each time step k, update E from the curl of H,
   inject the source at (idx, idy), then update Hy and Hx from E
   differences.  Column-major indexing via INDX; e and hx use leading
   dimension size, hy uses size-1.
   NOTE(review): parameter x is declared int but callers pass the floatT
   spacing hx — works only while hx == 1.0; confirm. */
void host_fdtd(const int size, const int x, const floatT t, const floatT sigma,
const int idx, const int idy, const int k_beg, const int k_end,
floatT *e, floatT *hx, floatT *hy) {
for (int k = k_beg; k <= k_end; k++) {
// E update over interior points [1, size-1).
for (int i = 1; i < (size-1); i++) {
for (int j = 1; j < (size-1); j++) {
e[INDX(i, j, size)] += (hy[INDX(i, j, (size-1))] - hy[INDX(i-1, j, (size-1))])
- (hx[INDX(i, j, size)] - hx[INDX(i, j-1, size)]);
}
}
e[INDX(idx, idy, size)] -= FJ(k, x, t, sigma);
// Hy update: rows [0, size-1), all columns.
for (int i = 0; i < (size-1); i++) {
for (int j = 0; j < size; j++) {
hy[INDX(i,j,(size-1))] += 0.5 * (e[INDX(i+1, j, size)] - e[INDX(i, j, size)]);
}
}
// Hx update: all rows, columns [0, size-1).
for (int i = 0; i < size; i++) {
for (int j = 0; j < (size-1); j++) {
hx[INDX(i, j, size)] -= 0.5 * (e[INDX(i, j+1, size)] - e[INDX(i, j, size)]);
}
}
}
}
/* Driver: sets up the FDTD grids, runs the CPU reference and the GPU
   kernel for the same time steps, and compares E/Hx/Hy element-wise with
   a relative-difference threshold.
   NOTE(review): the comparison divides by the CPU value; untouched cells
   are 0, giving 0/0 = NaN, and NaN > thresh is false, so such cells pass
   silently — confirm that is acceptable. */
int main(int argc, char *argv[]) {
printf("fdtd_sync: GPU using implicit CPU sync\n" );
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
// Physical/derived problem parameters (L = domain, hx = spacing, ht = dt).
floatT L = 1598.0;
floatT hx = 1.0;
floatT ht = hx/sqrt(2.0)/3;
floatT sigma = 200*ht;
fprintf(stdout, "fj output is %f\n", FJ(500, hx, ht, sigma));
int size = (int) L/hx+1;
int idx = (int) (0.625*L/hx)+1;
int idy = (int) (0.5*L/hx)+1;
fprintf(stdout, "size if %d, source is at idx=%d and idy=%d.\n", size, idx, idy);
floatT *h_E, *h_Hx, *h_Hy;
size_t num_E = size * size;
size_t num_H = (size - 1)*size;
size_t numbytes_E = num_E*sizeof(floatT);
size_t numbytes_H = num_H*sizeof(floatT);
fprintf(stdout, "total memory allocated is %lu\n", numbytes_E+2*numbytes_H);
clock_t t_begin, t_end;
t_begin = clock();
// Host fields start zeroed (calloc); seed E at the source cell.
h_E = (floatT *) calloc (num_E, sizeof(floatT));
h_Hx = (floatT *) calloc (num_H, sizeof(floatT));
h_Hy = (floatT *) calloc (num_H, sizeof(floatT));
h_E[INDX(idx, idy, size)] = - FJ(1, hx, ht, sigma);
// GPU memory allocation and initialization
floatT *d_E, *d_Hx, *d_Hy;
checkCUDA( hipMalloc( (void **) &d_E, numbytes_E ) );
checkCUDA( hipMalloc( (void **) &d_Hx, numbytes_H ) );
checkCUDA( hipMalloc( (void **) &d_Hy, numbytes_H ) );
checkCUDA( hipMemcpy(d_E, h_E, numbytes_E, hipMemcpyHostToDevice) );
checkCUDA( hipMemset(d_Hx, 0, numbytes_H) );
checkCUDA( hipMemset(d_Hy, 0, numbytes_H) );
t_end = clock();
fprintf(stdout, "Memory allocation time is %f s\n", (float)(t_end - t_begin) / CLOCKS_PER_SEC);
// lock allocation (zeroed spin-lock words for the grid-wide barrier)
int *d_lockIn, *d_lockOut;
checkCUDA( hipMalloc( (void **) &d_lockIn, N_BLOCKS*sizeof(int)) );
checkCUDA( hipMalloc( (void **) &d_lockOut, N_BLOCKS*sizeof(int)) );
checkCUDA( hipMemset(d_lockIn, 0, N_BLOCKS*sizeof(int)) );
checkCUDA( hipMemset(d_lockOut, 0, N_BLOCKS*sizeof(int)) );
int k_beg = 2;
int k_end = 3;//1500;
t_begin = clock();
host_fdtd(size, hx, ht, sigma, idx, idy, k_beg, k_end, h_E, h_Hx, h_Hy);
/*
FILE *fp;
fp = fopen("./cpu_E.f","rb");
fread(h_E,sizeof(floatT),num_E,fp);
fclose(fp);
fprintf(stdout, "finish reading E.\n");
fp = fopen("./cpu_Hx.f","rb");
fread(h_Hx,sizeof(floatT),num_H,fp);
fclose(fp);
fprintf(stdout, "finish reading Hx.\n");
fp = fopen("./cpu_Hy.f","rb");
fread(h_Hy,sizeof(floatT),num_H,fp);
fclose(fp);
fprintf(stdout, "finish reading Hy.\n");
*/
t_end = clock();
fprintf(stdout, "CPU calculation time for %d iteration is %f s\n", k_end, (float)(t_end - t_begin) / CLOCKS_PER_SEC);
// GPU execution
dim3 threads( min(THREADS_PER_BLOCK,size), 1, 1);
dim3 blocks( N_BLOCKS, 1, 1);
fprintf(stdout, "block size is %d by %d.\n", blocks.x, blocks.y);
/* GPU timer */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( gpu_eh), dim3(blocks), dim3(threads) , 0, 0, size, hx, ht, sigma, idx, idy, k_beg, k_end, d_E, d_Hx, d_Hy, d_lockIn, d_lockOut);
checkKERNEL();
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float gpuTime;
checkCUDA( hipEventElapsedTime( &gpuTime, start, stop ) );
printf("GPU naive calculation time %f ms\n", gpuTime );
floatT *out_E, *out_Hx, *out_Hy;
out_E = (floatT *) malloc (numbytes_E);
out_Hx = (floatT *) malloc (numbytes_H);
out_Hy = (floatT *) malloc (numbytes_H);
checkCUDA( hipMemcpy( out_E, d_E, numbytes_E, hipMemcpyDeviceToHost ) );
checkCUDA( hipMemcpy( out_Hx, d_Hx, numbytes_H, hipMemcpyDeviceToHost ) );
checkCUDA( hipMemcpy( out_Hy, d_Hy, numbytes_H, hipMemcpyDeviceToHost ) );
// Element-wise relative comparison of E, Hx, Hy against the CPU result.
int success = 1;
floatT diff, thresh=1e-6;
for( int i = 0; i < size; i++ ) {
for ( int j = 0; j<size; j++ ) {
diff = abs(1.0-out_E[INDX(i,j,size)]/h_E[INDX(i,j,size)]);
if ( diff>thresh ) {
printf("error in E element %d, %d: CPU %e vs GPU %e\n",i,j,h_E[INDX(i,j,size)],out_E[INDX(i,j,size)] );
success = 0;
break;
}
}
}
for( int i = 0; i < size; i++ ) {
for ( int j = 0; j<size-1; j++ ) {
diff = abs(1.0-out_Hx[INDX(i,j,size)]/h_Hx[INDX(i,j,size)]);
if ( diff>thresh ) {
printf("error in Hx element %d, %d: CPU %e vs GPU %e\n",i,j,h_Hx[INDX(i,j,size)],out_Hx[INDX(i,j,size)] );
success = 0;
break;
}
}
}
for( int i = 0; i < size-1; i++ ) {
for ( int j = 0; j<size; j++ ) {
diff = abs(1.0-out_Hy[INDX(i,j,size-1)]/h_Hy[INDX(i,j,size-1)]);
if ( diff>thresh) {
printf("error in Hy element %d, %d: CPU %e vs GPU %e\n",i,j,h_Hy[INDX(i,j,size-1)],out_Hy[INDX(i,j,size-1)] );
success = 0;
break;
}
}
}
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
free(h_E);
free(h_Hx);
free(h_Hy);
free(out_E);
free(out_Hx);
free(out_Hy);
checkCUDA( hipFree( d_E ) );
checkCUDA( hipFree( d_Hx ) );
checkCUDA( hipFree( d_Hy ) );
checkCUDA( hipDeviceSynchronize() );
return 0;
}
| 534a8b1f2299b8618c9fb2562e6b22c79d68f45e.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include "headers.h"
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
// GPU macro
#define THREADS_PER_BLOCK 1024
#define N_BLOCKS 8
typedef float floatT;
/* Software grid-wide barrier across all N_BLOCKS blocks.
 * Phase 1: thread 0 of each block posts its arrival in lockIn; block 0
 * waits for every entry, then releases all blocks through lockOut.
 * Requires all N_BLOCKS blocks to be resident simultaneously, otherwise
 * the spin loops deadlock.
 * Fix: the spin loops now read the lock words through volatile pointers —
 * plain int* loads may be cached in a register by the compiler, which can
 * make the loop spin forever on a stale value.
 * NOTE(review): the lock words are never reset to 0, so only the first
 * barrier in a kernel actually synchronizes; later calls fall through
 * immediately — confirm whether that is the intended (one-shot) design. */
__device__ void __gpu_sync(int *lockIn, int *lockOut) {
    volatile int *vIn = lockIn;
    volatile int *vOut = lockOut;
    if (threadIdx.x==0) {
        vIn[blockIdx.x] = N_BLOCKS;
    }
    if (blockIdx.x==0) {
        if (threadIdx.x<N_BLOCKS) {
            while (vIn[threadIdx.x] != N_BLOCKS) {
            }
        }
        __syncthreads();
        if (threadIdx.x<N_BLOCKS) {
            vOut[threadIdx.x] = N_BLOCKS;
        }
    }
    if (threadIdx.x==0) {
        while (vOut[blockIdx.x] != N_BLOCKS) {
        }
    }
    __syncthreads();
}
/* FDTD time-stepping kernel: each block owns a contiguous chunk of columns
   (j) and its threads stride over rows (i).  The E-field update and the
   H-field updates are separated by the software grid barrier __gpu_sync,
   so all N_BLOCKS blocks must be co-resident.
   NOTE(review): the H updates here only cover i,j < size-1, while the CPU
   host_fdtd updates hy for all j in [0,size) and hx for all i in [0,size)
   — confirm the boundary row/column is meant to stay zero.
   NOTE(review): parameter x is declared int but callers pass the floatT
   grid spacing hx; this only works while hx == 1.0 — confirm. */
__global__ void gpu_eh(const int size, const int x, const floatT t, const floatT sigma,
const int idx, const int idy, const int k_beg, int k_end,
floatT *e, floatT *hx, floatT *hy,
int *lockIn, int *lockOut) {
// Large unused shared array — presumably to throttle occupancy so the
// grid-wide spin barrier cannot deadlock; confirm intent.
__shared__ char dummy[40000];
int i_base = threadIdx.x;
int chunk = (size-1)/gridDim.x+1;
int j_base = blockIdx.x*chunk;
for (int k=k_beg; k<=k_end; k+=1) {
// Update E from the discrete curl of H (interior points only).
for (int j = j_base; j<min(size-1,j_base+chunk); j+=1) {
for (int i = i_base; i<(size-1); i+=blockDim.x) {
if (i>0 && j>0) {
e[INDX(i,j,size)] += (hy[INDX(i,j,size-1)]-hy[INDX(i-1,j,size-1)])- (hx[INDX(i,j,size)]-hx[INDX(i,j-1,size)]);
// Soft source: inject the driving current at the source cell.
if (i==idx && j==idy) { e[INDX(i,j,size)] -= FJ(k, x, t, sigma); }
}
}
}
__gpu_sync(lockIn,lockOut);
// Update Hx and Hy from E differences; hy has leading dimension size-1.
for (int j = j_base; j<min(size-1,j_base+chunk); j+=1) {
for (int i = i_base; i<(size-1); i+=blockDim.x) {
hy[INDX(i,j,size-1)] += 0.5*(e[INDX(i+1,j,size)]-e[INDX(i,j,size)]);
hx[INDX(i, j, size)] -= 0.5*(e[INDX(i, j+1, size)] - e[INDX(i, j, size)]);
}
}
__gpu_sync(lockIn,lockOut);
}
}
/* CPU reference FDTD: for each time step k, update E from the curl of H,
   inject the source at (idx, idy), then update Hy and Hx from E
   differences.  Column-major indexing via INDX; e and hx use leading
   dimension size, hy uses size-1.
   NOTE(review): parameter x is declared int but callers pass the floatT
   spacing hx — works only while hx == 1.0; confirm. */
void host_fdtd(const int size, const int x, const floatT t, const floatT sigma,
const int idx, const int idy, const int k_beg, const int k_end,
floatT *e, floatT *hx, floatT *hy) {
for (int k = k_beg; k <= k_end; k++) {
// E update over interior points [1, size-1).
for (int i = 1; i < (size-1); i++) {
for (int j = 1; j < (size-1); j++) {
e[INDX(i, j, size)] += (hy[INDX(i, j, (size-1))] - hy[INDX(i-1, j, (size-1))])
- (hx[INDX(i, j, size)] - hx[INDX(i, j-1, size)]);
}
}
e[INDX(idx, idy, size)] -= FJ(k, x, t, sigma);
// Hy update: rows [0, size-1), all columns.
for (int i = 0; i < (size-1); i++) {
for (int j = 0; j < size; j++) {
hy[INDX(i,j,(size-1))] += 0.5 * (e[INDX(i+1, j, size)] - e[INDX(i, j, size)]);
}
}
// Hx update: all rows, columns [0, size-1).
for (int i = 0; i < size; i++) {
for (int j = 0; j < (size-1); j++) {
hx[INDX(i, j, size)] -= 0.5 * (e[INDX(i, j+1, size)] - e[INDX(i, j, size)]);
}
}
}
}
/* Driver: sets up the FDTD grids, runs the CPU reference and the GPU
   kernel for the same time steps, and compares E/Hx/Hy element-wise with
   a relative-difference threshold.
   NOTE(review): the comparison divides by the CPU value; untouched cells
   are 0, giving 0/0 = NaN, and NaN > thresh is false, so such cells pass
   silently — confirm that is acceptable. */
int main(int argc, char *argv[]) {
printf("fdtd_sync: GPU using implicit CPU sync\n" );
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
// Physical/derived problem parameters (L = domain, hx = spacing, ht = dt).
floatT L = 1598.0;
floatT hx = 1.0;
floatT ht = hx/sqrt(2.0)/3;
floatT sigma = 200*ht;
fprintf(stdout, "fj output is %f\n", FJ(500, hx, ht, sigma));
int size = (int) L/hx+1;
int idx = (int) (0.625*L/hx)+1;
int idy = (int) (0.5*L/hx)+1;
fprintf(stdout, "size if %d, source is at idx=%d and idy=%d.\n", size, idx, idy);
floatT *h_E, *h_Hx, *h_Hy;
size_t num_E = size * size;
size_t num_H = (size - 1)*size;
size_t numbytes_E = num_E*sizeof(floatT);
size_t numbytes_H = num_H*sizeof(floatT);
fprintf(stdout, "total memory allocated is %lu\n", numbytes_E+2*numbytes_H);
clock_t t_begin, t_end;
t_begin = clock();
// Host fields start zeroed (calloc); seed E at the source cell.
h_E = (floatT *) calloc (num_E, sizeof(floatT));
h_Hx = (floatT *) calloc (num_H, sizeof(floatT));
h_Hy = (floatT *) calloc (num_H, sizeof(floatT));
h_E[INDX(idx, idy, size)] = - FJ(1, hx, ht, sigma);
// GPU memory allocation and initialization
floatT *d_E, *d_Hx, *d_Hy;
checkCUDA( cudaMalloc( (void **) &d_E, numbytes_E ) );
checkCUDA( cudaMalloc( (void **) &d_Hx, numbytes_H ) );
checkCUDA( cudaMalloc( (void **) &d_Hy, numbytes_H ) );
checkCUDA( cudaMemcpy(d_E, h_E, numbytes_E, cudaMemcpyHostToDevice) );
checkCUDA( cudaMemset(d_Hx, 0, numbytes_H) );
checkCUDA( cudaMemset(d_Hy, 0, numbytes_H) );
t_end = clock();
fprintf(stdout, "Memory allocation time is %f s\n", (float)(t_end - t_begin) / CLOCKS_PER_SEC);
// lock allocation (zeroed spin-lock words for the grid-wide barrier)
int *d_lockIn, *d_lockOut;
checkCUDA( cudaMalloc( (void **) &d_lockIn, N_BLOCKS*sizeof(int)) );
checkCUDA( cudaMalloc( (void **) &d_lockOut, N_BLOCKS*sizeof(int)) );
checkCUDA( cudaMemset(d_lockIn, 0, N_BLOCKS*sizeof(int)) );
checkCUDA( cudaMemset(d_lockOut, 0, N_BLOCKS*sizeof(int)) );
int k_beg = 2;
int k_end = 3;//1500;
t_begin = clock();
host_fdtd(size, hx, ht, sigma, idx, idy, k_beg, k_end, h_E, h_Hx, h_Hy);
/*
FILE *fp;
fp = fopen("./cpu_E.f","rb");
fread(h_E,sizeof(floatT),num_E,fp);
fclose(fp);
fprintf(stdout, "finish reading E.\n");
fp = fopen("./cpu_Hx.f","rb");
fread(h_Hx,sizeof(floatT),num_H,fp);
fclose(fp);
fprintf(stdout, "finish reading Hx.\n");
fp = fopen("./cpu_Hy.f","rb");
fread(h_Hy,sizeof(floatT),num_H,fp);
fclose(fp);
fprintf(stdout, "finish reading Hy.\n");
*/
t_end = clock();
fprintf(stdout, "CPU calculation time for %d iteration is %f s\n", k_end, (float)(t_end - t_begin) / CLOCKS_PER_SEC);
// GPU execution
dim3 threads( min(THREADS_PER_BLOCK,size), 1, 1);
dim3 blocks( N_BLOCKS, 1, 1);
fprintf(stdout, "block size is %d by %d.\n", blocks.x, blocks.y);
/* GPU timer */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
gpu_eh<<< blocks, threads >>>( size, hx, ht, sigma, idx, idy, k_beg, k_end, d_E, d_Hx, d_Hy, d_lockIn, d_lockOut);
checkKERNEL();
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float gpuTime;
checkCUDA( cudaEventElapsedTime( &gpuTime, start, stop ) );
printf("GPU naive calculation time %f ms\n", gpuTime );
floatT *out_E, *out_Hx, *out_Hy;
out_E = (floatT *) malloc (numbytes_E);
out_Hx = (floatT *) malloc (numbytes_H);
out_Hy = (floatT *) malloc (numbytes_H);
checkCUDA( cudaMemcpy( out_E, d_E, numbytes_E, cudaMemcpyDeviceToHost ) );
checkCUDA( cudaMemcpy( out_Hx, d_Hx, numbytes_H, cudaMemcpyDeviceToHost ) );
checkCUDA( cudaMemcpy( out_Hy, d_Hy, numbytes_H, cudaMemcpyDeviceToHost ) );
// Element-wise relative comparison of E, Hx, Hy against the CPU result.
int success = 1;
floatT diff, thresh=1e-6;
for( int i = 0; i < size; i++ ) {
for ( int j = 0; j<size; j++ ) {
diff = abs(1.0-out_E[INDX(i,j,size)]/h_E[INDX(i,j,size)]);
if ( diff>thresh ) {
printf("error in E element %d, %d: CPU %e vs GPU %e\n",i,j,h_E[INDX(i,j,size)],out_E[INDX(i,j,size)] );
success = 0;
break;
}
}
}
for( int i = 0; i < size; i++ ) {
for ( int j = 0; j<size-1; j++ ) {
diff = abs(1.0-out_Hx[INDX(i,j,size)]/h_Hx[INDX(i,j,size)]);
if ( diff>thresh ) {
printf("error in Hx element %d, %d: CPU %e vs GPU %e\n",i,j,h_Hx[INDX(i,j,size)],out_Hx[INDX(i,j,size)] );
success = 0;
break;
}
}
}
for( int i = 0; i < size-1; i++ ) {
for ( int j = 0; j<size; j++ ) {
diff = abs(1.0-out_Hy[INDX(i,j,size-1)]/h_Hy[INDX(i,j,size-1)]);
if ( diff>thresh) {
printf("error in Hy element %d, %d: CPU %e vs GPU %e\n",i,j,h_Hy[INDX(i,j,size-1)],out_Hy[INDX(i,j,size-1)] );
success = 0;
break;
}
}
}
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
free(h_E);
free(h_Hx);
free(h_Hy);
free(out_E);
free(out_Hx);
free(out_Hy);
checkCUDA( cudaFree( d_E ) );
checkCUDA( cudaFree( d_Hx ) );
checkCUDA( cudaFree( d_Hy ) );
checkCUDA( cudaDeviceSynchronize() );
return 0;
}
|
836da666ca210425510a9345208ac47375968cf4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../include/cuda/cuda_lib.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
/* Tiled matrix multiply C = A*B (row-major, a_row x a_col times
   b_row x b_col).  Each block computes one BLOCK_SIZE x BLOCK_SIZE tile of
   C; tiles of A and B are staged through shared memory and the per-thread
   dot product is accumulated with Kahan compensated summation.
   Out-of-range tile elements are loaded as 0, so ragged sizes are safe. */
__global__ void matrix_mult (float* A, float* B, float* C, int a_row, int a_col, int b_row, int b_col) {
// shared-memory tiles for one BLOCK_SIZE x BLOCK_SIZE piece of A and B
__shared__ float A_sub[BLOCK_SIZE * BLOCK_SIZE];
__shared__ float B_sub[BLOCK_SIZE * BLOCK_SIZE];
// block/thread coordinates within the grid
int block_id_row = blockIdx.x;
int block_id_col = blockIdx.y;
int thread_id_row = threadIdx.x;
int thread_id_col = threadIdx.y;
// (row, col) of the C element this thread produces
int c_row_id = block_id_row * BLOCK_SIZE + thread_id_row;
int c_col_id = block_id_col * BLOCK_SIZE + thread_id_col;
int sbmtx_begin = 0;
float c = 0.0;
float compensation = 0.0;
for (sbmtx_begin = 0; sbmtx_begin < a_col; sbmtx_begin += BLOCK_SIZE) {// walk the K dimension tile by tile
// each thread loads one element of the A tile and one of the B tile (zero-padded)
A_sub[thread_id_row * BLOCK_SIZE + thread_id_col] = (c_row_id < a_row && sbmtx_begin + thread_id_col < a_col) ? A[c_row_id * a_col + sbmtx_begin + thread_id_col] : 0;
B_sub[thread_id_row * BLOCK_SIZE + thread_id_col] = (c_col_id < b_col && sbmtx_begin + thread_id_row < b_row) ? B[(sbmtx_begin + thread_id_row) * b_col + c_col_id] : 0;
// wait until the whole block has finished loading the tiles
__syncthreads ();
// accumulate the partial dot product for this tile
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i) {
// c += A_sub[thread_id_row * BLOCK_SIZE + i] * B_sub[i * BLOCK_SIZE + thread_id_col];
// Kahan's Summation Formula
float y = A_sub[thread_id_row * BLOCK_SIZE + i] * B_sub[i * BLOCK_SIZE + thread_id_col] - compensation;
float t = c + y;// rounding happens here
compensation = (t - c) - y;// record the rounding error
c = t;
}
__syncthreads ();
}
if (c_row_id < a_row && c_col_id < b_col) {
C[c_row_id * b_col + c_col_id] = c;
}
}
/* Host wrapper: copies A and B to the device, launches the tiled
   matrix_mult kernel over a ceil-divided grid, and copies C back.
   NOTE(review): HIP return codes are not checked. */
void cuda_matrix_mult (float* A, float* B, float* C, int a_row, int a_col, int b_row, int b_col) {// A*B=C
int size_a = a_row * a_col;
int size_b = b_row * b_col;
int size_c = a_row * b_col;
// allocate device buffers
float* dev_A, *dev_B, *dev_C;
hipMalloc ((void**) &dev_A, sizeof (float) * size_a);
hipMalloc ((void**) &dev_B, sizeof (float) * size_b);
hipMalloc ((void**) &dev_C, sizeof (float) * size_c);
// copy inputs to the device
hipMemcpy (dev_A, A, sizeof (float) * size_a, hipMemcpyHostToDevice);
hipMemcpy (dev_B, B, sizeof (float) * size_b, hipMemcpyHostToDevice);
// partition C into grid_row * grid_col tiles of BLOCK_SIZE x BLOCK_SIZE (ceil division)
int grid_row = a_row / BLOCK_SIZE + (a_row % BLOCK_SIZE == 0 ? 0 : 1);
int grid_col = b_col / BLOCK_SIZE + (b_col % BLOCK_SIZE == 0 ? 0 : 1);
dim3 grid (grid_row, grid_col);
dim3 block (BLOCK_SIZE, BLOCK_SIZE);
// launch the kernel
hipLaunchKernelGGL(( matrix_mult) , dim3(grid), dim3(block), 0, 0, dev_A, dev_B, dev_C, a_row, a_col, b_row, b_col);
// copy the result back to the host
hipMemcpy (C, dev_C, sizeof (float) * size_c, hipMemcpyDeviceToHost);
// free device buffers
hipFree (dev_A);
hipFree (dev_B);
hipFree (dev_C);
}
/* Element-wise addition C[i] = A[i] + B[i] over `size` floats.  A stride
   loop covers the whole range regardless of the launch shape, keeping
   global-memory accesses contiguous within each pass.
   NOTE(review): assumes the launch uses blockDim.x == BLOCK_SIZE and
   gridDim.x == GRID_SIZE — confirm against the host wrapper. */
__global__ void tensor_add (float* A, float* B, float* C, int size) {
    const int stride = GRID_SIZE * BLOCK_SIZE;
    for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < size; idx += stride) {
        C[idx] = A[idx] + B[idx];
    }
}
/* Host wrapper for tensor_add: copies A and B to the device, launches the
   stride-loop kernel, and copies C back.
   NOTE(review): HIP return codes are not checked. */
void cuda_tensor_add (float* A, float* B, float* C, int size) {
float* dev_A, *dev_B, *dev_C;
hipMalloc ((void**) &dev_A, sizeof (float) * size);
hipMalloc ((void**) &dev_B, sizeof (float) * size);
hipMalloc ((void**) &dev_C, sizeof (float) * size);
hipMemcpy (dev_A, A, sizeof (float) * size, hipMemcpyHostToDevice);
hipMemcpy (dev_B, B, sizeof (float) * size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( tensor_add) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_A, dev_B, dev_C, size);
hipMemcpy (C, dev_C, sizeof (float) * size, hipMemcpyDeviceToHost);
// free device buffers
hipFree (dev_A);
hipFree (dev_B);
hipFree (dev_C);
}
/* Scale every element: result[i] = A[i] * s, via a grid-wide stride loop.
   NOTE(review): assumes the launch uses blockDim.x == BLOCK_SIZE and
   gridDim.x == GRID_SIZE — confirm against the host wrapper. */
__global__ void scalar_tensor_mult (float* A, float* result, float s, int size) {
    const int stride = GRID_SIZE * BLOCK_SIZE;
    for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < size; idx += stride) {
        result[idx] = A[idx] * s;
    }
}
/* Host wrapper for scalar_tensor_mult: copies A to the device, launches
   the scaling kernel, and copies the scaled result back.
   NOTE(review): HIP return codes are not checked. */
void cuda_scalar_tensor_mult (float* A, float* result, float s, int size) {
float* dev_A, *dev_result;
hipMalloc ((void**) &dev_A, sizeof (float) * size);
hipMalloc ((void**) &dev_result, sizeof (float) * size);
hipMemcpy (dev_A, A, sizeof (float) * size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( scalar_tensor_mult) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_A, dev_result, s, size);
hipMemcpy (result, dev_result, sizeof (float) * size, hipMemcpyDeviceToHost);
// free device buffers
hipFree (dev_A);
hipFree (dev_result);
}
/* Partial sum of |A[i]|: each thread accumulates a stride-loop partial,
   then the block reduces its BLOCK_SIZE partials with a modulo-indexed
   tree and thread 0 publishes one value per block into results[].
   The host finishes the reduction over the GRID_SIZE block results. */
__global__ void element_abs_sum (float* A, int size, float* results) {
__shared__ float sub_results[BLOCK_SIZE];
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int begin_idx = block_id * BLOCK_SIZE + thread_id;
int read_offset = GRID_SIZE * BLOCK_SIZE;
if (begin_idx >= size) {
sub_results[thread_id] = 0;
} else {
float r = 0;
for (int i = begin_idx; i < size; i += read_offset) {
r += fabs(A[i]);
}
sub_results[thread_id] = r;
}
// tree-reduce the per-thread partials within the block
__syncthreads ();
int merge_offset = 1;
int mask = 2;
// All threads iterate the same number of times, so the __syncthreads()
// inside the loop is reached uniformly; the bounds check keeps the last
// (merge_offset == BLOCK_SIZE) pass a no-op.
while (merge_offset <= BLOCK_SIZE) {
if (thread_id % mask == 0 && thread_id + merge_offset < BLOCK_SIZE) {
sub_results[thread_id] += sub_results[thread_id + merge_offset];
}
merge_offset = merge_offset * 2;
mask = mask * 2;
__syncthreads ();
}
if (thread_id == 0) {
results[block_id] = sub_results[0];
}
}
/* Host wrapper: launches element_abs_sum to get one partial per block,
   then sums the GRID_SIZE partials on the CPU and returns the total.
   NOTE(review): HIP return codes are not checked. */
float cuda_element_abs_sum (float* A, int size) {
float* results = (float*) malloc (sizeof (float) * GRID_SIZE);
float* dev_A;
float* dev_results;
hipMalloc ((void**) &dev_A, sizeof (float) * size);
hipMalloc ((void**) &dev_results, sizeof (float) * GRID_SIZE);
hipMemcpy (dev_A, A, sizeof (float) * size, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( element_abs_sum) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_A, size, dev_results);
hipMemcpy (results, dev_results, sizeof (float) * GRID_SIZE, hipMemcpyDeviceToHost);
hipFree (dev_results);
hipFree (dev_A);
float abs_sum = 0;
// final reduction of per-block partials on the CPU
for (int i = 0; i < GRID_SIZE; ++i) {
abs_sum += results[i];
}
free (results);
return abs_sum;
}
/* Partial sum of A[i]^2: same structure as element_abs_sum — stride-loop
   per-thread partials, modulo-indexed tree reduction in shared memory,
   one result per block published by thread 0. */
__global__ void element_square_sum (float* A, int size, float* results) {
__shared__ float sub_results[BLOCK_SIZE];
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int begin_idx = block_id * BLOCK_SIZE + thread_id;
int read_offset = GRID_SIZE * BLOCK_SIZE;
if (begin_idx >= size) {
sub_results[thread_id] = 0;
} else {
float r = 0;
for (int i = begin_idx; i < size; i += read_offset) {
r += A[i] * A[i];
}
sub_results[thread_id] = r;
}
// tree-reduce the per-thread partials within the block
__syncthreads ();
int merge_offset = 1;
int mask = 2;
while (merge_offset <= BLOCK_SIZE) {
if (thread_id % mask == 0 && thread_id + merge_offset < BLOCK_SIZE) {
sub_results[thread_id] += sub_results[thread_id + merge_offset];
}
merge_offset = merge_offset * 2;
mask = mask * 2;
__syncthreads ();
}
if (thread_id == 0) {
results[block_id] = sub_results[0];
}
}
/* Host wrapper: launches element_square_sum for per-block partials, then
   sums the GRID_SIZE partials on the CPU and returns the total.
   NOTE(review): HIP return codes are not checked. */
float cuda_element_square_sum (float* A, int size) {
float* results = (float*) malloc (sizeof (float) * GRID_SIZE);
float* dev_A;
float* dev_results;
hipMalloc ((void**) &dev_A, sizeof (float) * size);
hipMalloc ((void**) &dev_results, sizeof (float) * GRID_SIZE);
hipMemcpy (dev_A, A, sizeof (float) * size, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( element_square_sum) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_A, size, dev_results);
hipMemcpy (results, dev_results, sizeof (float) * GRID_SIZE, hipMemcpyDeviceToHost);
hipFree (dev_results);
hipFree (dev_A);
float square_sum = 0;
// final reduction of per-block partials on the CPU
for (int i = 0; i < GRID_SIZE; ++i) {
square_sum += results[i];
}
free (results);
return square_sum;
}
/* Square each element of A in place using a grid-wide stride loop.
   NOTE(review): assumes the launch uses blockDim.x == BLOCK_SIZE and
   gridDim.x == GRID_SIZE — confirm against the host wrapper. */
__global__ void element_square (float* A, int size) {
    const int stride = GRID_SIZE * BLOCK_SIZE;
    for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < size; idx += stride) {
        float v = A[idx];
        A[idx] = v * v;
    }
}
/* Host wrapper: copies A to the device, squares it in place with the
   element_square kernel, and copies the result back into A.
   NOTE(review): HIP return codes are not checked. */
void cuda_element_square (float* A, int size) {
float* dev_A;
hipMalloc ((void**) &dev_A, sizeof (float) * size);
hipMemcpy (dev_A, A, sizeof (float) * size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( element_square) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_A, size);
hipMemcpy (A, dev_A, sizeof (float) * size, hipMemcpyDeviceToHost);
// free device buffers
hipFree (dev_A);
}
/* Hadamard (element-wise) product C[i] = A[i] * B[i] over `size` floats,
   using a grid-wide stride loop.
   NOTE(review): assumes the launch uses blockDim.x == BLOCK_SIZE and
   gridDim.x == GRID_SIZE — confirm against the host wrapper. */
__global__ void element_mult (float* A, float* B, float* C, int size) {
    const int stride = GRID_SIZE * BLOCK_SIZE;
    for (int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; idx < size; idx += stride) {
        C[idx] = A[idx] * B[idx];
    }
}
/* Host wrapper for element_mult: copies A and B to the device, launches
   the element-wise product kernel, and copies C back.
   NOTE(review): HIP return codes are not checked. */
void cuda_element_mult (float* A, float* B, float* C, int size) {
float* dev_A, *dev_B, *dev_C;
hipMalloc ((void**) &dev_A, sizeof (float) * size);
hipMalloc ((void**) &dev_B, sizeof (float) * size);
hipMalloc ((void**) &dev_C, sizeof (float) * size);
hipMemcpy (dev_A, A, sizeof (float) * size, hipMemcpyHostToDevice);
hipMemcpy (dev_B, B, sizeof (float) * size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( element_mult) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, dev_A, dev_B, dev_C, size);
hipMemcpy (C, dev_C, sizeof (float) * size, hipMemcpyDeviceToHost);
// free device buffers
hipFree (dev_A);
hipFree (dev_B);
hipFree (dev_C);
}
| 836da666ca210425510a9345208ac47375968cf4.cu | #include "../../include/cuda/cuda_lib.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
/* Tiled matrix multiply C = A*B (row-major).  Each block computes one
   BLOCK_SIZE x BLOCK_SIZE tile of C; tiles of A and B are staged through
   shared memory and the per-thread dot product is accumulated with Kahan
   compensated summation.  Out-of-range tile loads are zero-padded. */
__global__ void matrix_mult (float* A, float* B, float* C, int a_row, int a_col, int b_row, int b_col) {
// shared memory caches one BLOCK_SIZE x BLOCK_SIZE sub-matrix of A and of B
__shared__ float A_sub[BLOCK_SIZE * BLOCK_SIZE];
__shared__ float B_sub[BLOCK_SIZE * BLOCK_SIZE];
// block and thread ids of the current thread
int block_id_row = blockIdx.x;
int block_id_col = blockIdx.y;
int thread_id_row = threadIdx.x;
int thread_id_col = threadIdx.y;
// row in A / column in B handled by this thread, i.e. the (row, col) of C
int c_row_id = block_id_row * BLOCK_SIZE + thread_id_row;
int c_col_id = block_id_col * BLOCK_SIZE + thread_id_col;
int sbmtx_begin = 0;
float c = 0.0;
float compensation = 0.0;
for (sbmtx_begin = 0; sbmtx_begin < a_col; sbmtx_begin += BLOCK_SIZE) {// walk every pair of A,B sub-tiles along row c_row_id / column c_col_id
// each thread loads one element of the A tile and one of the B tile (zero-padded out of range)
A_sub[thread_id_row * BLOCK_SIZE + thread_id_col] = (c_row_id < a_row && sbmtx_begin + thread_id_col < a_col) ? A[c_row_id * a_col + sbmtx_begin + thread_id_col] : 0;
B_sub[thread_id_row * BLOCK_SIZE + thread_id_col] = (c_col_id < b_col && sbmtx_begin + thread_id_row < b_row) ? B[(sbmtx_begin + thread_id_row) * b_col + c_col_id] : 0;
// wait for all threads in the block to finish loading
__syncthreads ();
// accumulate the partial dot product of row c_row_id of A and column c_col_id of B for this tile
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i) {
// c += A_sub[thread_id_row * BLOCK_SIZE + i] * B_sub[i * BLOCK_SIZE + thread_id_col];
// Kahan's Summation Formula
float y = A_sub[thread_id_row * BLOCK_SIZE + i] * B_sub[i * BLOCK_SIZE + thread_id_col] - compensation;
float t = c + y;// rounding happens here
compensation = (t - c) - y;// record the rounding error
c = t;
}
__syncthreads ();
}
if (c_row_id < a_row && c_col_id < b_col) {
C[c_row_id * b_col + c_col_id] = c;
}
}
/* Host wrapper: copies A and B to the device, launches the tiled
   matrix_mult kernel over a ceil-divided grid, and copies C back.
   NOTE(review): CUDA return codes are not checked. */
void cuda_matrix_mult (float* A, float* B, float* C, int a_row, int a_col, int b_row, int b_col) {// A*B=C
int size_a = a_row * a_col;
int size_b = b_row * b_col;
int size_c = a_row * b_col;
// allocate device memory
float* dev_A, *dev_B, *dev_C;
cudaMalloc ((void**) &dev_A, sizeof (float) * size_a);
cudaMalloc ((void**) &dev_B, sizeof (float) * size_b);
cudaMalloc ((void**) &dev_C, sizeof (float) * size_c);
// copy inputs to the device
cudaMemcpy (dev_A, A, sizeof (float) * size_a, cudaMemcpyHostToDevice);
cudaMemcpy (dev_B, B, sizeof (float) * size_b, cudaMemcpyHostToDevice);
// partition C into grid_row * grid_col tiles of BLOCK_SIZE x BLOCK_SIZE; C corresponds to one grid
int grid_row = a_row / BLOCK_SIZE + (a_row % BLOCK_SIZE == 0 ? 0 : 1);
int grid_col = b_col / BLOCK_SIZE + (b_col % BLOCK_SIZE == 0 ? 0 : 1);
dim3 grid (grid_row, grid_col);
dim3 block (BLOCK_SIZE, BLOCK_SIZE);
// launch the kernel
matrix_mult <<<grid, block>>> (dev_A, dev_B, dev_C, a_row, a_col, b_row, b_col);
// copy the result back to the host
cudaMemcpy (C, dev_C, sizeof (float) * size_c, cudaMemcpyDeviceToHost);
// free device memory
cudaFree (dev_A);
cudaFree (dev_B);
cudaFree (dev_C);
}
/* Element-wise addition C[i] = A[i] + B[i] over `size` floats. */
__global__ void tensor_add (float* A, float* B, float* C, int size) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int begin_idx = block_id * BLOCK_SIZE + thread_id;
int read_offset = GRID_SIZE * BLOCK_SIZE;
for (int i = begin_idx; i < size; i += read_offset) {// stride loop keeps global-memory reads as contiguous as possible
C[i] = A[i] + B[i];
}
}
/* Host wrapper for tensor_add: copies A and B to the device, launches the
   stride-loop kernel, and copies C back.
   NOTE(review): CUDA return codes are not checked. */
void cuda_tensor_add (float* A, float* B, float* C, int size) {
float* dev_A, *dev_B, *dev_C;
cudaMalloc ((void**) &dev_A, sizeof (float) * size);
cudaMalloc ((void**) &dev_B, sizeof (float) * size);
cudaMalloc ((void**) &dev_C, sizeof (float) * size);
cudaMemcpy (dev_A, A, sizeof (float) * size, cudaMemcpyHostToDevice);
cudaMemcpy (dev_B, B, sizeof (float) * size, cudaMemcpyHostToDevice);
tensor_add <<<GRID_SIZE, BLOCK_SIZE>>> (dev_A, dev_B, dev_C, size);
cudaMemcpy (C, dev_C, sizeof (float) * size, cudaMemcpyDeviceToHost);
// free device memory
cudaFree (dev_A);
cudaFree (dev_B);
cudaFree (dev_C);
}
/* Scale every element: result[i] = A[i] * s. */
__global__ void scalar_tensor_mult (float* A, float* result, float s, int size) {
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int begin_idx = block_id * BLOCK_SIZE + thread_id;
int read_offset = GRID_SIZE * BLOCK_SIZE;
for (int i = begin_idx; i < size; i += read_offset) {// stride loop keeps global-memory reads as contiguous as possible
result[i] = A[i] * s;
}
}
/* Host wrapper for scalar_tensor_mult: copies A to the device, launches
   the scaling kernel, and copies the scaled result back.
   NOTE(review): CUDA return codes are not checked. */
void cuda_scalar_tensor_mult (float* A, float* result, float s, int size) {
float* dev_A, *dev_result;
cudaMalloc ((void**) &dev_A, sizeof (float) * size);
cudaMalloc ((void**) &dev_result, sizeof (float) * size);
cudaMemcpy (dev_A, A, sizeof (float) * size, cudaMemcpyHostToDevice);
scalar_tensor_mult <<<GRID_SIZE, BLOCK_SIZE>>> (dev_A, dev_result, s, size);
cudaMemcpy (result, dev_result, sizeof (float) * size, cudaMemcpyDeviceToHost);
// free device memory
cudaFree (dev_A);
cudaFree (dev_result);
}
/* Partial sum of |A[i]|: each thread accumulates a stride-loop partial,
   the block tree-reduces its BLOCK_SIZE partials in shared memory, and
   thread 0 publishes one value per block into results[]. */
__global__ void element_abs_sum (float* A, int size, float* results) {
__shared__ float sub_results[BLOCK_SIZE];
int thread_id = threadIdx.x;
int block_id = blockIdx.x;
int begin_idx = block_id * BLOCK_SIZE + thread_id;
int read_offset = GRID_SIZE * BLOCK_SIZE;
if (begin_idx >= size) {
sub_results[thread_id] = 0;
} else {
float r = 0;
for (int i = begin_idx; i < size; i += read_offset) {
r += fabs(A[i]);
}
sub_results[thread_id] = r;
}
// reduce the per-thread partials within the block, then publish to global results
__syncthreads ();
int merge_offset = 1;
int mask = 2;
while (merge_offset <= BLOCK_SIZE) {
if (thread_id % mask == 0 && thread_id + merge_offset < BLOCK_SIZE) {
sub_results[thread_id] += sub_results[thread_id + merge_offset];
}
merge_offset = merge_offset * 2;
mask = mask * 2;
__syncthreads ();
}
if (thread_id == 0) {
results[block_id] = sub_results[0];
}
}
// Host wrapper: returns sum(|A[i]|) over the whole array.
// The kernel emits one partial sum per block; the final accumulation over
// the GRID_SIZE partials happens on the CPU.
float cuda_element_abs_sum (float* A, int size) {
    const size_t in_bytes = sizeof(float) * size;
    const size_t out_bytes = sizeof(float) * GRID_SIZE;
    float* partials = (float*) malloc(out_bytes);
    float* d_in = nullptr;
    float* d_out = nullptr;
    cudaMalloc((void**) &d_in, in_bytes);
    cudaMalloc((void**) &d_out, out_bytes);
    cudaMemcpy(d_in, A, in_bytes, cudaMemcpyHostToDevice);
    element_abs_sum<<<GRID_SIZE, BLOCK_SIZE>>>(d_in, size, d_out);
    // Blocking copy synchronizes with the kernel before we read partials.
    cudaMemcpy(partials, d_out, out_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    cudaFree(d_in);
    float abs_sum = 0;
    for (int i = 0; i < GRID_SIZE; ++i) {
        abs_sum += partials[i];
    }
    free(partials);
    return abs_sum;
}
// Kernel: per-block partial sums of A[i]^2.
// Structure mirrors element_abs_sum: grid-strided per-thread accumulation,
// then a shared-memory tree reduction; thread 0 writes the block's partial
// sum to results[blockIdx.x].  Assumes BLOCK_SIZE is a power of two.
__global__ void element_square_sum (float* A, int size, float* results) {
    __shared__ float sub_results[BLOCK_SIZE];
    int thread_id = threadIdx.x;
    int block_id = blockIdx.x;
    int begin_idx = block_id * BLOCK_SIZE + thread_id;
    int read_offset = GRID_SIZE * BLOCK_SIZE;
    if (begin_idx >= size) {
        // Out-of-range threads contribute zero but still join the reduction.
        sub_results[thread_id] = 0;
    } else {
        float r = 0;
        for (int i = begin_idx; i < size; i += read_offset) {
            r += A[i] * A[i];
        }
        sub_results[thread_id] = r;
    }
    // Interleaved tree reduction within the block.
    __syncthreads ();
    int merge_offset = 1;
    int mask = 2;
    // Fix: stop once merge_offset reaches BLOCK_SIZE. With the old `<=`
    // bound the final pass could never satisfy
    // (thread_id + merge_offset < BLOCK_SIZE), so it only cost a wasted
    // iteration and an extra __syncthreads().
    while (merge_offset < BLOCK_SIZE) {
        if (thread_id % mask == 0 && thread_id + merge_offset < BLOCK_SIZE) {
            sub_results[thread_id] += sub_results[thread_id + merge_offset];
        }
        merge_offset = merge_offset * 2;
        mask = mask * 2;
        __syncthreads ();
    }
    if (thread_id == 0) {
        results[block_id] = sub_results[0];
    }
}
// Host wrapper: returns sum(A[i]^2) over the whole array.
// One partial sum per block comes back from the kernel; the final
// accumulation over GRID_SIZE partials is done on the CPU.
float cuda_element_square_sum (float* A, int size) {
    const size_t in_bytes = sizeof(float) * size;
    const size_t out_bytes = sizeof(float) * GRID_SIZE;
    float* partials = (float*) malloc(out_bytes);
    float* d_in = nullptr;
    float* d_out = nullptr;
    cudaMalloc((void**) &d_in, in_bytes);
    cudaMalloc((void**) &d_out, out_bytes);
    cudaMemcpy(d_in, A, in_bytes, cudaMemcpyHostToDevice);
    element_square_sum<<<GRID_SIZE, BLOCK_SIZE>>>(d_in, size, d_out);
    // Blocking copy synchronizes with the kernel before we read partials.
    cudaMemcpy(partials, d_out, out_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    cudaFree(d_in);
    float square_sum = 0;
    for (int i = 0; i < GRID_SIZE; ++i) {
        square_sum += partials[i];
    }
    free(partials);
    return square_sum;
}
// Kernel: squares A in place (A[i] = A[i] * A[i]) via a grid-stride loop.
__global__ void element_square (float* A, int size) {
    const int stride = GRID_SIZE * BLOCK_SIZE;
    for (int i = blockIdx.x * BLOCK_SIZE + threadIdx.x; i < size; i += stride) {
        const float v = A[i];
        A[i] = v * v;
    }
}
// Host wrapper: squares A in place on the GPU and copies the result back
// into the caller's buffer.
void cuda_element_square (float* A, int size) {
    const size_t bytes = sizeof(float) * size;
    float* d_a = nullptr;
    cudaMalloc((void**) &d_a, bytes);
    cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
    element_square<<<GRID_SIZE, BLOCK_SIZE>>>(d_a, size);
    // Blocking D2H copy doubles as kernel synchronization.
    cudaMemcpy(A, d_a, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
}
// Kernel: element-wise product C[i] = A[i] * B[i] via a grid-stride loop.
__global__ void element_mult (float* A, float* B, float* C, int size) {
    const int stride = GRID_SIZE * BLOCK_SIZE;
    for (int i = blockIdx.x * BLOCK_SIZE + threadIdx.x; i < size; i += stride) {
        C[i] = A[i] * B[i];
    }
}
// Host wrapper: C = A .* B (element-wise product) computed on the GPU.
void cuda_element_mult (float* A, float* B, float* C, int size) {
    const size_t bytes = sizeof(float) * size;
    float* d_a = nullptr;
    float* d_b = nullptr;
    float* d_c = nullptr;
    cudaMalloc((void**) &d_a, bytes);
    cudaMalloc((void**) &d_b, bytes);
    cudaMalloc((void**) &d_c, bytes);
    cudaMemcpy(d_a, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, bytes, cudaMemcpyHostToDevice);
    element_mult<<<GRID_SIZE, BLOCK_SIZE>>>(d_a, d_b, d_c, size);
    // Blocking D2H copy doubles as kernel synchronization.
    cudaMemcpy(C, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
|
f05d7e4423aae6a27e0b3f0475e6c4cd23481ad8.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/cudaarithm.hpp>
#include "stereo.hpp"
#include "util_opencv.hpp"
#include "costs/census.hpp"
#include "dsi.hpp"
#include "wta.hpp"
#include "cost_aggregation.hpp"
#include "aggregations/standard_sgm.hpp"
#include "median_filter.hpp"
#include "dsi_tools.hpp"
#ifdef __GNUG__
#include <chrono>
#include <iostream>
// Reference timestamp for the simple elapsed-time logger below.
// Fix: the original declared `start` as time_point<system_clock> but
// assigned high_resolution_clock::now(), which only compiles where
// high_resolution_clock happens to alias system_clock (libstdc++); on
// libc++ it aliases steady_clock and the assignment fails.  steady_clock
// is used throughout: types always match and the clock is monotonic,
// so measured intervals cannot go negative on wall-clock adjustments.
static std::chrono::steady_clock::time_point start;
// Resets the timer reference point.
static void timer_set() {
	start = std::chrono::steady_clock::now();
}
// Prints "<elapsed> ms <msg>" and, by default, restarts the timer.
static void timer_print(const std::string &msg, const bool reset=true) {
	auto stop = std::chrono::steady_clock::now();
	char buf[24];
	snprintf(buf, sizeof(buf), "%5i ms ",
		(int) std::chrono::duration_cast<std::chrono::milliseconds>(stop-start).count());
	std::cout << buf << msg << "\n" << std::flush;
	if (reset) { timer_set(); }
}
#else
// No-op timer stubs for non-GNU builds (timing is only enabled under __GNUG__).
static void timer_set() {}
static void timer_print(const std::string &msg, const bool reset=true) {}
#endif
using cv::Mat;
using cv::Size;
using ftl::stereo::aggregations::StandardSGM;
typedef WeightedCensusMatchingCost MatchingCost;
// Per-resolution working state for StereoWCensusSgm.  Rebuilt by compute()
// whenever the input image size changes.
struct StereoWCensusSgm::Impl {
	MatchingCost cost;                          // weighted census matching cost volume
	Array2D<MatchingCost::Type> cost_min_paths; // minimum aggregated path cost per pixel
	Array2D<MatchingCost::Type> uncertainty;    // scratch buffer for the uniqueness test
	Array2D<uchar> l;                           // left input image (grayscale)
	Array2D<uchar> r;                           // right input image (grayscale)
	PathAggregator<StandardSGM<MatchingCost::DataType>> aggr;          // SGM path aggregation
	WinnerTakesAll<DisparitySpaceImage<MatchingCost::Type>,float> wta; // disparity selection
	// Sizes every buffer for width x height and disparity range min_disp..max_disp.
	Impl(int width, int height, int min_disp, int max_disp) :
		cost(width, height, min_disp, max_disp),
		cost_min_paths(width, height),
		uncertainty(width, height),
		l(width, height), r(width, height)
		{}
};
// Starts with a dummy 0x0 implementation; compute() replaces it with one
// sized to the first pair of input images.
StereoWCensusSgm::StereoWCensusSgm() : impl_(new Impl(0, 0, 0, 0)) {}
// Runs the full weighted-census SGM stereo pipeline:
// census transform -> SGM path aggregation -> winner-takes-all ->
// uniqueness filtering -> median filter, writing the result to `disparity`.
// Internal buffers are reallocated whenever the input size changes.
void StereoWCensusSgm::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) {
	hipSetDevice(0);
	// Rebuild Impl on size change.
	// NOTE(review): this mixes l.rows() with r.cols(); presumably both images
	// share one size -- confirm, otherwise the check is incomplete.
	if (l.rows() != impl_->cost.height() || r.cols() != impl_->cost.width()) {
		delete impl_; impl_ = nullptr;
		impl_ = new Impl(l.cols(), l.rows(), params.d_min, params.d_max);
	}
	mat2gray(l, impl_->l);
	mat2gray(r, impl_->r);
	timer_set();
	// Census transform: build the matching cost volume.
	impl_->cost.set(impl_->l, impl_->r);
	cudaSafeCall(hipDeviceSynchronize());
	if (params.debug) { timer_print("census transform"); }
	// SGM cost aggregation along params.paths directions with penalties P1/P2.
	StandardSGM<MatchingCost::DataType> func = {impl_->cost.data(), impl_->cost_min_paths.data(), params.P1, params.P2};
	auto &out = impl_->aggr(func, params.paths);
	cudaSafeCall(hipDeviceSynchronize());
	if (params.debug) { timer_print("Aggregation"); }
	// Winner-takes-all disparity selection (optionally subpixel + LR check).
	impl_->wta(out, params.subpixel, params.lr_consistency);
	cudaSafeCall(hipDeviceSynchronize());
	if (params.debug) { timer_print("WTA"); }
	// Uniqueness/uncertainty filtering; zero out unreliable disparities.  Cf.:
	// Drory, A., Haubold, C., Avidan, S., & Hamprecht, F. A. (2014).
	// Semi-global matching: A principled derivation in terms of
	// message passing. Lecture Notes in Computer Science (Including Subseries
	// Lecture Notes in Artificial Intelligence and Lecture Notes in
	// Bioinformatics). https://doi.org/10.1007/978-3-319-11752-2_4
	#if USE_GPU
	auto uncertainty = impl_->uncertainty.toGpuMat();
	cv::cuda::subtract(impl_->wta.min_cost.toGpuMat(), impl_->cost_min_paths.toGpuMat(), uncertainty);
	cv::cuda::compare(uncertainty, params.uniqueness, uncertainty, cv::CMP_GT);
	impl_->wta.disparity.toGpuMat().setTo(0, uncertainty);
	#else
	auto uncertainty = impl_->uncertainty.toMat();
	cv::subtract(impl_->wta.min_cost.toMat(), impl_->cost_min_paths.toMat(), uncertainty);
	cv::compare(uncertainty, params.uniqueness, uncertainty, cv::CMP_GT);
	impl_->wta.disparity.toMat().setTo(0, uncertainty);
	#endif
	// Post-filter the disparity map and emit it.
	median_filter(impl_->wta.disparity, disparity);
	if (params.debug) { timer_print("median filter"); }
	// Debug visualisation of a DSI slice along the chosen disparities.
	Array2D<MatchingCost::Type> dsitmp_dev(l.cols(), l.rows());
	dsi_slice(out, impl_->wta.disparity, dsitmp_dev);
	show_dsi_slice(dsitmp_dev.toGpuMat());
}
// Releases the implementation (deleting a null pointer is a no-op).
StereoWCensusSgm::~StereoWCensusSgm() {
	delete impl_;
	impl_ = nullptr;
}
| f05d7e4423aae6a27e0b3f0475e6c4cd23481ad8.cu | #include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/cudaarithm.hpp>
#include "stereo.hpp"
#include "util_opencv.hpp"
#include "costs/census.hpp"
#include "dsi.hpp"
#include "wta.hpp"
#include "cost_aggregation.hpp"
#include "aggregations/standard_sgm.hpp"
#include "median_filter.hpp"
#include "dsi_tools.hpp"
#ifdef __GNUG__
#include <chrono>
#include <iostream>
// Reference timestamp for the simple elapsed-time logger below.
// Fix: the original declared `start` as time_point<system_clock> but
// assigned high_resolution_clock::now(), which only compiles where
// high_resolution_clock happens to alias system_clock (libstdc++); on
// libc++ it aliases steady_clock and the assignment fails.  steady_clock
// is used throughout: types always match and the clock is monotonic,
// so measured intervals cannot go negative on wall-clock adjustments.
static std::chrono::steady_clock::time_point start;
// Resets the timer reference point.
static void timer_set() {
	start = std::chrono::steady_clock::now();
}
// Prints "<elapsed> ms <msg>" and, by default, restarts the timer.
static void timer_print(const std::string &msg, const bool reset=true) {
	auto stop = std::chrono::steady_clock::now();
	char buf[24];
	snprintf(buf, sizeof(buf), "%5i ms ",
		(int) std::chrono::duration_cast<std::chrono::milliseconds>(stop-start).count());
	std::cout << buf << msg << "\n" << std::flush;
	if (reset) { timer_set(); }
}
#else
// No-op timer stubs for non-GNU builds (timing is only enabled under __GNUG__).
static void timer_set() {}
static void timer_print(const std::string &msg, const bool reset=true) {}
#endif
using cv::Mat;
using cv::Size;
using ftl::stereo::aggregations::StandardSGM;
typedef WeightedCensusMatchingCost MatchingCost;
// Per-resolution working state for StereoWCensusSgm.  Rebuilt by compute()
// whenever the input image size changes.
struct StereoWCensusSgm::Impl {
	MatchingCost cost;                          // weighted census matching cost volume
	Array2D<MatchingCost::Type> cost_min_paths; // minimum aggregated path cost per pixel
	Array2D<MatchingCost::Type> uncertainty;    // scratch buffer for the uniqueness test
	Array2D<uchar> l;                           // left input image (grayscale)
	Array2D<uchar> r;                           // right input image (grayscale)
	PathAggregator<StandardSGM<MatchingCost::DataType>> aggr;          // SGM path aggregation
	WinnerTakesAll<DisparitySpaceImage<MatchingCost::Type>,float> wta; // disparity selection
	// Sizes every buffer for width x height and disparity range min_disp..max_disp.
	Impl(int width, int height, int min_disp, int max_disp) :
		cost(width, height, min_disp, max_disp),
		cost_min_paths(width, height),
		uncertainty(width, height),
		l(width, height), r(width, height)
		{}
};
// Starts with a dummy 0x0 implementation; compute() replaces it with one
// sized to the first pair of input images.
StereoWCensusSgm::StereoWCensusSgm() : impl_(new Impl(0, 0, 0, 0)) {}
// Runs the full weighted-census SGM stereo pipeline:
// census transform -> SGM path aggregation -> winner-takes-all ->
// uniqueness filtering -> median filter, writing the result to `disparity`.
// Internal buffers are reallocated whenever the input size changes.
void StereoWCensusSgm::compute(cv::InputArray l, cv::InputArray r, cv::OutputArray disparity) {
	cudaSetDevice(0);
	// Rebuild Impl on size change.
	// NOTE(review): this mixes l.rows() with r.cols(); presumably both images
	// share one size -- confirm, otherwise the check is incomplete.
	if (l.rows() != impl_->cost.height() || r.cols() != impl_->cost.width()) {
		delete impl_; impl_ = nullptr;
		impl_ = new Impl(l.cols(), l.rows(), params.d_min, params.d_max);
	}
	mat2gray(l, impl_->l);
	mat2gray(r, impl_->r);
	timer_set();
	// Census transform: build the matching cost volume.
	impl_->cost.set(impl_->l, impl_->r);
	cudaSafeCall(cudaDeviceSynchronize());
	if (params.debug) { timer_print("census transform"); }
	// SGM cost aggregation along params.paths directions with penalties P1/P2.
	StandardSGM<MatchingCost::DataType> func = {impl_->cost.data(), impl_->cost_min_paths.data(), params.P1, params.P2};
	auto &out = impl_->aggr(func, params.paths);
	cudaSafeCall(cudaDeviceSynchronize());
	if (params.debug) { timer_print("Aggregation"); }
	// Winner-takes-all disparity selection (optionally subpixel + LR check).
	impl_->wta(out, params.subpixel, params.lr_consistency);
	cudaSafeCall(cudaDeviceSynchronize());
	if (params.debug) { timer_print("WTA"); }
	// Uniqueness/uncertainty filtering; zero out unreliable disparities.  Cf.:
	// Drory, A., Haubold, C., Avidan, S., & Hamprecht, F. A. (2014).
	// Semi-global matching: A principled derivation in terms of
	// message passing. Lecture Notes in Computer Science (Including Subseries
	// Lecture Notes in Artificial Intelligence and Lecture Notes in
	// Bioinformatics). https://doi.org/10.1007/978-3-319-11752-2_4
	#if USE_GPU
	auto uncertainty = impl_->uncertainty.toGpuMat();
	cv::cuda::subtract(impl_->wta.min_cost.toGpuMat(), impl_->cost_min_paths.toGpuMat(), uncertainty);
	cv::cuda::compare(uncertainty, params.uniqueness, uncertainty, cv::CMP_GT);
	impl_->wta.disparity.toGpuMat().setTo(0, uncertainty);
	#else
	auto uncertainty = impl_->uncertainty.toMat();
	cv::subtract(impl_->wta.min_cost.toMat(), impl_->cost_min_paths.toMat(), uncertainty);
	cv::compare(uncertainty, params.uniqueness, uncertainty, cv::CMP_GT);
	impl_->wta.disparity.toMat().setTo(0, uncertainty);
	#endif
	// Post-filter the disparity map and emit it.
	median_filter(impl_->wta.disparity, disparity);
	if (params.debug) { timer_print("median filter"); }
	// Debug visualisation of a DSI slice along the chosen disparities.
	Array2D<MatchingCost::Type> dsitmp_dev(l.cols(), l.rows());
	dsi_slice(out, impl_->wta.disparity, dsitmp_dev);
	show_dsi_slice(dsitmp_dev.toGpuMat());
}
// Releases the implementation (deleting a null pointer is a no-op).
StereoWCensusSgm::~StereoWCensusSgm() {
	delete impl_;
	impl_ = nullptr;
}
|
0f4118c8d0be8115f4b26c89f115ba37c3045044.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 11060
#include "paddle/phi/kernels/funcs/fused_gemm_epilogue.h"
#endif
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/reduce_sum_kernel.h"
namespace phi {
namespace fusion {
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 11060
// Shared implementation for the toolkit-version-guarded path above.
// Computes the weight gradient from x and dout through the fused
// GEMM-epilogue backward helper (accumulating when use_addto is set) and,
// when dbias_out is requested, the bias gradient as sum(dout, axis 0) plus
// any incoming dbias.  T is the input dtype; MT the gradient (master) dtype.
template <typename T, typename MT, typename Context>
void FusedLinearParamGradAddImpl(const Context &ctx,
                                 const DenseTensor &x,
                                 const DenseTensor &dout,
                                 const paddle::optional<DenseTensor> &dbias,
                                 int64_t M,
                                 int64_t K,
                                 int64_t N,
                                 bool use_addto,
                                 DenseTensor *dweight_out,
                                 DenseTensor *dbias_out) {
  constexpr bool kIsMultiPrecision = !std::is_same<T, MT>::value;
  // The fused bias-grad epilogue is deliberately disabled; dbias is computed
  // with the separate Sum() below.
  const bool fuse_bias_grad = false;  // kIsMultiPrecision && dweight_out;
  if (dweight_out) {
    phi::funcs::ComputeFusedGemmEpilogueBackward<T, T, MT>(
        ctx,
        &dout,
        &x,
        nullptr,
        nullptr,
        M,
        N,
        K,
        false,
        false,
        "none",
        nullptr,
        dweight_out,
        fuse_bias_grad ? dbias_out : nullptr,
        false,
        use_addto);
  }
  if (dbias_out == nullptr) return;
  if (!fuse_bias_grad) {
    // Reduce dout (viewed as M x N) over the row dimension.
    auto dout_copy = dout;
    dout_copy.Resize({M, N});
    if (kIsMultiPrecision) {
      *dbias_out = phi::Sum<T, Context>(
          ctx, dout_copy, {0}, phi::CppTypeToDataType<MT>::Type(), false);
    } else {
      *dbias_out = phi::Sum<T, Context>(
          ctx, dout_copy, {0}, phi::CppTypeToDataType<T>::Type(), false);
    }
  }
  if (dbias) {
    // Accumulate the incoming bias gradient onto the freshly computed one.
    if (kIsMultiPrecision) {
      phi::AddKernel<MT, Context>(ctx, *dbias_out, dbias.get(), dbias_out);
    } else {
      phi::AddKernel<T, Context>(ctx, *dbias_out, dbias.get(), dbias_out);
    }
  }
}
// Logs dtype/shape/place/data pointer of an (initialized) tensor at VLOG
// level `LogLevel`; used for debug tracing below.
template <int LogLevel = 10>
static void PrintMeta(const DenseTensor &t, const char *name) {
  PADDLE_ENFORCE_EQ(
      t.initialized(),
      true,
      phi::errors::InvalidArgument("Tensor(%s) is not initialized.", name));
  std::stringstream ss;
  ss << "Tensor(" << name << "): ";
  ss << "dtype(" << t.dtype() << "), ";
  ss << "shape(" << t.dims() << "), ";
  ss << "place(" << t.place() << "), ";
  ss << "ptr(" << t.data() << ")";
  VLOG(LogLevel) << ss.str();
}
// Pointer overload: logs "None" for a null tensor.
template <int LogLevel = 10>
static void PrintMeta(const DenseTensor *t, const char *name) {
  if (t == nullptr) {
    VLOG(LogLevel) << "Tensor(" << name << "): None";
  } else {
    PrintMeta<LogLevel>(*t, name);
  }
}
// paddle::optional overload: forwards to the pointer overload.
template <int LogLevel = 10>
static void PrintMeta(const paddle::optional<DenseTensor> &t,
                      const char *name) {
  const auto *t_ptr = t ? &(t.get()) : nullptr;
  PrintMeta<LogLevel>(t_ptr, name);
}
// Accumulates the parameter gradients of a fused linear layer.
// If `dweight`/`dbias` are supplied, gradients are accumulated into them
// (add-to semantics); otherwise fresh outputs are allocated.  With
// `multi_precision` the weight gradient is stored in the master dtype MT.
// x is treated as (M, K) and dout as (M, N), with K taken from x's last dim.
template <typename T, typename Context>
void FusedLinearParamGradAdd(const Context &ctx,
                             const DenseTensor &x,
                             const DenseTensor &dout,
                             const paddle::optional<DenseTensor> &dweight,
                             const paddle::optional<DenseTensor> &dbias,
                             bool multi_precision,
                             DenseTensor *dweight_out,
                             DenseTensor *dbias_out) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  bool use_addto = false;
  if (dweight_out) {
    if (dweight) {
      // Accumulate into the provided weight gradient; its dtype must match
      // the requested precision.
      use_addto = true;
      *dweight_out = dweight.get();
      if (multi_precision) {
        PADDLE_ENFORCE_EQ(
            dweight_out->dtype(),
            phi::CppTypeToDataType<MT>::Type(),
            phi::errors::InvalidArgument("Invalid data type error."));
      } else {
        PADDLE_ENFORCE_EQ(
            dweight_out->dtype(),
            phi::CppTypeToDataType<T>::Type(),
            phi::errors::InvalidArgument("Invalid data type error."));
      }
    } else {
      if (multi_precision) {
        ctx.template Alloc<MT>(dweight_out);
      } else {
        ctx.template Alloc<T>(dweight_out);
      }
    }
  }
  // If T already equals the master dtype there is nothing to promote.
  if (std::is_same<T, MT>::value) {
    multi_precision = false;
  }
  if (dbias_out) {
    ctx.template Alloc<T>(dbias_out);
  }
  int64_t K = x.dims()[x.dims().size() - 1];
  int64_t M = x.numel() / K;
  int64_t N = dout.dims()[dout.dims().size() - 1];
  constexpr int kLogLevel = 10;
  if (VLOG_IS_ON(kLogLevel)) {
    PrintMeta<kLogLevel>(x, "x");
    PrintMeta<kLogLevel>(dout, "dout");
    PrintMeta<kLogLevel>(dweight, "dweight");
    PrintMeta<kLogLevel>(dbias, "dbias");
    PrintMeta<kLogLevel>(dweight_out, "dweight_out");
    PrintMeta<kLogLevel>(dbias_out, "dbias_out");
    VLOG(kLogLevel) << "multi_precision = " << multi_precision;
    VLOG(kLogLevel) << "use_addto = " << use_addto;
    VLOG(kLogLevel) << "M = " << M;
    VLOG(kLogLevel) << "N = " << N;
    VLOG(kLogLevel) << "K = " << K;
  }
  if (multi_precision) {
    FusedLinearParamGradAddImpl<T, MT, Context>(
        ctx, x, dout, dbias, M, K, N, use_addto, dweight_out, dbias_out);
  } else {
    FusedLinearParamGradAddImpl<T, T, Context>(
        ctx, x, dout, dbias, M, K, N, use_addto, dweight_out, dbias_out);
  }
}
#else
// Fallback for toolkits without the fused GEMM-epilogue helper: always
// raises Unimplemented.
// NOTE(review): hipify rewrote "CUDA_VERSION" in the message below to
// "TORCH_HIP_VERSION" -- confirm the intended wording before changing it.
template <typename T, typename Context>
void FusedLinearParamGradAdd(const Context &ctx,
                             const DenseTensor &x,
                             const DenseTensor &dout,
                             const paddle::optional<DenseTensor> &dweight,
                             const paddle::optional<DenseTensor> &dbias,
                             bool multi_precision,
                             DenseTensor *dweight_out,
                             DenseTensor *dbias_out) {
  PADDLE_THROW(phi::errors::Unimplemented(
      "FusedLinearParamGradAdd is only supported when TORCH_HIP_VERSION >= 11.6."));
}
#endif
} // namespace fusion
} // namespace phi
PD_REGISTER_KERNEL(fused_linear_param_grad_add,
GPU,
ALL_LAYOUT,
phi::fusion::FusedLinearParamGradAdd,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| 0f4118c8d0be8115f4b26c89f115ba37c3045044.cu | // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 11060
#include "paddle/phi/kernels/funcs/fused_gemm_epilogue.h"
#endif
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/kernels/elementwise_add_kernel.h"
#include "paddle/phi/kernels/reduce_sum_kernel.h"
namespace phi {
namespace fusion {
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 11060
// Shared implementation for the CUDA >= 11.6 path.
// Computes the weight gradient from x and dout through the fused
// GEMM-epilogue backward helper (accumulating when use_addto is set) and,
// when dbias_out is requested, the bias gradient as sum(dout, axis 0) plus
// any incoming dbias.  T is the input dtype; MT the gradient (master) dtype.
template <typename T, typename MT, typename Context>
void FusedLinearParamGradAddImpl(const Context &ctx,
                                 const DenseTensor &x,
                                 const DenseTensor &dout,
                                 const paddle::optional<DenseTensor> &dbias,
                                 int64_t M,
                                 int64_t K,
                                 int64_t N,
                                 bool use_addto,
                                 DenseTensor *dweight_out,
                                 DenseTensor *dbias_out) {
  constexpr bool kIsMultiPrecision = !std::is_same<T, MT>::value;
  // The fused bias-grad epilogue is deliberately disabled; dbias is computed
  // with the separate Sum() below.
  const bool fuse_bias_grad = false;  // kIsMultiPrecision && dweight_out;
  if (dweight_out) {
    phi::funcs::ComputeFusedGemmEpilogueBackward<T, T, MT>(
        ctx,
        &dout,
        &x,
        nullptr,
        nullptr,
        M,
        N,
        K,
        false,
        false,
        "none",
        nullptr,
        dweight_out,
        fuse_bias_grad ? dbias_out : nullptr,
        false,
        use_addto);
  }
  if (dbias_out == nullptr) return;
  if (!fuse_bias_grad) {
    // Reduce dout (viewed as M x N) over the row dimension.
    auto dout_copy = dout;
    dout_copy.Resize({M, N});
    if (kIsMultiPrecision) {
      *dbias_out = phi::Sum<T, Context>(
          ctx, dout_copy, {0}, phi::CppTypeToDataType<MT>::Type(), false);
    } else {
      *dbias_out = phi::Sum<T, Context>(
          ctx, dout_copy, {0}, phi::CppTypeToDataType<T>::Type(), false);
    }
  }
  if (dbias) {
    // Accumulate the incoming bias gradient onto the freshly computed one.
    if (kIsMultiPrecision) {
      phi::AddKernel<MT, Context>(ctx, *dbias_out, dbias.get(), dbias_out);
    } else {
      phi::AddKernel<T, Context>(ctx, *dbias_out, dbias.get(), dbias_out);
    }
  }
}
// Logs dtype/shape/place/data pointer of an (initialized) tensor at VLOG
// level `LogLevel`; used for debug tracing below.
template <int LogLevel = 10>
static void PrintMeta(const DenseTensor &t, const char *name) {
  PADDLE_ENFORCE_EQ(
      t.initialized(),
      true,
      phi::errors::InvalidArgument("Tensor(%s) is not initialized.", name));
  std::stringstream ss;
  ss << "Tensor(" << name << "): ";
  ss << "dtype(" << t.dtype() << "), ";
  ss << "shape(" << t.dims() << "), ";
  ss << "place(" << t.place() << "), ";
  ss << "ptr(" << t.data() << ")";
  VLOG(LogLevel) << ss.str();
}
// Pointer overload: logs "None" for a null tensor.
template <int LogLevel = 10>
static void PrintMeta(const DenseTensor *t, const char *name) {
  if (t == nullptr) {
    VLOG(LogLevel) << "Tensor(" << name << "): None";
  } else {
    PrintMeta<LogLevel>(*t, name);
  }
}
// paddle::optional overload: forwards to the pointer overload.
template <int LogLevel = 10>
static void PrintMeta(const paddle::optional<DenseTensor> &t,
                      const char *name) {
  const auto *t_ptr = t ? &(t.get()) : nullptr;
  PrintMeta<LogLevel>(t_ptr, name);
}
// Accumulates the parameter gradients of a fused linear layer.
// If `dweight`/`dbias` are supplied, gradients are accumulated into them
// (add-to semantics); otherwise fresh outputs are allocated.  With
// `multi_precision` the weight gradient is stored in the master dtype MT.
// x is treated as (M, K) and dout as (M, N), with K taken from x's last dim.
template <typename T, typename Context>
void FusedLinearParamGradAdd(const Context &ctx,
                             const DenseTensor &x,
                             const DenseTensor &dout,
                             const paddle::optional<DenseTensor> &dweight,
                             const paddle::optional<DenseTensor> &dbias,
                             bool multi_precision,
                             DenseTensor *dweight_out,
                             DenseTensor *dbias_out) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  bool use_addto = false;
  if (dweight_out) {
    if (dweight) {
      // Accumulate into the provided weight gradient; its dtype must match
      // the requested precision.
      use_addto = true;
      *dweight_out = dweight.get();
      if (multi_precision) {
        PADDLE_ENFORCE_EQ(
            dweight_out->dtype(),
            phi::CppTypeToDataType<MT>::Type(),
            phi::errors::InvalidArgument("Invalid data type error."));
      } else {
        PADDLE_ENFORCE_EQ(
            dweight_out->dtype(),
            phi::CppTypeToDataType<T>::Type(),
            phi::errors::InvalidArgument("Invalid data type error."));
      }
    } else {
      if (multi_precision) {
        ctx.template Alloc<MT>(dweight_out);
      } else {
        ctx.template Alloc<T>(dweight_out);
      }
    }
  }
  // If T already equals the master dtype there is nothing to promote.
  if (std::is_same<T, MT>::value) {
    multi_precision = false;
  }
  if (dbias_out) {
    ctx.template Alloc<T>(dbias_out);
  }
  int64_t K = x.dims()[x.dims().size() - 1];
  int64_t M = x.numel() / K;
  int64_t N = dout.dims()[dout.dims().size() - 1];
  constexpr int kLogLevel = 10;
  if (VLOG_IS_ON(kLogLevel)) {
    PrintMeta<kLogLevel>(x, "x");
    PrintMeta<kLogLevel>(dout, "dout");
    PrintMeta<kLogLevel>(dweight, "dweight");
    PrintMeta<kLogLevel>(dbias, "dbias");
    PrintMeta<kLogLevel>(dweight_out, "dweight_out");
    PrintMeta<kLogLevel>(dbias_out, "dbias_out");
    VLOG(kLogLevel) << "multi_precision = " << multi_precision;
    VLOG(kLogLevel) << "use_addto = " << use_addto;
    VLOG(kLogLevel) << "M = " << M;
    VLOG(kLogLevel) << "N = " << N;
    VLOG(kLogLevel) << "K = " << K;
  }
  if (multi_precision) {
    FusedLinearParamGradAddImpl<T, MT, Context>(
        ctx, x, dout, dbias, M, K, N, use_addto, dweight_out, dbias_out);
  } else {
    FusedLinearParamGradAddImpl<T, T, Context>(
        ctx, x, dout, dbias, M, K, N, use_addto, dweight_out, dbias_out);
  }
}
#else
// Fallback for builds without CUDA >= 11.6: the fused GEMM-epilogue helper
// is unavailable, so this kernel always raises Unimplemented.
template <typename T, typename Context>
void FusedLinearParamGradAdd(const Context &ctx,
                             const DenseTensor &x,
                             const DenseTensor &dout,
                             const paddle::optional<DenseTensor> &dweight,
                             const paddle::optional<DenseTensor> &dbias,
                             bool multi_precision,
                             DenseTensor *dweight_out,
                             DenseTensor *dbias_out) {
  PADDLE_THROW(phi::errors::Unimplemented(
      "FusedLinearParamGradAdd is only supported when CUDA_VERSION >= 11.6."));
}
#endif
} // namespace fusion
} // namespace phi
PD_REGISTER_KERNEL(fused_linear_param_grad_add,
GPU,
ALL_LAYOUT,
phi::fusion::FusedLinearParamGradAdd,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
ac18caf82e80470f3132aad34b6888673832a795.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include "innerproductlayer.h"
#include "utils.h"
using namespace std;
// Builds a fully-connected layer holding its own copies of the initial
// weights (channels_out x channels_in) and bias (channels_out).
// In GPU mode the device buffers for the input/weight/output matrices are
// allocated once here and reused by every forward/backward call.
// NOTE(review): the class owns raw heap pointers but nothing in this block
// deletes its copy constructor -- copying a layer would double-free;
// confirm instances are never copied.
InnerProductLayer::InnerProductLayer(const bool gpu, const int num, const int channels_in, const int channels_out,
                                     const FP lr, const FP* weights, const FP* bias)
    :gpu(gpu), num(num), channels_in(channels_in), channels_out(channels_out), lr(lr)
{
    int weights_size = channels_out * channels_in;
    this->weights = new FP[weights_size];
    memcopy_cpu(this->weights, weights, weights_size * sizeof(FP));
    this->bias = new FP[channels_out];
    memcopy_cpu(this->bias, bias, channels_out * sizeof(FP));
    count = num * channels_in;
    if (gpu) {
        hipMalloc((void**)&dev_a, num * channels_in * sizeof(FP));
        hipMalloc((void**)&dev_b, channels_in * channels_out * sizeof(FP));
        hipMalloc((void**)&dev_c, num * channels_out * sizeof(FP));
    }
}
// Releases the host-side parameter copies and, in GPU mode, the device
// buffers allocated in the constructor.
InnerProductLayer::~InnerProductLayer()
{
    delete[] weights;
    delete[] bias;
    if (gpu) {
        hipFree(dev_a);
        hipFree(dev_b);
        hipFree(dev_c);
    }
}
// Forward pass: output = input * weights^T + bias, with input (num x
// channels_in), weights (channels_out x channels_in) and output (num x
// channels_out).  `output` is allocated here and owned by the caller; the
// input pointer is stashed for use by backward().
void InnerProductLayer::forward(const FP *input, FP *&output)
{
    this->input = input;
    output = new FP[num * channels_out];
    for (int i = 0; i < num * channels_out; i++)
        output[i] = 0;
    if (gpu) {
        // Second operand transposed: input (num x in) * weights^T (in x out).
        matrixmult_gpu(false, true, input, weights, output, num, channels_out, channels_in, dev_a, dev_b, dev_c);
    } else {
        matrixmult_btrans_cpu(input, weights, output, num, channels_out, channels_in);
    }
    // Add the bias vector to every output row.
    for (int i = 0; i < num; i++) {
        add_bias_cpu(output + i * channels_out, bias, 1, channels_out);
    }
}
void InnerProductLayer::backward(const FP *top_diff, FP *&bottom_diff)
{
bottom_diff = new FP[num * channels_in];
for (int i = 0; i < num * channels_in; i++)
bottom_diff[i] = 0;
int weights_size = channels_out * channels_in;
FP* weights_diff = new FP[weights_size];
FP* weights_diff_tmp = new FP[weights_size];
for (int i = 0; i < weights_size; i++) {
weights_diff[i] = 0;
weights_diff_tmp[i] = 0;
}
FP* bias_diff = new FP[channels_out];
for (int i = 0; i < channels_out; i++) {
bias_diff[i] = 0;
}
if (gpu) {
//update bottom diff
matrixmult_gpu(false, false, top_diff, weights, bottom_diff, num, channels_in, channels_out, dev_c, dev_b, dev_a);
//update weights diff
matrixmult_gpu(true, false, top_diff, input, weights_diff_tmp, channels_out, channels_in, num, dev_c, dev_a, dev_b);
for (int j = 0; j < weights_size; j++) {
weights_diff[j] += weights_diff_tmp[j];
}
//update bias diff
for (int i = 0; i < num; i++) {
for (int c = 0; c < channels_out; c++) {
bias_diff[c] += top_diff[i * channels_out + c];
}
}
} else {
//update bottom diff
matrixmult_cpu(top_diff, weights, bottom_diff, num, channels_in, channels_out);
//update weights diff
matrixmult_atrans_cpu(top_diff, input, weights_diff, channels_out, channels_in, num);
//update bias diff
for (int i = 0; i < num; i++) {
for (int c = 0; c < channels_out; c++) {
bias_diff[c] += top_diff[i * channels_out + c];
}
}
}
// cout << "weights_diff: " << endl;
// for (int i =0; i < channels_out; i++) {
// for (int c = 0; c < channels_in; c++) {
// cout << weights_diff[i * channels_in + c] << " ";
// }
// cout << endl;
// }
// cout << "bias_diff: " << endl;
// for (int i = 0; i < channels_out; i++) {
// cout << bias_diff[i] <<endl;
// }
//update weights
update_params_cpu(weights, weights_diff, weights_size, num, lr);
//update bias
update_params_cpu(bias, bias_diff, channels_out, num, lr);
}
| ac18caf82e80470f3132aad34b6888673832a795.cu | #include <iostream>
#include <stdio.h>
#include "innerproductlayer.h"
#include "utils.h"
using namespace std;
// Builds a fully-connected layer holding its own copies of the initial
// weights (channels_out x channels_in) and bias (channels_out).
// In GPU mode the device buffers for the input/weight/output matrices are
// allocated once here and reused by every forward/backward call.
// NOTE(review): the class owns raw heap pointers but nothing in this block
// deletes its copy constructor -- copying a layer would double-free;
// confirm instances are never copied.
InnerProductLayer::InnerProductLayer(const bool gpu, const int num, const int channels_in, const int channels_out,
                                     const FP lr, const FP* weights, const FP* bias)
    :gpu(gpu), num(num), channels_in(channels_in), channels_out(channels_out), lr(lr)
{
    int weights_size = channels_out * channels_in;
    this->weights = new FP[weights_size];
    memcopy_cpu(this->weights, weights, weights_size * sizeof(FP));
    this->bias = new FP[channels_out];
    memcopy_cpu(this->bias, bias, channels_out * sizeof(FP));
    count = num * channels_in;
    if (gpu) {
        cudaMalloc((void**)&dev_a, num * channels_in * sizeof(FP));
        cudaMalloc((void**)&dev_b, channels_in * channels_out * sizeof(FP));
        cudaMalloc((void**)&dev_c, num * channels_out * sizeof(FP));
    }
}
// Releases the host-side parameter copies and, in GPU mode, the device
// buffers allocated in the constructor.
InnerProductLayer::~InnerProductLayer()
{
    delete[] weights;
    delete[] bias;
    if (gpu) {
        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
    }
}
// Forward pass: output = input * weights^T + bias, with input (num x
// channels_in), weights (channels_out x channels_in) and output (num x
// channels_out).  `output` is allocated here and owned by the caller; the
// input pointer is stashed for use by backward().
void InnerProductLayer::forward(const FP *input, FP *&output)
{
    this->input = input;
    output = new FP[num * channels_out];
    for (int i = 0; i < num * channels_out; i++)
        output[i] = 0;
    if (gpu) {
        // Second operand transposed: input (num x in) * weights^T (in x out).
        matrixmult_gpu(false, true, input, weights, output, num, channels_out, channels_in, dev_a, dev_b, dev_c);
    } else {
        matrixmult_btrans_cpu(input, weights, output, num, channels_out, channels_in);
    }
    // Add the bias vector to every output row.
    for (int i = 0; i < num; i++) {
        add_bias_cpu(output + i * channels_out, bias, 1, channels_out);
    }
}
void InnerProductLayer::backward(const FP *top_diff, FP *&bottom_diff)
{
bottom_diff = new FP[num * channels_in];
for (int i = 0; i < num * channels_in; i++)
bottom_diff[i] = 0;
int weights_size = channels_out * channels_in;
FP* weights_diff = new FP[weights_size];
FP* weights_diff_tmp = new FP[weights_size];
for (int i = 0; i < weights_size; i++) {
weights_diff[i] = 0;
weights_diff_tmp[i] = 0;
}
FP* bias_diff = new FP[channels_out];
for (int i = 0; i < channels_out; i++) {
bias_diff[i] = 0;
}
if (gpu) {
//update bottom diff
matrixmult_gpu(false, false, top_diff, weights, bottom_diff, num, channels_in, channels_out, dev_c, dev_b, dev_a);
//update weights diff
matrixmult_gpu(true, false, top_diff, input, weights_diff_tmp, channels_out, channels_in, num, dev_c, dev_a, dev_b);
for (int j = 0; j < weights_size; j++) {
weights_diff[j] += weights_diff_tmp[j];
}
//update bias diff
for (int i = 0; i < num; i++) {
for (int c = 0; c < channels_out; c++) {
bias_diff[c] += top_diff[i * channels_out + c];
}
}
} else {
//update bottom diff
matrixmult_cpu(top_diff, weights, bottom_diff, num, channels_in, channels_out);
//update weights diff
matrixmult_atrans_cpu(top_diff, input, weights_diff, channels_out, channels_in, num);
//update bias diff
for (int i = 0; i < num; i++) {
for (int c = 0; c < channels_out; c++) {
bias_diff[c] += top_diff[i * channels_out + c];
}
}
}
// cout << "weights_diff: " << endl;
// for (int i =0; i < channels_out; i++) {
// for (int c = 0; c < channels_in; c++) {
// cout << weights_diff[i * channels_in + c] << " ";
// }
// cout << endl;
// }
// cout << "bias_diff: " << endl;
// for (int i = 0; i < channels_out; i++) {
// cout << bias_diff[i] <<endl;
// }
//update weights
update_params_cpu(weights, weights_diff, weights_size, num, lr);
//update bias
update_params_cpu(bias, bias_diff, channels_out, num, lr);
}
|
2a0651f61aa8c8a85343bc61de86062ce6256284.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "rasterize_cuda_kernel.h"
// Threads per block used by every kernel launch in this file.
const int CUDA_NUM_THREADS = 1024;
// Ceiling division: number of blocks of CUDA_NUM_THREADS needed to cover N.
inline int GET_BLOCKS(const int N){
  const int tpb = CUDA_NUM_THREADS;
  return (N + tpb - 1) / tpb;
}
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
// Software atomicAdd for double on pre-SM60 devices, implemented as a
// 64-bit compare-and-swap loop on the value's bit pattern.
static __inline__ __device__ double atomicAdd(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
            __double_as_longlong(val + __longlong_as_double(assumed)));
        // The loop condition compares integers, not doubles: a stored NaN
        // would make a floating-point (NaN != NaN) comparison spin forever.
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif
// Strided tensor accessor macros: GET/SET read or write one element of an
// N-dimensional tensor given per-dimension strides; ADD_ATOMIC accumulates
// into the element with atomicAdd.
#define GET_DIRECT_4d(data, x0, x1, x2, x3, sd0, sd1, sd2, sd3) \
  ((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3)])
#define ADD_ATOMIC_4d(data, x0, x1, x2, x3, sd0, sd1, sd2, sd3, v) \
  atomicAdd( data + (x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3), v )
#define ADD_ATOMIC_5d(data, x0, x1, x2, x3, x4, sd0, sd1, sd2, sd3, sd4, v) \
  atomicAdd( data + (x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3) + (x4)*(sd4), v )
#define SET_DIRECT_4d(data, x0, x1, x2, x3, sd0, sd1, sd2, sd3, v) \
  ((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3)]) = v
#define GET_DIRECT_3d(data, x0, x1, x2, sd0, sd1, sd2) \
  ((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2)])
#define SET_DIRECT_3d(data, x0, x1, x2, sd0, sd1, sd2, v) \
  ((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) ]) = v
#define GET_DIRECT_5d(data, x0, x1, x2, x3, x4, stride0, stride1, stride2, stride3, stride4) \
  ((data)[(x0)*(stride0)+(x1)*(stride1)+(x2)*(stride2)+(x3)*(stride3)+(x4)*(stride4)])
#define SET_DIRECT_5d(data, x0, x1, x2, x3, x4, stride0, stride1, stride2, stride3, stride4, value) \
  ((data)[(x0)*(stride0)+(x1)*(stride1)+(x2)*(stride2)+(x3)*(stride3)+(x4)*(stride4)] = (value))
// Standard grid-stride loop over n elements.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
// Rasterization output modes.
// NOTE(review): the exact semantics of each mode are defined by the host-side
// caller selecting it -- confirm against that code before renaming values.
const int MODE_BOUNDARY = 0;
const int MODE_MASK = 1;
const int MODE_HARD_MASK = 2;
template <typename scalar_t>
__global__ void inside_outside_cuda_kernel(
    const scalar_t* __restrict__ vertices,
    int batch_size,
    int number_vertices,
    scalar_t* rasterized,
    int height,
    int width) {
  // Point-in-polygon test for every pixel of every image in the batch.
  // vertices:   flattened [batch, number_vertices, 2] (x, y) pairs.
  // rasterized: flattened [batch, height, width]; written with -1.0
  //             (outside) or +1.0 (inside).
  // One thread per output pixel; 1-D grid of 1-D blocks.
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= batch_size * width * height) {
    return;
  }
  const int w = width;
  const int h = height;
  const int nv = number_vertices;
  // batch index.
  const int bi = i / (w * h);
  // pixel number (linear index)
  const int pn = i % (w * h);
  const int yp = pn / w;
  const int xp = pn % w;
  // Even-odd ray casting (William Randolph Franklin): count how many polygon
  // edges a horizontal ray from the pixel crosses; odd => inside.
  // (Fixed: a dead outer `int j = 0;` shadowed by the loop-scope `j` has
  // been removed; it was never read.)
  scalar_t c = 0;
  for (int vn = 0, j = nv - 1; vn < nv; j = vn++) {
    scalar_t from_x;
    scalar_t from_y;
    scalar_t to_x;
    scalar_t to_y;
    from_x = vertices[bi * (nv * 2) + vn * 2];
    from_y = vertices[bi * (nv * 2) + vn * 2 + 1];
    to_x = vertices[bi * (nv * 2) + j * 2];
    to_y = vertices[bi * (nv * 2) + j * 2 + 1];
    // Toggle on each crossing of the edge (vn, j) by the ray y = yp, x >= xp.
    if (((from_y > yp) != (to_y > yp)) && (xp < (to_x - from_x) * (yp - from_y) / (to_y - from_y) + from_x)) {
      c = !c;
    }
  }
  rasterized[i] = c == 0 ? -1.0 : 1.0;
}
template <typename scalar_t>
__global__ void forward_rasterize_cuda_kernel(
    const scalar_t* __restrict__ vertices,
    int batch_size,
    int number_vertices,
    scalar_t* rasterized,
    int* contribution_map,
    int height,
    int width,
    float inv_smoothness,
    int mode) {
  // Soft-rasterization forward pass.  For each pixel, find the polygon edge
  // segment with the smallest squared point-to-segment distance, then map
  // that distance to an intensity according to `mode`:
  //   MODE_MASK      -> sigmoid of the signed distance; the sign comes from
  //                     the inside/outside value (-1/+1) already stored in
  //                     rasterized[i] by inside_outside_cuda_kernel.
  //   MODE_HARD_MASK -> binary 0/1 taken straight from the inside/outside map.
  //   otherwise      -> exp(-distance / inv_smoothness)  (boundary mode).
  // The index of the winning segment's first vertex is written to
  // contribution_map[i] so the backward pass only differentiates that segment.
  // vertices is flattened [batch, number_vertices, 2]; rasterized and
  // contribution_map are flattened [batch, height, width].
  // 1-D array of 1-D blocks.
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= batch_size * width * height) {
    return;
  }
  const int w = width;
  const int h = height;
  const int nv = number_vertices;
  // batch index.
  const int bi = i / (w * h);
  // pixel number (linear index)
  const int pn = i % (w * h);
  const int yp = pn / w;
  const int xp = pn % w;
  // go through each vertex.
  // at some point, we'll need to record
  // which segment contributed the most
  // for backwards pass.
  scalar_t max_contribution = -2147483647;
  int max_vertex_number = -1;
  for (int vn = 0; vn < nv; vn++) {
    int from_index;
    int to_index;
    scalar_t from_x;
    scalar_t from_y;
    scalar_t to_x;
    scalar_t to_y;
    scalar_t x2_sub_x1;
    scalar_t y2_sub_y1;
    scalar_t square_segment_length;
    scalar_t x_sub_x1;
    scalar_t y_sub_y1;
    scalar_t x_sub_x2;
    scalar_t y_sub_y2;
    scalar_t dot;
    scalar_t x_proj;
    scalar_t y_proj;
    scalar_t contribution;
    // grid_x, grid_y = xp, yp.
    from_index = vn;
    to_index = (vn + 1) % number_vertices;
    from_x = vertices[bi * (nv * 2) + from_index * 2];
    from_y = vertices[bi * (nv * 2) + from_index * 2 + 1];
    to_x = vertices[bi * (nv * 2) + to_index * 2];
    to_y = vertices[bi * (nv * 2) + to_index * 2 + 1];
    x2_sub_x1 = to_x - from_x;
    y2_sub_y1 = to_y - from_y;
    // +epsilon guards against division by zero for degenerate segments.
    square_segment_length = x2_sub_x1 * x2_sub_x1 + y2_sub_y1 * y2_sub_y1 + 0.00001;
    x_sub_x1 = xp - from_x;
    y_sub_y1 = yp - from_y;
    x_sub_x2 = xp - to_x;
    y_sub_y2 = yp - to_y;
    // dot = normalized projection of (pixel - from) onto the segment;
    // dot in [0,1] means the perpendicular foot lies on the segment.
    dot = ((x_sub_x1 * x2_sub_x1) + (y_sub_y1 * y2_sub_y1)) / square_segment_length;
    x_proj = xp - (from_x + dot * x2_sub_x1);
    y_proj = yp - (from_y + dot * y2_sub_y1);
    // Does it matter here to compute the squared distance or true Euclidean distance?
    if (dot < 0) {
      // Closest feature is the `from` endpoint.
      contribution = pow(x_sub_x1, 2) + pow(y_sub_y1, 2);
    }
    else if (dot > 1) {
      // Closest feature is the `to` endpoint.
      contribution = pow(x_sub_x2, 2) + pow(y_sub_y2, 2);
    }
    else {
      // Closest feature is the segment interior (perpendicular distance).
      contribution = pow(x_proj, 2) + pow(y_proj, 2);
    }
    // we need contribution to be a decreasing function.
    // if (mode == MODE_MASK) {
    //   // sign * -dist
    //   contribution = 1.0 / (1.0 + exp(-rasterized[i] * contribution / inv_smoothness));
    // }
    // else if (mode == MODE_HARD_MASK) {
    //   // map the inside outside map to 0 or 1.0.
    //   // technically, we don't need this preceeding loop.
    //   contribution = rasterized[i] < 0 ? 0.0 : 1.0;
    // }
    // else {
    //   contribution = exp(-contribution / inv_smoothness);
    // }
    contribution = -contribution;
    if (contribution > max_contribution) {
      max_contribution = contribution;
      max_vertex_number = vn;
    }
  }
  // max_contribution is now the negated squared distance to the nearest
  // segment; convert it to the final per-pixel intensity.
  if (mode == MODE_MASK) {
    // sign * -dist
    max_contribution = 1.0 / (1.0 + exp(rasterized[i] * max_contribution / inv_smoothness));
  }
  else if (mode == MODE_HARD_MASK) {
    // map the inside outside map to 0 or 1.0.
    // technically, we don't need this preceeding loop.
    max_contribution = rasterized[i] < 0 ? 0.0 : 1.0;
  }
  else {
    max_contribution = exp(max_contribution / inv_smoothness);
  }
  rasterized[i] = max_contribution;
  contribution_map[i] = max_vertex_number;
}
template <typename scalar_t>
__global__ void backward_rasterize_cuda_kernel(
    const scalar_t* __restrict__ vertices,
    const scalar_t* __restrict__ rasterized,
    const int* __restrict__ contribution_map,
    const scalar_t* __restrict__ grad_output,
    scalar_t* grad_vertices,
    int batch_size,
    int number_vertices,
    int width,
    int height,
    float inv_smoothness) {
  // Backward pass of the soft rasterizer.  One thread per pixel: each pixel
  // differentiates the forward intensity w.r.t. the two endpoints of the
  // single segment recorded in contribution_map[i], scales by the incoming
  // grad_output[i], and accumulates into grad_vertices [batch, nv, 2] with
  // atomicAdd (many pixels share the same segment, hence the atomics).
  // The gradient formulas assume the sigmoid (MODE_MASK-style) forward:
  // r = sigmoid(...), so d r / d dist involves r * (1 - r).
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= batch_size * width * height) {
    return;
  }
  const int w = width;
  const int h = height;
  const int nv = number_vertices;
  // batch index.
  const int bi = i / (w * h);
  // pixel number (linear index)
  const int pn = i % (w * h);
  const int yp = pn / w;
  const int xp = pn % w;
  // produce dR/dv.
  // since we use max over all vertices, we only need
  // to apply it to single vertex.
  int vn;
  int from_index;
  int to_index;
  scalar_t from_x;
  scalar_t from_y;
  scalar_t to_x;
  scalar_t to_y;
  scalar_t x2_sub_x1;
  scalar_t y2_sub_y1;
  scalar_t square_segment_length;
  scalar_t x_sub_x1;
  scalar_t y_sub_y1;
  scalar_t x_sub_x2;
  scalar_t y_sub_y2;
  scalar_t dot;
  scalar_t x_proj;
  scalar_t y_proj;
  scalar_t grad_x1 = 0.0;
  scalar_t grad_y1 = 0.0;
  scalar_t grad_x2 = 0.0;
  scalar_t grad_y2 = 0.0;
  // Recover the inside/outside sign from the forward sigmoid output
  // (>= 0.5 means the pixel was inside).
  scalar_t in_out = rasterized[i] >= 0.5 ? 1.0 : -1.0;
  vn = contribution_map[i];
  from_index = vn;
  to_index = (vn + 1) % nv;
  // determine how we computed the distance to this segment.
  from_x = vertices[bi * (nv * 2) + from_index * 2];
  from_y = vertices[bi * (nv * 2) + from_index * 2 + 1];
  to_x = vertices[bi * (nv * 2) + to_index * 2];
  to_y = vertices[bi * (nv * 2) + to_index * 2 + 1];
  x2_sub_x1 = to_x - from_x;
  y2_sub_y1 = to_y - from_y;
  // grad:
  // dX1 = 2 * x2_sub_x1 * -1
  // dX2 = 2 * x2_sub_x1
  // dY1 = 2 * y2_sub_y1 * -1
  // dY2 = 2 * y2_sub_y1
  // possible this could NaN?
  square_segment_length = x2_sub_x1 * x2_sub_x1 + y2_sub_y1 * y2_sub_y1 + 0.00001;
  x_sub_x1 = xp - from_x;
  y_sub_y1 = yp - from_y;
  x_sub_x2 = xp - to_x;
  y_sub_y2 = yp - to_y;
  // grad numer:
  // dX1 = -1 * x2_sub_x1 + -1 * x_sub_x1
  // dX2 = x_sub_x1
  scalar_t dot_num = ((x_sub_x1 * x2_sub_x1) + (y_sub_y1 * y2_sub_y1));
  dot = dot_num / square_segment_length;
  x_proj = xp - (from_x + dot * x2_sub_x1);
  y_proj = yp - (from_y + dot * y2_sub_y1);
  // Re-derive which distance branch the forward pass took (endpoint vs
  // interior) and differentiate that branch only.
  if (dot < 0) {
    // Nearest feature was the `from` endpoint:
    // dist^2 = (xp - from_x)^2 + (yp - from_y)^2.
    // grad_x1 = (rasterized[i] * 2 * x_sub_x1) / inv_smoothness;
    // grad_y1 = (rasterized[i] * 2 * y_sub_y1) / inv_smoothness;
    // grad_x1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * 2 * x_sub_x1 / inv_smoothness;
    // grad_y1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * 2 * y_sub_y1 / inv_smoothness;
    grad_x1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * x_sub_x1 / inv_smoothness;
    grad_y1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * y_sub_y1 / inv_smoothness;
  }
  else if (dot > 1) {
    // Nearest feature was the `to` endpoint:
    // dist^2 = (xp - to_x)^2 + (yp - to_y)^2.
    // grad_x2 = (rasterized[i] * 2 * x_sub_x2) / inv_smoothness;
    // grad_y2 = (rasterized[i] * 2 * y_sub_y2) / inv_smoothness;
    grad_x2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * x_sub_x2 / inv_smoothness;
    grad_y2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * y_sub_y2 / inv_smoothness;
  }
  else {
    // Interior case: differentiate the perpendicular projection residuals
    // (x_proj, y_proj) through `dot` via the quotient rule.
    scalar_t ss_x1 = -2.0 * x2_sub_x1;
    scalar_t ss_x2 = 2.0 * x2_sub_x1;
    scalar_t ss_y1 = -2.0 * y2_sub_y1;
    scalar_t ss_y2 = 2.0 * y2_sub_y1;
    // d(dot)/d{x1,x2,y1,y2}: quotient rule on dot_num / square_segment_length.
    scalar_t dot_x1 = (square_segment_length * (-x2_sub_x1 - x_sub_x1) - dot_num * ss_x1) / pow(square_segment_length, 2);
    scalar_t dot_x2 = (square_segment_length * x_sub_x1 - dot_num * ss_x2) / pow(square_segment_length, 2);
    scalar_t dot_y1 = (square_segment_length * (-y2_sub_y1 - y_sub_y1) - dot_num * ss_y1) / pow(square_segment_length, 2);
    scalar_t dot_y2 = (square_segment_length * y_sub_y1 - dot_num * ss_y2) / pow(square_segment_length, 2);
    // d/dx()
    scalar_t x_proj_x1 = -1 - dot_x1 * x2_sub_x1 + dot;
    scalar_t x_proj_x2 = -(dot_x2 * x2_sub_x1 + dot);
    scalar_t y_proj_y1 = -1 - dot_y1 * y2_sub_y1 + dot;
    scalar_t y_proj_y2 = -(dot_y2 * y2_sub_y1 + dot);
    // we also need mixed.
    scalar_t y_proj_x1 = -dot_x1 * y2_sub_y1;
    scalar_t y_proj_x2 = -dot_x2 * y2_sub_y1;
    scalar_t x_proj_y1 = -dot_y1 * x2_sub_x1;
    scalar_t x_proj_y2 = -dot_y2 * x2_sub_x1;
    // - as well?
    grad_x1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_x1 + 2.0 * y_proj * y_proj_x1) / inv_smoothness;
    grad_x2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_x2 + 2.0 * y_proj * y_proj_x2) / inv_smoothness;
    grad_y1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_y1 + 2.0 * y_proj * y_proj_y1) / inv_smoothness;
    grad_y2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_y2 + 2.0 * y_proj * y_proj_y2) / inv_smoothness;
    // grad_x1 = -rasterized[i] * (2.0 * x_proj * x_proj_x1 + 2.0 * y_proj * y_proj_x1) / inv_smoothness;
    // grad_x2 = -rasterized[i] * (2.0 * x_proj * x_proj_x2 + 2.0 * y_proj * y_proj_x2) / inv_smoothness;
    // grad_y1 = -rasterized[i] * (2.0 * x_proj * x_proj_y1 + 2.0 * y_proj * y_proj_y1) / inv_smoothness;
    // grad_y2 = -rasterized[i] * (2.0 * x_proj * x_proj_y2 + 2.0 * y_proj * y_proj_y2) / inv_smoothness;
  }
  // apply the input gradients.
  grad_x1 = grad_x1 * grad_output[i];
  grad_x2 = grad_x2 * grad_output[i];
  grad_y1 = grad_y1 * grad_output[i];
  grad_y2 = grad_y2 * grad_output[i];
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + from_index * 2] = grad_x1;
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + from_index * 2 + 1] = grad_y1;
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + to_index * 2] = grad_x2;
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + to_index * 2 + 1] = grad_y2;
  // Accumulate into the shared per-vertex gradient buffer; atomics are
  // required because many pixels map to the same segment endpoints.
  atomicAdd(grad_vertices + bi * nv * 2 + from_index * 2, grad_x1);
  atomicAdd(grad_vertices + bi * nv * 2 + from_index * 2 + 1, grad_y1);
  atomicAdd(grad_vertices + bi * nv * 2 + to_index * 2, grad_x2);
  atomicAdd(grad_vertices + bi * nv * 2 + to_index * 2 + 1, grad_y2);
}
std::vector<at::Tensor> forward_rasterize_cuda(
    at::Tensor vertices,
    at::Tensor rasterized,
    at::Tensor contribution_map,
    int width,
    int height,
    float inv_smoothness,
    int mode) {
  // Host launcher for the forward soft rasterizer.
  // Mask modes first run an inside/outside pass writing -1/+1 into
  // `rasterized`; the main kernel then converts distances to intensities.
  // NOTE(review): in MODE_HARD_MASK the second kernel is skipped, so
  // `rasterized` keeps -1/+1 (not 0/1) and `contribution_map` is untouched
  // -- confirm callers expect that.
  const auto batch_size = vertices.size(0);
  const auto number_vertices = vertices.size(1);
  const int threads = 512;
  // each block processes some 512 sized chunk of the output image.
  const dim3 blocks ((batch_size * width * height - 1) / threads + 1);
  if ((mode == MODE_MASK) || (mode == MODE_HARD_MASK)) {
    // determine whether each point is inside or outside.
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "inside_outside_cuda", ([&] {
      hipLaunchKernelGGL(( inside_outside_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        vertices.data<scalar_t>(),
        batch_size,
        number_vertices,
        rasterized.data<scalar_t>(),
        height,
        width);
  }));
  }
  if (mode != MODE_HARD_MASK) {
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "forward_rasterize_cuda", ([&] {
      hipLaunchKernelGGL(( forward_rasterize_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
        vertices.data<scalar_t>(),
        batch_size,
        number_vertices,
        rasterized.data<scalar_t>(),
        contribution_map.data<int>(),
        height,
        width,
        inv_smoothness,
        mode);
  }));
  }
  // Fixed: hipGetLastError() was previously called twice back-to-back.
  // The call both fetches AND clears the sticky error, so the second call
  // (the one that was checked) always reported success and launch errors
  // were silently swallowed.
  hipError_t err = hipGetLastError();
  if (err != hipSuccess)
    printf("Error in forward_rasterize: %s\n", hipGetErrorString(err));
  return { rasterized, contribution_map };
}
at::Tensor backward_rasterize_cuda(
    at::Tensor vertices,
    at::Tensor rasterized,
    at::Tensor contribution_map,
    at::Tensor grad_output,
    at::Tensor grad_vertices,
    int width,
    int height,
    float inv_smoothness,
    int mode) {
  // Host launcher for the backward pass: one thread per pixel scatters its
  // gradient contribution into grad_vertices via the backward kernel.
  const auto n_batch = vertices.size(0);
  const auto n_verts = vertices.size(1);
  const int threads_per_block = 512;
  const auto n_pixels = n_batch * width * height;
  const dim3 grid_dim((n_pixels - 1) / threads_per_block + 1);
  AT_DISPATCH_FLOATING_TYPES(vertices.type(), "backward_rasterize_cuda", ([&] {
    hipLaunchKernelGGL(( backward_rasterize_cuda_kernel<scalar_t>), dim3(grid_dim), dim3(threads_per_block), 0, 0,
      vertices.data<scalar_t>(),
      rasterized.data<scalar_t>(),
      contribution_map.data<int>(),
      grad_output.data<scalar_t>(),
      grad_vertices.data<scalar_t>(),
      n_batch,
      n_verts,
      width,
      height,
      inv_smoothness);
  }));
  // Surface any launch-configuration error from the kernel above.
  hipError_t launch_err = hipGetLastError();
  if (launch_err != hipSuccess)
    printf("Error in backward_rasterize: %s\n", hipGetErrorString(launch_err));
  return grad_vertices;
}
| 2a0651f61aa8c8a85343bc61de86062ce6256284.cu | #include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "rasterize_cuda_kernel.h"
const int CUDA_NUM_THREADS = 1024;
// Number of thread blocks needed to cover N work items at
// CUDA_NUM_THREADS threads per block (ceiling division).
inline int GET_BLOCKS(const int N){
  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// for the older gpus atomicAdd with double arguments does not exist
#if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__)
// Software double-precision atomicAdd for pre-SM60 devices, implemented as
// a compare-and-swap retry loop on the 64-bit integer image of the double.
// Returns the value stored at *address before the addition (like the
// hardware atomicAdd).
static __inline__ __device__ double atomicAdd(double* address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: the loop condition compares the integer images, not the doubles,
    // to avoid hanging when the stored value is NaN (NaN != NaN).
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif
#define GET_DIRECT_4d(data, x0, x1, x2, x3, sd0, sd1, sd2, sd3) \
((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3)])
#define ADD_ATOMIC_4d(data, x0, x1, x2, x3, sd0, sd1, sd2, sd3, v) \
atomicAdd( data + (x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3), v )
#define ADD_ATOMIC_5d(data, x0, x1, x2, x3, x4, sd0, sd1, sd2, sd3, sd4, v) \
atomicAdd( data + (x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3) + (x4)*(sd4), v )
#define SET_DIRECT_4d(data, x0, x1, x2, x3, sd0, sd1, sd2, sd3, v) \
((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) + (x3) * (sd3)]) = v
#define GET_DIRECT_3d(data, x0, x1, x2, sd0, sd1, sd2) \
((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2)])
#define SET_DIRECT_3d(data, x0, x1, x2, sd0, sd1, sd2, v) \
((data)[(x0) * (sd0) + (x1) * (sd1) + (x2) * (sd2) ]) = v
#define GET_DIRECT_5d(data, x0, x1, x2, x3, x4, stride0, stride1, stride2, stride3, stride4) \
((data)[(x0)*(stride0)+(x1)*(stride1)+(x2)*(stride2)+(x3)*(stride3)+(x4)*(stride4)])
#define SET_DIRECT_5d(data, x0, x1, x2, x3, x4, stride0, stride1, stride2, stride3, stride4, value) \
((data)[(x0)*(stride0)+(x1)*(stride1)+(x2)*(stride2)+(x3)*(stride3)+(x4)*(stride4)] = (value))
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
const int MODE_BOUNDARY = 0;
const int MODE_MASK = 1;
const int MODE_HARD_MASK = 2;
template <typename scalar_t>
__global__ void inside_outside_cuda_kernel(
    const scalar_t* __restrict__ vertices,
    int batch_size,
    int number_vertices,
    scalar_t* rasterized,
    int height,
    int width) {
  // Point-in-polygon test for every pixel of every image in the batch.
  // vertices:   flattened [batch, number_vertices, 2] (x, y) pairs.
  // rasterized: flattened [batch, height, width]; written with -1.0
  //             (outside) or +1.0 (inside).
  // One thread per output pixel; 1-D grid of 1-D blocks.
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= batch_size * width * height) {
    return;
  }
  const int w = width;
  const int h = height;
  const int nv = number_vertices;
  // batch index.
  const int bi = i / (w * h);
  // pixel number (linear index)
  const int pn = i % (w * h);
  const int yp = pn / w;
  const int xp = pn % w;
  // Even-odd ray casting (William Randolph Franklin): count how many polygon
  // edges a horizontal ray from the pixel crosses; odd => inside.
  // (Fixed: a dead outer `int j = 0;` shadowed by the loop-scope `j` has
  // been removed; it was never read.)
  scalar_t c = 0;
  for (int vn = 0, j = nv - 1; vn < nv; j = vn++) {
    scalar_t from_x;
    scalar_t from_y;
    scalar_t to_x;
    scalar_t to_y;
    from_x = vertices[bi * (nv * 2) + vn * 2];
    from_y = vertices[bi * (nv * 2) + vn * 2 + 1];
    to_x = vertices[bi * (nv * 2) + j * 2];
    to_y = vertices[bi * (nv * 2) + j * 2 + 1];
    // Toggle on each crossing of the edge (vn, j) by the ray y = yp, x >= xp.
    if (((from_y > yp) != (to_y > yp)) && (xp < (to_x - from_x) * (yp - from_y) / (to_y - from_y) + from_x)) {
      c = !c;
    }
  }
  rasterized[i] = c == 0 ? -1.0 : 1.0;
}
template <typename scalar_t>
__global__ void forward_rasterize_cuda_kernel(
    const scalar_t* __restrict__ vertices,
    int batch_size,
    int number_vertices,
    scalar_t* rasterized,
    int* contribution_map,
    int height,
    int width,
    float inv_smoothness,
    int mode) {
  // Soft-rasterization forward pass.  For each pixel, find the polygon edge
  // segment with the smallest squared point-to-segment distance, then map
  // that distance to an intensity according to `mode`:
  //   MODE_MASK      -> sigmoid of the signed distance; the sign comes from
  //                     the inside/outside value (-1/+1) already stored in
  //                     rasterized[i] by inside_outside_cuda_kernel.
  //   MODE_HARD_MASK -> binary 0/1 taken straight from the inside/outside map.
  //   otherwise      -> exp(-distance / inv_smoothness)  (boundary mode).
  // The index of the winning segment's first vertex is written to
  // contribution_map[i] so the backward pass only differentiates that segment.
  // vertices is flattened [batch, number_vertices, 2]; rasterized and
  // contribution_map are flattened [batch, height, width].
  // 1-D array of 1-D blocks.
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= batch_size * width * height) {
    return;
  }
  const int w = width;
  const int h = height;
  const int nv = number_vertices;
  // batch index.
  const int bi = i / (w * h);
  // pixel number (linear index)
  const int pn = i % (w * h);
  const int yp = pn / w;
  const int xp = pn % w;
  // go through each vertex.
  // at some point, we'll need to record
  // which segment contributed the most
  // for backwards pass.
  scalar_t max_contribution = -2147483647;
  int max_vertex_number = -1;
  for (int vn = 0; vn < nv; vn++) {
    int from_index;
    int to_index;
    scalar_t from_x;
    scalar_t from_y;
    scalar_t to_x;
    scalar_t to_y;
    scalar_t x2_sub_x1;
    scalar_t y2_sub_y1;
    scalar_t square_segment_length;
    scalar_t x_sub_x1;
    scalar_t y_sub_y1;
    scalar_t x_sub_x2;
    scalar_t y_sub_y2;
    scalar_t dot;
    scalar_t x_proj;
    scalar_t y_proj;
    scalar_t contribution;
    // grid_x, grid_y = xp, yp.
    from_index = vn;
    to_index = (vn + 1) % number_vertices;
    from_x = vertices[bi * (nv * 2) + from_index * 2];
    from_y = vertices[bi * (nv * 2) + from_index * 2 + 1];
    to_x = vertices[bi * (nv * 2) + to_index * 2];
    to_y = vertices[bi * (nv * 2) + to_index * 2 + 1];
    x2_sub_x1 = to_x - from_x;
    y2_sub_y1 = to_y - from_y;
    // +epsilon guards against division by zero for degenerate segments.
    square_segment_length = x2_sub_x1 * x2_sub_x1 + y2_sub_y1 * y2_sub_y1 + 0.00001;
    x_sub_x1 = xp - from_x;
    y_sub_y1 = yp - from_y;
    x_sub_x2 = xp - to_x;
    y_sub_y2 = yp - to_y;
    // dot = normalized projection of (pixel - from) onto the segment;
    // dot in [0,1] means the perpendicular foot lies on the segment.
    dot = ((x_sub_x1 * x2_sub_x1) + (y_sub_y1 * y2_sub_y1)) / square_segment_length;
    x_proj = xp - (from_x + dot * x2_sub_x1);
    y_proj = yp - (from_y + dot * y2_sub_y1);
    // Does it matter here to compute the squared distance or true Euclidean distance?
    if (dot < 0) {
      // Closest feature is the `from` endpoint.
      contribution = pow(x_sub_x1, 2) + pow(y_sub_y1, 2);
    }
    else if (dot > 1) {
      // Closest feature is the `to` endpoint.
      contribution = pow(x_sub_x2, 2) + pow(y_sub_y2, 2);
    }
    else {
      // Closest feature is the segment interior (perpendicular distance).
      contribution = pow(x_proj, 2) + pow(y_proj, 2);
    }
    // we need contribution to be a decreasing function.
    // if (mode == MODE_MASK) {
    //   // sign * -dist
    //   contribution = 1.0 / (1.0 + exp(-rasterized[i] * contribution / inv_smoothness));
    // }
    // else if (mode == MODE_HARD_MASK) {
    //   // map the inside outside map to 0 or 1.0.
    //   // technically, we don't need this preceeding loop.
    //   contribution = rasterized[i] < 0 ? 0.0 : 1.0;
    // }
    // else {
    //   contribution = exp(-contribution / inv_smoothness);
    // }
    contribution = -contribution;
    if (contribution > max_contribution) {
      max_contribution = contribution;
      max_vertex_number = vn;
    }
  }
  // max_contribution is now the negated squared distance to the nearest
  // segment; convert it to the final per-pixel intensity.
  if (mode == MODE_MASK) {
    // sign * -dist
    max_contribution = 1.0 / (1.0 + exp(rasterized[i] * max_contribution / inv_smoothness));
  }
  else if (mode == MODE_HARD_MASK) {
    // map the inside outside map to 0 or 1.0.
    // technically, we don't need this preceeding loop.
    max_contribution = rasterized[i] < 0 ? 0.0 : 1.0;
  }
  else {
    max_contribution = exp(max_contribution / inv_smoothness);
  }
  rasterized[i] = max_contribution;
  contribution_map[i] = max_vertex_number;
}
template <typename scalar_t>
__global__ void backward_rasterize_cuda_kernel(
    const scalar_t* __restrict__ vertices,
    const scalar_t* __restrict__ rasterized,
    const int* __restrict__ contribution_map,
    const scalar_t* __restrict__ grad_output,
    scalar_t* grad_vertices,
    int batch_size,
    int number_vertices,
    int width,
    int height,
    float inv_smoothness) {
  // Backward pass of the soft rasterizer.  One thread per pixel: each pixel
  // differentiates the forward intensity w.r.t. the two endpoints of the
  // single segment recorded in contribution_map[i], scales by the incoming
  // grad_output[i], and accumulates into grad_vertices [batch, nv, 2] with
  // atomicAdd (many pixels share the same segment, hence the atomics).
  // The gradient formulas assume the sigmoid (MODE_MASK-style) forward:
  // r = sigmoid(...), so d r / d dist involves r * (1 - r).
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= batch_size * width * height) {
    return;
  }
  const int w = width;
  const int h = height;
  const int nv = number_vertices;
  // batch index.
  const int bi = i / (w * h);
  // pixel number (linear index)
  const int pn = i % (w * h);
  const int yp = pn / w;
  const int xp = pn % w;
  // produce dR/dv.
  // since we use max over all vertices, we only need
  // to apply it to single vertex.
  int vn;
  int from_index;
  int to_index;
  scalar_t from_x;
  scalar_t from_y;
  scalar_t to_x;
  scalar_t to_y;
  scalar_t x2_sub_x1;
  scalar_t y2_sub_y1;
  scalar_t square_segment_length;
  scalar_t x_sub_x1;
  scalar_t y_sub_y1;
  scalar_t x_sub_x2;
  scalar_t y_sub_y2;
  scalar_t dot;
  scalar_t x_proj;
  scalar_t y_proj;
  scalar_t grad_x1 = 0.0;
  scalar_t grad_y1 = 0.0;
  scalar_t grad_x2 = 0.0;
  scalar_t grad_y2 = 0.0;
  // Recover the inside/outside sign from the forward sigmoid output
  // (>= 0.5 means the pixel was inside).
  scalar_t in_out = rasterized[i] >= 0.5 ? 1.0 : -1.0;
  vn = contribution_map[i];
  from_index = vn;
  to_index = (vn + 1) % nv;
  // determine how we computed the distance to this segment.
  from_x = vertices[bi * (nv * 2) + from_index * 2];
  from_y = vertices[bi * (nv * 2) + from_index * 2 + 1];
  to_x = vertices[bi * (nv * 2) + to_index * 2];
  to_y = vertices[bi * (nv * 2) + to_index * 2 + 1];
  x2_sub_x1 = to_x - from_x;
  y2_sub_y1 = to_y - from_y;
  // grad:
  // dX1 = 2 * x2_sub_x1 * -1
  // dX2 = 2 * x2_sub_x1
  // dY1 = 2 * y2_sub_y1 * -1
  // dY2 = 2 * y2_sub_y1
  // possible this could NaN?
  square_segment_length = x2_sub_x1 * x2_sub_x1 + y2_sub_y1 * y2_sub_y1 + 0.00001;
  x_sub_x1 = xp - from_x;
  y_sub_y1 = yp - from_y;
  x_sub_x2 = xp - to_x;
  y_sub_y2 = yp - to_y;
  // grad numer:
  // dX1 = -1 * x2_sub_x1 + -1 * x_sub_x1
  // dX2 = x_sub_x1
  scalar_t dot_num = ((x_sub_x1 * x2_sub_x1) + (y_sub_y1 * y2_sub_y1));
  dot = dot_num / square_segment_length;
  x_proj = xp - (from_x + dot * x2_sub_x1);
  y_proj = yp - (from_y + dot * y2_sub_y1);
  // Re-derive which distance branch the forward pass took (endpoint vs
  // interior) and differentiate that branch only.
  if (dot < 0) {
    // Nearest feature was the `from` endpoint:
    // dist^2 = (xp - from_x)^2 + (yp - from_y)^2.
    // grad_x1 = (rasterized[i] * 2 * x_sub_x1) / inv_smoothness;
    // grad_y1 = (rasterized[i] * 2 * y_sub_y1) / inv_smoothness;
    // grad_x1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * 2 * x_sub_x1 / inv_smoothness;
    // grad_y1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * 2 * y_sub_y1 / inv_smoothness;
    grad_x1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * x_sub_x1 / inv_smoothness;
    grad_y1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * y_sub_y1 / inv_smoothness;
  }
  else if (dot > 1) {
    // Nearest feature was the `to` endpoint:
    // dist^2 = (xp - to_x)^2 + (yp - to_y)^2.
    // grad_x2 = (rasterized[i] * 2 * x_sub_x2) / inv_smoothness;
    // grad_y2 = (rasterized[i] * 2 * y_sub_y2) / inv_smoothness;
    grad_x2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * x_sub_x2 / inv_smoothness;
    grad_y2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * -2 * y_sub_y2 / inv_smoothness;
  }
  else {
    // Interior case: differentiate the perpendicular projection residuals
    // (x_proj, y_proj) through `dot` via the quotient rule.
    scalar_t ss_x1 = -2.0 * x2_sub_x1;
    scalar_t ss_x2 = 2.0 * x2_sub_x1;
    scalar_t ss_y1 = -2.0 * y2_sub_y1;
    scalar_t ss_y2 = 2.0 * y2_sub_y1;
    // d(dot)/d{x1,x2,y1,y2}: quotient rule on dot_num / square_segment_length.
    scalar_t dot_x1 = (square_segment_length * (-x2_sub_x1 - x_sub_x1) - dot_num * ss_x1) / pow(square_segment_length, 2);
    scalar_t dot_x2 = (square_segment_length * x_sub_x1 - dot_num * ss_x2) / pow(square_segment_length, 2);
    scalar_t dot_y1 = (square_segment_length * (-y2_sub_y1 - y_sub_y1) - dot_num * ss_y1) / pow(square_segment_length, 2);
    scalar_t dot_y2 = (square_segment_length * y_sub_y1 - dot_num * ss_y2) / pow(square_segment_length, 2);
    // d/dx()
    scalar_t x_proj_x1 = -1 - dot_x1 * x2_sub_x1 + dot;
    scalar_t x_proj_x2 = -(dot_x2 * x2_sub_x1 + dot);
    scalar_t y_proj_y1 = -1 - dot_y1 * y2_sub_y1 + dot;
    scalar_t y_proj_y2 = -(dot_y2 * y2_sub_y1 + dot);
    // we also need mixed.
    scalar_t y_proj_x1 = -dot_x1 * y2_sub_y1;
    scalar_t y_proj_x2 = -dot_x2 * y2_sub_y1;
    scalar_t x_proj_y1 = -dot_y1 * x2_sub_x1;
    scalar_t x_proj_y2 = -dot_y2 * x2_sub_x1;
    // - as well?
    grad_x1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_x1 + 2.0 * y_proj * y_proj_x1) / inv_smoothness;
    grad_x2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_x2 + 2.0 * y_proj * y_proj_x2) / inv_smoothness;
    grad_y1 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_y1 + 2.0 * y_proj * y_proj_y1) / inv_smoothness;
    grad_y2 = in_out * rasterized[i] * (1.0 - rasterized[i]) * (2.0 * x_proj * x_proj_y2 + 2.0 * y_proj * y_proj_y2) / inv_smoothness;
    // grad_x1 = -rasterized[i] * (2.0 * x_proj * x_proj_x1 + 2.0 * y_proj * y_proj_x1) / inv_smoothness;
    // grad_x2 = -rasterized[i] * (2.0 * x_proj * x_proj_x2 + 2.0 * y_proj * y_proj_x2) / inv_smoothness;
    // grad_y1 = -rasterized[i] * (2.0 * x_proj * x_proj_y1 + 2.0 * y_proj * y_proj_y1) / inv_smoothness;
    // grad_y2 = -rasterized[i] * (2.0 * x_proj * x_proj_y2 + 2.0 * y_proj * y_proj_y2) / inv_smoothness;
  }
  // apply the input gradients.
  grad_x1 = grad_x1 * grad_output[i];
  grad_x2 = grad_x2 * grad_output[i];
  grad_y1 = grad_y1 * grad_output[i];
  grad_y2 = grad_y2 * grad_output[i];
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + from_index * 2] = grad_x1;
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + from_index * 2 + 1] = grad_y1;
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + to_index * 2] = grad_x2;
  // grad_vertices[bi * (height * width * nv * 2) + yp * (width * nv * 2) + xp * (nv * 2) + to_index * 2 + 1] = grad_y2;
  // Accumulate into the shared per-vertex gradient buffer; atomics are
  // required because many pixels map to the same segment endpoints.
  atomicAdd(grad_vertices + bi * nv * 2 + from_index * 2, grad_x1);
  atomicAdd(grad_vertices + bi * nv * 2 + from_index * 2 + 1, grad_y1);
  atomicAdd(grad_vertices + bi * nv * 2 + to_index * 2, grad_x2);
  atomicAdd(grad_vertices + bi * nv * 2 + to_index * 2 + 1, grad_y2);
}
std::vector<at::Tensor> forward_rasterize_cuda(
    at::Tensor vertices,
    at::Tensor rasterized,
    at::Tensor contribution_map,
    int width,
    int height,
    float inv_smoothness,
    int mode) {
  // Host launcher for the forward soft rasterizer.
  // Mask modes first run an inside/outside pass writing -1/+1 into
  // `rasterized`; the main kernel then converts distances to intensities.
  // NOTE(review): in MODE_HARD_MASK the second kernel is skipped, so
  // `rasterized` keeps -1/+1 (not 0/1) and `contribution_map` is untouched
  // -- confirm callers expect that.
  const auto batch_size = vertices.size(0);
  const auto number_vertices = vertices.size(1);
  const int threads = 512;
  // each block processes some 512 sized chunk of the output image.
  const dim3 blocks ((batch_size * width * height - 1) / threads + 1);
  if ((mode == MODE_MASK) || (mode == MODE_HARD_MASK)) {
    // determine whether each point is inside or outside.
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "inside_outside_cuda", ([&] {
      inside_outside_cuda_kernel<scalar_t><<<blocks, threads>>>(
        vertices.data<scalar_t>(),
        batch_size,
        number_vertices,
        rasterized.data<scalar_t>(),
        height,
        width);
  }));
  }
  if (mode != MODE_HARD_MASK) {
    AT_DISPATCH_FLOATING_TYPES(vertices.type(), "forward_rasterize_cuda", ([&] {
      forward_rasterize_cuda_kernel<scalar_t><<<blocks, threads>>>(
        vertices.data<scalar_t>(),
        batch_size,
        number_vertices,
        rasterized.data<scalar_t>(),
        contribution_map.data<int>(),
        height,
        width,
        inv_smoothness,
        mode);
  }));
  }
  // Fixed: cudaGetLastError() was previously called twice back-to-back.
  // The call both fetches AND clears the sticky error, so the second call
  // (the one that was checked) always reported success and launch errors
  // were silently swallowed.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("Error in forward_rasterize: %s\n", cudaGetErrorString(err));
  return { rasterized, contribution_map };
}
at::Tensor backward_rasterize_cuda(
    at::Tensor vertices,
    at::Tensor rasterized,
    at::Tensor contribution_map,
    at::Tensor grad_output,
    at::Tensor grad_vertices,
    int width,
    int height,
    float inv_smoothness,
    int mode) {
  // Host launcher for the backward pass: one thread per pixel scatters its
  // gradient contribution into grad_vertices via the backward kernel.
  const auto n_batch = vertices.size(0);
  const auto n_verts = vertices.size(1);
  const int threads_per_block = 512;
  const auto n_pixels = n_batch * width * height;
  const dim3 grid_dim((n_pixels - 1) / threads_per_block + 1);
  AT_DISPATCH_FLOATING_TYPES(vertices.type(), "backward_rasterize_cuda", ([&] {
    backward_rasterize_cuda_kernel<scalar_t><<<grid_dim, threads_per_block>>>(
      vertices.data<scalar_t>(),
      rasterized.data<scalar_t>(),
      contribution_map.data<int>(),
      grad_output.data<scalar_t>(),
      grad_vertices.data<scalar_t>(),
      n_batch,
      n_verts,
      width,
      height,
      inv_smoothness);
  }));
  // Surface any launch-configuration error from the kernel above.
  cudaError_t launch_err = cudaGetLastError();
  if (launch_err != cudaSuccess)
    printf("Error in backward_rasterize: %s\n", cudaGetErrorString(launch_err));
  return grad_vertices;
}
|
a718749a6f2fc5710c5c30fecb118c8df246a2f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ftw_gfg2fvi.cu */
// IN: A pointer to a ***non-replicated*** polymer configuration as *ftw_GFG65536.
// OUT: A free volume intensity is returned as *ftw_FVI256.
// Input configuration is not modified.
// Cross-interaction values are stored for the replicated config only.
#include <ftw_gfg2fvi.h>
#include <ftw_config_parser.h>
#include <ftw_types.h>
#include <stdlib.h>
#include <math.h>
// I took the kernel prototype out of the header file, because the header is included by C/C++ compilers that don't know what a kernel is...
// NOTE: this uses COMPASS / LJ 6-9 potential
__global__ void EnergyKernel256(ftw_GFG65536 *d_configuration, ftw_EnergyArray256 *d_attraction, ftw_EnergyArray256 *d_repulsion, ftw_EnergyArray256 *d_total)
{
	// Evaluates the COMPASS / LJ 6-9 potential at every point of a 256^3 grid
	// spanning the configuration box.  Grid (x, y) come from the block index,
	// z from the thread index (launch presumably 256x256 blocks of 256 threads
	// -- confirm at the call site).
	// If NULL is passed for d_attraction / d_repulsion / d_total, that output
	// is skipped.
	unsigned int idx = blockIdx.x;
	unsigned int idy = blockIdx.y;
	unsigned int idz = threadIdx.x;
	float repulsion=0;
	float attraction=0;
	float alpha;
	float dx, dy, dz, d, dd;
	float f_resolution_x = d_configuration->box_x / 256;
	float f_resolution_y = d_configuration->box_y / 256;
	float f_resolution_z = d_configuration->box_z / 256;
	float cuda_x = idx * f_resolution_x;
	float cuda_y = idy * f_resolution_y;
	float cuda_z = idz * f_resolution_z;
	// evaluate energy at (cuda_x, cuda_y, cuda_z);
	for (int i=0; i< d_configuration->n_atoms; i++) {
		// central atom
		dx = d_configuration->atom[i].x - cuda_x;
		dy = d_configuration->atom[i].y - cuda_y;
		dz = d_configuration->atom[i].z - cuda_z;
		dd = dx*dx + dy*dy + dz*dz;
		d = sqrtf(dd);
		// Fixed: pow(float, 3) routed through the double-precision pow path;
		// alpha = (sigma / r)^3 is now computed with single-precision
		// multiplies.
		float sigma = d_configuration->atom[i].sigma;
		alpha = (sigma * sigma * sigma) / (d * dd);
		// LJ 6-9: repulsive ~ alpha^3 = (sigma/r)^9, attractive ~ alpha^2 = (sigma/r)^6.
		repulsion += d_configuration->atom[i].epsilon * alpha * alpha * alpha;
		attraction += d_configuration->atom[i].epsilon * alpha * alpha;
	}
	// If NULL pointers are passed for the attraction or repulsion, no values are returned.
	if (d_attraction) d_attraction->energy[idx][idy][idz] = 3 * attraction;
	if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 2 * repulsion;
	if (d_total) d_total->energy[idx][idy][idz] = 2 * repulsion - 3 * attraction;
}
__global__ void EnergyKernel256_612(ftw_GFG65536 *d_configuration, ftw_EnergyArray256 *d_attraction, ftw_EnergyArray256 *d_repulsion, ftw_EnergyArray256 *d_total)
{
	// Evaluates the classic LJ 6-12 potential at every point of a 256^3 grid
	// spanning the configuration box.  Grid (x, y) come from the block index,
	// z from the thread index.  Any NULL output pointer is skipped.
	const unsigned int ix = blockIdx.x;
	const unsigned int iy = blockIdx.y;
	const unsigned int iz = threadIdx.x;
	// Voxel spacing along each axis and this thread's sample point.
	const float step_x = d_configuration->box_x / 256;
	const float step_y = d_configuration->box_y / 256;
	const float step_z = d_configuration->box_z / 256;
	const float px = ix * step_x;
	const float py = iy * step_y;
	const float pz = iz * step_z;
	float repulsion = 0;
	float attraction = 0;
	// Accumulate the pairwise contribution of every atom at (px, py, pz).
	for (int i = 0; i < d_configuration->n_atoms; i++) {
		const float dx = d_configuration->atom[i].x - px;
		const float dy = d_configuration->atom[i].y - py;
		const float dz = d_configuration->atom[i].z - pz;
		const float dd = dx*dx + dy*dy + dz*dz;
		// (sigma / r)^2; powers below build the r^-12 and r^-6 terms.
		const float sigma_over_r_sq = d_configuration->atom[i].sigma * d_configuration->atom[i].sigma / dd;
		repulsion += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
		attraction += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
	}
	// Standard LJ prefactor of 4 on both terms; NULL outputs are skipped.
	if (d_attraction) d_attraction->energy[ix][iy][iz] = 4 * attraction;
	if (d_repulsion) d_repulsion->energy[ix][iy][iz] = 4 * repulsion;
	if (d_total) d_total->energy[ix][iy][iz] = 4 * repulsion - 4 * attraction;
}
__global__ void EnergyKernel512_612(ftw_GFG65536 *d_configuration, ftw_EnergyArray512 *d_attraction, ftw_EnergyArray512 *d_repulsion, ftw_EnergyArray512 *d_total)
{
	// LJ 6-12 energy evaluation into 512-sized output arrays.
	// NOTE(review): the grid spacing below still divides the box by 256, not
	// 512, although the outputs are ftw_EnergyArray512 and the name says 512.
	// If the launch uses a 512^3 grid, the sampled points span TWICE the box.
	// Looks like a copy-paste from EnergyKernel256_612 -- confirm intent.
	unsigned int idx = blockIdx.x;
	unsigned int idy = blockIdx.y;
	unsigned int idz = threadIdx.x;
	float repulsion=0;
	float attraction=0;
	float sigma_over_r_sq;
	float dx, dy, dz, dd;
	float f_resolution_x = d_configuration->box_x / 256;
	float f_resolution_y = d_configuration->box_y / 256;
	float f_resolution_z = d_configuration->box_z / 256;
	float cuda_x = idx * f_resolution_x;
	float cuda_y = idy * f_resolution_y;
	float cuda_z = idz * f_resolution_z;
	// evaluate energy at (cuda_x, cuda_y, cuda_z);
	for (int i=0; i< d_configuration->n_atoms; i++) {
		// central atom
		dx = d_configuration->atom[i].x - cuda_x;
		dy = d_configuration->atom[i].y - cuda_y;
		dz = d_configuration->atom[i].z - cuda_z;
		dd = dx*dx + dy*dy + dz*dz;
		sigma_over_r_sq = d_configuration->atom[i].sigma * d_configuration->atom[i].sigma / dd; // squared
		repulsion += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
		attraction += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
	}
	// If NULL pointers are passed for the attraction or repulsion, no values are returned.
	if (d_attraction) d_attraction->energy[idx][idy][idz] = 4 * attraction;
	if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 4 * repulsion;
	if (d_total) d_total->energy[idx][idy][idz] = 4 * repulsion - 4 * attraction;
}
// This is the routine to call from outside the library.
// Converts a configuration to a 256^3 free-volume intensity: computes the
// repulsive energy field, then maps each voxel through a Boltzmann-like factor.
// Caller owns the returned heap-allocated structure.
extern "C" ftw_FVI256 *GFGToFVI256(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    // call energy array then process each val
    ftw_EnergyArray256 *era = GFGToRepulsion256(gfg, sigma, epsilon);
    ftw_FVI256 *fvi = (ftw_FVI256*)malloc(sizeof(ftw_FVI256));
    // now process each value...
    for (int i=0; i<256; i++) for (int j=0; j<256; j++) for (int k=0; k<256; k++)
        fvi->intensity[i][j][k] = exp(era->energy[i][j][k]/-298000); // this is arbitrary... should be clarified
    // BUG FIX: the intermediate energy array was leaked (256^3 floats, ~64 MB
    // per call); it is owned by this function and must be released.
    free(era);
    return fvi;
}
// This routine to be called from outside the library.
// Computes the 6-12 repulsion field of gfg on a 256^3 grid. Cross-parameterizes
// each atom against (sigma, epsilon) with arithmetic-mean sigma mixing and
// geometric-mean epsilon mixing, then launches EnergyKernel256_612.
// Caller owns the returned heap array; the input gfg is not modified.
extern "C" ftw_EnergyArray256 *GFGToRepulsion256_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_EnergyArray256 *d_repulsion;
    ftw_GFG65536 *d_configuration;
    // replicate the gfg so the caller's configuration is untouched
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg);
    // and cross-parameterize use 6-12 rule
    for (int n=0; n<gfg->n_atoms; n++)
    {
        h_configuration->atom[n].sigma = 0.5f * (sigma + h_configuration->atom[n].sigma);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    hipError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_repulsion, sizeof(ftw_EnergyArray256)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    // BUG FIX: this debug print previously ran BEFORE the pointers were
    // assigned (reading uninitialized locals is undefined behavior) and used
    // %ld for pointer values; moved after allocation and printed with %p.
    fprintf(stderr, "pointers: %p\t%p\n", (void *)d_repulsion, (void *)d_configuration);
    fflush(stderr);
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), hipMemcpyHostToDevice ));
    // one block per (x,y) column, one thread per z sample
    dim3 dimGrid(256, 256);
    dim3 dimBlock(256, 1, 1);
    hipLaunchKernelGGL(( EnergyKernel256_612), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_configuration, NULL, d_repulsion, NULL);
    hipDeviceSynchronize(); // block until the device has completed
    err = hipGetLastError();
    if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));
    // retrieve result
    ftw_EnergyArray256 *h_repulsion = (ftw_EnergyArray256 *)malloc(sizeof(ftw_EnergyArray256));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy(h_repulsion, d_repulsion, sizeof(ftw_EnergyArray256), hipMemcpyDeviceToHost ));
    // free device memory
    hipFree(d_configuration);
    hipFree(d_repulsion);
    free(h_configuration); // free host memory for replicated configuration
    return h_repulsion;
}
// This routine to be called from outside the library.
// Computes the 6-12 repulsion field of gfg on a 512^3 grid via
// EnergyKernel512_612. Caller owns the returned heap array; gfg is unmodified.
extern "C" ftw_EnergyArray512 *GFGToRepulsion512_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_EnergyArray512 *d_repulsion;
    ftw_GFG65536 *d_configuration;
    // replicate the gfg so the caller's configuration is untouched
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg);
    // and cross-parameterize use 6-12 rule
    for (int n=0; n<gfg->n_atoms; n++)
    {
        h_configuration->atom[n].sigma = 0.5f * (sigma + h_configuration->atom[n].sigma);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    hipError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    // BUG FIX: the device buffer was allocated with sizeof(ftw_EnergyArray256)
    // but written as a 512^3 array by the kernel and read back with
    // sizeof(ftw_EnergyArray512) below -- a device buffer overflow.
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_repulsion, sizeof(ftw_EnergyArray512)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), hipMemcpyHostToDevice ));
    // one block per (x,y) column, one thread per z sample
    dim3 dimGrid(512, 512);
    dim3 dimBlock(512, 1, 1);
    hipLaunchKernelGGL(( EnergyKernel512_612), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_configuration, NULL, d_repulsion, NULL);
    hipDeviceSynchronize(); // block until the device has completed
    err = hipGetLastError();
    if (err != hipSuccess) printf("%s\n", hipGetErrorString(err));
    // retrieve result
    ftw_EnergyArray512 *h_repulsion = (ftw_EnergyArray512 *)malloc(sizeof(ftw_EnergyArray512));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy(h_repulsion, d_repulsion, sizeof(ftw_EnergyArray512), hipMemcpyDeviceToHost ));
    // free device memory
    hipFree(d_configuration);
    hipFree(d_repulsion);
    free(h_configuration); // free host memory for replicated configuration
    return h_repulsion;
}
// This routine to be called from outside the library.
// Computes the COMPASS-style repulsion field of gfg on a 256^3 grid via
// EnergyKernel256. Atoms are cross-parameterized against (sigma, epsilon)
// using sixth-power sigma mixing and geometric-mean epsilon mixing.
// Caller owns the returned heap array; the input gfg is not modified.
extern "C" ftw_EnergyArray256 *GFGToRepulsion256(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    // work on a replicated copy so the caller's configuration stays untouched
    ftw_GFG65536 *replica = replicateGFG65536(gfg);
    for (int n = 0; n < gfg->n_atoms; n++)
    {
        replica->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(replica->atom[n].sigma, 6)), 0.1666666f);
        replica->atom[n].epsilon = sqrt(epsilon * replica->atom[n].epsilon);
    }
    ftw_EnergyArray256 *dev_repulsion;
    ftw_GFG65536 *dev_config;
    hipError_t status;
    /* device allocation and upload, retried until they succeed (file convention) */
    do { status = hipMalloc( (void **) &dev_repulsion, sizeof(ftw_EnergyArray256)); } while (status != hipSuccess);
    do { status = hipMalloc( (void **) &dev_config, sizeof(ftw_GFG65536)); } while (status != hipSuccess);
    do { status = hipMemcpy( dev_config, replica, sizeof(ftw_GFG65536), hipMemcpyHostToDevice ); } while (status != hipSuccess);
    // one block per (x,y) column, one thread per z sample
    dim3 launch_grid(256, 256);
    dim3 launch_block(256, 1, 1);
    hipLaunchKernelGGL(( EnergyKernel256), dim3(launch_grid), dim3(launch_block) , 0, 0, dev_config, NULL, dev_repulsion, NULL);
    hipDeviceSynchronize(); // block until the device has completed
    status = hipGetLastError();
    if (status != hipSuccess) printf("%s\n", hipGetErrorString(status));
    // pull the repulsion field back to a host buffer the caller will own
    ftw_EnergyArray256 *result = (ftw_EnergyArray256 *)malloc(sizeof(ftw_EnergyArray256));
    do { status = hipMemcpy(result, dev_repulsion, sizeof(ftw_EnergyArray256), hipMemcpyDeviceToHost ); } while (status != hipSuccess);
    // release device buffers and the host-side replica
    hipFree(dev_config);
    hipFree(dev_repulsion);
    free(replica);
    return result;
}
// This routine to be called from outside the library.
// Computes the TOTAL (repulsion minus attraction) COMPASS-style energy field
// of gfg on a 256^3 grid via EnergyKernel256, using sixth-power sigma mixing
// and geometric-mean epsilon mixing. Caller owns the returned heap array;
// the input gfg is not modified.
extern "C" ftw_EnergyArray256 *GFGToEnergyArray256(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    // work on a replicated copy so the caller's configuration stays untouched
    ftw_GFG65536 *replica = replicateGFG65536(gfg);
    for (int n = 0; n < gfg->n_atoms; n++)
    {
        replica->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(replica->atom[n].sigma, 6)), 0.1666666f);
        replica->atom[n].epsilon = sqrt(epsilon * replica->atom[n].epsilon);
    }
    ftw_EnergyArray256 *dev_total;
    ftw_GFG65536 *dev_config;
    hipError_t status;
    /* device allocation and upload, retried until they succeed (file convention) */
    do { status = hipMalloc( (void **) &dev_total, sizeof(ftw_EnergyArray256)); } while (status != hipSuccess);
    do { status = hipMalloc( (void **) &dev_config, sizeof(ftw_GFG65536)); } while (status != hipSuccess);
    do { status = hipMemcpy( dev_config, replica, sizeof(ftw_GFG65536), hipMemcpyHostToDevice ); } while (status != hipSuccess);
    // one block per (x,y) column, one thread per z sample; only the
    // "total" output slot of the kernel is requested here
    dim3 launch_grid(256, 256);
    dim3 launch_block(256, 1, 1);
    hipLaunchKernelGGL(( EnergyKernel256), dim3(launch_grid), dim3(launch_block) , 0, 0, dev_config, NULL, NULL, dev_total);
    hipDeviceSynchronize(); // block until the device has completed
    status = hipGetLastError();
    if (status != hipSuccess) printf("%s\n", hipGetErrorString(status));
    // pull the energy field back to a host buffer the caller will own
    ftw_EnergyArray256 *result = (ftw_EnergyArray256 *)malloc(sizeof(ftw_EnergyArray256));
    do { status = hipMemcpy(result, dev_total, sizeof(ftw_EnergyArray256), hipMemcpyDeviceToHost ); } while (status != hipSuccess);
    // release device buffers and the host-side replica
    hipFree(dev_config);
    hipFree(dev_total);
    free(replica);
    return result;
}
// This is for a traditional LJ 6-12 interaction. Note that sigma is the value where energy is zero, not the well-bottom as for COMPASS...
// This operates on 'chunks' in x-direction because the domain is too large for the GPU memory
// Launch layout expected: grid (1024,1024), block (chunk_size,1,1), so
// (threadIdx.x, blockIdx.x, blockIdx.y) maps to (x-within-chunk, y, z);
// the absolute x index is chunk*chunk_size + threadIdx.x.
// Each call fills one ftw_Chunk slab (indexed by the local x); the host
// caller stitches the chunks together. Writes only the non-NULL outputs.
__global__ void EnergyKernel1024_612( ftw_GFG65536 *d_configuration,
ftw_Chunk *d_attraction,
ftw_Chunk *d_repulsion,
ftw_Chunk *d_total,
int chunk, int chunk_size) {
// x is the thread index (local to this chunk); y and z come from the grid
unsigned int idx = threadIdx.x;
unsigned int idy = blockIdx.x;
unsigned int idz = blockIdx.y;
float repulsion=0;
float attraction=0;
float sigma_over_r_sq;
float dx, dy, dz, dd;
// voxel edge lengths: the box is divided into 1024 steps per dimension
float f_resolution_x = d_configuration->box_x / 1024;
float f_resolution_y = d_configuration->box_y / 1024;
float f_resolution_z = d_configuration->box_z / 1024;
float cuda_x = (chunk * chunk_size + idx ) * f_resolution_x;
float cuda_y = idy * f_resolution_y;
float cuda_z = idz * f_resolution_z;
// evaluate energy at (cuda_x, cuda_y, cuda_z);
// no cutoff / periodic imaging; dd == 0 would divide by zero -- presumably
// excluded upstream (TODO confirm)
for (int i=0; i< d_configuration->n_atoms; i++) {
// central atom
dx = d_configuration->atom[i].x - cuda_x;
dy = d_configuration->atom[i].y - cuda_y;
dz = d_configuration->atom[i].z - cuda_z;
dd = dx*dx + dy*dy + dz*dz;
sigma_over_r_sq = d_configuration->atom[i].sigma * d_configuration->atom[i].sigma / dd;
repulsion += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
attraction += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
}
// If NULL pointers are passed, then no values are recorded.
if (d_attraction) d_attraction->energy[idx][idy][idz] = 4 * attraction;
if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 4 * repulsion;
if (d_total) d_total->energy[idx][idy][idz] = 4 * repulsion - 4 * attraction;
}
// This routine to be called from outside the library.
// Computes the TOTAL LJ 6-12 energy field of gfg on a 1024^3 grid. The x
// dimension is processed in 4 chunks of 256 planes because a full
// ftw_EnergyArray1024 does not fit in GPU memory; each chunk is copied into
// its slot of the host result. Caller owns the returned heap array.
extern "C" ftw_EnergyArray1024 *GFGToEnergyArray1024_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_Chunk *d_energy_array_chunk;
    ftw_GFG65536 *d_configuration;
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg); // replicate the gfg
    for (int n=0; n<gfg->n_atoms; n++) // and cross-parameterize
    {
        h_configuration->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(h_configuration->atom[n].sigma, 6)), 0.1666666f);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    // (x,y,z) is (blockx, gridx, gridy)... chunking to 4 parts in x, then will combine results
    int chunk_size = 256, chunks = 4;
    dim3 dimGrid(1024, 1024);
    dim3 dimBlock(chunk_size, 1, 1);
    hipError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_energy_array_chunk, sizeof(ftw_Chunk)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), hipMemcpyHostToDevice ));
    ftw_EnergyArray1024 *h_energy_array = (ftw_EnergyArray1024 *)malloc(sizeof(ftw_EnergyArray1024)); // host structure, for result
    for (int chunk=0; chunk < chunks; chunk++)
    {
        hipLaunchKernelGGL(( EnergyKernel1024_612), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_configuration, NULL, NULL, d_energy_array_chunk, chunk, chunk_size);
        hipDeviceSynchronize(); // block until the device has completed
        err = hipGetLastError();
        if (err != hipSuccess) {printf("CUDA error: %s\n", hipGetErrorString(err)); exit(1);}
        // retrieve result into this chunk's slot of the host array.
        // FIX: byte offset computed via char* instead of casting through
        // (long), which truncates on LLP64 platforms where long is 32-bit.
        ftw_EnergyArray1024* h_address = (ftw_EnergyArray1024*)((char*)h_energy_array + sizeof(ftw_Chunk) * (size_t)chunk);
        for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy(h_address, d_energy_array_chunk, sizeof(ftw_Chunk), hipMemcpyDeviceToHost ));
    }
    // free device memory
    hipFree(d_configuration);
    hipFree(d_energy_array_chunk);
    free(h_configuration); // free host memory for replicated configuration
    return h_energy_array;
}
// This routine to be called from outside the library.
// Computes the LJ 6-12 REPULSION field of gfg on a 1024^3 grid, chunked into
// 4 slabs along x (a full ftw_EnergyArray1024 does not fit in GPU memory).
// Caller owns the returned heap array; the input gfg is not modified.
extern "C" ftw_EnergyArray1024 *GFGToRepulsion1024_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_Chunk *d_repulsion_chunk;
    ftw_GFG65536 *d_configuration;
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg); // replicate the gfg
    for (int n=0; n<gfg->n_atoms; n++) // and cross-parameterize
    {
        h_configuration->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(h_configuration->atom[n].sigma, 6)), 0.1666666f);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    // (x,y,z) is (blockx, gridx, gridy)... chunking to 4 parts in x, then will combine results
    int chunk_size = 256, chunks = 4;
    dim3 dimGrid(1024, 1024);
    dim3 dimBlock(chunk_size, 1, 1);
    hipError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_repulsion_chunk, sizeof(ftw_Chunk)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), hipMemcpyHostToDevice ));
    ftw_EnergyArray1024 *h_repulsion = (ftw_EnergyArray1024 *)malloc(sizeof(ftw_EnergyArray1024)); // host structure, for result
    for (int chunk=0; chunk < chunks; chunk++)
    {
        hipLaunchKernelGGL(( EnergyKernel1024_612), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_configuration, NULL, d_repulsion_chunk, NULL, chunk, chunk_size);
        hipDeviceSynchronize(); // block until the device has completed
        err = hipGetLastError();
        if (err != hipSuccess) {printf("CUDA error: %s\n", hipGetErrorString(err)); exit(1);}
        // retrieve result into this chunk's slot of the host array.
        // FIX: byte offset computed via char* instead of casting through
        // (long), which truncates on LLP64 platforms where long is 32-bit.
        ftw_EnergyArray1024* h_address = (ftw_EnergyArray1024*)((char*)h_repulsion + sizeof(ftw_Chunk) * (size_t)chunk);
        for(err = hipErrorUnknown; err != hipSuccess; err = hipMemcpy(h_address, d_repulsion_chunk, sizeof(ftw_Chunk), hipMemcpyDeviceToHost ));
    }
    // free device memory
    hipFree(d_configuration);
    hipFree(d_repulsion_chunk);
    free(h_configuration); // free host memory for replicated configuration
    return h_repulsion;
}
| a718749a6f2fc5710c5c30fecb118c8df246a2f5.cu | /* ftw_gfg2fvi.cu */
// IN: A pointer to a ***non-replicated*** polymer configuration as *ftw_GFG65536.
// OUT: A free volume intensity is returned as *ftw_FVI256.
// Input configuration is not modified.
// Cross-interaction values are stored for the replicated config only.
#include <ftw_gfg2fvi.h>
#include <ftw_config_parser.h>
#include <ftw_types.h>
#include <stdlib.h>
#include <math.h>
// I took the kernel prototype out of the header file, because the header is included by C/C++ compilers that don't know what a kernel is...
// NOTE: this uses COMPASS / LJ 6-9 potential
// COMPASS-style LJ 9-6 energy sampler on a 256^3 grid, one grid point per
// thread. Launch layout expected: grid (256,256), block (256,1,1), so
// (blockIdx.x, blockIdx.y, threadIdx.x) selects one voxel (x,y,z).
// With alpha = (sigma/r)^3, accumulates eps*alpha^3 = eps*(sigma/r)^9
// (repulsion) and eps*alpha^2 = eps*(sigma/r)^6 (attraction); the total is
// the 9-6 form 2*repulsion - 3*attraction. Writes only non-NULL outputs.
__global__ void EnergyKernel256(ftw_GFG65536 *d_configuration, ftw_EnergyArray256 *d_attraction, ftw_EnergyArray256 *d_repulsion, ftw_EnergyArray256 *d_total)
{
unsigned int idx = blockIdx.x;
unsigned int idy = blockIdx.y;
unsigned int idz = threadIdx.x;
float repulsion=0;
float attraction=0;
float alpha;
float dx, dy, dz, d, dd;
// voxel edge lengths: the box is divided into 256 steps per dimension
float f_resolution_x = d_configuration->box_x / 256;
float f_resolution_y = d_configuration->box_y / 256;
float f_resolution_z = d_configuration->box_z / 256;
// Cartesian coordinates of this thread's sample point
float cuda_x = idx * f_resolution_x;
float cuda_y = idy * f_resolution_y;
float cuda_z = idz * f_resolution_z;
// evaluate energy at (cuda_x, cuda_y, cuda_z);
// no cutoff / periodic imaging; dd == 0 would divide by zero -- presumably
// excluded upstream (TODO confirm). NOTE(review): pow() here is the double
// overload; sigma^3 could be computed with float multiplies if profiling
// shows it matters.
for (int i=0; i< d_configuration->n_atoms; i++) {
// central atom
dx = d_configuration->atom[i].x - cuda_x;
dy = d_configuration->atom[i].y - cuda_y;
dz = d_configuration->atom[i].z - cuda_z;
dd = dx*dx + dy*dy + dz*dz;	d = sqrt(dd);
alpha = pow(d_configuration->atom[i].sigma, 3) / (d * dd);
repulsion += d_configuration->atom[i].epsilon * alpha * alpha * alpha;
attraction += d_configuration->atom[i].epsilon * alpha * alpha;
}
// If NULL pointers are passed for the attraction or repulsion, no values are returned.
if (d_attraction) d_attraction->energy[idx][idy][idz] = 3 * attraction;
if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 2 * repulsion;
if (d_total) d_total->energy[idx][idy][idz] = 2 * repulsion - 3 * attraction;
}
// LJ 6-12 energy sampler on a 256^3 grid, one grid point per thread.
// Launch layout expected: grid (256,256), block (256,1,1), so
// (blockIdx.x, blockIdx.y, threadIdx.x) selects one voxel (x,y,z).
// Accumulates 4*eps*Sum(sigma/r)^12 (repulsion) and 4*eps*Sum(sigma/r)^6
// (attraction) over all atoms; writes only into the non-NULL output arrays.
__global__ void EnergyKernel256_612(ftw_GFG65536 *d_configuration, ftw_EnergyArray256 *d_attraction, ftw_EnergyArray256 *d_repulsion, ftw_EnergyArray256 *d_total)
{
// map this thread onto one voxel of the 256^3 sampling grid
unsigned int idx = blockIdx.x;
unsigned int idy = blockIdx.y;
unsigned int idz = threadIdx.x;
float repulsion=0;
float attraction=0;
float sigma_over_r_sq;
float dx, dy, dz, dd;
// voxel edge lengths: the box is divided into 256 steps per dimension
float f_resolution_x = d_configuration->box_x / 256;
float f_resolution_y = d_configuration->box_y / 256;
float f_resolution_z = d_configuration->box_z / 256;
// Cartesian coordinates of this thread's sample point
float cuda_x = idx * f_resolution_x;
float cuda_y = idy * f_resolution_y;
float cuda_z = idz * f_resolution_z;
// evaluate energy at (cuda_x, cuda_y, cuda_z);
// no cutoff / periodic imaging; dd == 0 would divide by zero -- presumably
// excluded upstream (TODO confirm)
for (int i=0; i< d_configuration->n_atoms; i++) {
// central atom
dx = d_configuration->atom[i].x - cuda_x;
dy = d_configuration->atom[i].y - cuda_y;
dz = d_configuration->atom[i].z - cuda_z;
dd = dx*dx + dy*dy + dz*dz;
sigma_over_r_sq = d_configuration->atom[i].sigma * d_configuration->atom[i].sigma / dd; // squared
// (sigma/r)^12 and (sigma/r)^6 built by repeated multiplication of (sigma/r)^2
repulsion += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
attraction += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
}
// If NULL pointers are passed for the attraction or repulsion, no values are returned.
if (d_attraction) d_attraction->energy[idx][idy][idz] = 4 * attraction;
if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 4 * repulsion;
if (d_total) d_total->energy[idx][idy][idz] = 4 * repulsion - 4 * attraction;
}
// LJ 6-12 energy sampler on a 512^3 grid, one grid point per thread.
// Launch layout expected: grid (512,512), block (512,1,1), so
// (blockIdx.x, blockIdx.y, threadIdx.x) selects one voxel (x,y,z).
// Writes only into the non-NULL output arrays.
__global__ void EnergyKernel512_612(ftw_GFG65536 *d_configuration, ftw_EnergyArray512 *d_attraction, ftw_EnergyArray512 *d_repulsion, ftw_EnergyArray512 *d_total)
{
    unsigned int idx = blockIdx.x;
    unsigned int idy = blockIdx.y;
    unsigned int idz = threadIdx.x;
    float repulsion=0;
    float attraction=0;
    float sigma_over_r_sq;
    float dx, dy, dz, dd;
    // BUG FIX: the resolution must divide the box into 512 steps for a 512^3
    // grid. The previous "/ 256" was copy-pasted from the 256 kernel and made
    // this kernel sample a domain twice the box extent (compare
    // EnergyKernel1024_612, which correctly divides by 1024).
    float f_resolution_x = d_configuration->box_x / 512;
    float f_resolution_y = d_configuration->box_y / 512;
    float f_resolution_z = d_configuration->box_z / 512;
    float cuda_x = idx * f_resolution_x;
    float cuda_y = idy * f_resolution_y;
    float cuda_z = idz * f_resolution_z;
    // evaluate energy at (cuda_x, cuda_y, cuda_z);
    for (int i=0; i< d_configuration->n_atoms; i++) {
        // contribution of atom i (no cutoff, no periodic imaging; dd == 0
        // would divide by zero -- presumably excluded upstream)
        dx = d_configuration->atom[i].x - cuda_x;
        dy = d_configuration->atom[i].y - cuda_y;
        dz = d_configuration->atom[i].z - cuda_z;
        dd = dx*dx + dy*dy + dz*dz;
        sigma_over_r_sq = d_configuration->atom[i].sigma * d_configuration->atom[i].sigma / dd; // squared
        repulsion += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
        attraction += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
    }
    // If NULL pointers are passed for the attraction or repulsion, no values are returned.
    if (d_attraction) d_attraction->energy[idx][idy][idz] = 4 * attraction;
    if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 4 * repulsion;
    if (d_total) d_total->energy[idx][idy][idz] = 4 * repulsion - 4 * attraction;
}
// This is the routine to call from outside the library.
// Converts a configuration to a 256^3 free-volume intensity: computes the
// repulsive energy field, then maps each voxel through a Boltzmann-like factor.
// Caller owns the returned heap-allocated structure.
extern "C" ftw_FVI256 *GFGToFVI256(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    // call energy array then process each val
    ftw_EnergyArray256 *era = GFGToRepulsion256(gfg, sigma, epsilon);
    ftw_FVI256 *fvi = (ftw_FVI256*)malloc(sizeof(ftw_FVI256));
    // now process each value...
    for (int i=0; i<256; i++) for (int j=0; j<256; j++) for (int k=0; k<256; k++)
        fvi->intensity[i][j][k] = exp(era->energy[i][j][k]/-298000); // this is arbitrary... should be clarified
    // BUG FIX: the intermediate energy array was leaked (256^3 floats, ~64 MB
    // per call); it is owned by this function and must be released.
    free(era);
    return fvi;
}
// This routine to be called from outside the library.
// Computes the 6-12 repulsion field of gfg on a 256^3 grid. Cross-parameterizes
// each atom against (sigma, epsilon) with arithmetic-mean sigma mixing and
// geometric-mean epsilon mixing, then launches EnergyKernel256_612.
// Caller owns the returned heap array; the input gfg is not modified.
extern "C" ftw_EnergyArray256 *GFGToRepulsion256_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_EnergyArray256 *d_repulsion;
    ftw_GFG65536 *d_configuration;
    // replicate the gfg so the caller's configuration is untouched
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg);
    // and cross-parameterize use 6-12 rule
    for (int n=0; n<gfg->n_atoms; n++)
    {
        h_configuration->atom[n].sigma = 0.5f * (sigma + h_configuration->atom[n].sigma);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    cudaError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_repulsion, sizeof(ftw_EnergyArray256)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    // BUG FIX: this debug print previously ran BEFORE the pointers were
    // assigned (reading uninitialized locals is undefined behavior) and used
    // %ld for pointer values; moved after allocation and printed with %p.
    fprintf(stderr, "pointers: %p\t%p\n", (void *)d_repulsion, (void *)d_configuration);
    fflush(stderr);
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), cudaMemcpyHostToDevice ));
    // one block per (x,y) column, one thread per z sample
    dim3 dimGrid(256, 256);
    dim3 dimBlock(256, 1, 1);
    EnergyKernel256_612<<< dimGrid, dimBlock >>>(d_configuration, NULL, d_repulsion, NULL);
    // FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12);
    // cudaDeviceSynchronize() is the drop-in replacement.
    cudaDeviceSynchronize(); // block until the device has completed
    err = cudaGetLastError();
    if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
    // retrieve result
    ftw_EnergyArray256 *h_repulsion = (ftw_EnergyArray256 *)malloc(sizeof(ftw_EnergyArray256));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy(h_repulsion, d_repulsion, sizeof(ftw_EnergyArray256), cudaMemcpyDeviceToHost ));
    // free device memory
    cudaFree(d_configuration);
    cudaFree(d_repulsion);
    free(h_configuration); // free host memory for replicated configuration
    return h_repulsion;
}
// This routine to be called from outside the library.
// Computes the 6-12 repulsion field of gfg on a 512^3 grid via
// EnergyKernel512_612. Caller owns the returned heap array; gfg is unmodified.
extern "C" ftw_EnergyArray512 *GFGToRepulsion512_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_EnergyArray512 *d_repulsion;
    ftw_GFG65536 *d_configuration;
    // replicate the gfg so the caller's configuration is untouched
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg);
    // and cross-parameterize use 6-12 rule
    for (int n=0; n<gfg->n_atoms; n++)
    {
        h_configuration->atom[n].sigma = 0.5f * (sigma + h_configuration->atom[n].sigma);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    cudaError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    // BUG FIX: the device buffer was allocated with sizeof(ftw_EnergyArray256)
    // but written as a 512^3 array by the kernel and read back with
    // sizeof(ftw_EnergyArray512) below -- a device buffer overflow.
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_repulsion, sizeof(ftw_EnergyArray512)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), cudaMemcpyHostToDevice ));
    // one block per (x,y) column, one thread per z sample
    dim3 dimGrid(512, 512);
    dim3 dimBlock(512, 1, 1);
    EnergyKernel512_612<<< dimGrid, dimBlock >>>(d_configuration, NULL, d_repulsion, NULL);
    // FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12).
    cudaDeviceSynchronize(); // block until the device has completed
    err = cudaGetLastError();
    if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
    // retrieve result
    ftw_EnergyArray512 *h_repulsion = (ftw_EnergyArray512 *)malloc(sizeof(ftw_EnergyArray512));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy(h_repulsion, d_repulsion, sizeof(ftw_EnergyArray512), cudaMemcpyDeviceToHost ));
    // free device memory
    cudaFree(d_configuration);
    cudaFree(d_repulsion);
    free(h_configuration); // free host memory for replicated configuration
    return h_repulsion;
}
// This routine to be called from outside the library.
// Computes the COMPASS-style repulsion field of gfg on a 256^3 grid via
// EnergyKernel256, using sixth-power sigma mixing and geometric-mean epsilon
// mixing. Caller owns the returned heap array; the input gfg is not modified.
extern "C" ftw_EnergyArray256 *GFGToRepulsion256(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_EnergyArray256 *d_repulsion;
    ftw_GFG65536 *d_configuration;
    // replicate the gfg so the caller's configuration is untouched
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg);
    // and cross-parameterize
    for (int n=0; n<gfg->n_atoms; n++)
    {
        h_configuration->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(h_configuration->atom[n].sigma, 6)), 0.1666666f);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    cudaError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_repulsion, sizeof(ftw_EnergyArray256)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), cudaMemcpyHostToDevice ));
    // one block per (x,y) column, one thread per z sample
    dim3 dimGrid(256, 256);
    dim3 dimBlock(256, 1, 1);
    EnergyKernel256<<< dimGrid, dimBlock >>>(d_configuration, NULL, d_repulsion, NULL);
    // FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12);
    // cudaDeviceSynchronize() is the drop-in replacement.
    cudaDeviceSynchronize(); // block until the device has completed
    err = cudaGetLastError();
    if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
    // retrieve result
    ftw_EnergyArray256 *h_repulsion = (ftw_EnergyArray256 *)malloc(sizeof(ftw_EnergyArray256));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy(h_repulsion, d_repulsion, sizeof(ftw_EnergyArray256), cudaMemcpyDeviceToHost ));
    // free device memory
    cudaFree(d_configuration);
    cudaFree(d_repulsion);
    free(h_configuration); // free host memory for replicated configuration
    return h_repulsion;
}
// This routine to be called from outside the library.
// Computes the TOTAL (repulsion minus attraction) COMPASS-style energy field
// of gfg on a 256^3 grid via EnergyKernel256, using sixth-power sigma mixing
// and geometric-mean epsilon mixing. Caller owns the returned heap array;
// the input gfg is not modified.
extern "C" ftw_EnergyArray256 *GFGToEnergyArray256(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_EnergyArray256 *d_energy_array;
    ftw_GFG65536 *d_configuration;
    // replicate the gfg so the caller's configuration is untouched
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg);
    // and cross-parameterize
    for (int n=0; n<gfg->n_atoms; n++)
    {
        h_configuration->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(h_configuration->atom[n].sigma, 6)), 0.1666666f);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    cudaError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_energy_array, sizeof(ftw_EnergyArray256)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), cudaMemcpyHostToDevice ));
    // one block per (x,y) column, one thread per z sample; only the
    // "total" output slot of the kernel is requested here
    dim3 dimGrid(256, 256);
    dim3 dimBlock(256, 1, 1);
    EnergyKernel256<<< dimGrid, dimBlock >>>(d_configuration, NULL, NULL, d_energy_array);
    // FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12).
    cudaDeviceSynchronize(); // block until the device has completed
    err = cudaGetLastError();
    if (err != cudaSuccess) printf("%s\n", cudaGetErrorString(err));
    // retrieve result
    ftw_EnergyArray256 *h_energy_array = (ftw_EnergyArray256 *)malloc(sizeof(ftw_EnergyArray256));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy(h_energy_array, d_energy_array, sizeof(ftw_EnergyArray256), cudaMemcpyDeviceToHost ));
    // free device memory
    cudaFree(d_configuration);
    cudaFree(d_energy_array);
    free(h_configuration); // free host memory for replicated configuration
    return h_energy_array;
}
// This is for a traditional LJ 6-12 interaction. Note that sigma is the value where energy is zero, not the well-bottom as for COMPASS...
// This operates on 'chunks' in x-direction because the domain is too large for the GPU memory
// Launch layout expected: grid (1024,1024), block (chunk_size,1,1), so
// (threadIdx.x, blockIdx.x, blockIdx.y) maps to (x-within-chunk, y, z);
// the absolute x index is chunk*chunk_size + threadIdx.x.
// Each call fills one ftw_Chunk slab (indexed by the local x); the host
// caller stitches the chunks together. Writes only the non-NULL outputs.
__global__ void EnergyKernel1024_612( ftw_GFG65536 *d_configuration,
ftw_Chunk *d_attraction,
ftw_Chunk *d_repulsion,
ftw_Chunk *d_total,
int chunk, int chunk_size) {
// x is the thread index (local to this chunk); y and z come from the grid
unsigned int idx = threadIdx.x;
unsigned int idy = blockIdx.x;
unsigned int idz = blockIdx.y;
float repulsion=0;
float attraction=0;
float sigma_over_r_sq;
float dx, dy, dz, dd;
// voxel edge lengths: the box is divided into 1024 steps per dimension
float f_resolution_x = d_configuration->box_x / 1024;
float f_resolution_y = d_configuration->box_y / 1024;
float f_resolution_z = d_configuration->box_z / 1024;
float cuda_x = (chunk * chunk_size + idx ) * f_resolution_x;
float cuda_y = idy * f_resolution_y;
float cuda_z = idz * f_resolution_z;
// evaluate energy at (cuda_x, cuda_y, cuda_z);
// no cutoff / periodic imaging; dd == 0 would divide by zero -- presumably
// excluded upstream (TODO confirm)
for (int i=0; i< d_configuration->n_atoms; i++) {
// central atom
dx = d_configuration->atom[i].x - cuda_x;
dy = d_configuration->atom[i].y - cuda_y;
dz = d_configuration->atom[i].z - cuda_z;
dd = dx*dx + dy*dy + dz*dz;
sigma_over_r_sq = d_configuration->atom[i].sigma * d_configuration->atom[i].sigma / dd;
repulsion += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
attraction += d_configuration->atom[i].epsilon * sigma_over_r_sq * sigma_over_r_sq * sigma_over_r_sq;
}
// If NULL pointers are passed, then no values are recorded.
if (d_attraction) d_attraction->energy[idx][idy][idz] = 4 * attraction;
if (d_repulsion) d_repulsion->energy[idx][idy][idz] = 4 * repulsion;
if (d_total) d_total->energy[idx][idy][idz] = 4 * repulsion - 4 * attraction;
}
// This routine to be called from outside the library.
// Computes the TOTAL LJ 6-12 energy field of gfg on a 1024^3 grid. The x
// dimension is processed in 4 chunks of 256 planes because a full
// ftw_EnergyArray1024 does not fit in GPU memory; each chunk is copied into
// its slot of the host result. Caller owns the returned heap array.
extern "C" ftw_EnergyArray1024 *GFGToEnergyArray1024_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
    ftw_Chunk *d_energy_array_chunk;
    ftw_GFG65536 *d_configuration;
    ftw_GFG65536 *h_configuration = replicateGFG65536(gfg); // replicate the gfg
    for (int n=0; n<gfg->n_atoms; n++) // and cross-parameterize
    {
        h_configuration->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(h_configuration->atom[n].sigma, 6)), 0.1666666f);
        h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
    }
    // then do the calc
    // (x,y,z) is (blockx, gridx, gridy)... chunking to 4 parts in x, then will combine results
    int chunk_size = 256, chunks = 4;
    dim3 dimGrid(1024, 1024);
    dim3 dimBlock(chunk_size, 1, 1);
    cudaError_t err;
    /* allocate for energy array and configuration on device (retried until success, per file convention) */
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_energy_array_chunk, sizeof(ftw_Chunk)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
    for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), cudaMemcpyHostToDevice ));
    ftw_EnergyArray1024 *h_energy_array = (ftw_EnergyArray1024 *)malloc(sizeof(ftw_EnergyArray1024)); // host structure, for result
    for (int chunk=0; chunk < chunks; chunk++)
    {
        EnergyKernel1024_612<<< dimGrid, dimBlock >>>(d_configuration, NULL, NULL, d_energy_array_chunk, chunk, chunk_size);
        // FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12).
        cudaDeviceSynchronize(); // block until the device has completed
        err = cudaGetLastError();
        if (err != cudaSuccess) {printf("CUDA error: %s\n", cudaGetErrorString(err)); exit(1);}
        // retrieve result into this chunk's slot of the host array.
        // FIX: byte offset computed via char* instead of casting through
        // (long), which truncates on LLP64 platforms where long is 32-bit.
        ftw_EnergyArray1024* h_address = (ftw_EnergyArray1024*)((char*)h_energy_array + sizeof(ftw_Chunk) * (size_t)chunk);
        for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy(h_address, d_energy_array_chunk, sizeof(ftw_Chunk), cudaMemcpyDeviceToHost ));
    }
    // free device memory
    cudaFree(d_configuration);
    cudaFree(d_energy_array_chunk);
    free(h_configuration); // free host memory for replicated configuration
    return h_energy_array;
}
// This routine to be called from outside the library
extern "C" ftw_EnergyArray1024 *GFGToRepulsion1024_612(ftw_GFG65536 *gfg, float sigma, float epsilon)
{
	// Computes the 6-12 repulsion term for the configuration on the GPU.
	// Returns a malloc()'d ftw_EnergyArray1024 the caller must free(), or
	// NULL if the host-side result buffer cannot be allocated.
	ftw_Chunk *d_repulsion_chunk;
	ftw_GFG65536 *d_configuration;
	ftw_GFG65536 *h_configuration = replicateGFG65536(gfg); // replicate the gfg
	for (int n=0; n<gfg->n_atoms; n++) // and cross-parameterize (sigma: 6th-power mean; epsilon: geometric mean)
	{
		h_configuration->atom[n].sigma = pow(0.5f * (float)(pow(sigma, 6) + pow(h_configuration->atom[n].sigma, 6)), 0.1666666f);
		h_configuration->atom[n].epsilon = sqrt(epsilon * h_configuration->atom[n].epsilon);
	}
	// then do the calc
	// (x,y,z) is (blockx, gridx, gridy)... chunking to 4 parts in x, then will combine results
	int chunk_size = 256, chunks = 4;
	dim3 dimGrid(1024, 1024);
	dim3 dimBlock(chunk_size, 1, 1);
	cudaError_t err;
	/* allocate for energy array and configuration on device.
	   NOTE(review): these loops spin forever if the device allocation keeps
	   failing; presumably a deliberate wait-for-VRAM strategy -- confirm. */
	for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_repulsion_chunk, sizeof(ftw_Chunk)));
	for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMalloc( (void **) &d_configuration, sizeof(ftw_GFG65536)));
	for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy( d_configuration, h_configuration, sizeof(ftw_GFG65536), cudaMemcpyHostToDevice ));
	ftw_EnergyArray1024 *h_repulsion = (ftw_EnergyArray1024 *)malloc(sizeof(ftw_EnergyArray1024)); // host structure, for result
	if (h_repulsion == NULL) // previously unchecked: a failed malloc crashed in the memcpy below
	{
		fprintf(stderr, "GFGToRepulsion1024_612: host malloc failed\n");
		cudaFree(d_configuration);
		cudaFree(d_repulsion_chunk);
		free(h_configuration);
		return NULL;
	}
	for (int chunk=0; chunk < chunks; chunk++)
	{
		EnergyKernel1024_612<<< dimGrid, dimBlock >>>(d_configuration, NULL, d_repulsion_chunk, NULL, chunk, chunk_size);
		cudaDeviceSynchronize(); // block until the device has completed (cudaThreadSynchronize is deprecated)
		err = cudaGetLastError();
		if (err != cudaSuccess) {printf("CUDA error: %s\n", cudaGetErrorString(err)); exit(1);}
		// retrieve result: chunk results are packed back-to-back into h_repulsion.
		// Use char* arithmetic -- the old (long) round-trip truncates pointers on
		// LLP64 platforms (e.g. 64-bit Windows, where long is 32 bits).
		ftw_EnergyArray1024* h_address = (ftw_EnergyArray1024*)((char *)h_repulsion + sizeof(ftw_Chunk) * chunk);
		for(err = cudaErrorUnknown; err != cudaSuccess; err = cudaMemcpy(h_address, d_repulsion_chunk, sizeof(ftw_Chunk), cudaMemcpyDeviceToHost ));
	}
	// free device memory
	cudaFree(d_configuration);
	cudaFree(d_repulsion_chunk);
	free(h_configuration); // free host memory for replicated configuration
	return h_repulsion;
}
|
c0ec876bb2f312bf41eee76808a4295ab50a9860.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gtest/gtest.h"
#include "../libpsc/cuda/cuda_mparticles.cuh"
#include "../libpsc/cuda/cuda_mparticles_sort.cuh"
#include "../libpsc/cuda/collision_cuda_impl.hxx"
#include "../libpsc/cuda/cuda_collision.cuh"
#include "psc_fields_single.h"
#include "psc_particles_single.h"
#include "testing.hxx"
using dim = dim_yz;
using Mparticles = MparticlesCuda<BS144>;
// ======================================================================
// RngStateCuda
// Smoke test: default- and sized-construction (and destruction) must succeed.
TEST(RngStateCuda, ctor_dtor)
{
  RngStateCuda rng_state;
  RngStateCuda rng_state2(128);
}
// resize() must grow the state to the requested number of RNGs.
TEST(RngStateCuda, resize)
{
  RngStateCuda rng_state;
  rng_state.resize(256);
  EXPECT_EQ(rng_state.size(), 256);
}
// Draws one uniform sample per thread into x[n] and stores the advanced RNG
// state back, so a subsequent launch continues the sequence.
// Assumes a 1-D launch sized exactly to x / rng_state (no bounds check).
__global__ static void kernel_random(RngStateCuda::Device rng_state,
                                     thrust::device_ptr<float> x)
{
  int n = threadIdx.x + blockDim.x * blockIdx.x; // global thread index
  auto rng = rng_state[n];
  x[n] = rng.uniform();
  rng_state[n] = rng; // persist the advanced state
}
// Two launches of kernel_random must each average near 0.5 and produce
// different values (i.e. the state persists and advances between launches).
TEST(RngStateCuda, access)
{
  dim3 dim_grid(2);
  RngStateCuda rng_state(dim_grid.x * THREADS_PER_BLOCK); // one RNG per thread
  ASSERT_EQ(THREADS_PER_BLOCK, 128);
  thrust::device_vector<float> x(dim_grid.x * THREADS_PER_BLOCK);
  hipLaunchKernelGGL(( kernel_random), dim3(dim_grid), dim3(THREADS_PER_BLOCK), 0, 0, rng_state, x.data());
  float sum = thrust::reduce(x.begin(), x.end(), 0.f, thrust::plus<float>());
  float avg = sum / x.size();
  EXPECT_NEAR(avg, .5, .05);
  // repeat to make sure numbers don't repeat
  hipLaunchKernelGGL(( kernel_random), dim3(dim_grid), dim3(THREADS_PER_BLOCK), 0, 0, rng_state, x.data());
  float sum2 = thrust::reduce(x.begin(), x.end(), 0.f, thrust::plus<float>());
  float avg2 = sum2 / x.size();
  EXPECT_NEAR(avg2, .5, .05);
  EXPECT_NE(avg, avg2);
}
// ======================================================================
// make_psc
//
// FIXME, duplicated in various testing environments
// Builds a 16^3 fully periodic test grid; dimensions declared invariant by
// `dim` are collapsed to a single cell with no ghost points.
// NOTE: the Grid_t is heap-allocated and never freed -- it must outlive the
// test, so the leak is accepted in this fixture.
template <typename dim>
static Grid_t& make_psc(const Grid_t::Kinds& kinds)
{
  Int3 gdims = {16, 16, 16};
  Int3 ibn = {2, 2, 2}; // NOTE(review): computed but never used below -- dead?
  Vec3<double> length = {160., 160., 160.};
  if (dim::InvarX::value) {
    gdims[0] = 1;
    ibn[0] = 0;
  }
  if (dim::InvarY::value) {
    gdims[1] = 1;
    ibn[1] = 0;
  }
  if (dim::InvarZ::value) {
    gdims[2] = 1;
    ibn[2] = 0;
  }
  auto grid_domain = Grid_t::Domain{gdims, length};
  // periodic boundary conditions for both fields and particles
  auto grid_bc =
    psc::grid::BC{{BND_FLD_PERIODIC, BND_FLD_PERIODIC, BND_FLD_PERIODIC},
                  {BND_FLD_PERIODIC, BND_FLD_PERIODIC, BND_FLD_PERIODIC},
                  {BND_PRT_PERIODIC, BND_PRT_PERIODIC, BND_PRT_PERIODIC},
                  {BND_PRT_PERIODIC, BND_PRT_PERIODIC, BND_PRT_PERIODIC}};
  auto norm_params = Grid_t::NormalizationParams::dimensionless();
  norm_params.nicell = 200;
  auto coeff = Grid_t::Normalization{norm_params};
  return *new Grid_t{grid_domain, grid_bc, kinds, coeff, 1.};
}
// Injects 9 test particles into patch 0, spread over three cells:
// two at (5,5,5), three at (5,15,15), four at (5,15,5). The sort and
// collision tests below depend on exactly this layout.
static Mparticles make_mparticles(const Grid_t& grid)
{
  Mparticles mprts{grid};
  auto inj = mprts.injector();
  auto injector = inj[0]; // injector for patch 0
  injector({{5., 5., 5.}, {1., 0., 0.}, 1., 0});
  injector({{5., 5., 5.}, {0., 0., 0.}, 1., 0});
  injector({{5., 15., 15.}, {.6, 0., 0.}, 1., 0});
  injector({{5., 15., 15.}, {.7, 0., 0.}, 1., 0});
  injector({{5., 15., 15.}, {.8, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.1, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.2, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.3, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.4, 0., 0.}, 1., 0});
  return mprts;
}
// Cell-index sort: find_indices_ids yields per-particle cell indices in
// injection order, stable_sort_cidx orders by cell (stable within a cell),
// and find_offsets builds the per-cell offset table.
TEST(cuda_mparticles_sort, sort)
{
  auto kinds = Grid_t::Kinds{Grid_t::Kind(1., 1., "test_species")};
  const auto& grid = make_psc<dim>(kinds);
  // init particles
  auto mprts = make_mparticles(grid);
  auto& cmprts = *mprts.cmprts();
  auto sort = cuda_mparticles_sort(cmprts.n_cells());
  sort.find_indices_ids(cmprts);
  EXPECT_EQ(sort.d_idx, (std::vector<int>{0, 0, 17, 17, 17, 1, 1, 1, 1}));
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8}));
  sort.stable_sort_cidx();
  EXPECT_EQ(sort.d_idx, (std::vector<int>{0, 0, 1, 1, 1, 1, 17, 17, 17}));
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 5, 6, 7, 8, 2, 3, 4}));
  sort.find_offsets();
  // expected offsets: 2 particles in cell 0, 4 in cell 1, 3 in cell 17
  std::vector<int> off(cmprts.n_cells() + 1);
  off[0] = 0;
  off[1] = 2;
  for (int i = 2; i < 18; i++) {
    off[i] = 6;
  }
  for (int i = 18; i <= 256; i++) {
    off[i] = 9;
  }
  EXPECT_EQ(sort.d_off, off);
}
// Like the plain cell sort, but particle order within a cell is randomized:
// checks the resulting permutation, that the random keys come out
// non-decreasing, that offsets match, and that a repeat run shuffles
// differently.
TEST(cuda_mparticles_randomize_sort, sort)
{
  auto kinds = Grid_t::Kinds{Grid_t::Kind(1., 1., "test_species")};
  const auto& grid = make_psc<dim>(kinds);
  // init particles
  auto mprts = make_mparticles(grid);
  auto& cmprts = *mprts.cmprts();
  cuda_mparticles_randomize_sort sort;
  sort.find_indices_ids(cmprts);
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8}));
  sort.sort();
  // permutations observed with other RNG seeds/implementations:
  // EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 5, 8, 7, 6, 2, 3, 4}));
  // EXPECT_EQ(sort.d_id, (std::vector<int>{1, 0, 8, 7, 5, 6, 4, 2, 3}));
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 7, 5, 8, 6, 2, 4, 3}));
  // sort keys must be globally non-decreasing after the sort
  float last = sort.d_random_idx[0];
  for (int i = 1; i < cmprts.size(); i++) {
    EXPECT_GE(sort.d_random_idx[i], last);
    last = sort.d_random_idx[i];
  }
  // for (int i = 0; i < cmprts.size(); i++) {
  //   mprintf("i %d r_idx %g id %d\n", i, (float)sort.d_random_idx[i],
  //           (int)sort.d_id[i]);
  // }
  sort.find_offsets();
  // expected offsets: 2 particles in cell 0, 4 in cell 1, 3 in cell 17
  std::vector<int> off(cmprts.n_cells() + 1);
  off[0] = 0;
  off[1] = 2;
  for (int i = 2; i < 18; i++) {
    off[i] = 6;
  }
  for (int i = 18; i <= 256; i++) {
    off[i] = 9;
  }
  EXPECT_EQ(sort.d_off, off);
#if 1
  // do over, get different permutation
  sort.find_indices_ids(cmprts);
  sort.sort();
  // for (int i = 0; i < cmprts.size(); i++) {
  //   mprintf("i %d r_idx %g id %d\n", i, (float)sort.d_random_idx[i],
  //           (int)sort.d_id[i]);
  // }
  EXPECT_NE(sort.d_id, (std::vector<int>{0, 1, 7, 5, 8, 6, 2, 4, 3}));
#endif
}
// A binary collision between the two particles sharing cell 0 must conserve
// total momentum (their momenta still sum to the initial {1, 0, 0}).
TEST(CollisionTest, Test2)
{
  using Collision = CollisionCuda<MparticlesCuda<BS144>, RngStateFake>;
  const typename Mparticles::real_t eps = 1e-5;
  auto kinds = Grid_t::Kinds{Grid_t::Kind(1., 1., "test_species")};
  const auto& grid = make_psc<dim>(kinds);
  // init particles
  auto mprts = make_mparticles(grid);
  auto collision = Collision(grid, 1, 1.);
  auto& cmprts = *mprts.cmprts();
  // sanity-check the cell layout before colliding (same as the sort test)
  auto sort_by_cell = cuda_mparticles_sort(cmprts.n_cells());
  sort_by_cell.find_indices_ids(cmprts);
  EXPECT_EQ(sort_by_cell.d_idx,
            (std::vector<int>{0, 0, 17, 17, 17, 1, 1, 1, 1}));
  EXPECT_EQ(sort_by_cell.d_id, (std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8}));
  sort_by_cell.stable_sort_cidx();
  EXPECT_EQ(sort_by_cell.d_idx,
            (std::vector<int>{0, 0, 1, 1, 1, 1, 17, 17, 17}));
  EXPECT_EQ(sort_by_cell.d_id, (std::vector<int>{0, 1, 5, 6, 7, 8, 2, 3, 4}));
  sort_by_cell.find_offsets();
  // for (int i = 0; i < cmprts.size(); i++) {
  //   mprintf("i %d idx %d id %d\n", i, (int)sort_by_cell.d_idx[i],
  //           (int)sort_by_cell.d_id[i]);
  collision(mprts);
  // momentum conservation for the pair in cell 0
  auto accessor = mprts.accessor();
  auto it = accessor[0].begin();
  auto prtf0 = *it++;
  auto prtf1 = *it++;
  EXPECT_NEAR(prtf0.u()[0] + prtf1.u()[0], 1., eps);
  EXPECT_NEAR(prtf0.u()[1] + prtf1.u()[1], 0., eps);
  EXPECT_NEAR(prtf0.u()[2] + prtf1.u()[2], 0., eps);
#if 0
  // depends on random numbers, but for RngFake, we know
  EXPECT_NEAR(prtf0.u()[0], 0.96226911, eps);
  EXPECT_NEAR(prtf0.u()[1], 0.        , eps);
  EXPECT_NEAR(prtf0.u()[2], -0.17342988, eps);
  EXPECT_NEAR(prtf1.u()[0], 0.03773088, eps);
  EXPECT_NEAR(prtf1.u()[1], -0.        , eps);
  EXPECT_NEAR(prtf1.u()[2], 0.17342988, eps);
#endif
}
// ======================================================================
// main
int main(int argc, char** argv)
{
  // Bring up MPI before running the test suite, tear it down afterwards,
  // and propagate gtest's result as the process exit code.
  MPI_Init(&argc, &argv);
  ::testing::InitGoogleTest(&argc, argv);
  int const result = RUN_ALL_TESTS();
  MPI_Finalize();
  return result;
}
| c0ec876bb2f312bf41eee76808a4295ab50a9860.cu |
#include "gtest/gtest.h"
#include "../libpsc/cuda/cuda_mparticles.cuh"
#include "../libpsc/cuda/cuda_mparticles_sort.cuh"
#include "../libpsc/cuda/collision_cuda_impl.hxx"
#include "../libpsc/cuda/cuda_collision.cuh"
#include "psc_fields_single.h"
#include "psc_particles_single.h"
#include "testing.hxx"
using dim = dim_yz;
using Mparticles = MparticlesCuda<BS144>;
// ======================================================================
// RngStateCuda
// Smoke test: default- and sized-construction (and destruction) must succeed.
TEST(RngStateCuda, ctor_dtor)
{
  RngStateCuda rng_state;
  RngStateCuda rng_state2(128);
}
// resize() must grow the state to the requested number of RNGs.
TEST(RngStateCuda, resize)
{
  RngStateCuda rng_state;
  rng_state.resize(256);
  EXPECT_EQ(rng_state.size(), 256);
}
// Draws one uniform sample per thread into x[n] and stores the advanced RNG
// state back, so a subsequent launch continues the sequence.
// Assumes a 1-D launch sized exactly to x / rng_state (no bounds check).
__global__ static void kernel_random(RngStateCuda::Device rng_state,
                                     thrust::device_ptr<float> x)
{
  int n = threadIdx.x + blockDim.x * blockIdx.x; // global thread index
  auto rng = rng_state[n];
  x[n] = rng.uniform();
  rng_state[n] = rng; // persist the advanced state
}
// Two launches of kernel_random must each average near 0.5 and produce
// different values (i.e. the state persists and advances between launches).
TEST(RngStateCuda, access)
{
  dim3 dim_grid(2);
  RngStateCuda rng_state(dim_grid.x * THREADS_PER_BLOCK); // one RNG per thread
  ASSERT_EQ(THREADS_PER_BLOCK, 128);
  thrust::device_vector<float> x(dim_grid.x * THREADS_PER_BLOCK);
  kernel_random<<<dim_grid, THREADS_PER_BLOCK>>>(rng_state, x.data());
  float sum = thrust::reduce(x.begin(), x.end(), 0.f, thrust::plus<float>());
  float avg = sum / x.size();
  EXPECT_NEAR(avg, .5, .05);
  // repeat to make sure numbers don't repeat
  kernel_random<<<dim_grid, THREADS_PER_BLOCK>>>(rng_state, x.data());
  float sum2 = thrust::reduce(x.begin(), x.end(), 0.f, thrust::plus<float>());
  float avg2 = sum2 / x.size();
  EXPECT_NEAR(avg2, .5, .05);
  EXPECT_NE(avg, avg2);
}
// ======================================================================
// make_psc
//
// FIXME, duplicated in various testing environments
// Builds a 16^3 fully periodic test grid; dimensions declared invariant by
// `dim` are collapsed to a single cell with no ghost points.
// NOTE: the Grid_t is heap-allocated and never freed -- it must outlive the
// test, so the leak is accepted in this fixture.
template <typename dim>
static Grid_t& make_psc(const Grid_t::Kinds& kinds)
{
  Int3 gdims = {16, 16, 16};
  Int3 ibn = {2, 2, 2}; // NOTE(review): computed but never used below -- dead?
  Vec3<double> length = {160., 160., 160.};
  if (dim::InvarX::value) {
    gdims[0] = 1;
    ibn[0] = 0;
  }
  if (dim::InvarY::value) {
    gdims[1] = 1;
    ibn[1] = 0;
  }
  if (dim::InvarZ::value) {
    gdims[2] = 1;
    ibn[2] = 0;
  }
  auto grid_domain = Grid_t::Domain{gdims, length};
  // periodic boundary conditions for both fields and particles
  auto grid_bc =
    psc::grid::BC{{BND_FLD_PERIODIC, BND_FLD_PERIODIC, BND_FLD_PERIODIC},
                  {BND_FLD_PERIODIC, BND_FLD_PERIODIC, BND_FLD_PERIODIC},
                  {BND_PRT_PERIODIC, BND_PRT_PERIODIC, BND_PRT_PERIODIC},
                  {BND_PRT_PERIODIC, BND_PRT_PERIODIC, BND_PRT_PERIODIC}};
  auto norm_params = Grid_t::NormalizationParams::dimensionless();
  norm_params.nicell = 200;
  auto coeff = Grid_t::Normalization{norm_params};
  return *new Grid_t{grid_domain, grid_bc, kinds, coeff, 1.};
}
// Injects 9 test particles into patch 0, spread over three cells:
// two at (5,5,5), three at (5,15,15), four at (5,15,5). The sort and
// collision tests below depend on exactly this layout.
static Mparticles make_mparticles(const Grid_t& grid)
{
  Mparticles mprts{grid};
  auto inj = mprts.injector();
  auto injector = inj[0]; // injector for patch 0
  injector({{5., 5., 5.}, {1., 0., 0.}, 1., 0});
  injector({{5., 5., 5.}, {0., 0., 0.}, 1., 0});
  injector({{5., 15., 15.}, {.6, 0., 0.}, 1., 0});
  injector({{5., 15., 15.}, {.7, 0., 0.}, 1., 0});
  injector({{5., 15., 15.}, {.8, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.1, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.2, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.3, 0., 0.}, 1., 0});
  injector({{5., 15., 5.}, {.4, 0., 0.}, 1., 0});
  return mprts;
}
// Cell-index sort: find_indices_ids yields per-particle cell indices in
// injection order, stable_sort_cidx orders by cell (stable within a cell),
// and find_offsets builds the per-cell offset table.
TEST(cuda_mparticles_sort, sort)
{
  auto kinds = Grid_t::Kinds{Grid_t::Kind(1., 1., "test_species")};
  const auto& grid = make_psc<dim>(kinds);
  // init particles
  auto mprts = make_mparticles(grid);
  auto& cmprts = *mprts.cmprts();
  auto sort = cuda_mparticles_sort(cmprts.n_cells());
  sort.find_indices_ids(cmprts);
  EXPECT_EQ(sort.d_idx, (std::vector<int>{0, 0, 17, 17, 17, 1, 1, 1, 1}));
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8}));
  sort.stable_sort_cidx();
  EXPECT_EQ(sort.d_idx, (std::vector<int>{0, 0, 1, 1, 1, 1, 17, 17, 17}));
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 5, 6, 7, 8, 2, 3, 4}));
  sort.find_offsets();
  // expected offsets: 2 particles in cell 0, 4 in cell 1, 3 in cell 17
  std::vector<int> off(cmprts.n_cells() + 1);
  off[0] = 0;
  off[1] = 2;
  for (int i = 2; i < 18; i++) {
    off[i] = 6;
  }
  for (int i = 18; i <= 256; i++) {
    off[i] = 9;
  }
  EXPECT_EQ(sort.d_off, off);
}
// Like the plain cell sort, but particle order within a cell is randomized:
// checks the resulting permutation, that the random keys come out
// non-decreasing, that offsets match, and that a repeat run shuffles
// differently.
TEST(cuda_mparticles_randomize_sort, sort)
{
  auto kinds = Grid_t::Kinds{Grid_t::Kind(1., 1., "test_species")};
  const auto& grid = make_psc<dim>(kinds);
  // init particles
  auto mprts = make_mparticles(grid);
  auto& cmprts = *mprts.cmprts();
  cuda_mparticles_randomize_sort sort;
  sort.find_indices_ids(cmprts);
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8}));
  sort.sort();
  // permutations observed with other RNG seeds/implementations:
  // EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 5, 8, 7, 6, 2, 3, 4}));
  // EXPECT_EQ(sort.d_id, (std::vector<int>{1, 0, 8, 7, 5, 6, 4, 2, 3}));
  EXPECT_EQ(sort.d_id, (std::vector<int>{0, 1, 7, 5, 8, 6, 2, 4, 3}));
  // sort keys must be globally non-decreasing after the sort
  float last = sort.d_random_idx[0];
  for (int i = 1; i < cmprts.size(); i++) {
    EXPECT_GE(sort.d_random_idx[i], last);
    last = sort.d_random_idx[i];
  }
  // for (int i = 0; i < cmprts.size(); i++) {
  //   mprintf("i %d r_idx %g id %d\n", i, (float)sort.d_random_idx[i],
  //           (int)sort.d_id[i]);
  // }
  sort.find_offsets();
  // expected offsets: 2 particles in cell 0, 4 in cell 1, 3 in cell 17
  std::vector<int> off(cmprts.n_cells() + 1);
  off[0] = 0;
  off[1] = 2;
  for (int i = 2; i < 18; i++) {
    off[i] = 6;
  }
  for (int i = 18; i <= 256; i++) {
    off[i] = 9;
  }
  EXPECT_EQ(sort.d_off, off);
#if 1
  // do over, get different permutation
  sort.find_indices_ids(cmprts);
  sort.sort();
  // for (int i = 0; i < cmprts.size(); i++) {
  //   mprintf("i %d r_idx %g id %d\n", i, (float)sort.d_random_idx[i],
  //           (int)sort.d_id[i]);
  // }
  EXPECT_NE(sort.d_id, (std::vector<int>{0, 1, 7, 5, 8, 6, 2, 4, 3}));
#endif
}
// A binary collision between the two particles sharing cell 0 must conserve
// total momentum (their momenta still sum to the initial {1, 0, 0}).
TEST(CollisionTest, Test2)
{
  using Collision = CollisionCuda<MparticlesCuda<BS144>, RngStateFake>;
  const typename Mparticles::real_t eps = 1e-5;
  auto kinds = Grid_t::Kinds{Grid_t::Kind(1., 1., "test_species")};
  const auto& grid = make_psc<dim>(kinds);
  // init particles
  auto mprts = make_mparticles(grid);
  auto collision = Collision(grid, 1, 1.);
  auto& cmprts = *mprts.cmprts();
  // sanity-check the cell layout before colliding (same as the sort test)
  auto sort_by_cell = cuda_mparticles_sort(cmprts.n_cells());
  sort_by_cell.find_indices_ids(cmprts);
  EXPECT_EQ(sort_by_cell.d_idx,
            (std::vector<int>{0, 0, 17, 17, 17, 1, 1, 1, 1}));
  EXPECT_EQ(sort_by_cell.d_id, (std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8}));
  sort_by_cell.stable_sort_cidx();
  EXPECT_EQ(sort_by_cell.d_idx,
            (std::vector<int>{0, 0, 1, 1, 1, 1, 17, 17, 17}));
  EXPECT_EQ(sort_by_cell.d_id, (std::vector<int>{0, 1, 5, 6, 7, 8, 2, 3, 4}));
  sort_by_cell.find_offsets();
  // for (int i = 0; i < cmprts.size(); i++) {
  //   mprintf("i %d idx %d id %d\n", i, (int)sort_by_cell.d_idx[i],
  //           (int)sort_by_cell.d_id[i]);
  collision(mprts);
  // momentum conservation for the pair in cell 0
  auto accessor = mprts.accessor();
  auto it = accessor[0].begin();
  auto prtf0 = *it++;
  auto prtf1 = *it++;
  EXPECT_NEAR(prtf0.u()[0] + prtf1.u()[0], 1., eps);
  EXPECT_NEAR(prtf0.u()[1] + prtf1.u()[1], 0., eps);
  EXPECT_NEAR(prtf0.u()[2] + prtf1.u()[2], 0., eps);
#if 0
  // depends on random numbers, but for RngFake, we know
  EXPECT_NEAR(prtf0.u()[0], 0.96226911, eps);
  EXPECT_NEAR(prtf0.u()[1], 0.        , eps);
  EXPECT_NEAR(prtf0.u()[2], -0.17342988, eps);
  EXPECT_NEAR(prtf1.u()[0], 0.03773088, eps);
  EXPECT_NEAR(prtf1.u()[1], -0.        , eps);
  EXPECT_NEAR(prtf1.u()[2], 0.17342988, eps);
#endif
}
// ======================================================================
// main
int main(int argc, char** argv)
{
  // Bring up MPI before running the test suite, tear it down afterwards,
  // and propagate gtest's result as the process exit code.
  MPI_Init(&argc, &argv);
  ::testing::InitGoogleTest(&argc, argv);
  int const result = RUN_ALL_TESTS();
  MPI_Finalize();
  return result;
}
|
851cd056bd090234dc9dfca5ffa2ff01d1da4252.hip | // !!! This is a file automatically generated by hipify!!!
// Sequential host-side reference: reduces the matrix one column at a time
// ("standard algorithm"), timing each column with HIP events and recording
// error/progress metrics after every column via track_host.
// NOTE(review): the low/arglow/ess naming suggests persistent-homology
// boundary-matrix column reduction -- confirm against reduce_col_host.
// On return, p_iter[0] == m + 1 (one initial snapshot + one per column).
inline void standard(int *h_low, int *h_arglow, int *h_classes,
    int *h_ess, int *h_rows_mp, const int m, const int p,
    int *h_aux_mp, int *h_low_true, int *h_ess_true,
    float *h_float_m, float *error_lone,
    float *error_linf, float *error_redu, float *error_ess,
    float *time_track, int *p_iter){
  // time accumulated across columns (updated by toc)
  float time = 0.0;
  // record the initial state before any column is processed
  track_host(0, m, h_low, h_ess, h_classes,
      h_low_true, h_ess_true, h_float_m,
      error_lone, error_linf, error_redu,
      error_ess, time_track, time);
  int iter = 1;
  for(int j = 0; j < m; j++){
    // TIC: start an event-pair timer around the per-column work
    //clock_t tic = clock();
    hipEvent_t start, stop;
    tic(&start, &stop);
    // Work on column "j"
    reduce_col_host(j, h_rows_mp, h_aux_mp, h_low, h_arglow, m, p, h_ess);
    // Update classes: a column keeping a low entry is marked -1 and its low
    // row +1; a column with no low entry is marked +1.
    if (h_low[j] > -1){
      h_classes[j] = -1;
      h_classes[h_low[j]] = 1;
    }else{
      h_classes[j] = 1;
    }
    // Essential estimation: a paired column and its low row are not essential
    if (h_low[j] > -1){
      h_ess[j] = 0;
      h_ess[h_low[j]] = 0;
    }
    // TOC: read the elapsed time for this column into `time`
    toc(start, stop, &time);
    // measure progress
    track_host(iter, m, h_low, h_ess, h_classes,
        h_low_true, h_ess_true, h_float_m,
        error_lone, error_linf, error_redu,
        error_ess, time_track, time);
    // iter
    iter++;
  }
  p_iter[0] = iter;
}
// Sequential host-side reference: reduces the matrix one column at a time
// ("standard algorithm"), timing each column with CUDA events and recording
// error/progress metrics after every column via track_host.
// NOTE(review): the low/arglow/ess naming suggests persistent-homology
// boundary-matrix column reduction -- confirm against reduce_col_host.
// On return, p_iter[0] == m + 1 (one initial snapshot + one per column).
851cd056bd090234dc9dfca5ffa2ff01d1da4252.cu | inline void standard(int *h_low, int *h_arglow, int *h_classes,
    int *h_ess, int *h_rows_mp, const int m, const int p,
    int *h_aux_mp, int *h_low_true, int *h_ess_true,
    float *h_float_m, float *error_lone,
    float *error_linf, float *error_redu, float *error_ess,
    float *time_track, int *p_iter){
  // time accumulated across columns (updated by toc)
  float time = 0.0;
  // record the initial state before any column is processed
  track_host(0, m, h_low, h_ess, h_classes,
      h_low_true, h_ess_true, h_float_m,
      error_lone, error_linf, error_redu,
      error_ess, time_track, time);
  int iter = 1;
  for(int j = 0; j < m; j++){
    // TIC: start an event-pair timer around the per-column work
    //clock_t tic = clock();
    cudaEvent_t start, stop;
    tic(&start, &stop);
    // Work on column "j"
    reduce_col_host(j, h_rows_mp, h_aux_mp, h_low, h_arglow, m, p, h_ess);
    // Update classes: a column keeping a low entry is marked -1 and its low
    // row +1; a column with no low entry is marked +1.
    if (h_low[j] > -1){
      h_classes[j] = -1;
      h_classes[h_low[j]] = 1;
    }else{
      h_classes[j] = 1;
    }
    // Essential estimation: a paired column and its low row are not essential
    if (h_low[j] > -1){
      h_ess[j] = 0;
      h_ess[h_low[j]] = 0;
    }
    // TOC: read the elapsed time for this column into `time`
    toc(start, stop, &time);
    // measure progress
    track_host(iter, m, h_low, h_ess, h_classes,
        h_low_true, h_ess_true, h_float_m,
        error_lone, error_linf, error_redu,
        error_ess, time_track, time);
    // iter
    iter++;
  }
  p_iter[0] = iter;
}
|
4c6b62cfae105caacee352d87467753a2396d5b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgeadd.cu, normal z -> c, Sun Nov 20 20:20:27 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset.
*/
__global__
void cgeadd_full(
    int m, int n,
    magmaFloatComplex alpha,
    const magmaFloatComplex *dA, int ldda,
    magmaFloatComplex *dB, int lddb )
{
    // One thread owns one matrix row within its BLK_X x BLK_Y tile and
    // walks the BLK_Y columns of that tile, computing dB += alpha*dA.
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    // rows past the bottom of the matrix have nothing to do
    if ( row >= m )
        return;
    // advance both matrices to this thread's first element
    const magmaFloatComplex *pA = dA + row + col0*ldda;
    magmaFloatComplex       *pB = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n ) {
        // full block-column: constant trip count, so let the compiler unroll
        #pragma unroll
        for( int j=0; j < BLK_Y; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
    else {
        // partial block-column at the right edge of the matrix
        for( int j=0; j < BLK_Y && col0+j < n; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_cgeadd(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex alpha,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    magmaFloatComplex_ptr dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // LAPACK-style argument checking: report the first invalid argument
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return for empty matrices
    if ( m == 0 || n == 0 )
        return;
    // BLK_X threads per block; grid covers ceil(m/BLK_X) x ceil(n/BLK_Y) tiles
    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    hipLaunchKernelGGL(( cgeadd_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
        m, n, alpha, dA, ldda, dB, lddb );
}
| 4c6b62cfae105caacee352d87467753a2396d5b0.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zgeadd.cu, normal z -> c, Sun Nov 20 20:20:27 2016
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset.
*/
__global__
void cgeadd_full(
    int m, int n,
    magmaFloatComplex alpha,
    const magmaFloatComplex *dA, int ldda,
    magmaFloatComplex *dB, int lddb )
{
    // One thread owns one matrix row within its BLK_X x BLK_Y tile and
    // walks the BLK_Y columns of that tile, computing dB += alpha*dA.
    const int row  = blockIdx.x*BLK_X + threadIdx.x;
    const int col0 = blockIdx.y*BLK_Y;
    // rows past the bottom of the matrix have nothing to do
    if ( row >= m )
        return;
    // advance both matrices to this thread's first element
    const magmaFloatComplex *pA = dA + row + col0*ldda;
    magmaFloatComplex       *pB = dB + row + col0*lddb;
    if ( col0 + BLK_Y <= n ) {
        // full block-column: constant trip count, so let the compiler unroll
        #pragma unroll
        for( int j=0; j < BLK_Y; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
    else {
        // partial block-column at the right edge of the matrix
        for( int j=0; j < BLK_Y && col0+j < n; ++j ) {
            pB[j*lddb] = alpha*pA[j*ldda] + pB[j*lddb];
        }
    }
}
/***************************************************************************//**
Purpose
-------
ZGEADD adds two matrices, dB = alpha*dA + dB.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_cgeadd(
    magma_int_t m, magma_int_t n,
    magmaFloatComplex alpha,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    magmaFloatComplex_ptr dB, magma_int_t lddb,
    magma_queue_t queue )
{
    // LAPACK-style argument checking: report the first invalid argument
    magma_int_t info = 0;
    if ( m < 0 )
        info = -1;
    else if ( n < 0 )
        info = -2;
    else if ( ldda < max(1,m))
        info = -5;
    else if ( lddb < max(1,m))
        info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return;
    }
    // quick return for empty matrices
    if ( m == 0 || n == 0 )
        return;
    // BLK_X threads per block; grid covers ceil(m/BLK_X) x ceil(n/BLK_Y) tiles
    dim3 threads( BLK_X, 1 );
    dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
    cgeadd_full<<< grid, threads, 0, queue->cuda_stream() >>>
        ( m, n, alpha, dA, ldda, dB, lddb );
}
|
primitives.hip | // !!! This is a file automatically generated by hipify!!!
#include "GANA/primitives.cuh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
namespace GANA {
// Draw triangle.
void Triangle::draw(FILE *out_file, int const start_idx, int const resid) {
	// Emit the 3 vertices as atoms, then their connectivity as CONECT records.
	int idx[3];
	for (int n = 0; n < 3; ++n) {
		idx[n] = start_idx + n;
		_p[n].draw(out_file, idx[n], resid);
	}
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4}\n", idx[0], idx[1], idx[2]);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4}\n", idx[1], idx[2], idx[0]);
	return;
}
// Draw tetrahedron.
void Tetrahedron::draw (FILE *out_file, int const start_idx, int const resid) {
	// Emit the 4 vertices as atoms, then the full edge connectivity as
	// PDB CONECT records (each line: base atom + its 3 bonded neighbors).
	const auto i = start_idx;
	const auto j = start_idx + 1;
	const auto k = start_idx + 2;
	const auto l = start_idx + 3;
	_p[0].draw(out_file, i, resid);
	_p[1].draw(out_file, j, resid);
	_p[2].draw(out_file, k, resid);
	_p[3].draw(out_file, l, resid);
	// BUG FIX: each call passes 4 atom indices, but the format string only
	// had 3 placeholders, so fmt silently dropped the 4th bond on every line.
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", i, j, k, l);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", j, k, l, i);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", k, l, i, j);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", l, i, j, k);
	return;
}
Cube::Cube(float const p0x, float const p0y, float const p0z, float const dim) {
	// Vertex order must match the Point/Point_3 constructors and the edge
	// connectivity assumed by Cube::draw():
	//   0:(0,0,0) 1:(0,0,d) 2:(0,d,d) 3:(0,d,0) 4:(d,0,0) 5:(d,0,d) 6:(d,d,d) 7:(d,d,0)
	// BUG FIX: vertices 2/3 and 6/7 were swapped relative to the other
	// constructors, which made Cube::draw() emit face diagonals instead of edges.
	_dim = dim;
	_p[0] = Point(p0x, p0y, p0z);
	_p[1] = Point(p0x, p0y, p0z + _dim);
	_p[2] = Point(p0x, p0y + _dim, p0z + _dim);
	_p[3] = Point(p0x, p0y + _dim, p0z);
	_p[4] = Point(p0x + _dim, p0y, p0z);
	_p[5] = Point(p0x + _dim, p0y, p0z + _dim);
	_p[6] = Point(p0x + _dim, p0y + _dim, p0z + _dim);
	_p[7] = Point(p0x + _dim, p0y + _dim, p0z);
	return;
}
// From GANA::Point.
Cube::Cube(Point const p0, float const dim) {
	// Eight corners of an axis-aligned cube anchored at p0 with edge `dim`,
	// in the vertex order Cube::draw() relies on.
	_dim = dim;
	float const d = _dim;
	Vector const corner[8] = {
		Vector(0.f, 0.f, 0.f), Vector(0.f, 0.f, d), Vector(0.f, d, d),
		Vector(0.f, d, 0.f),   Vector(d, 0.f, 0.f), Vector(d, 0.f, d),
		Vector(d, d, d),       Vector(d, d, 0.f)};
	_p[0] = p0;
	for (int n = 1; n < 8; ++n) {
		_p[n] = p0 + corner[n];
	}
	return;
}
// From CGAL Point.
Cube::Cube(Point_3 const p0, float const dim) {
	// Same vertex ordering as the GANA::Point constructor:
	//   0:(0,0,0) 1:(0,0,d) 2:(0,d,d) 3:(0,d,0) 4:(d,0,0) 5:(d,0,d) 6:(d,d,d) 7:(d,d,0)
	_dim = dim;
	_p[0] = Point(p0);
	_p[1] = p0 + Vector(0.f, 0.f, _dim);
	_p[2] = p0 + Vector(0.f, _dim, _dim);
	_p[3] = p0 + Vector(0.f, _dim, 0.f);
	_p[4] = p0 + Vector(_dim, 0.f, 0.f);
	_p[5] = p0 + Vector(_dim, 0.f, _dim);
	_p[6] = p0 + Vector(_dim, _dim, _dim);
	_p[7] = p0 + Vector(_dim, _dim, 0.f);
	return;
}
// Draw cube.
void Cube::draw(FILE *out_file, int const start_idx, int const resid) {
	// Emits the 8 vertices as atoms and the cube's edges as PDB CONECT
	// records (each line: base atom + 3 bonded neighbors). Relies on the
	// vertex ordering established by the constructors.
	const auto i = start_idx;
	const auto j = start_idx + 1;
	const auto k = start_idx + 2;
	const auto l = start_idx + 3;
	const auto ii = start_idx + 4;
	const auto jj = start_idx + 5;
	const auto kk = start_idx + 6;
	const auto ll = start_idx + 7;
	_p[0].draw(out_file, i, resid);
	_p[1].draw(out_file, j, resid);
	_p[2].draw(out_file, k, resid);
	_p[3].draw(out_file, l, resid);
	_p[4].draw(out_file, ii, resid);
	_p[5].draw(out_file, jj, resid);
	_p[6].draw(out_file, kk, resid);
	_p[7].draw(out_file, ll, resid);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", i, j, l, ii);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", k, j, l, kk);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", jj, ii, kk, j);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", ll, ii, kk, l);
	return;
}
// From GANA::Point
Prism::Prism(Point const &p0, Point const &p1, Point const &p2, Point const &p3,
    Point const &p4, Point const &p5, Point const &p6, Point const &p7) {
	// Store the 8 vertices verbatim; no ordering is enforced here.
	Point const *const vertex[8] = {&p0, &p1, &p2, &p3, &p4, &p5, &p6, &p7};
	for (int n = 0; n < 8; ++n) {
		_p[n] = *(vertex[n]);
	}
	return;
}
// From CGAL Point
Prism::Prism(Point_3 const &p0, Point_3 const &p1, Point_3 const &p2,
    Point_3 const &p3, Point_3 const &p4, Point_3 const &p5,
    Point_3 const &p6, Point_3 const &p7) {
	// Copy the 8 CGAL points, converting each to a GANA::Point;
	// no vertex ordering is enforced here.
	_p[0] = Point(p0);
	_p[1] = Point(p1);
	_p[2] = Point(p2);
	_p[3] = Point(p3);
	_p[4] = Point(p4);
	_p[5] = Point(p5);
	_p[6] = Point(p6);
	_p[7] = Point(p7);
	return;
}
// Draw prism. Can't draw connectivity properly if the prism wasn't constructed
// with proper Point ordering. SO this class is kind of useless.
void Prism::draw(FILE *out_file, int const start_idx, int const resid) {
	// Emits the 8 vertices as atoms and CONECT records assuming the same
	// vertex ordering as Cube; connectivity is only meaningful if the prism
	// was constructed in that order.
	const auto i = start_idx;
	const auto j = start_idx + 1;
	const auto k = start_idx + 2;
	const auto l = start_idx + 3;
	const auto ii = start_idx + 4;
	const auto jj = start_idx + 5;
	const auto kk = start_idx + 6;
	const auto ll = start_idx + 7;
	_p[0].draw(out_file, i, resid);
	_p[1].draw(out_file, j, resid);
	_p[2].draw(out_file, k, resid);
	_p[3].draw(out_file, l, resid);
	_p[4].draw(out_file, ii, resid);
	_p[5].draw(out_file, jj, resid);
	_p[6].draw(out_file, kk, resid);
	_p[7].draw(out_file, ll, resid);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", i, j, l, ii);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", k, j, l, kk);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", jj, ii, kk, j);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", ll, ii, kk, l);
	return;
}
auto determinant(Vector const &v0, Vector const &v1, Vector const &v2) -> float
{
	// 3x3 determinant of the matrix [v0 v1 v2]: build the 2x2 minors of the
	// first two coordinates, then expand along the third coordinate.
	float const minor01 = v0[0]*v1[1] - v0[1]*v1[0];
	float const minor02 = v0[0]*v2[1] - v0[1]*v2[0];
	float const minor12 = v1[0]*v2[1] - v1[1]*v2[0];
	return minor01*v2[2] - minor02*v1[2] + minor12*v0[2];
}
// Determinant of the three edge vectors emanating from vertex 0
// (proportional to the tetrahedron's signed volume).
auto determinant(Tetrahedron const &t) -> float {
	Vector const v10 = t[1] - t[0];
	Vector const v20 = t[2] - t[0];
	Vector const v30 = t[3] - t[0];
	return determinant(v10, v20, v30);
}
}
| primitives.cu | #include "GANA/primitives.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
namespace GANA {
// Draw triangle.
void Triangle::draw(FILE *out_file, int const start_idx, int const resid) {
	// Emit the 3 vertices as atoms, then their connectivity as CONECT records.
	int idx[3];
	for (int n = 0; n < 3; ++n) {
		idx[n] = start_idx + n;
		_p[n].draw(out_file, idx[n], resid);
	}
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4}\n", idx[0], idx[1], idx[2]);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4}\n", idx[1], idx[2], idx[0]);
	return;
}
// Draw tetrahedron.
void Tetrahedron::draw (FILE *out_file, int const start_idx, int const resid) {
	// Emit the 4 vertices as atoms, then the full edge connectivity as
	// PDB CONECT records (each line: base atom + its 3 bonded neighbors).
	const auto i = start_idx;
	const auto j = start_idx + 1;
	const auto k = start_idx + 2;
	const auto l = start_idx + 3;
	_p[0].draw(out_file, i, resid);
	_p[1].draw(out_file, j, resid);
	_p[2].draw(out_file, k, resid);
	_p[3].draw(out_file, l, resid);
	// BUG FIX: each call passes 4 atom indices, but the format string only
	// had 3 placeholders, so fmt silently dropped the 4th bond on every line.
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", i, j, k, l);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", j, k, l, i);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", k, l, i, j);
	fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", l, i, j, k);
	return;
}
Cube::Cube(float const p0x, float const p0y, float const p0z, float const dim) {
	// Vertex order must match the Point/Point_3 constructors and the edge
	// connectivity assumed by Cube::draw():
	//   0:(0,0,0) 1:(0,0,d) 2:(0,d,d) 3:(0,d,0) 4:(d,0,0) 5:(d,0,d) 6:(d,d,d) 7:(d,d,0)
	// BUG FIX: vertices 2/3 and 6/7 were swapped relative to the other
	// constructors, which made Cube::draw() emit face diagonals instead of edges.
	_dim = dim;
	_p[0] = Point(p0x, p0y, p0z);
	_p[1] = Point(p0x, p0y, p0z + _dim);
	_p[2] = Point(p0x, p0y + _dim, p0z + _dim);
	_p[3] = Point(p0x, p0y + _dim, p0z);
	_p[4] = Point(p0x + _dim, p0y, p0z);
	_p[5] = Point(p0x + _dim, p0y, p0z + _dim);
	_p[6] = Point(p0x + _dim, p0y + _dim, p0z + _dim);
	_p[7] = Point(p0x + _dim, p0y + _dim, p0z);
	return;
}
// From GANA::Point.
Cube::Cube(Point const p0, float const dim) {
	// Eight corners of an axis-aligned cube anchored at p0 with edge `dim`,
	// in the vertex order Cube::draw() relies on.
	_dim = dim;
	float const d = _dim;
	Vector const corner[8] = {
		Vector(0.f, 0.f, 0.f), Vector(0.f, 0.f, d), Vector(0.f, d, d),
		Vector(0.f, d, 0.f),   Vector(d, 0.f, 0.f), Vector(d, 0.f, d),
		Vector(d, d, d),       Vector(d, d, 0.f)};
	_p[0] = p0;
	for (int n = 1; n < 8; ++n) {
		_p[n] = p0 + corner[n];
	}
	return;
}
// From CGAL Point.
Cube::Cube(Point_3 const p0, float const dim) {
	// Same vertex ordering as the GANA::Point constructor:
	//   0:(0,0,0) 1:(0,0,d) 2:(0,d,d) 3:(0,d,0) 4:(d,0,0) 5:(d,0,d) 6:(d,d,d) 7:(d,d,0)
	_dim = dim;
	_p[0] = Point(p0);
	_p[1] = p0 + Vector(0.f, 0.f, _dim);
	_p[2] = p0 + Vector(0.f, _dim, _dim);
	_p[3] = p0 + Vector(0.f, _dim, 0.f);
	_p[4] = p0 + Vector(_dim, 0.f, 0.f);
	_p[5] = p0 + Vector(_dim, 0.f, _dim);
	_p[6] = p0 + Vector(_dim, _dim, _dim);
	_p[7] = p0 + Vector(_dim, _dim, 0.f);
	return;
}
// Draw cube.
// Emit the 8 vertices as PDB atoms (serials start_idx..start_idx+7), then
// four CONECT records with the hard-coded connectivity that matches the
// constructor's vertex ordering.
void Cube::draw(FILE *out_file, int const start_idx, int const resid) {
    for (int n = 0; n < 8; ++n) {
        _p[n].draw(out_file, start_idx + n, resid);
    }
    const auto i = start_idx;
    const auto j = start_idx + 1;
    const auto k = start_idx + 2;
    const auto l = start_idx + 3;
    const auto ii = start_idx + 4;
    const auto jj = start_idx + 5;
    const auto kk = start_idx + 6;
    const auto ll = start_idx + 7;
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", i, j, l, ii);
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", k, j, l, kk);
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", jj, ii, kk, j);
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", ll, ii, kk, l);
    return;
}
// From GANA::Point
// Store the 8 prism vertices exactly as supplied by the caller.
Prism::Prism(Point const &p0, Point const &p1, Point const &p2, Point const &p3,
    Point const &p4, Point const &p5, Point const &p6, Point const &p7) {
    Point const *const vtx[8] = {&p0, &p1, &p2, &p3, &p4, &p5, &p6, &p7};
    for (int n = 0; n < 8; ++n) {
        _p[n] = *vtx[n];
    }
    return;
}
// From CGAL Point
// Convert 8 CGAL points into GANA points, keeping the caller's ordering.
Prism::Prism(Point_3 const &p0, Point_3 const &p1, Point_3 const &p2,
    Point_3 const &p3, Point_3 const &p4, Point_3 const &p5,
    Point_3 const &p6, Point_3 const &p7) {
    Point_3 const *const vtx[8] = {&p0, &p1, &p2, &p3, &p4, &p5, &p6, &p7};
    for (int n = 0; n < 8; ++n) {
        _p[n] = Point(*vtx[n]);
    }
    return;
}
// Draw prism. The CONECT records assume the vertices were supplied in the
// cube-style ordering; with an arbitrary ordering the drawn connectivity
// will be wrong (a known limitation of this class).
void Prism::draw(FILE *out_file, int const start_idx, int const resid) {
    for (int n = 0; n < 8; ++n) {
        _p[n].draw(out_file, start_idx + n, resid);
    }
    const auto i = start_idx;
    const auto j = start_idx + 1;
    const auto k = start_idx + 2;
    const auto l = start_idx + 3;
    const auto ii = start_idx + 4;
    const auto jj = start_idx + 5;
    const auto kk = start_idx + 6;
    const auto ll = start_idx + 7;
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", i, j, l, ii);
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", k, j, l, kk);
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", jj, ii, kk, j);
    fmt::print(out_file, "CONECT {:>4} {:>4} {:>4} {:>4}\n", ll, ii, kk, l);
    return;
}
// 3x3 determinant of the matrix with columns v0, v1, v2, via cofactor
// expansion over 2x2 minors. The expressions and evaluation order are the
// same as before, so the float result is unchanged.
auto determinant(Vector const &v0, Vector const &v1, Vector const &v2) -> float
{
    // 2x2 minors formed from the first two components of each column pair.
    float const minor_01 = v0[0]*v1[1] - v0[1]*v1[0];
    float const minor_02 = v0[0]*v2[1] - v0[1]*v2[0];
    float const minor_12 = v1[0]*v2[1] - v1[1]*v2[0];
    // Expand along the third components.
    return minor_01*v2[2] - minor_02*v1[2] + minor_12*v0[2];
}
// Determinant of tetrahedron t: the scalar triple product of the three
// edge vectors emanating from vertex 0.
auto determinant(Tetrahedron const &t) -> float {
    Vector const edge_a = t[1] - t[0];
    Vector const edge_b = t[2] - t[0];
    Vector const edge_c = t[3] - t[0];
    return determinant(edge_a, edge_b, edge_c);
}
}
|
a0ea8161381a7e23d7aee960f1b4b63d82a9c713.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Wrap every HIP runtime call: on failure, report the call site and the
// runtime's error string, then terminate the process.
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
// Host wall-clock time in seconds (microsecond resolution), for timing.
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return((double)tp.tv_sec + (double)tp.tv_usec*1e-6);
}
// CPU reference implementation: elementwise C[i] = A[i] + B[i] over N floats.
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for(int idx=0; idx<N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
// Fill ip[0..size) with pseudo-random floats in [0, 25.5].
// NOTE(review): srand(time(...)) is re-run on every call, so two calls in
// the same second (as main does for h_A and h_B) produce identical arrays.
// This file is hipify-generated — fix in the CUDA source: seed only once.
void initialData(float *ip, int size)
{
time_t t;
srand((unsigned int) time(&t));
for(int i=0; i<size; i++)
{
ip[i] = (float)(rand() & 0xFF)/10.0f;
}
}
// Device kernel: one thread per element, C[i] = A[i] + B[i];
// the i < N guard covers the partial final block.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) C[i] = A[i] + B[i];
}
// Compare host and GPU results elementwise; report the first mismatch.
// NOTE(review): `abs` here may resolve to the integer overload, truncating
// the fractional difference so sub-1.0 errors pass undetected. This file is
// hipify-generated — fix in the CUDA source (use a floating-point compare).
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
int match = 1;
for(int i=0; i<N; i++)
{
if(abs(hostRef[i]-gpuRef[i])>epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match)
{
printf("Arrays match.\n\n");
}
}
// Driver: times a CPU vector add against the GPU kernel and verifies the
// device result against the host reference.
// NOTE(review): hipMalloc/hipMemcpy/hipFree/hipDeviceSynchronize and the
// kernel launch are unchecked even though CHECK exists, and the host
// mallocs are never NULL-checked. This file is hipify-generated — apply
// the fixes in the CUDA source it comes from.
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev=0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem = 1<<24;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = cpuSecond();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = cpuSecond() - iStart;
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = cpuSecond() - iStart;
printf("sumArraysOnHost Time elapsed %f sec\n", iElaps);
float CPU_iElaps = iElaps;
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
int iLen = 1024;
dim3 block (iLen);
dim3 grid ((nElem+block.x-1)/block.x);
iStart = cpuSecond();
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
hipDeviceSynchronize();
iElaps = cpuSecond() - iStart;
printf("sumArraysOnGPU <<<%d, %d>>> Time elapsed %f sec\n", grid.x, block.x, iElaps);
float GPU_iElaps = iElaps;
// GPU speedup factor
printf("GPU speedup factor = %.2f sec\n", CPU_iElaps/GPU_iElaps);
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return(0);
}
| a0ea8161381a7e23d7aee960f1b4b63d82a9c713.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Abort with file/line context whenever a CUDA runtime call does not
// return cudaSuccess; use as CHECK(cudaFoo(...)).
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Wall-clock time in seconds (microsecond resolution) for host-side timing.
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double const seconds = (double)now.tv_sec;
    double const micros = (double)now.tv_usec;
    return seconds + micros * 1e-6;
}
// Reference CPU implementation: C[i] = A[i] + B[i] for every i in [0, N).
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
    float *out_end = C + N;
    while (C != out_end)
    {
        *C++ = *A++ + *B++;
    }
}
// Fill ip[0..size) with pseudo-random floats in [0, 25.5].
// BUG FIX: the RNG used to be reseeded with time() on every call, so two
// calls within the same second (exactly what main does for h_A and h_B)
// produced identical arrays. Seed once per process instead.
void initialData(float *ip, int size)
{
    static int seeded = 0;
    if (!seeded)
    {
        time_t t;
        srand((unsigned int) time(&t));
        seeded = 1;
    }
    for(int i=0; i<size; i++)
    {
        ip[i] = (float)(rand() & 0xFF)/10.0f;
    }
}
// Device kernel: one thread per element, C[i] = A[i] + B[i];
// the i < N guard covers the partial final block.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<N) C[i] = A[i] + B[i];
}
// Compare host and GPU results elementwise and report the first mismatch.
// BUG FIX: the original used `abs`, which can resolve to the integer
// overload and truncate the difference toward zero, hiding sub-1.0 errors.
// Compare the signed difference against +/-epsilon instead (no new headers
// needed, and correct for floats).
void checkResult(float *hostRef, float *gpuRef, const int N)
{
    double epsilon = 1.0E-8;
    int match = 1;
    for(int i=0; i<N; i++)
    {
        double diff = (double)hostRef[i] - (double)gpuRef[i];
        if(diff > epsilon || -diff > epsilon)
        {
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match)
    {
        printf("Arrays match.\n\n");
    }
}
// Driver: times a CPU vector add against the GPU kernel and verifies the
// device result against the host reference.
int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);
    // set up device
    int dev=0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // set up data size of vectors
    int nElem = 1<<24;
    printf("Vector size %d\n", nElem);
    // malloc host memory
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);
    // FIX: host allocations were never checked for failure.
    if (h_A == NULL || h_B == NULL || hostRef == NULL || gpuRef == NULL)
    {
        printf("Error: host malloc failed\n");
        exit(1);
    }
    double iStart, iElaps;
    // initialize data at host side
    iStart = cpuSecond();
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    iElaps = cpuSecond() - iStart;
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    // add vector at host side for result checks
    iStart = cpuSecond();
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    iElaps = cpuSecond() - iStart;
    printf("sumArraysOnHost Time elapsed %f sec\n", iElaps);
    float CPU_iElaps = iElaps;
    // malloc device global memory
    // FIX: device allocations, copies, the sync and the frees were
    // unchecked; any runtime failure would previously go unnoticed.
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_B, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));
    // transfer data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    // invoke kernel at host side
    int iLen = 1024;
    dim3 block (iLen);
    dim3 grid ((nElem+block.x-1)/block.x);
    iStart = cpuSecond();
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
    // FIX: surface launch-configuration errors, then async execution errors.
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    iElaps = cpuSecond() - iStart;
    printf("sumArraysOnGPU <<<%d, %d>>> Time elapsed %f sec\n", grid.x, block.x, iElaps);
    float GPU_iElaps = iElaps;
    // GPU speedup factor
    printf("GPU speedup factor = %.2f sec\n", CPU_iElaps/GPU_iElaps);
    // copy kernel result back to host side
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    // check device results
    checkResult(hostRef, gpuRef, nElem);
    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    // free device global memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    return(0);
}
|
c5e8374f114908d4306d02d4b58032a41ee7d842.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cuda_utils.hpp>
#include <type_traits>
#include <thrust/device_vector.h>
#include <sys/time.h>
#include <time.h>
#include <utility/utility.hpp>
#include <utility/trajectory_thrust.cuh>
#include <cuspatial/trajectory.hpp>
namespace {
/**
* @brief CUDA kernel for computing distances and speeds of trajectories
*
*/
// One thread per trajectory: accumulates Euclidean segment lengths
// (km converted to m) and the mean speed in m/s for each trajectory.
template <typename T>
__global__ void distspeed_kernel(gdf_size_type num_traj,
const T* const __restrict__ x,
const T* const __restrict__ y,
const cuspatial::its_timestamp * const __restrict__ time,
const uint32_t * const __restrict__ len,
const uint32_t * const __restrict__ pos,
T* const __restrict__ dis,
T* const __restrict__ sp)
{
int pid=blockIdx.x*blockDim.x+threadIdx.x;
if(pid>=num_traj) return;
// bp/ep: first and last point index of this trajectory in the flattened
// x/y/time arrays (`pos` holds per-trajectory end offsets).
int bp=(pid==0)?0:pos[pid-1];
int ep=pos[pid]-1;
//assuming the same year --restriction to be removed
// Elapsed seconds between last and first sample, rebuilt from
// day-of-year/hour/minute/second/millisecond fields.
float td=(time[ep].yd-time[bp].yd)*86400;
td+=(time[ep].hh-time[bp].hh)*3600;
td+=(time[ep].mm-time[bp].mm)*60;
td+=(time[ep].ss-time[bp].ss);
td+=(time[ep].ms-time[bp].ms)/(float)1000;
// Degenerate trajectories (<2 points, zero elapsed time, or spanning a
// year boundary) are flagged with -1 in both outputs.
if((len[pid]<2)||(td==0)||(time[ep].y!=time[bp].y))
{
dis[pid]=-1;
sp[pid]=-1;
}
else
{
float ds=0;
for(int i=0;i<len[pid]-1;i++)
{
float dt=(x[bp+i+1]-x[bp+i])*(x[bp+i+1]-x[bp+i]);
dt+=(y[bp+i+1]-y[bp+i])*(y[bp+i+1]-y[bp+i]);
ds+=sqrt(dt);
}
dis[pid]=ds*1000; //km to m
sp[pid]=ds*1000/td; // m/s
}
}
// Functor for cudf::type_dispatcher: allocates the dist/speed output
// columns with RMM and launches distspeed_kernel for floating-point
// coordinate types; any other dtype raises via CUDF_FAIL.
struct distspeed_functor
{
template <typename T>
static constexpr bool is_supported()
{
return std::is_floating_point<T>::value;
}
template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr>
std::pair<gdf_column,gdf_column> operator()(const gdf_column& x,
const gdf_column& y,
const gdf_column& timestamp,
const gdf_column& length,
const gdf_column& offset)
{
// One output entry per trajectory (length.size trajectories).
gdf_column dist{};
T* temp{nullptr};
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&dist, temp, nullptr, length.size, x.dtype, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "dist");
gdf_column speed{};
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&speed, temp, nullptr, length.size, x.dtype, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "speed");
// Block size chosen via the occupancy API.
gdf_size_type min_grid_size = 0, block_size = 0;
CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&min_grid_size,
&block_size,
distspeed_kernel<T>) );
// NOTE(review): the grid is sized from x.size (number of points) though
// only length.size (number of trajectories) threads do any work; the
// kernel's pid guard keeps this correct, but it over-launches blocks.
cudf::util::cuda::grid_config_1d grid{x.size, block_size, 1};
hipLaunchKernelGGL(( distspeed_kernel<T>), dim3(grid.num_blocks), dim3(block_size), 0, 0, length.size,
static_cast<T*>(x.data), static_cast<T*>(y.data),
static_cast<cuspatial::its_timestamp*>(timestamp.data),
static_cast<uint32_t*>(length.data),
static_cast<uint32_t*>(offset.data),
static_cast<T*>(dist.data), static_cast<T*>(speed.data) );
CUDA_TRY( hipDeviceSynchronize() );
return std::make_pair(dist,speed);
}
template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr>
std::pair<gdf_column,gdf_column> operator()(const gdf_column& x,
const gdf_column& y,
const gdf_column& timestamp,
const gdf_column& length,
const gdf_column& offset)
{
CUDF_FAIL("Non-floating point operation is not supported");
}
};
} // namespace anonymous
namespace cuspatial {
/*
* Compute distance(length) and speed of trajectories
*
* see trajectory.hpp
*/
// Validate the inputs (non-null buffers, consistent sizes, no null entries,
// at least one point per trajectory) and dispatch on the coordinate dtype
// to distspeed_functor.
std::pair<gdf_column,gdf_column>
trajectory_distance_and_speed(const gdf_column& x, const gdf_column& y,
const gdf_column& timestamp,
const gdf_column& length,
const gdf_column& offset)
{
CUDF_EXPECTS(x.data != nullptr && y.data != nullptr &&
timestamp.data != nullptr && length.data != nullptr &&
offset.data != nullptr,
"Null data pointer");
CUDF_EXPECTS(x.size == y.size && x.size == timestamp.size &&
length.size == offset.size, "Data size mismatch");
//future versions might allow x/y/ts/pos/len have null_count>0, which might be useful for taking query results as inputs
CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0 &&
timestamp.null_count == 0 &&
length.null_count == 0 && offset.null_count == 0,
"Null data support not implemented");
CUDF_EXPECTS(x.size >= offset.size ,
"one trajectory must have at least one point");
std::pair<gdf_column,gdf_column> res_pair =
cudf::type_dispatcher(x.dtype, distspeed_functor(), x, y,
timestamp, length, offset);
return res_pair;
}
}// namespace cuspatial
| c5e8374f114908d4306d02d4b58032a41ee7d842.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/cuda_utils.hpp>
#include <type_traits>
#include <thrust/device_vector.h>
#include <sys/time.h>
#include <time.h>
#include <utility/utility.hpp>
#include <utility/trajectory_thrust.cuh>
#include <cuspatial/trajectory.hpp>
namespace {
/**
* @brief CUDA kernel for computing distances and speeds of trajectories
*
*/
// One thread per trajectory: accumulates Euclidean segment lengths
// (km converted to m) and the mean speed in m/s for each trajectory.
template <typename T>
__global__ void distspeed_kernel(gdf_size_type num_traj,
const T* const __restrict__ x,
const T* const __restrict__ y,
const cuspatial::its_timestamp * const __restrict__ time,
const uint32_t * const __restrict__ len,
const uint32_t * const __restrict__ pos,
T* const __restrict__ dis,
T* const __restrict__ sp)
{
int pid=blockIdx.x*blockDim.x+threadIdx.x;
if(pid>=num_traj) return;
// bp/ep: first and last point index of this trajectory in the flattened
// x/y/time arrays (`pos` holds per-trajectory end offsets).
int bp=(pid==0)?0:pos[pid-1];
int ep=pos[pid]-1;
//assuming the same year --restriction to be removed
// Elapsed seconds between last and first sample, rebuilt from
// day-of-year/hour/minute/second/millisecond fields.
float td=(time[ep].yd-time[bp].yd)*86400;
td+=(time[ep].hh-time[bp].hh)*3600;
td+=(time[ep].mm-time[bp].mm)*60;
td+=(time[ep].ss-time[bp].ss);
td+=(time[ep].ms-time[bp].ms)/(float)1000;
// Degenerate trajectories (<2 points, zero elapsed time, or spanning a
// year boundary) are flagged with -1 in both outputs.
if((len[pid]<2)||(td==0)||(time[ep].y!=time[bp].y))
{
dis[pid]=-1;
sp[pid]=-1;
}
else
{
float ds=0;
for(int i=0;i<len[pid]-1;i++)
{
float dt=(x[bp+i+1]-x[bp+i])*(x[bp+i+1]-x[bp+i]);
dt+=(y[bp+i+1]-y[bp+i])*(y[bp+i+1]-y[bp+i]);
ds+=sqrt(dt);
}
dis[pid]=ds*1000; //km to m
sp[pid]=ds*1000/td; // m/s
}
}
// Functor for cudf::type_dispatcher: allocates the dist/speed output
// columns with RMM and launches distspeed_kernel for floating-point
// coordinate types; any other dtype raises via CUDF_FAIL.
struct distspeed_functor
{
template <typename T>
static constexpr bool is_supported()
{
return std::is_floating_point<T>::value;
}
template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr>
std::pair<gdf_column,gdf_column> operator()(const gdf_column& x,
const gdf_column& y,
const gdf_column& timestamp,
const gdf_column& length,
const gdf_column& offset)
{
// One output entry per trajectory (length.size trajectories).
gdf_column dist{};
T* temp{nullptr};
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&dist, temp, nullptr, length.size, x.dtype, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "dist");
gdf_column speed{};
RMM_TRY( RMM_ALLOC(&temp, length.size * sizeof(T), 0) );
gdf_column_view_augmented(&speed, temp, nullptr, length.size, x.dtype, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "speed");
// Block size chosen via the occupancy API.
gdf_size_type min_grid_size = 0, block_size = 0;
CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&min_grid_size,
&block_size,
distspeed_kernel<T>) );
// NOTE(review): the grid is sized from x.size (number of points) though
// only length.size (number of trajectories) threads do any work; the
// kernel's pid guard keeps this correct, but it over-launches blocks.
cudf::util::cuda::grid_config_1d grid{x.size, block_size, 1};
distspeed_kernel<T><<<grid.num_blocks, block_size>>>(length.size,
static_cast<T*>(x.data), static_cast<T*>(y.data),
static_cast<cuspatial::its_timestamp*>(timestamp.data),
static_cast<uint32_t*>(length.data),
static_cast<uint32_t*>(offset.data),
static_cast<T*>(dist.data), static_cast<T*>(speed.data) );
CUDA_TRY( cudaDeviceSynchronize() );
return std::make_pair(dist,speed);
}
template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr>
std::pair<gdf_column,gdf_column> operator()(const gdf_column& x,
const gdf_column& y,
const gdf_column& timestamp,
const gdf_column& length,
const gdf_column& offset)
{
CUDF_FAIL("Non-floating point operation is not supported");
}
};
} // namespace anonymous
namespace cuspatial {
/*
* Compute distance(length) and speed of trajectories
*
* see trajectory.hpp
*/
// Validate the input columns, then hand off to distspeed_functor via the
// dtype dispatcher. Checks, in order: non-null buffers; per-point columns
// (x/y/timestamp) agree in size and per-trajectory columns (length/offset)
// agree in size; no null entries anywhere (null support may be added later
// so query results can be used directly); every trajectory has >= 1 point.
std::pair<gdf_column,gdf_column>
trajectory_distance_and_speed(const gdf_column& x, const gdf_column& y,
                              const gdf_column& timestamp,
                              const gdf_column& length,
                              const gdf_column& offset)
{
    CUDF_EXPECTS(x.data != nullptr && y.data != nullptr &&
                 timestamp.data != nullptr && length.data != nullptr &&
                 offset.data != nullptr,
                 "Null data pointer");
    CUDF_EXPECTS(x.size == y.size && x.size == timestamp.size &&
                 length.size == offset.size, "Data size mismatch");
    CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0 &&
                 timestamp.null_count == 0 &&
                 length.null_count == 0 && offset.null_count == 0,
                 "Null data support not implemented");
    CUDF_EXPECTS(x.size >= offset.size ,
                 "one trajectory must have at least one point");
    // Only floating-point coordinate types are implemented by the functor.
    return cudf::type_dispatcher(x.dtype, distspeed_functor(), x, y,
                                 timestamp, length, offset);
}
}// namespace cuspatial
|
121ba1feeb3c6b0917473330d4008713824aca00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers.h"
// Fill h_A[0..n) with pseudo-random floats uniformly in [-1, 1].
// NOTE(review): rand() is never seeded anywhere in this file, so every run
// produces the same sequence.
void initializeArray(float *h_A,int n){
for(int i = 0;i<n;i++){
//h_A[i] = 1;
h_A[i] = -1 + 2*(rand()/(float)RAND_MAX);
}
}
// Sequential reference sum of h_A[0..n); used to validate the GPU reduction.
float naive_sum(float *h_A,int n){
float sum = 0;
for(int i = 0;i<n;i++){
sum +=h_A[i];
}
return sum;
}
// Driver: reads an element count, fills a host array, runs the single-block
// reduction_sum kernel (declared in headers.h) and compares h_A[0] against
// a sequential CPU sum.
// NOTE(review): this file is hipify-generated; the issues flagged below
// should be fixed in the CUDA source it comes from.
int main(){
printf("Enter the number of elements : ");
int numElements;
scanf("%d",&numElements);
printf("You entered %d : \n",numElements);
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
size_t size = numElements*sizeof(float);
// Allocate the host input array A
float *h_A = (float *)malloc(size);
//Initialize the array
initializeArray(h_A,numElements);
float correct_ans = naive_sum(h_A,numElements);
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vector A in host memory to the device
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// NOTE(review): one block of numElements threads exceeds the per-block
// thread limit (1024) for larger inputs, and no error is checked after
// the launch, so the failure would be silent.
int blocksPerGrid,threadsPerBlock;
blocksPerGrid = 1;
threadsPerBlock = numElements;
hipLaunchKernelGGL(( reduction_sum), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_A,numElements);
// Copy the device result vector in device memory to the host result vector
// in host memory.
float ans = 0;
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_A,d_A,size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
ans = h_A[0];
//printf("Modified Array : \n");
//for(int i = 0;i<numElements;i++)printf(" %f ",h_A[i]);
if(fabs(correct_ans - ans)>1e-5){
printf("\nERROR : WRONG IMPLEMENTATION \nActual value should be : %f \n Your Answer : %f\n",correct_ans,ans);
}
else{
printf("\nAns : %f\n",ans);
}
// NOTE(review): h_A and d_A are never freed.
return 0;
} | 121ba1feeb3c6b0917473330d4008713824aca00.cu | #include "headers.h"
// Fill h_A[0..n) with pseudo-random floats uniformly in [-1, 1].
// (rand() is never seeded in this program, so runs are reproducible.)
void initializeArray(float *h_A,int n){
    for (int i = 0; i < n; ++i) {
        float const u = rand()/(float)RAND_MAX;  // u in [0, 1]
        h_A[i] = -1 + 2*u;
    }
}
// Sequential reference sum of h_A[0..n) (same left-to-right accumulation
// order as before); used to validate the GPU reduction.
float naive_sum(float *h_A,int n){
    float total = 0;
    float const *const end = h_A + n;
    for (float const *p = h_A; p != end; ++p) {
        total += *p;
    }
    return total;
}
// Driver: reads an element count, fills a host array, runs the single-block
// reduction_sum kernel (declared in headers.h) and compares h_A[0] against
// a sequential CPU sum.
int main(){
printf("Enter the number of elements : ");
int numElements;
scanf("%d",&numElements);
printf("You entered %d : \n",numElements);
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
size_t size = numElements*sizeof(float);
// Allocate the host input array A
float *h_A = (float *)malloc(size);
//Initialize the array
initializeArray(h_A,numElements);
float correct_ans = naive_sum(h_A,numElements);
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vector A in host memory to the device
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// NOTE(review): one block of numElements threads exceeds the per-block
// thread limit (1024) for larger inputs, and no cudaGetLastError() is
// checked after the launch, so the failure would be silent. A safe fix
// needs the reduction_sum kernel (in headers.h), which may assume
// blockDim.x == numElements.
int blocksPerGrid,threadsPerBlock;
blocksPerGrid = 1;
threadsPerBlock = numElements;
reduction_sum<<<blocksPerGrid,threadsPerBlock>>>(d_A,numElements);
// Copy the device result vector in device memory to the host result vector
// in host memory.
float ans = 0;
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_A,d_A,size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
ans = h_A[0];
//printf("Modified Array : \n");
//for(int i = 0;i<numElements;i++)printf(" %f ",h_A[i]);
if(fabs(correct_ans - ans)>1e-5){
printf("\nERROR : WRONG IMPLEMENTATION \nActual value should be : %f \n Your Answer : %f\n",correct_ans,ans);
}
else{
printf("\nAns : %f\n",ans);
}
// NOTE(review): h_A and d_A are never freed.
return 0;
}
954fbf1061cdec813270408efa0a03ec006d36de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
// Print and assert on any HIP runtime error; returns the code so calls can
// be wrapped inline: checkCuda(hipFoo(...)).
inline hipError_t checkCuda(hipError_t result) {
if (result != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
// Initialize memory
// Grid-stride loop: each thread handles elements i, i+stride, ... so any
// grid size covers all N elements (a = 2, b = 1, c = 0).
__global__ void initVectors(int * a, int * b, int * c) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(; i < N; i += stride) {
a[i] = 2;
b[i] = 1;
c[i] = 0;
}
}
/*
* Optimize this already-accelerated codebase. Work iteratively,
* and use nsys to support your work.
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 20us.
*
* EDIT: I made it run under 77 us :)
*
* Some bugs have been placed in this codebase for your edification.
*/
// Grid-stride "saxpy" with a hard-coded scale of 2: c[i] = 2*a[i] + b[i].
// Any launch configuration covers all N elements.
__global__ void saxpy(int * a, int * b, int * c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; i < N; i += stride)
c[i] = 2 * a[i] + b[i];
}
// Driver: managed-memory saxpy exercise; prints the first/last 5 values of
// c for a manual check (expected value 5 = 2*2 + 1).
// NOTE(review): hipGetDevice/hipGetDeviceProperties and the hipFree calls
// are unchecked; this file is hipify-generated — fix in the CUDA source.
int main()
{
int *a, *b, *c;
int size = N * sizeof (int); // The total number of bytes per vector
int deviceId;
hipDeviceProp_t props;
hipGetDevice(&deviceId);
hipGetDeviceProperties(&props, deviceId);
checkCuda(hipMallocManaged(&a, size));
checkCuda(hipMallocManaged(&b, size));
checkCuda(hipMallocManaged(&c, size));
int threads_per_block = props.warpSize * 8;
int number_of_blocks = props.multiProcessorCount * 32; // using stride is better than arbitrary blocks
// prefetch to gpu
checkCuda(hipMemPrefetchAsync(a, size, deviceId));
checkCuda(hipMemPrefetchAsync(b, size, deviceId));
checkCuda(hipMemPrefetchAsync(c, size, deviceId));
hipLaunchKernelGGL(( initVectors) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c );
checkCuda(hipGetLastError());
checkCuda(hipDeviceSynchronize());
hipLaunchKernelGGL(( saxpy) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c );
checkCuda(hipGetLastError());
checkCuda(hipDeviceSynchronize());
// Print out the first and last 5 values of c for a quality check
for( int i = 0; i < 5; ++i )
printf("c[%d] = %d, ", i, c[i]);
printf ("\n");
for( int i = N-5; i < N; ++i )
printf("c[%d] = %d, ", i, c[i]);
printf ("\n");
hipFree( a ); hipFree( b ); hipFree( c );
}
| 954fbf1061cdec813270408efa0a03ec006d36de.cu | #include <stdio.h>
#include <assert.h>
#define N 2048 * 2048 // Number of elements in each vector
// Report and assert on any CUDA runtime error; pass-through wrapper so
// calls can be written inline as checkCuda(cudaFoo(...)).
inline cudaError_t checkCuda(cudaError_t result) {
    if (result == cudaSuccess) {
        return result;
    }
    printf("Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
    return result;
}
// Initialize memory
// Grid-stride loop: each thread handles elements i, i+stride, ... so any
// grid size covers all N elements (a = 2, b = 1, c = 0).
__global__ void initVectors(int * a, int * b, int * c) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(; i < N; i += stride) {
a[i] = 2;
b[i] = 1;
c[i] = 0;
}
}
/*
* Optimize this already-accelerated codebase. Work iteratively,
* and use nsys to support your work.
*
* Aim to profile `saxpy` (without modifying `N`) running under
* 20us.
*
* EDIT: I made it run under 77 us :)
*
* Some bugs have been placed in this codebase for your edification.
*/
// Grid-stride "saxpy" with a hard-coded scale of 2: c[i] = 2*a[i] + b[i].
// Any launch configuration covers all N elements.
__global__ void saxpy(int * a, int * b, int * c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; i < N; i += stride)
c[i] = 2 * a[i] + b[i];
}
// Driver for the saxpy exercise: fills managed vectors on the GPU, runs the
// kernel, and prints the first/last 5 values of c for a manual check
// (expected value 5 = 2*2 + 1).
int main()
{
    int *a, *b, *c;
    int size = N * sizeof (int); // The total number of bytes per vector
    int deviceId;
    cudaDeviceProp props;
    // FIX: these two queries were the only unchecked runtime calls; a
    // failure would leave deviceId/props uninitialized yet still be used
    // for the launch configuration below.
    checkCuda(cudaGetDevice(&deviceId));
    checkCuda(cudaGetDeviceProperties(&props, deviceId));
    checkCuda(cudaMallocManaged(&a, size));
    checkCuda(cudaMallocManaged(&b, size));
    checkCuda(cudaMallocManaged(&c, size));
    int threads_per_block = props.warpSize * 8;
    int number_of_blocks = props.multiProcessorCount * 32; // using stride is better than arbitrary blocks
    // prefetch to gpu
    checkCuda(cudaMemPrefetchAsync(a, size, deviceId));
    checkCuda(cudaMemPrefetchAsync(b, size, deviceId));
    checkCuda(cudaMemPrefetchAsync(c, size, deviceId));
    initVectors <<< number_of_blocks, threads_per_block >>>( a, b, c );
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c );
    checkCuda(cudaGetLastError());
    checkCuda(cudaDeviceSynchronize());
    // Print out the first and last 5 values of c for a quality check
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    // FIX: the frees were unchecked as well.
    checkCuda(cudaFree( a )); checkCuda(cudaFree( b )); checkCuda(cudaFree( c ));
}
|
e8ade37516d404774e766160d46f65b51827a4ef.hip | // !!! This is a file automatically generated by hipify!!!
/*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* SUNMATRIX_CUSPARSE unit tests.
* -----------------------------------------------------------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_serial.h>
#include <sundials/sundials_math.h>
#include <sundials/sundials_matrix.h>
#include <sundials/sundials_types.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <sunmatrix/sunmatrix_sparse.h>
#include "test_sunmatrix.h"
#include "dreadrb.h"
enum { IDENTITY, RANDOM, RBFILE };
/* Implementation specific test of SUNMatrix_cuSparse_SetKernelExecPolicy */
int Test_SetKernelExecPolicy(SUNMatrix A, int myid);
/* Minimal SUNCudaExecPolicy used to exercise
 * SUNMatrix_cuSparse_SetKernelExecPolicy: every kernel launch it shapes
 * becomes a single 1x1 launch on the default (0) stream. */
class ATestExecPolicy : public SUNCudaExecPolicy
{
public:
ATestExecPolicy() : stream_(0) {}
/* Always one block, regardless of the amount of work. */
virtual size_t gridSize(size_t numWorkElements = 0, size_t blockDim = 0) const
{
return 1;
}
/* Always one thread per block. */
virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
{
return 1;
}
/* Pointer to the member copy of the default (NULL) stream handle. */
virtual const hipStream_t* stream() const
{
return &stream_;
}
/* Heap-allocated copy; the caller is responsible for deleting it. */
virtual SUNCudaExecPolicy* clone() const
{
return static_cast<SUNCudaExecPolicy*>(new ATestExecPolicy());
}
private:
const hipStream_t stream_;
};
/* ----------------------------------------------------------------------
* Main SUNMatrix Testing Routine
* --------------------------------------------------------------------*/
/* Main SUNMatrix testing routine.
 *
 * Usage: <exe> matrix(filename|random|identity) rows cols nblocks CSR|BCSR timing(0/1)
 *
 * Builds a host reference matrix (random, identity, or read from a
 * Rutherford-Boeing file), mirrors it on the device in CSR or BCSR form,
 * then runs the generic SUNMatrix test battery against the device matrix.
 * Returns the number of test failures (negative on setup error). */
int main(int argc, char *argv[])
{
  int fails=0;                      /* counter for test failures    */
  sunindextype M, N;                /* overall matrix dims          */
  sunindextype blkrows, blkcols;    /* block matrix dims            */
  int nblocks;                      /* number of matrix blocks      */
  int block_nnz_max;                /* max number of nnz in block   */
  int mattype;                      /* matrix storage type          */
  N_Vector x, y, d_x, d_y;          /* test vectors                 */
  realtype* vecdata;                /* pointers to vector data      */
  SUNMatrix A, B, C, D, dA, dB, dI; /* test matrices                */
  realtype* matdata;                /* pointer to matrix data       */
  int print_timing, square;
  int matrix_to_use;
  sunindextype i, j;
  FILE* matrixfp;
  char* filename;
  hipsparseStatus_t cusp_status;
  hipsparseHandle_t cusp_handle;

  /* initialize some input variables */
  blkrows = 0;
  blkcols = 0;
  nblocks = 0;
  square  = 0;

  /* check input */
  if (argc < 7) {
    printf("ERROR: SIX (6) inputs required: matrix (filename|random|identity), matrix rows, matrix cols, number of blocks, matrix type (CSR/BCSR), print timing (0/1)\n");
    return(-1);
  }

  /* determine what test matrix to use */
  if (!strcmp(argv[1], "random")) {
    matrix_to_use = RANDOM;
  } else if (!strcmp(argv[1], "identity")) {
    matrix_to_use = IDENTITY;
  } else {
    matrix_to_use = RBFILE;
    filename = argv[1];
  }

  /* if we are not reading from a file, verify that the dimension args are legal */
  if (matrix_to_use != RBFILE) {
    blkrows = (sunindextype) atol(argv[2]);
    if (blkrows <= 0) {
      printf("ERROR: number of rows must be a positive integer\n");
      return(-1);
    }
    blkcols = (sunindextype) atol(argv[3]);
    if (blkcols <= 0) {
      printf("ERROR: number of cols must be a positive integer\n");
      return(-1);
    }
    square = (blkrows == blkcols) ? 1 : 0;
  }

  nblocks = (sunindextype) atol(argv[4]);
  if (nblocks < 1) {
    printf("ERROR: number of blocks must be a positive integer\n");
    return(-1);
  }

  if (!strcmp(argv[5], "CSR")) {
    mattype = SUNMAT_CUSPARSE_CSR;
    if (nblocks != 1) {
      printf("ERROR: the CSR format only supports 1 block\n");
      return(-1);
    }
  } else if (!strcmp(argv[5], "BCSR")) {
    mattype = SUNMAT_CUSPARSE_BCSR;
    if (matrix_to_use == RBFILE) {
      printf("ERROR: cannot read BCSR format from a file\n");
      /* FIX: previously fell through and continued with an unusable setup */
      return(-1);
    }
    if (!square) {
      printf("ERROR: the BCSR format only supports square block matrices\n");
      return(-1);
    }
  } else {
    printf("ERROR: matrix type must be CSR or BCSR\n");
    return(-1);
  }

  print_timing = atoi(argv[6]);
  SetTiming(print_timing);

  /* Initialize cuSPARSE */
  cusp_status = hipsparseCreate(&cusp_handle);
  if (cusp_status != HIPSPARSE_STATUS_SUCCESS) {
    printf("ERROR: could not create cuSPARSE handle\n");
    return(-1);
  }

  /* Initialize vectors and matrices to NULL */
  x = NULL;
  y = NULL;
  A = NULL;
  B = NULL;
  C = NULL;
  D = NULL;
  dA = NULL;
  dB = NULL;
  dI = NULL;

  if (matrix_to_use == RANDOM) {
    M = blkrows * nblocks;
    N = blkcols * nblocks;
    block_nnz_max = blkrows*blkcols / 2;

    /* Create sparsity pattern for a block. */
    sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
    sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
    for (i=0; i<block_nnz_max; i++) {
      cols[i] = rand() % blkcols;
      rows[i] = rand() % blkrows;
    }

    /* Fill matrix with uniform random data in [0,1/N] */
    D = SUNDenseMatrix(M, N);
    for (i=0; i<nblocks; i++) {
      for (j=0; j<block_nnz_max; j++) {
        sunindextype col = cols[j] + blkcols*i;
        sunindextype row = rows[j] + blkrows*i;
        matdata = SUNDenseMatrix_Column(D,col);
        matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
      }
    }
    if (SUNMatScaleAddI(RCONST(1.0), D)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
      return(-1);
    }

    /* Fill matrix with uniform random data in [0,1/N] */
    C = SUNDenseMatrix(M, N);
    for (i=0; i<nblocks; i++) {
      for (j=0; j<block_nnz_max; j++) {
        sunindextype col = cols[j] + blkcols*i;
        sunindextype row = rows[j] + blkrows*i;
        matdata = SUNDenseMatrix_Column(C,col);
        matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
      }
    }
    if (SUNMatScaleAddI(RCONST(1.0), C)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
      return(-1);
    }

    free(cols);
    free(rows);

    /* Create sparse matrices from dense */
    A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
    if (A == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
      return(-1);
    }
    B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
    if (B == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
      return(-1);
    }
  } else if (matrix_to_use == IDENTITY) {
    M = blkrows * nblocks;
    N = blkcols * nblocks;

    /* 0*D + I turns the zero-initialized dense matrix into the identity.
       FIX: removed a duplicate, unchecked SUNMatScaleAddI(0.0, D) call. */
    D = SUNDenseMatrix(M, N);
    if (SUNMatScaleAddI(RCONST(0.0), D)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
      return(-1);
    }
    C = SUNDenseMatrix(M, N);
    if (SUNMatScaleAddI(RCONST(0.0), C)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
      return(-1);
    }

    /* Create sparse matrices from dense */
    A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
    if (A == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
      return(-1);
    }
    B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
    if (B == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
      return(-1);
    }
  } else {
    /* read a CSC matrix from a Rutherford-Boeing file and convert to CSR */
    SUNMatrix cscA;
    matrixfp = fopen(filename, "r");
    if (matrixfp == NULL) {
      /* FIX: fopen result was previously passed to dreadrb_dist unchecked */
      printf("ERROR: could not open matrix file\n");
      return(-1);
    }
    dreadrb_dist(0, matrixfp, &cscA);
    fclose(matrixfp);
    if (SUNSparseMatrix_ToCSR(cscA, &A)) {
      printf("ERROR: cannot convert matrix that was read to CSR\n");
      return(-1);
    }
    SUNMatDestroy(cscA);
    /* ensure a nonzero diagonal so the matrix is usable in the ScaleAddI tests */
    if (SUNMatScaleAddI(RCONST(1.0), A)) {
      printf("ERROR: SUNMatScaleAddI failed on matrix that read\n");
      return(-1);
    }
    blkrows = SUNSparseMatrix_Rows(A);
    blkcols = SUNSparseMatrix_Columns(A);
    square = (blkrows == blkcols) ? 1 : 0;
    nblocks = 1;
    M = blkrows * nblocks;
    N = blkcols * nblocks;
    B = SUNMatClone(A);
    if (B == NULL || (SUNMatCopy(A, B) != 0)) {
      printf("ERROR: failed to SUNMatClone and SUNMatCopy\n");
      return(-1);
    }
  }

  printf("cuSPARSE SUNMatrix test: size %ld by %ld, nblocks %ld, block size %ld by %ld, format = %i\n\n",
         (long int) M, (long int) N, (long int) nblocks, (long int) blkrows, (long int) blkcols, mattype);

  if (mattype == SUNMAT_CUSPARSE_CSR) {
    /* Create matrices that will be on the device */
    dA = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(A), SM_COLUMNS_S(A), SM_NNZ_S(A), cusp_handle);
    if (dA == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
      return(-1);
    }
    dB = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(B), SM_COLUMNS_S(B), SM_NNZ_S(B), cusp_handle);
    if (dB == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
      return(-1);
    }
  } else if (mattype == SUNMAT_CUSPARSE_BCSR) {
    sunindextype block_nnz;
    /* Calculate actual number of nonzeros per block */
    block_nnz = SUNSparseMatrix_NNZ(A) / nblocks;
    /* Create matrices that will be on the device */
    dA = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
    if (dA == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
      return(-1);
    }
    dB = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
    if (dB == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
      return(-1);
    }
  } else {
    printf("ERROR: unknown mattype\n");
    return(-1);
  }

  /* Copy data to device */
  fails = SUNMatrix_cuSparse_CopyToDevice(dA, SM_DATA_S(A), SM_INDEXPTRS_S(A), SM_INDEXVALS_S(A));
  if (fails != 0) {
    printf("ERROR: could not copy A to the device\n");
    return(-1);
  }
  fails = SUNMatrix_cuSparse_CopyToDevice(dB, SM_DATA_S(B), SM_INDEXPTRS_S(B), SM_INDEXVALS_S(B));
  if (fails != 0) {
    printf("ERROR: could not copy B to the device\n");
    return(-1);
  }

  /* Create/fill I matrix (only meaningful for square matrices) */
  dI = NULL;
  if (square) {
    dI = SUNMatClone_cuSparse(dA);
    if (dI == NULL) {
      printf("ERROR: SUNMatClone_cuSparse returned NULL\n");
      return(-1);
    }
    if (SUNMatCopy_cuSparse(dA, dI)) {
      printf("ERROR: SUNMatCopy_cuSparse failed\n");
      return(-1);
    }
    /* 0*dI + I makes dI the identity with dA's sparsity pattern */
    if (SUNMatScaleAddI_cuSparse(ZERO, dI)) {
      printf("ERROR: SUNMatScaleAddI_cuSparse failed\n");
      return(-1);
    }
  }

  /* Create vectors */
  d_x = N_VNew_Cuda(N);
  d_y = N_VNew_Cuda(M);
  if (d_x == NULL || d_y == NULL) {
    printf("ERROR: N_VNew_Cuda returned NULL\n");
    return(-1);
  }
  /* serial views share the CUDA vectors' host buffers */
  x = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_x));
  y = N_VMake_Serial(M, N_VGetHostArrayPointer_Cuda(d_y));
  if (x == NULL || y == NULL) {
    printf("ERROR: N_VMake_Serial returned NULL\n");
    return(-1);
  }

  /* Zero the vectors on the host */
  N_VConst(ZERO, x);
  N_VConst(ZERO, y);

  /* Fill vector on the host */
  vecdata = N_VGetArrayPointer(x);
  for(i=0; i<N; i++)
    vecdata[i] = (realtype) rand() / (realtype) RAND_MAX;

  /* Compute reference y on the host */
  if (SUNMatMatvec(A, x, y)) {
    printf("FAIL: SUNSparseMatrix matvec failure \n \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    SUNMatDestroy(C); SUNMatDestroy(D);
    SUNMatDestroy(dA); SUNMatDestroy(dB);
    N_VDestroy(x); N_VDestroy(y);
    N_VDestroy(d_x); N_VDestroy(d_y);
    if (square) {
      SUNMatDestroy(dI);
    }
    return(1);
  }

  /* Copy vectors to the device */
  N_VCopyToDevice_Cuda(d_x);
  N_VCopyToDevice_Cuda(d_y);

  printf("Setup complete\n");
  printf("Beginning tests\n\n");

  /* SUNMatrix Tests */
  fails += Test_SUNMatGetID(dA, SUNMATRIX_CUSPARSE, 0);
  fails += Test_SUNMatClone(dA, 0);
  fails += Test_SUNMatCopy(dA, 0);
  fails += Test_SUNMatZero(dA, 0);
  fails += Test_SUNMatScaleAdd(dA, dI, 0);
  if (square) fails += Test_SUNMatScaleAddI(dA, dI, 0);
  fails += Test_SUNMatMatvec(dA, d_x, d_y, 0);
  if (square) fails += Test_SetKernelExecPolicy(dI, 0);

  /* Print result */
  if (fails) {
    SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A), NULL, NULL);
    SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B), NULL, NULL);
    printf("\nA =\n");
    SUNSparseMatrix_Print(A,stdout);
    printf("\nB =\n");
    SUNSparseMatrix_Print(B,stdout);
    N_VCopyFromDevice_Cuda(d_x);
    N_VCopyFromDevice_Cuda(d_y);
    printf("\nx\n");
    N_VPrint_Cuda(d_x);
    printf("\ny = Ax (reference)\n");
    N_VPrint_Cuda(d_y);
  } else {
    printf("SUCCESS: SUNMatrix module passed all tests \n \n");
  }

  printf("Beginning teardown\n");

  /* Free vectors and matrices */
  N_VDestroy(x);
  N_VDestroy(y);
  N_VDestroy(d_x);
  N_VDestroy(d_y);
  SUNMatDestroy(A);
  SUNMatDestroy(B);
  SUNMatDestroy(C);
  SUNMatDestroy(D);
  SUNMatDestroy(dA);
  SUNMatDestroy(dB);
  if (square) {
    SUNMatDestroy(dI);
  }
  hipsparseDestroy(cusp_handle);

  printf("Teardown complete\n");
  return(fails);
}
/* ----------------------------------------------------------------------
* Test the SUNMatrix_cuSparse_SetKernelExecPolicy function.
* --------------------------------------------------------------------*/
/* Verify that a user-supplied kernel execution policy can be attached to a
   cloned matrix and that matrix operations still work correctly with it.
   Returns 0 on success, 1 on failure. */
int Test_SetKernelExecPolicy(SUNMatrix I, int myid)
{
  int print_all_ranks = 0;          /* consumed by the TEST_STATUS macro */
  realtype tol = 100*UNIT_ROUNDOFF;

  /* operate on a clone so the caller's identity matrix is untouched */
  SUNMatrix clone = SUNMatClone(I);
  if (clone == NULL) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" After SUNMatClone, B == NULL \n \n", myid);
    return(1);
  }

  if (SUNMatCopy(I, clone)) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" SUNMatCopy returned nonzero \n \n", myid);
    SUNMatDestroy(clone);
    return(1);
  }

  /* attach the one-block/one-thread test policy */
  ATestExecPolicy exec_policy;
  SUNMatrix_cuSparse_SetKernelExecPolicy(clone, &exec_policy);

  /* -1*I + I should zero every stored entry */
  if (SUNMatScaleAddI(RCONST(-1.0), clone)) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" SUNMatScaleAddI returned nonzero \n \n", myid);
    SUNMatDestroy(clone);
    return(1);
  }

  if (check_matrix_entry(clone, ZERO, tol)) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" check_matrix_entry returned nonzero \n \n", myid);
    SUNMatDestroy(clone);
    return(1);
  }

  TEST_STATUS(" PASSED test -- SetKernelExecPolicy \n", myid);
  SUNMatDestroy(clone);
  return 0;
}
/* ----------------------------------------------------------------------
* Check matrix
* --------------------------------------------------------------------*/
/* Compare two device matrices: same storage/sparse type, shape, nnz,
   sparsity pattern, and entries (to within tol).
   Returns 0 when the matrices agree, 1 otherwise. */
int check_matrix(SUNMatrix dA, SUNMatrix dB, realtype tol)
{
  int failure = 0;
  SUNMatrix A, B;
  realtype *Adata, *Bdata;
  sunindextype *Aindexptrs, *Bindexptrs;
  sunindextype *Aindexvals, *Bindexvals;
  sunindextype i, ANP, Annz, Bnnz;

  /* copy matrix data to host for the checks */
  A = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dA), SUNMatrix_cuSparse_Columns(dA),
                      SUNMatrix_cuSparse_NNZ(dA), CSR_MAT);
  B = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dB), SUNMatrix_cuSparse_Columns(dB),
                      SUNMatrix_cuSparse_NNZ(dB), CSR_MAT);
  /* FIX: the status of the first copy was previously discarded; now both
     copies are checked before any host-side comparison */
  if (SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A),
                                        SM_INDEXPTRS_S(A),
                                        SM_INDEXVALS_S(A)) ||
      SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B),
                                        SM_INDEXPTRS_S(B),
                                        SM_INDEXVALS_S(B))) {
    printf(">>> ERROR: check_matrix: CopyFromDevice failed \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  hipDeviceSynchronize();

  /* get matrix pointers */
  Adata = SUNSparseMatrix_Data(A);
  Aindexptrs = SUNSparseMatrix_IndexPointers(A);
  Aindexvals = SUNSparseMatrix_IndexValues(A);
  ANP = SUNSparseMatrix_NP(A);
  Annz = SUNSparseMatrix_NNZ(A);
  Bdata = SUNSparseMatrix_Data(B);
  Bindexptrs = SUNSparseMatrix_IndexPointers(B);
  Bindexvals = SUNSparseMatrix_IndexValues(B);
  Bnnz = SUNSparseMatrix_NNZ(B);

  /* matrices must have same sparsetype, shape and actual data lengths */
  if (SUNMatGetID(dA) != SUNMatGetID(dB)) {
    printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
           SUNMatGetID(dA), SUNMatGetID(dB));
    /* FIX: previously destroyed the caller's dA/dB here, leaking A and B */
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (SUNMatrix_cuSparse_SparseType(A) != SUNMatrix_cuSparse_SparseType(B)) {
    printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
           SUNMatrix_cuSparse_SparseType(A), SUNMatrix_cuSparse_SparseType(B));
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (SUNMatrix_cuSparse_Rows(dA) != SUNMatrix_cuSparse_Rows(dB)) {
    printf(">>> ERROR: check_matrix: Different numbers of rows (%ld vs %ld)\n",
           (long int) SUNMatrix_cuSparse_Rows(dA), (long int) SUNMatrix_cuSparse_Rows(dB));
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (SUNMatrix_cuSparse_Columns(dA) != SUNMatrix_cuSparse_Columns(dB)) {
    printf(">>> ERROR: check_matrix: Different numbers of columns (%ld vs %ld)\n",
           (long int) SUNMatrix_cuSparse_Columns(dA),
           (long int) SUNMatrix_cuSparse_Columns(dB));
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (Annz != Bnnz) {
    printf(">>> ERROR: check_matrix: Different numbers of nonzeros (%ld vs %ld)\n",
           (long int) Annz, (long int) Bnnz);
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }

  /* compare sparsity patterns */
  for (i=0; i<ANP; i++)
    failure += (Aindexptrs[i] != Bindexptrs[i]);
  if (failure > ZERO) {
    printf(">>> ERROR: check_matrix: Different indexptrs \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  for (i=0; i<Annz; i++)
    failure += (Aindexvals[i] != Bindexvals[i]);
  if (failure > ZERO) {
    printf(">>> ERROR: check_matrix: Different indexvals \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }

  /* compare matrix values */
  for(i=0; i<Annz; i++)
    failure += SUNRCompareTol(Adata[i], Bdata[i], tol);
  if (failure > ZERO) {
    printf(">>> ERROR: check_matrix: Different entries \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }

  SUNMatDestroy(A); SUNMatDestroy(B);
  return(0);
}
/* Check that every stored entry of the device matrix dA equals val to
   within tol. Returns 0 on success, 1 on any mismatch or copy failure. */
int check_matrix_entry(SUNMatrix dA, realtype val, realtype tol)
{
  int failure = 0;
  realtype *Adata;
  sunindextype i, nnz;

  /* hoisted: the nnz query is loop-invariant */
  nnz = SUNMatrix_cuSparse_NNZ(dA);

  /* copy matrix data to host for the checks */
  Adata = (realtype*) malloc(nnz*sizeof(realtype));
  if (Adata == NULL) {
    /* FIX: malloc result was previously passed to CopyFromDevice unchecked */
    printf(">>> ERROR: check_matrix_entry: malloc failed \n");
    return(1);
  }
  failure = SUNMatrix_cuSparse_CopyFromDevice(dA, Adata, NULL, NULL);
  hipDeviceSynchronize();

  /* compare data */
  for(i=0; i < nnz; i++) {
    failure += SUNRCompareTol(Adata[i], val, tol);
  }

  free(Adata);

  if (failure > ZERO)
    return(1);
  else
    return(0);
}
/* Compare two CUDA vectors entry-by-entry on the host.
   Returns 0 when the lengths match and all entries agree to within tol,
   1 otherwise. */
int check_vector(N_Vector expected, N_Vector computed, realtype tol)
{
  int mismatches = 0;
  realtype *edata, *cdata;
  sunindextype elen, clen, k;

  /* host-side views of the vector data */
  edata = N_VGetHostArrayPointer_Cuda(expected);
  cdata = N_VGetHostArrayPointer_Cuda(computed);

  /* bring the device data down to the host */
  N_VCopyFromDevice_Cuda(expected);
  N_VCopyFromDevice_Cuda(computed);
  hipDeviceSynchronize();

  /* lengths must agree before comparing entries */
  elen = N_VGetLength_Cuda(expected);
  clen = N_VGetLength_Cuda(computed);
  if (elen != clen) {
    printf(">>> ERROR: check_vector: Different data array lengths \n");
    return(1);
  }

  /* entry-wise comparison with tolerance */
  for (k = 0; k < elen; k++)
    mismatches += SUNRCompareTol(edata[k], cdata[k], tol);

  return (mismatches > ZERO) ? 1 : 0;
}
/* A cuSPARSE matrix has data iff its device data pointer is set. */
booleantype has_data(SUNMatrix A)
{
  return (SUNMatrix_cuSparse_Data(A) != NULL) ? SUNTRUE : SUNFALSE;
}
/* A matrix is square iff its row and column counts agree. */
booleantype is_square(SUNMatrix A)
{
  return (SUNMatrix_cuSparse_Rows(A) == SUNMatrix_cuSparse_Columns(A))
         ? SUNTRUE : SUNFALSE;
}
/* Block the host until all previously issued device work has completed.
   The SUNMatrix argument is unused; it exists to satisfy the shared
   test-harness interface. */
void sync_device(SUNMatrix A)
{
  hipDeviceSynchronize();
}
| e8ade37516d404774e766160d46f65b51827a4ef.cu | /*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* SUNMATRIX_CUSPARSE unit tests.
* -----------------------------------------------------------------
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <nvector/nvector_cuda.h>
#include <nvector/nvector_serial.h>
#include <sundials/sundials_math.h>
#include <sundials/sundials_matrix.h>
#include <sundials/sundials_types.h>
#include <sunmatrix/sunmatrix_cusparse.h>
#include <sunmatrix/sunmatrix_sparse.h>
#include "test_sunmatrix.h"
#include "dreadrb.h"
enum { IDENTITY, RANDOM, RBFILE };
/* Implementation specific test of SUNMatrix_cuSparse_SetKernelExecPolicy */
int Test_SetKernelExecPolicy(SUNMatrix A, int myid);
/* Execution policy that always requests one block of one thread on the
   default stream; used only to exercise
   SUNMatrix_cuSparse_SetKernelExecPolicy in the tests below. */
class ATestExecPolicy : public SUNCudaExecPolicy
{
public:
  ATestExecPolicy() : stream_(0) {}

  /* one block, no matter how much work there is */
  virtual size_t gridSize(size_t numWorkElements = 0, size_t blockDim = 0) const
  { return 1; }

  /* one thread per block, no matter how much work there is */
  virtual size_t blockSize(size_t numWorkElements = 0, size_t gridDim = 0) const
  { return 1; }

  /* the default (0) stream */
  virtual const cudaStream_t* stream() const
  { return &stream_; }

  virtual SUNCudaExecPolicy* clone() const
  { return static_cast<SUNCudaExecPolicy*>(new ATestExecPolicy()); }

private:
  const cudaStream_t stream_;
};
/* ----------------------------------------------------------------------
* Main SUNMatrix Testing Routine
* --------------------------------------------------------------------*/
/* Main SUNMatrix testing routine.
 *
 * Usage: <exe> matrix(filename|random|identity) rows cols nblocks CSR|BCSR timing(0/1)
 *
 * Builds a host reference matrix (random, identity, or read from a
 * Rutherford-Boeing file), mirrors it on the device in CSR or BCSR form,
 * then runs the generic SUNMatrix test battery against the device matrix.
 * Returns the number of test failures (negative on setup error). */
int main(int argc, char *argv[])
{
  int fails=0;                      /* counter for test failures    */
  sunindextype M, N;                /* overall matrix dims          */
  sunindextype blkrows, blkcols;    /* block matrix dims            */
  int nblocks;                      /* number of matrix blocks      */
  int block_nnz_max;                /* max number of nnz in block   */
  int mattype;                      /* matrix storage type          */
  N_Vector x, y, d_x, d_y;          /* test vectors                 */
  realtype* vecdata;                /* pointers to vector data      */
  SUNMatrix A, B, C, D, dA, dB, dI; /* test matrices                */
  realtype* matdata;                /* pointer to matrix data       */
  int print_timing, square;
  int matrix_to_use;
  sunindextype i, j;
  FILE* matrixfp;
  char* filename;
  cusparseStatus_t cusp_status;
  cusparseHandle_t cusp_handle;

  /* initialize some input variables */
  blkrows = 0;
  blkcols = 0;
  nblocks = 0;
  square  = 0;

  /* check input */
  if (argc < 7) {
    printf("ERROR: SIX (6) inputs required: matrix (filename|random|identity), matrix rows, matrix cols, number of blocks, matrix type (CSR/BCSR), print timing (0/1)\n");
    return(-1);
  }

  /* determine what test matrix to use */
  if (!strcmp(argv[1], "random")) {
    matrix_to_use = RANDOM;
  } else if (!strcmp(argv[1], "identity")) {
    matrix_to_use = IDENTITY;
  } else {
    matrix_to_use = RBFILE;
    filename = argv[1];
  }

  /* if we are not reading from a file, verify that the dimension args are legal */
  if (matrix_to_use != RBFILE) {
    blkrows = (sunindextype) atol(argv[2]);
    if (blkrows <= 0) {
      printf("ERROR: number of rows must be a positive integer\n");
      return(-1);
    }
    blkcols = (sunindextype) atol(argv[3]);
    if (blkcols <= 0) {
      printf("ERROR: number of cols must be a positive integer\n");
      return(-1);
    }
    square = (blkrows == blkcols) ? 1 : 0;
  }

  nblocks = (sunindextype) atol(argv[4]);
  if (nblocks < 1) {
    printf("ERROR: number of blocks must be a positive integer\n");
    return(-1);
  }

  if (!strcmp(argv[5], "CSR")) {
    mattype = SUNMAT_CUSPARSE_CSR;
    if (nblocks != 1) {
      printf("ERROR: the CSR format only supports 1 block\n");
      return(-1);
    }
  } else if (!strcmp(argv[5], "BCSR")) {
    mattype = SUNMAT_CUSPARSE_BCSR;
    if (matrix_to_use == RBFILE) {
      printf("ERROR: cannot read BCSR format from a file\n");
      /* FIX: previously fell through and continued with an unusable setup */
      return(-1);
    }
    if (!square) {
      printf("ERROR: the BCSR format only supports square block matrices\n");
      return(-1);
    }
  } else {
    printf("ERROR: matrix type must be CSR or BCSR\n");
    return(-1);
  }

  print_timing = atoi(argv[6]);
  SetTiming(print_timing);

  /* Initialize cuSPARSE */
  cusp_status = cusparseCreate(&cusp_handle);
  if (cusp_status != CUSPARSE_STATUS_SUCCESS) {
    printf("ERROR: could not create cuSPARSE handle\n");
    return(-1);
  }

  /* Initialize vectors and matrices to NULL */
  x = NULL;
  y = NULL;
  A = NULL;
  B = NULL;
  C = NULL;
  D = NULL;
  dA = NULL;
  dB = NULL;
  dI = NULL;

  if (matrix_to_use == RANDOM) {
    M = blkrows * nblocks;
    N = blkcols * nblocks;
    block_nnz_max = blkrows*blkcols / 2;

    /* Create sparsity pattern for a block. */
    sunindextype *cols = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
    sunindextype *rows = (sunindextype *) malloc(block_nnz_max*sizeof(sunindextype));
    for (i=0; i<block_nnz_max; i++) {
      cols[i] = rand() % blkcols;
      rows[i] = rand() % blkrows;
    }

    /* Fill matrix with uniform random data in [0,1/N] */
    D = SUNDenseMatrix(M, N);
    for (i=0; i<nblocks; i++) {
      for (j=0; j<block_nnz_max; j++) {
        sunindextype col = cols[j] + blkcols*i;
        sunindextype row = rows[j] + blkrows*i;
        matdata = SUNDenseMatrix_Column(D,col);
        matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
      }
    }
    if (SUNMatScaleAddI(RCONST(1.0), D)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
      return(-1);
    }

    /* Fill matrix with uniform random data in [0,1/N] */
    C = SUNDenseMatrix(M, N);
    for (i=0; i<nblocks; i++) {
      for (j=0; j<block_nnz_max; j++) {
        sunindextype col = cols[j] + blkcols*i;
        sunindextype row = rows[j] + blkrows*i;
        matdata = SUNDenseMatrix_Column(C,col);
        matdata[row] = (realtype) rand() / (realtype) RAND_MAX / N;
      }
    }
    if (SUNMatScaleAddI(RCONST(1.0), C)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
      return(-1);
    }

    free(cols);
    free(rows);

    /* Create sparse matrices from dense */
    A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
    if (A == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
      return(-1);
    }
    B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
    if (B == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
      return(-1);
    }
  } else if (matrix_to_use == IDENTITY) {
    M = blkrows * nblocks;
    N = blkcols * nblocks;

    /* 0*D + I turns the zero-initialized dense matrix into the identity.
       FIX: removed a duplicate, unchecked SUNMatScaleAddI(0.0, D) call. */
    D = SUNDenseMatrix(M, N);
    if (SUNMatScaleAddI(RCONST(0.0), D)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix D\n");
      return(-1);
    }
    C = SUNDenseMatrix(M, N);
    if (SUNMatScaleAddI(RCONST(0.0), C)) {
      printf("ERROR: SUNMatScaleAddI failed for dense matrix C\n");
      return(-1);
    }

    /* Create sparse matrices from dense */
    A = SUNSparseFromDenseMatrix(C, ZERO, CSR_MAT);
    if (A == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL for A\n");
      return(-1);
    }
    B = SUNSparseFromDenseMatrix(D, ZERO, CSR_MAT);
    if (B == NULL) {
      printf("ERROR: SUNSparseFromDenseMatrix returned NULL B\n");
      return(-1);
    }
  } else {
    /* read a CSC matrix from a Rutherford-Boeing file and convert to CSR */
    SUNMatrix cscA;
    matrixfp = fopen(filename, "r");
    if (matrixfp == NULL) {
      /* FIX: fopen result was previously passed to dreadrb_dist unchecked */
      printf("ERROR: could not open matrix file\n");
      return(-1);
    }
    dreadrb_dist(0, matrixfp, &cscA);
    fclose(matrixfp);
    if (SUNSparseMatrix_ToCSR(cscA, &A)) {
      printf("ERROR: cannot convert matrix that was read to CSR\n");
      return(-1);
    }
    SUNMatDestroy(cscA);
    /* ensure a nonzero diagonal so the matrix is usable in the ScaleAddI tests */
    if (SUNMatScaleAddI(RCONST(1.0), A)) {
      printf("ERROR: SUNMatScaleAddI failed on matrix that read\n");
      return(-1);
    }
    blkrows = SUNSparseMatrix_Rows(A);
    blkcols = SUNSparseMatrix_Columns(A);
    square = (blkrows == blkcols) ? 1 : 0;
    nblocks = 1;
    M = blkrows * nblocks;
    N = blkcols * nblocks;
    B = SUNMatClone(A);
    if (B == NULL || (SUNMatCopy(A, B) != 0)) {
      printf("ERROR: failed to SUNMatClone and SUNMatCopy\n");
      return(-1);
    }
  }

  printf("cuSPARSE SUNMatrix test: size %ld by %ld, nblocks %ld, block size %ld by %ld, format = %i\n\n",
         (long int) M, (long int) N, (long int) nblocks, (long int) blkrows, (long int) blkcols, mattype);

  if (mattype == SUNMAT_CUSPARSE_CSR) {
    /* Create matrices that will be on the device */
    dA = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(A), SM_COLUMNS_S(A), SM_NNZ_S(A), cusp_handle);
    if (dA == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
      return(-1);
    }
    dB = SUNMatrix_cuSparse_NewCSR(SM_ROWS_S(B), SM_COLUMNS_S(B), SM_NNZ_S(B), cusp_handle);
    if (dB == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
      return(-1);
    }
  } else if (mattype == SUNMAT_CUSPARSE_BCSR) {
    sunindextype block_nnz;
    /* Calculate actual number of nonzeros per block */
    block_nnz = SUNSparseMatrix_NNZ(A) / nblocks;
    /* Create matrices that will be on the device */
    dA = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
    if (dA == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dA\n");
      return(-1);
    }
    dB = SUNMatrix_cuSparse_NewBlockCSR(nblocks, blkrows, blkrows, block_nnz, cusp_handle);
    if (dB == NULL) {
      printf("ERROR: SUNMatrix_cuSparse_NewCSR returned NULL for dB\n");
      return(-1);
    }
  } else {
    printf("ERROR: unknown mattype\n");
    return(-1);
  }

  /* Copy data to device */
  fails = SUNMatrix_cuSparse_CopyToDevice(dA, SM_DATA_S(A), SM_INDEXPTRS_S(A), SM_INDEXVALS_S(A));
  if (fails != 0) {
    printf("ERROR: could not copy A to the device\n");
    return(-1);
  }
  fails = SUNMatrix_cuSparse_CopyToDevice(dB, SM_DATA_S(B), SM_INDEXPTRS_S(B), SM_INDEXVALS_S(B));
  if (fails != 0) {
    printf("ERROR: could not copy B to the device\n");
    return(-1);
  }

  /* Create/fill I matrix (only meaningful for square matrices) */
  dI = NULL;
  if (square) {
    dI = SUNMatClone_cuSparse(dA);
    if (dI == NULL) {
      printf("ERROR: SUNMatClone_cuSparse returned NULL\n");
      return(-1);
    }
    if (SUNMatCopy_cuSparse(dA, dI)) {
      printf("ERROR: SUNMatCopy_cuSparse failed\n");
      return(-1);
    }
    /* 0*dI + I makes dI the identity with dA's sparsity pattern */
    if (SUNMatScaleAddI_cuSparse(ZERO, dI)) {
      printf("ERROR: SUNMatScaleAddI_cuSparse failed\n");
      return(-1);
    }
  }

  /* Create vectors */
  d_x = N_VNew_Cuda(N);
  d_y = N_VNew_Cuda(M);
  if (d_x == NULL || d_y == NULL) {
    printf("ERROR: N_VNew_Cuda returned NULL\n");
    return(-1);
  }
  /* serial views share the CUDA vectors' host buffers */
  x = N_VMake_Serial(N, N_VGetHostArrayPointer_Cuda(d_x));
  y = N_VMake_Serial(M, N_VGetHostArrayPointer_Cuda(d_y));
  if (x == NULL || y == NULL) {
    printf("ERROR: N_VMake_Serial returned NULL\n");
    return(-1);
  }

  /* Zero the vectors on the host */
  N_VConst(ZERO, x);
  N_VConst(ZERO, y);

  /* Fill vector on the host */
  vecdata = N_VGetArrayPointer(x);
  for(i=0; i<N; i++)
    vecdata[i] = (realtype) rand() / (realtype) RAND_MAX;

  /* Compute reference y on the host */
  if (SUNMatMatvec(A, x, y)) {
    printf("FAIL: SUNSparseMatrix matvec failure \n \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    SUNMatDestroy(C); SUNMatDestroy(D);
    SUNMatDestroy(dA); SUNMatDestroy(dB);
    N_VDestroy(x); N_VDestroy(y);
    N_VDestroy(d_x); N_VDestroy(d_y);
    if (square) {
      SUNMatDestroy(dI);
    }
    return(1);
  }

  /* Copy vectors to the device */
  N_VCopyToDevice_Cuda(d_x);
  N_VCopyToDevice_Cuda(d_y);

  printf("Setup complete\n");
  printf("Beginning tests\n\n");

  /* SUNMatrix Tests */
  fails += Test_SUNMatGetID(dA, SUNMATRIX_CUSPARSE, 0);
  fails += Test_SUNMatClone(dA, 0);
  fails += Test_SUNMatCopy(dA, 0);
  fails += Test_SUNMatZero(dA, 0);
  fails += Test_SUNMatScaleAdd(dA, dI, 0);
  if (square) fails += Test_SUNMatScaleAddI(dA, dI, 0);
  fails += Test_SUNMatMatvec(dA, d_x, d_y, 0);
  if (square) fails += Test_SetKernelExecPolicy(dI, 0);

  /* Print result */
  if (fails) {
    SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A), NULL, NULL);
    SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B), NULL, NULL);
    printf("\nA =\n");
    SUNSparseMatrix_Print(A,stdout);
    printf("\nB =\n");
    SUNSparseMatrix_Print(B,stdout);
    N_VCopyFromDevice_Cuda(d_x);
    N_VCopyFromDevice_Cuda(d_y);
    printf("\nx\n");
    N_VPrint_Cuda(d_x);
    printf("\ny = Ax (reference)\n");
    N_VPrint_Cuda(d_y);
  } else {
    printf("SUCCESS: SUNMatrix module passed all tests \n \n");
  }

  printf("Beginning teardown\n");

  /* Free vectors and matrices */
  N_VDestroy(x);
  N_VDestroy(y);
  N_VDestroy(d_x);
  N_VDestroy(d_y);
  SUNMatDestroy(A);
  SUNMatDestroy(B);
  SUNMatDestroy(C);
  SUNMatDestroy(D);
  SUNMatDestroy(dA);
  SUNMatDestroy(dB);
  if (square) {
    SUNMatDestroy(dI);
  }
  cusparseDestroy(cusp_handle);

  printf("Teardown complete\n");
  return(fails);
}
/* ----------------------------------------------------------------------
* Test the SUNMatrix_cuSparse_SetKernelExecPolicy function.
* --------------------------------------------------------------------*/
/* Verify that a user-supplied kernel execution policy can be attached to a
   cloned matrix and that matrix operations still work correctly with it.
   Returns 0 on success, 1 on failure. */
int Test_SetKernelExecPolicy(SUNMatrix I, int myid)
{
  int print_all_ranks = 0;          /* consumed by the TEST_STATUS macro */
  realtype tol = 100*UNIT_ROUNDOFF;

  /* operate on a clone so the caller's identity matrix is untouched */
  SUNMatrix clone = SUNMatClone(I);
  if (clone == NULL) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" After SUNMatClone, B == NULL \n \n", myid);
    return(1);
  }

  if (SUNMatCopy(I, clone)) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" SUNMatCopy returned nonzero \n \n", myid);
    SUNMatDestroy(clone);
    return(1);
  }

  /* attach the one-block/one-thread test policy */
  ATestExecPolicy exec_policy;
  SUNMatrix_cuSparse_SetKernelExecPolicy(clone, &exec_policy);

  /* -1*I + I should zero every stored entry */
  if (SUNMatScaleAddI(RCONST(-1.0), clone)) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" SUNMatScaleAddI returned nonzero \n \n", myid);
    SUNMatDestroy(clone);
    return(1);
  }

  if (check_matrix_entry(clone, ZERO, tol)) {
    TEST_STATUS(">>> FAILED test -- SetKernelExecPolicy \n", myid);
    TEST_STATUS(" check_matrix_entry returned nonzero \n \n", myid);
    SUNMatDestroy(clone);
    return(1);
  }

  TEST_STATUS(" PASSED test -- SetKernelExecPolicy \n", myid);
  SUNMatDestroy(clone);
  return 0;
}
/* ----------------------------------------------------------------------
* Check matrix
* --------------------------------------------------------------------*/
/* Compare two device matrices: same storage/sparse type, shape, nnz,
   sparsity pattern, and entries (to within tol).
   Returns 0 when the matrices agree, 1 otherwise. */
int check_matrix(SUNMatrix dA, SUNMatrix dB, realtype tol)
{
  int failure = 0;
  SUNMatrix A, B;
  realtype *Adata, *Bdata;
  sunindextype *Aindexptrs, *Bindexptrs;
  sunindextype *Aindexvals, *Bindexvals;
  sunindextype i, ANP, Annz, Bnnz;

  /* copy matrix data to host for the checks */
  A = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dA), SUNMatrix_cuSparse_Columns(dA),
                      SUNMatrix_cuSparse_NNZ(dA), CSR_MAT);
  B = SUNSparseMatrix(SUNMatrix_cuSparse_Rows(dB), SUNMatrix_cuSparse_Columns(dB),
                      SUNMatrix_cuSparse_NNZ(dB), CSR_MAT);
  /* FIX: the status of the first copy was previously discarded; now both
     copies are checked before any host-side comparison */
  if (SUNMatrix_cuSparse_CopyFromDevice(dA, SM_DATA_S(A),
                                        SM_INDEXPTRS_S(A),
                                        SM_INDEXVALS_S(A)) ||
      SUNMatrix_cuSparse_CopyFromDevice(dB, SM_DATA_S(B),
                                        SM_INDEXPTRS_S(B),
                                        SM_INDEXVALS_S(B))) {
    printf(">>> ERROR: check_matrix: CopyFromDevice failed \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  cudaDeviceSynchronize();

  /* get matrix pointers */
  Adata = SUNSparseMatrix_Data(A);
  Aindexptrs = SUNSparseMatrix_IndexPointers(A);
  Aindexvals = SUNSparseMatrix_IndexValues(A);
  ANP = SUNSparseMatrix_NP(A);
  Annz = SUNSparseMatrix_NNZ(A);
  Bdata = SUNSparseMatrix_Data(B);
  Bindexptrs = SUNSparseMatrix_IndexPointers(B);
  Bindexvals = SUNSparseMatrix_IndexValues(B);
  Bnnz = SUNSparseMatrix_NNZ(B);

  /* matrices must have same sparsetype, shape and actual data lengths */
  if (SUNMatGetID(dA) != SUNMatGetID(dB)) {
    printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
           SUNMatGetID(dA), SUNMatGetID(dB));
    /* FIX: previously destroyed the caller's dA/dB here, leaking A and B */
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (SUNMatrix_cuSparse_SparseType(A) != SUNMatrix_cuSparse_SparseType(B)) {
    printf(">>> ERROR: check_matrix: Different storage types (%d vs %d)\n",
           SUNMatrix_cuSparse_SparseType(A), SUNMatrix_cuSparse_SparseType(B));
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (SUNMatrix_cuSparse_Rows(dA) != SUNMatrix_cuSparse_Rows(dB)) {
    printf(">>> ERROR: check_matrix: Different numbers of rows (%ld vs %ld)\n",
           (long int) SUNMatrix_cuSparse_Rows(dA), (long int) SUNMatrix_cuSparse_Rows(dB));
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (SUNMatrix_cuSparse_Columns(dA) != SUNMatrix_cuSparse_Columns(dB)) {
    printf(">>> ERROR: check_matrix: Different numbers of columns (%ld vs %ld)\n",
           (long int) SUNMatrix_cuSparse_Columns(dA),
           (long int) SUNMatrix_cuSparse_Columns(dB));
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  if (Annz != Bnnz) {
    printf(">>> ERROR: check_matrix: Different numbers of nonzeros (%ld vs %ld)\n",
           (long int) Annz, (long int) Bnnz);
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }

  /* compare sparsity patterns */
  for (i=0; i<ANP; i++)
    failure += (Aindexptrs[i] != Bindexptrs[i]);
  if (failure > ZERO) {
    printf(">>> ERROR: check_matrix: Different indexptrs \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }
  for (i=0; i<Annz; i++)
    failure += (Aindexvals[i] != Bindexvals[i]);
  if (failure > ZERO) {
    printf(">>> ERROR: check_matrix: Different indexvals \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }

  /* compare matrix values */
  for(i=0; i<Annz; i++)
    failure += SUNRCompareTol(Adata[i], Bdata[i], tol);
  if (failure > ZERO) {
    printf(">>> ERROR: check_matrix: Different entries \n");
    SUNMatDestroy(A); SUNMatDestroy(B);
    return(1);
  }

  SUNMatDestroy(A); SUNMatDestroy(B);
  return(0);
}
int check_matrix_entry(SUNMatrix dA, realtype val, realtype tol)
{
  /* Checks that every stored nonzero of the device sparse matrix dA equals
     val to within tolerance tol.  Returns 0 on success, 1 on any mismatch
     (or if the host buffer cannot be allocated / the device copy fails). */
  int failure = 0;
  realtype *Adata;
  sunindextype i, nnz;
  /* cache the nonzero count so the matrix is queried only once */
  nnz = SUNMatrix_cuSparse_NNZ(dA);
  /* copy matrix data to host for the checks */
  Adata = (realtype*) malloc(nnz*sizeof(realtype));
  if (Adata == NULL) {
    /* fix: original dereferenced the buffer without checking malloc */
    printf(">>> ERROR: check_matrix_entry: host allocation failed \n");
    return(1);
  }
  failure = SUNMatrix_cuSparse_CopyFromDevice(dA, Adata, NULL, NULL);
  cudaDeviceSynchronize();
  if (failure) {
    /* fix: original compared against an unfilled buffer when the copy
       failed; bail out early instead */
    free(Adata);
    return(1);
  }
  /* compare each entry against the expected value */
  for(i=0; i < nnz; i++) {
    failure += SUNRCompareTol(Adata[i], val, tol);
  }
  free(Adata);
  if (failure > ZERO)
    return(1);
  else
    return(0);
}
int check_vector(N_Vector expected, N_Vector computed, realtype tol)
{
  /* Compares two CUDA N_Vectors entry-by-entry with tolerance tol.
     Returns 0 if all entries match, 1 otherwise. */
  realtype *xd, *yd;
  sunindextype n, j;
  int mismatches = 0;
  /* host-side views of the vector data */
  xd = N_VGetHostArrayPointer_Cuda(expected);
  yd = N_VGetHostArrayPointer_Cuda(computed);
  /* bring device data down to the host before comparing */
  N_VCopyFromDevice_Cuda(expected);
  N_VCopyFromDevice_Cuda(computed);
  cudaDeviceSynchronize();
  /* lengths must agree before a per-entry comparison makes sense */
  n = N_VGetLength_Cuda(expected);
  if (n != N_VGetLength_Cuda(computed)) {
    printf(">>> ERROR: check_vector: Different data array lengths \n");
    return(1);
  }
  /* tally per-entry tolerance failures */
  for (j = 0; j < n; j++)
    mismatches += SUNRCompareTol(xd[j], yd[j], tol);
  return (mismatches > ZERO) ? 1 : 0;
}
booleantype has_data(SUNMatrix A)
{
  /* A matrix "has data" when its data pointer is non-NULL. */
  return (SUNMatrix_cuSparse_Data(A) != NULL) ? SUNTRUE : SUNFALSE;
}
booleantype is_square(SUNMatrix A)
{
  /* Square means the row and column counts coincide. */
  return (SUNMatrix_cuSparse_Rows(A) == SUNMatrix_cuSparse_Columns(A))
         ? SUNTRUE : SUNFALSE;
}
void sync_device(SUNMatrix A)
{
/* Block the host until all outstanding device work has completed.
   The matrix argument is unused; presumably it exists to satisfy a
   shared test-harness callback signature -- TODO confirm. */
cudaDeviceSynchronize();
}
|
50ed5fe8a5a2bfb6d3369cd6a389ca6b6d96debb.hip | // !!! This is a file automatically generated by hipify!!!
#include "ReadInput.cuh"
#include "ReadInput.cu"
#include "ReadTFS.cuh"
#include "ReadTFS.cu"
#include "Common.cuh"
#include "Common.cu"
#include "Constants.cuh"
#include "Radiation.cuh"
#include "Radiation.cu"
#include "Output.cuh"
#include "Output.cu"
#include "Distributions.cuh"
#include "Distributions.hip"
#include "Datastructures.cuh"
#include "Datastructures.cu"
// Driver (HIP build): reads the simulation input file, twiss and bunch
// data, derives per-beam parameters (radiation loss, longitudinal and
// equilibrium quantities), generates matched particle distributions per
// bunch, and prints the bucket-0 distribution.
int main(){
// input maps keyed by setting name, one map per value type
std::map<std::string, bool> inputMapBool;
std::map<std::string,int> inputMapInt;
std::map<std::string,double> inputMapDouble;
std::map<std::string,std::string> inputMapString;
std::map<std::string,std::vector<double>> inputMapVector;
// NOTE(review): input file name is hard-coded -- looks like a test
// driver; confirm before reuse.
std::string file = "testinput.in";
/* ****************************************************************************** */
/* */
/* Read Sim Settings */
/* */
/* ****************************************************************************** */
READINPUT::ReadInputFile(file, inputMapBool, inputMapInt, inputMapDouble, inputMapString, inputMapVector);
// check input
READINPUT::PrintInputBoolMap(inputMapBool);
READINPUT::PrintInputIntMap(inputMapInt);
READINPUT::PrintInputDoubleMap(inputMapDouble);
READINPUT::PrintInputStringMap(inputMapString);
READINPUT::PrintInputVectorMap(inputMapVector);
/* ****************************************************************************** */
/* */
/* Read Twiss */
/* */
/* ****************************************************************************** */
std::string twfile = inputMapString["TwissFileNameBeam1"];
std::map<std::string, double> twheader = GetTwissHeader(twfile);
printTwissHeader(twheader);
std::map<std::string, std::vector<double>> tw = GetTwissTableAsMap(twfile);
/* ****************************************************************************** */
/* */
/* Read Bunch Files */
/* */
/* ****************************************************************************** */
std::string bfile1 = inputMapString["BunchFileNameBeam1"];
// bucket number -> bunch parameter vector
std::map<int,std::vector<double>> bmap1;
READINPUT::readBunchFile(bfile1, bmap1);
READINPUT::PrintInputBunch(bmap1);
/* ****************************************************************************** */
/* */
/* Add parameters */
/* */
/* ****************************************************************************** */
// define variable and add param
// in principle one needs to do this for both beams in a collider setting
std::map<std::string, double> b1Param;
b1Param["aatom"] = inputMapDouble["atomNumber1"];
b1Param["charge"] = inputMapDouble["charge1"];
b1Param["timeratio"] = (double)inputMapInt["TimeRatio"];
// set basic params like omega, frev, etc..
COMMON::setBasic(twheader, b1Param);
// READINPUT::PrintInputDoubleMap(b1Param);
// add radiation loss per turn - NECESSARY TO DO IN THIS ORDER
b1Param["U0"] = RADIATION::RadiationLossesPerTurn(twheader, b1Param);
READINPUT::PrintInputDoubleMap(b1Param);
// add longitudinal parameters, phis, synch tune, etc...
COMMON::setLongParam(twheader, b1Param, inputMapDouble, inputMapVector, bmap1);
//OUTPUT::PrintIntVectorMap(bmap1);
READINPUT::PrintInputBunch(bmap1);
// add radiation equilib values, decay and quant excitation coefficients
COMMON::setRadParam(twheader, b1Param);
READINPUT::PrintInputDoubleMap(b1Param);
/* ****************************************************************************** */
/* */
/* Generate distributions */
/* */
/* ****************************************************************************** */
// distribution map bucket -> dist
std::map<int, std::vector<std::vector<double>>> b1dist;
// loop over the bunches and add dist
for (std::map<int, std::vector<double>>::iterator it = bmap1.begin(); it!=bmap1.end(); ++it){
b1dist[it->first] = DISTRIBUTIONS::GenerateDistributionMatched(b1Param,inputMapVector,inputMapInt, twheader, it->second );
}
// print out the dist for bucket == 0
std::cout << b1dist[0];
std::cout<<std::endl;
// round-trip the bucket-0 distribution through the double6 host vector
// type and echo each particle's six coordinates
thrust::host_vector<DATASTRUCTURES::double6> test = DATASTRUCTURES::hostVectorD6FromStdVector(b1dist[0]);
thrust::for_each(test.begin(),test.end(),[&](DATASTRUCTURES::double6 &particle){
std::cout << std::setw(16) << particle.x;
std::cout << std::setw(16) << particle.px;
std::cout << std::setw(16) << particle.y;
std::cout << std::setw(16) << particle.py;
std::cout << std::setw(16) << particle.t;
std::cout << std::setw(16) << particle.delta;
std::cout << std::endl;
});
return 0;
}
| 50ed5fe8a5a2bfb6d3369cd6a389ca6b6d96debb.cu | #include "ReadInput.cuh"
#include "ReadInput.cu"
#include "ReadTFS.cuh"
#include "ReadTFS.cu"
#include "Common.cuh"
#include "Common.cu"
#include "Constants.cuh"
#include "Radiation.cuh"
#include "Radiation.cu"
#include "Output.cuh"
#include "Output.cu"
#include "Distributions.cuh"
#include "Distributions.cu"
#include "Datastructures.cuh"
#include "Datastructures.cu"
// Driver (CUDA build): reads the simulation input file, twiss and bunch
// data, derives per-beam parameters (radiation loss, longitudinal and
// equilibrium quantities), generates matched particle distributions per
// bunch, and prints the bucket-0 distribution.
int main(){
// input maps keyed by setting name, one map per value type
std::map<std::string, bool> inputMapBool;
std::map<std::string,int> inputMapInt;
std::map<std::string,double> inputMapDouble;
std::map<std::string,std::string> inputMapString;
std::map<std::string,std::vector<double>> inputMapVector;
// NOTE(review): input file name is hard-coded -- looks like a test
// driver; confirm before reuse.
std::string file = "testinput.in";
/* ****************************************************************************** */
/* */
/* Read Sim Settings */
/* */
/* ****************************************************************************** */
READINPUT::ReadInputFile(file, inputMapBool, inputMapInt, inputMapDouble, inputMapString, inputMapVector);
// check input
READINPUT::PrintInputBoolMap(inputMapBool);
READINPUT::PrintInputIntMap(inputMapInt);
READINPUT::PrintInputDoubleMap(inputMapDouble);
READINPUT::PrintInputStringMap(inputMapString);
READINPUT::PrintInputVectorMap(inputMapVector);
/* ****************************************************************************** */
/* */
/* Read Twiss */
/* */
/* ****************************************************************************** */
std::string twfile = inputMapString["TwissFileNameBeam1"];
std::map<std::string, double> twheader = GetTwissHeader(twfile);
printTwissHeader(twheader);
std::map<std::string, std::vector<double>> tw = GetTwissTableAsMap(twfile);
/* ****************************************************************************** */
/* */
/* Read Bunch Files */
/* */
/* ****************************************************************************** */
std::string bfile1 = inputMapString["BunchFileNameBeam1"];
// bucket number -> bunch parameter vector
std::map<int,std::vector<double>> bmap1;
READINPUT::readBunchFile(bfile1, bmap1);
READINPUT::PrintInputBunch(bmap1);
/* ****************************************************************************** */
/* */
/* Add parameters */
/* */
/* ****************************************************************************** */
// define variable and add param
// in principle one needs to do this for both beams in a collider setting
std::map<std::string, double> b1Param;
b1Param["aatom"] = inputMapDouble["atomNumber1"];
b1Param["charge"] = inputMapDouble["charge1"];
b1Param["timeratio"] = (double)inputMapInt["TimeRatio"];
// set basic params like omega, frev, etc..
COMMON::setBasic(twheader, b1Param);
// READINPUT::PrintInputDoubleMap(b1Param);
// add radiation loss per turn - NECESSARY TO DO IN THIS ORDER
b1Param["U0"] = RADIATION::RadiationLossesPerTurn(twheader, b1Param);
READINPUT::PrintInputDoubleMap(b1Param);
// add longitudinal parameters, phis, synch tune, etc...
COMMON::setLongParam(twheader, b1Param, inputMapDouble, inputMapVector, bmap1);
//OUTPUT::PrintIntVectorMap(bmap1);
READINPUT::PrintInputBunch(bmap1);
// add radiation equilib values, decay and quant excitation coefficients
COMMON::setRadParam(twheader, b1Param);
READINPUT::PrintInputDoubleMap(b1Param);
/* ****************************************************************************** */
/* */
/* Generate distributions */
/* */
/* ****************************************************************************** */
// distribution map bucket -> dist
std::map<int, std::vector<std::vector<double>>> b1dist;
// loop over the bunches and add dist
for (std::map<int, std::vector<double>>::iterator it = bmap1.begin(); it!=bmap1.end(); ++it){
b1dist[it->first] = DISTRIBUTIONS::GenerateDistributionMatched(b1Param,inputMapVector,inputMapInt, twheader, it->second );
}
// print out the dist for bucket == 0
std::cout << b1dist[0];
std::cout<<std::endl;
// round-trip the bucket-0 distribution through the double6 host vector
// type and echo each particle's six coordinates
thrust::host_vector<DATASTRUCTURES::double6> test = DATASTRUCTURES::hostVectorD6FromStdVector(b1dist[0]);
thrust::for_each(test.begin(),test.end(),[&](DATASTRUCTURES::double6 &particle){
std::cout << std::setw(16) << particle.x;
std::cout << std::setw(16) << particle.px;
std::cout << std::setw(16) << particle.y;
std::cout << std::setw(16) << particle.py;
std::cout << std::setw(16) << particle.t;
std::cout << std::setw(16) << particle.delta;
std::cout << std::endl;
});
return 0;
}
|
c66100f84025dd2e01f90cb41f158ea895a96655.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
using std::cout;
using std::endl;
using std::vector;
#include "helper.h"
#include "parameter.h"
#include "poly_arithmetic.cuh"
#include "distributions.cuh"
__global__ void ternary_test(unsigned char* in, int* out)
{
    // Map a random byte onto {-1, 0, 1} by splitting the 0..255 range
    // into three equal-width buckets.
    int idx = threadIdx.x + blockIdx.x * convertBlockSize;
    float scaled = (float)in[idx] / (255.0f / 3);
    if (scaled < 1)
        out[idx] = -1;
    else if (scaled < 2)
        out[idx] = 0;
    else
        out[idx] = 1;
}
__global__ void ternary_test2(unsigned char* in, int* out)
{
    // Branchless variant of ternary_test: bucket the byte into {-1, 0, 1}
    // via truncation instead of comparisons.
    int i = threadIdx.x + blockIdx.x * convertBlockSize;
    float d = (float)in[i];
    d /= (255.0f / 3);
    // Fix: for in[i] == 255, d is exactly 3.0f, so int(d) - 1 yielded 2,
    // which is outside the ternary range. Clamp to the top bucket so the
    // output is always in {-1, 0, 1}, matching ternary_test.
    int bucket = (int)d;
    if (bucket > 2) bucket = 2;
    out[i] = bucket - 1;
}
int main()
{
    // Generates n random bytes on the device, converts them to a ternary
    // distribution with ternary_test2, and reports how many -1/0/1
    // values appear.
    int n = 1024 * 333333;
    unsigned char* in;
    int* out;
    int* host;
    hipHostMalloc(&host, n * sizeof(int));
    hipMalloc(&in, n * sizeof(char));
    hipMalloc(&out, n * sizeof(int));
    generate_random_default(in, n);
    hipLaunchKernelGGL(( ternary_test2), dim3(n/convertBlockSize), dim3(convertBlockSize), 0, 0, in, out);
    hipMemcpy(host, out, n * sizeof(int), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // host-side histogram of the three ternary values
    int sum0 = 0, sum1 = 0, sum2 = 0;
    for (int i = 0; i < n; i++)
    {
        if (host[i] == -1)
            sum0++;
        if (host[i] == 0)
            sum1++;
        if (host[i] == 1)
            sum2++;
    }
    printf("Number of -1 generated: %d\n", sum0);
    printf("Number of 0 generated: %d\n", sum1);
    printf("Number of 1 generated: %d\n", sum2);
    // Fix: release the device and pinned host allocations (the original
    // leaked all three buffers).
    hipFree(in);
    hipFree(out);
    hipHostFree(host);
    return 0;
}
| c66100f84025dd2e01f90cb41f158ea895a96655.cu | #include <iostream>
#include <string>
#include <sstream>
#include <vector>
using std::cout;
using std::endl;
using std::vector;
#include "helper.h"
#include "parameter.h"
#include "poly_arithmetic.cuh"
#include "distributions.cuh"
__global__ void ternary_test(unsigned char* in, int* out)
{
    // Map a random byte onto {-1, 0, 1} by splitting the 0..255 range
    // into three equal-width buckets.
    int idx = threadIdx.x + blockIdx.x * convertBlockSize;
    float scaled = (float)in[idx] / (255.0f / 3);
    if (scaled < 1)
        out[idx] = -1;
    else if (scaled < 2)
        out[idx] = 0;
    else
        out[idx] = 1;
}
__global__ void ternary_test2(unsigned char* in, int* out)
{
    // Branchless variant of ternary_test: bucket the byte into {-1, 0, 1}
    // via truncation instead of comparisons.
    int i = threadIdx.x + blockIdx.x * convertBlockSize;
    float d = (float)in[i];
    d /= (255.0f / 3);
    // Fix: for in[i] == 255, d is exactly 3.0f, so int(d) - 1 yielded 2,
    // which is outside the ternary range. Clamp to the top bucket so the
    // output is always in {-1, 0, 1}, matching ternary_test.
    int bucket = (int)d;
    if (bucket > 2) bucket = 2;
    out[i] = bucket - 1;
}
int main()
{
    // Generates n random bytes on the device, converts them to a ternary
    // distribution with ternary_test2, and reports how many -1/0/1
    // values appear.
    int n = 1024 * 333333;
    unsigned char* in;
    int* out;
    int* host;
    cudaMallocHost(&host, n * sizeof(int));
    cudaMalloc(&in, n * sizeof(char));
    cudaMalloc(&out, n * sizeof(int));
    generate_random_default(in, n);
    ternary_test2<<<n/convertBlockSize, convertBlockSize>>>(in, out);
    cudaMemcpy(host, out, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // host-side histogram of the three ternary values
    int sum0 = 0, sum1 = 0, sum2 = 0;
    for (int i = 0; i < n; i++)
    {
        if (host[i] == -1)
            sum0++;
        if (host[i] == 0)
            sum1++;
        if (host[i] == 1)
            sum2++;
    }
    printf("Number of -1 generated: %d\n", sum0);
    printf("Number of 0 generated: %d\n", sum1);
    printf("Number of 1 generated: %d\n", sum2);
    // Fix: release the device and pinned host allocations (the original
    // leaked all three buffers).
    cudaFree(in);
    cudaFree(out);
    cudaFreeHost(host);
    return 0;
}
|
f5b5bf71bd004bcbafb94a726e33df6d330d64c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void matAdd(int *A, int *B, int *C)
{
    // Elementwise matrix add, one thread per element.  The launch uses
    // <<<rows, cols>>>, so blockIdx.x selects the row, threadIdx.x the
    // column, and blockDim.x equals the column count.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
int main()
{
    // Reads two row x col integer matrices from stdin, adds them on the
    // device (one block per row, one thread per column), and prints the
    // result matrix.
    int row, col;
    printf("Enter the row and columns of matrices\n");
    // Fix: validate the dimension read; the original used row/col
    // uninitialized on malformed input.
    if (scanf("%i %i", &row, &col) != 2 || row <= 0 || col <= 0)
    {
        printf("Invalid matrix dimensions\n");
        return 1;
    }
    int totalElements = row * col;
    int *matA = (int *)calloc(totalElements, sizeof(int)), *dA;
    int *matB = (int *)calloc(totalElements, sizeof(int)), *dB;
    int *matC = (int *)calloc(totalElements, sizeof(int)), *dC;
    printf("Enter elements of A matrix\n");
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            scanf("%i", &matA[i * col + j]);
        }
    }
    printf("Enter elements of B matrix\n");
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            scanf("%i", &matB[i * col + j]);
        }
    }
    hipMalloc(&dA, sizeof(int) * totalElements);
    hipMalloc(&dB, sizeof(int) * totalElements);
    hipMalloc(&dC, sizeof(int) * totalElements);
    hipMemcpy(dA, matA, sizeof(int) * totalElements, hipMemcpyHostToDevice);
    hipMemcpy(dB, matB, sizeof(int) * totalElements, hipMemcpyHostToDevice);
    // NOTE(review): col > 1024 exceeds the per-block thread limit and the
    // launch would fail silently -- confirm expected input sizes.
    hipLaunchKernelGGL(( matAdd), dim3(row), dim3(col), 0, 0, dA, dB, dC);
    hipMemcpy(matC, dC, sizeof(int) * totalElements, hipMemcpyDeviceToHost);
    printf("Result Matrix is\n");
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            printf("%i ", matC[i * col + j]);
        }
        printf("\n");
    }
    // Fix: release device and host allocations (originally all leaked)
    // and return an explicit status.
    hipFree(dA); hipFree(dB); hipFree(dC);
    free(matA); free(matB); free(matC);
    return 0;
}
| f5b5bf71bd004bcbafb94a726e33df6d330d64c6.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void matAdd(int *A, int *B, int *C)
{
    // Elementwise matrix add, one thread per element.  The launch uses
    // <<<rows, cols>>>, so blockIdx.x selects the row, threadIdx.x the
    // column, and blockDim.x equals the column count.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
int main()
{
    // Reads two row x col integer matrices from stdin, adds them on the
    // device (one block per row, one thread per column), and prints the
    // result matrix.
    int row, col;
    printf("Enter the row and columns of matrices\n");
    // Fix: validate the dimension read; the original used row/col
    // uninitialized on malformed input.
    if (scanf("%i %i", &row, &col) != 2 || row <= 0 || col <= 0)
    {
        printf("Invalid matrix dimensions\n");
        return 1;
    }
    int totalElements = row * col;
    int *matA = (int *)calloc(totalElements, sizeof(int)), *dA;
    int *matB = (int *)calloc(totalElements, sizeof(int)), *dB;
    int *matC = (int *)calloc(totalElements, sizeof(int)), *dC;
    printf("Enter elements of A matrix\n");
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            scanf("%i", &matA[i * col + j]);
        }
    }
    printf("Enter elements of B matrix\n");
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            scanf("%i", &matB[i * col + j]);
        }
    }
    cudaMalloc(&dA, sizeof(int) * totalElements);
    cudaMalloc(&dB, sizeof(int) * totalElements);
    cudaMalloc(&dC, sizeof(int) * totalElements);
    cudaMemcpy(dA, matA, sizeof(int) * totalElements, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, matB, sizeof(int) * totalElements, cudaMemcpyHostToDevice);
    // NOTE(review): col > 1024 exceeds the per-block thread limit and the
    // launch would fail silently -- confirm expected input sizes.
    matAdd<<<row, col>>>(dA, dB, dC);
    cudaMemcpy(matC, dC, sizeof(int) * totalElements, cudaMemcpyDeviceToHost);
    printf("Result Matrix is\n");
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            printf("%i ", matC[i * col + j]);
        }
        printf("\n");
    }
    // Fix: release device and host allocations (originally all leaked)
    // and return an explicit status.
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    free(matA); free(matB); free(matC);
    return 0;
}
|
b378ffb2c7b5d211208d3d618947d866d160c130.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Creates an elementwise binary layer of the given operator type over
// two input tensors and returns its single output tensor.  When
// inplace_a is true the op may write its result into in1's buffer.
// The layer object is owned by the model's layers list.
Tensor FFModel::binary(OperatorType op,
const Tensor& in1,
const Tensor& in2,
bool inplace_a,
char const *name)
{
ElementBinary *ele = new ElementBinary(*this, op, in1, in2, inplace_a, name);
layers.push_back(ele);
return ele->outputs[0];
}
// Convenience wrappers: each builds an elementwise binary layer of the
// corresponding operator type via FFModel::binary().
Tensor FFModel::add(const Tensor& in1,
const Tensor& in2,
bool inplace_a,
char const *name)
{
return this->binary(OP_EW_ADD, in1, in2, inplace_a, name);
}
Tensor FFModel::subtract(const Tensor& in1,
const Tensor& in2,
bool inplace_a,
char const *name)
{
return this->binary(OP_EW_SUB, in1, in2, inplace_a, name);
}
Tensor FFModel::multiply(const Tensor& in1,
const Tensor& in2,
bool inplace_a,
char const *name)
{
return this->binary(OP_EW_MUL, in1, in2, inplace_a, name);
}
Tensor FFModel::divide(const Tensor& in1,
const Tensor& in2,
bool inplace_a,
char const *name)
{
return this->binary(OP_EW_DIV, in1, in2, inplace_a, name);
}
// Constructor: records the operator type and inplace flag, and derives
// the output shape from the inputs.  Both inputs must have identical
// rank and dimensions (no broadcasting yet -- see TODO below).
ElementBinary::ElementBinary(FFModel& model,
OperatorType _op_type,
const Tensor& in1,
const Tensor& in2,
bool _inplace_a,
const char* name)
: Op(
model,
_op_type,
name,
in1,
in2
),
inplace_a(_inplace_a)
{
//TODO: implement broadcast op
numOutputs = 1;
numWeights = 0;
assert(in1.numDim == in2.numDim);
int dim = in1.numDim;
outputs[0].numDim = in1.numDim;
// output shape is the (asserted-equal) shape of both inputs
for (int i = 0; i < dim; i++) {
assert(in1.adim[i] == in2.adim[i]);
outputs[0].adim[i] = in1.adim[i];
}
}
bool ElementBinary::can_inplace_output(void)
{
  // Only addition and multiplication support writing the result back
  // into the first input's buffer.
  return (op_type == OP_EW_ADD) || (op_type == OP_EW_MUL);
}
// Reports whether this op is currently configured to run in place.
bool ElementBinary::has_inplace_output(void)
{
return inplace_a;
}
// Switches the op into in-place mode (output aliases input 0).
void ElementBinary::do_inplace_output(void)
{
inplace_a = true;
}
// Elementwise binary ops have no trainable weights.
void ElementBinary::create_weights(FFModel& model)
{
// Do nothing
}
// Validates that both inputs share a shape, then dispatches to the
// dimension-templated implementation via the LEGION_FOREACH_N macro
// (one case per supported tensor rank).
void ElementBinary::create_output_and_partition(FFModel& model)
{
//TODO: implement broadcast op
assert(inputs[0].numDim == inputs[1].numDim);
int dim = inputs[0].numDim;
for (int i = 0; i < dim; i++)
assert(inputs[0].adim[i] == inputs[1].adim[i]);
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
// Unsupported dim for ElementWiseBinary operator
assert(false);
}
}
}
// Rank-specific setup: obtains the op's index space, then either aliases
// the output onto input 0 (in-place mode) or creates a fresh output
// tensor, and builds input partitions that match the task launch space.
template<int NDIM>
void ElementBinary::create_output_and_partition_with_dim(FFModel& model)
{
// Retrive the task indexspace for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
if (inplace_a) {
// in-place: output aliases input 0's region and partition
outputs[0] = inputs[0];
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
for (int i = 0; i < 2; i++) {
Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
// inplace_a require part_rect == inputs[0].part_rect
if (i == 0)
assert(input_rect == part_rect);
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]);
}
}
return;
}
// out-of-place: allocate a new output tensor of the shared input shape
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
// reuse an input's partition when it already matches the launch space,
// otherwise build a disjoint partition for it
for (int i = 0; i < 2; i++) {
Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]);
}
}
}
// Legion init task: builds the per-shard ElementBinaryMeta, validating
// that the output domain matches the input domain and configuring the
// cuDNN op-tensor descriptor.  Note: OP_EW_DIV has no cuDNN op mode
// here, so division ops hit assert(false) in this path.
__host__
OpMeta* ElementBinary::init_task(const Task* task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime* runtime)
{
ElementBinary* eb = (ElementBinary*) task->args;
FFHandler handle = *((FFHandler*) task->local_args);
ElementBinaryMeta* m = new ElementBinaryMeta(handle);
m->op_type = eb->op_type;
m->profiling = eb->profiling;
m->inplace_a = eb->inplace_a;
Domain input_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain output_domain;
// in-place launches carry 2 regions (in0/out alias), otherwise 3
if (m->inplace_a) {
assert(regions.size() == 2);
assert(task->regions.size() == regions.size());
output_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(output_domain == input_domain);
} else {
assert(regions.size() == 3);
assert(task->regions.size() == regions.size());
output_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(output_domain == input_domain);
}
// SUB reuses the ADD op mode; the sign is applied via alpha2 later
cudnnOpTensorOp_t mode;
switch (eb->op_type) {
case OP_EW_ADD:
case OP_EW_SUB:
mode = CUDNN_OP_TENSOR_ADD;
break;
case OP_EW_MUL:
mode = CUDNN_OP_TENSOR_MUL;
break;
default:
assert(false);
}
checkCUDNN(cudnnSetOpTensorDescriptor(m->opDesc, mode,
CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN));
checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->inputTensor, input_domain));
checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->outputTensor, output_domain));
return m;
}
// Launches the per-shard init tasks across the op's index space, passing
// each point its FFHandler, then collects the resulting OpMeta pointers.
void ElementBinary::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
// per-rank dispatch: hand each launch point the handler for its device
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
ParallelConfig pc; \
std::string pcname = name; \
ff.config.find_parallel_config(DIM, pcname, pc); \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(ELEMENTBINARY_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
// in-place mode reuses input 0's region as the output, so no third
// region requirement is added
if (!inplace_a) {
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
} else {
assert(outputs[0].part == input_lps[0]);
assert(outputs[0].region == inputs[0].region);
}
//launcher.add_region_requirement(
// RegionRequirement(input_grad_lps[0], 0/*projection id*/,
// WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
//launcher.add_field(3, FID_DATA);
//if (inputs[0].region_grad != inputs[1].region_grad) {
// regions[4](I/O): input1_grad
// launcher.add_region_requirement(
// RegionRequirement(input_grad_lps[1], 0/*projection id*/,
// WRITE_ONLY, EXCLUSIVE, inputs[1].region_grad));
// launcher.add_field(4, FID_DATA);
//}
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
// store each point's OpMeta for use by forward/backward tasks
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
meta[idx++] = fm.get_result<OpMeta*>(*it); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
__global__
void elewise_binary_forward_kernel(coord_t volume,
                                   const float alpha,
                                   const float beta,
                                   OperatorType type,
                                   const float* in1,
                                   const float* in2,
                                   float* out)
{
  // Grid-stride elementwise combine: out = alpha * (in1 OP in2) + beta * out.
  // The per-case expressions are kept verbatim so results are bitwise
  // identical; only the loop/switch nesting differs.
  CUDA_KERNEL_LOOP(i, volume)
  {
    switch (type) {
      case OP_EW_ADD:
        out[i] = alpha * (in1[i] + in2[i]) + beta * out[i];
        break;
      case OP_EW_SUB:
        out[i] = alpha * (in1[i] - in2[i]) + beta * out[i];
        break;
      case OP_EW_MUL:
        out[i] = alpha * in1[i] * in2[i] + beta * out[i];
        break;
      case OP_EW_DIV:
        out[i] = alpha * (in1[i] / in2[i]) + beta * out[i];
        break;
      default:
        assert(false);
    }
  }
}
/*static*/
// cuDNN forward path: out = alpha1*in1 OP alpha2*in2 using the opDesc
// configured in init_task.  SUB is expressed as ADD with alpha2 = -1.
// OP_EW_DIV is not supported here (asserts); only the custom CUDA
// kernel above handles division.
void ElementBinary::forward_kernel(const ElementBinaryMeta* m,
const float* in1_ptr,
const float* in2_ptr,
float* out_ptr)
{
float alpha1 = 1.0f, alpha2 = 1.0f, beta = 0.0f;
switch (m->op_type) {
case OP_EW_SUB:
alpha2 = -1.0f;
break;
case OP_EW_ADD:
case OP_EW_MUL:
break;
default:
assert(false);
}
checkCUDNN(cudnnOpTensor(m->handle.dnn, m->opDesc,
&alpha1, m->inputTensor, in1_ptr,
&alpha2, m->inputTensor, in2_ptr,
&beta, m->outputTensor, out_ptr));
}
/*
regions[0](I): in1
regions[1](I): in2
regions[2](O): output
*/
// Legion forward task: unpacks the region pointers (2 regions in-place,
// 3 otherwise), optionally times the kernel with events, and runs the
// cuDNN forward path.
__host__
void ElementBinary::forward_task(const Task* task,
const std::vector<PhysicalRegion> &regions,
Context ctx, Runtime* runtime)
{
//const ElementBinary* ele = (const ElementBinary*) task->args;
const ElementBinaryMeta* m = *((ElementBinaryMeta**) task->local_args);
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain in2_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
assert(in1_domain == in2_domain);
const float* in1_ptr = NULL, *in2_ptr = NULL;
float *out_ptr = NULL;
if (m->inplace_a) {
// in-place: region 0 is both first input and output
assert(regions.size() == 2);
assert(task->regions.size() == 2);
out_ptr = helperGetTensorPointerRW<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
in2_ptr = helperGetTensorPointerRO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
in1_ptr = out_ptr;
} else {
assert(regions.size() == 3);
assert(task->regions.size() == 3);
Domain out_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(out_domain == in1_domain);
in1_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
in2_ptr = helperGetTensorPointerRO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
out_ptr = helperGetTensorPointerWO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
}
hipEvent_t t_start, t_end;
if (m->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
#ifndef DISABLE_LEGION_CUDA_HIJACK
// NOTE(review): a new stream is created on every invocation and never
// destroyed -- this leaks a stream per forward pass.  Also, the timing
// events above are recorded before this stream exists, so they
// presumably measure the default stream; confirm intended semantics.
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
//print_tensor<float>(in1_ptr, in1_domain.get_volume(), "input1:");
//print_tensor<float>(in2_ptr, in2_domain.get_volume(), "input2:");
forward_kernel(m, in1_ptr, in2_ptr, out_ptr);
//print_tensor<float>(out_ptr, in1_domain.get_volume(), "output:");
if (m->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
char const *opName;
switch (m->op_type) {
case OP_EW_ADD:
opName = "Add";
break;
case OP_EW_SUB:
opName = "Sub";
break;
case OP_EW_MUL:
opName = "Mul";
break;
case OP_EW_DIV:
opName = "Div";
break;
default:
assert(false);
}
printf("[%s] forward time (CF) = %.2fms\n", opName, elapsed);
}
}
// Launches the forward tasks across the op's index space, handing each
// point its OpMeta pointer from init().  In-place mode passes input 0
// as READ_WRITE and omits the separate output region.
void ElementBinary::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
if (inplace_a) {
assert(outputs[0].part == input_lps[0]);
assert(outputs[0].region == inputs[0].region);
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
} else {
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
}
runtime->execute_index_space(ctx, launcher);
}
__global__
void elewise_binary_backward_kernel(coord_t volume,
                                    const float alpha,
                                    const float beta,
                                    OperatorType type,
                                    const float* out_grad,
                                    const float* in1,
                                    const float* in2,
                                    float* in1_grad,
                                    float* in2_grad)
{
  // Grid-stride backward pass: writes alpha-scaled input gradients with
  // beta-weighted accumulation.  The per-case expressions are kept
  // verbatim; only the loop/switch nesting differs (switch hoisted out
  // of the loop, mirroring the forward kernel's shape).
  switch (type) {
    case OP_EW_ADD:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
        in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i];
      }
      break;
    }
    case OP_EW_SUB:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
        in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i];
      }
      break;
    }
    case OP_EW_MUL:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i];
        in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i];
      }
      break;
    }
    case OP_EW_DIV:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i];
        in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i];
      }
      break;
    }
    default:
      assert(false);
  }
}
/*static*/
// cuDNN backward path: two cudnnOpTensor calls accumulate (beta = 1)
// the gradients -- the first writes in2_grad, the second in1_grad.
// For ADD/SUB the second operand is zeroed out via alpha2 = 0; for MUL
// the opDesc (CUDNN_OP_TENSOR_MUL) multiplies out_grad by the other
// input.  OP_EW_DIV is not supported here (asserts).
void ElementBinary::backward_kernel(const ElementBinaryMeta* m,
const float* out_grad_ptr,
const float* in1_ptr,
const float* in2_ptr,
float* in1_grad_ptr,
float* in2_grad_ptr)
{
float alpha1 = 1.0f, alpha2 = 1.0f, beta = 1.0f;
// in2_grad: ADD -> +out_grad, SUB -> -out_grad, MUL -> out_grad*in1
switch (m->op_type) {
case OP_EW_ADD:
alpha1 = 1.0f;
alpha2 = 0.0f;
break;
case OP_EW_SUB:
alpha1 = -1.0f;
alpha2 = 0.0f;
break;
case OP_EW_MUL:
alpha1 = 1.0f;
alpha2 = 1.0f;
break;
default:
assert(false);
}
checkCUDNN(cudnnOpTensor(m->handle.dnn, m->opDesc,
&alpha1, m->outputTensor, out_grad_ptr,
&alpha2, m->inputTensor, in1_ptr,
&beta, m->inputTensor, in2_grad_ptr));
// in1_grad: ADD/SUB -> +out_grad, MUL -> out_grad*in2
switch (m->op_type) {
case OP_EW_ADD:
case OP_EW_SUB:
alpha1 = 1.0f;
alpha2 = 0.0f;
break;
case OP_EW_MUL:
alpha1 = 1.0f;
alpha2 = 1.0f;
break;
default:
assert(false);
}
checkCUDNN(cudnnOpTensor(m->handle.dnn, m->opDesc,
&alpha1, m->outputTensor, out_grad_ptr,
&alpha2, m->inputTensor, in2_ptr,
&beta, m->inputTensor, in1_grad_ptr));
}
/*
  regions[0](I or I/O): out_grad (I/O if inplace_a)
  regions[1](I): in0
  regions[2](I/O): in0_grad (Missing if in0_grad = out_grad)
  regions[3](I): in1 (Missing if in0 = in1)
  regions[4](I/O): in1_grad (Missing if in0 = in1)
*/
// Backward task body: decode the physical regions per the layout above,
// then run backward_kernel.  When inplace_a, regions[0] doubles as both
// out_grad and in0_grad; when the two inputs alias, in1/in1_grad reuse
// in0/in0_grad.
void ElementBinary::backward_task(const Task *task,
                                  const std::vector<PhysicalRegion> &regions,
                                  Context ctx, Runtime* runtime)
{
  //const ElementBinary* ele = (const ElementBinary*) task->args;
  const ElementBinaryMeta* m = *((ElementBinaryMeta**) task->local_args);
  const float *in0_ptr = NULL, *in1_ptr = NULL, *out_grad_ptr = NULL;
  float *in0_grad_ptr = NULL, *in1_grad_ptr = NULL;
  Domain out_grad_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  if (m->inplace_a) {
    in0_grad_ptr = helperGetTensorPointerRW<float>(
        regions[0], task->regions[0], FID_DATA, ctx, runtime);
    // BUGFIX: this was `if (regions.size() == 2 || regions.size() == 4);` --
    // an empty statement that validated nothing.  Assert the region count,
    // mirroring the non-inplace branch below.
    assert(regions.size() == 2 || regions.size() == 4);
    assert(task->regions.size() == regions.size());
    if (regions.size() == 2) {
      // in0 == in1: one input region; every gradient aliases regions[0].
      Domain in0_domain = runtime->get_index_space_domain(
          ctx, task->regions[1].region.get_index_space());
      assert(in0_domain == out_grad_domain);
      in0_ptr = helperGetTensorPointerRO<float>(
          regions[1], task->regions[1], FID_DATA, ctx, runtime);
      in1_ptr = in0_ptr;
      in1_grad_ptr = in0_grad_ptr;
      out_grad_ptr = in0_grad_ptr;
    } else {
      Domain in0_domain = runtime->get_index_space_domain(
          ctx, task->regions[1].region.get_index_space());
      Domain in1_domain = runtime->get_index_space_domain(
          ctx, task->regions[2].region.get_index_space());
      assert(in0_domain == out_grad_domain);
      assert(in1_domain == out_grad_domain);
      in0_ptr = helperGetTensorPointerRO<float>(
          regions[1], task->regions[1], FID_DATA, ctx, runtime);
      in1_ptr = helperGetTensorPointerRO<float>(
          regions[2], task->regions[2], FID_DATA, ctx, runtime);
      in1_grad_ptr = helperGetTensorPointerRW<float>(
          regions[3], task->regions[3], FID_DATA, ctx, runtime);
      out_grad_ptr = in0_grad_ptr;
    }
  } else {
    assert(regions.size() == 3 || regions.size() == 5);
    assert(task->regions.size() == regions.size());
    out_grad_ptr = helperGetTensorPointerRO<float>(
        regions[0], task->regions[0], FID_DATA, ctx, runtime);
    Domain in0_domain = runtime->get_index_space_domain(
        ctx, task->regions[1].region.get_index_space());
    Domain in0_grad_domain = runtime->get_index_space_domain(
        ctx, task->regions[2].region.get_index_space());
    assert(out_grad_domain == in0_grad_domain);
    assert(out_grad_domain == in0_domain);
    in0_ptr = helperGetTensorPointerRO<float>(
        regions[1], task->regions[1], FID_DATA, ctx, runtime);
    in0_grad_ptr = helperGetTensorPointerRW<float>(
        regions[2], task->regions[2], FID_DATA, ctx, runtime);
    if (regions.size() == 3) {
      // in0 == in1
      in1_ptr = in0_ptr;
      in1_grad_ptr = in0_grad_ptr;
    } else {
      Domain in1_domain = runtime->get_index_space_domain(
          ctx, task->regions[3].region.get_index_space());
      Domain in1_grad_domain = runtime->get_index_space_domain(
          ctx, task->regions[4].region.get_index_space());
      assert(out_grad_domain == in1_domain);
      assert(out_grad_domain == in1_grad_domain);
      in1_ptr = helperGetTensorPointerRO<float>(
          regions[3], task->regions[3], FID_DATA, ctx, runtime);
      in1_grad_ptr = helperGetTensorPointerRW<float>(
          regions[4], task->regions[4], FID_DATA, ctx, runtime);
    }
  }
#ifndef DISABLE_LEGION_CUDA_HIJACK
  hipStream_t stream;
  checkCUDA(hipStreamCreate(&stream));
  checkCUDA(hipblasSetStream(m->handle.blas, stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
  backward_kernel(m, out_grad_ptr, in0_ptr, in1_ptr, in0_grad_ptr, in1_grad_ptr);
  //elewise_binary_backward_kernel<<<GET_BLOCKS(out_grad_domain.get_volume()), CUDA_NUM_THREADS>>>(
    //out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr,
    //in1_grad_ptr, in2_grad_ptr);
}
// Launch the backward index task.  The region layout must match
// backward_task: the in1/in1_grad requirements exist only when the second
// input is a DIFFERENT region from the first (when the inputs alias, the
// task reuses in0/in0_grad for in1/in1_grad).
void ElementBinary::backward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Domain domain = runtime->get_index_space_domain(ctx, task_is);
  // Attach each point's per-device OpMeta* as its task argument.
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        OpMeta* mp = meta[idx++]; \
        argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }
  IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is,
      TaskArgument(NULL, 0), argmap,
      Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
      FFConfig::get_hash_id(std::string(name)));
  if (inplace_a) {
    // regions[0](I/O): output_grad (doubles as input0_grad)
    launcher.add_region_requirement(
        RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
    launcher.add_field(0, FID_DATA);
    // regions[1](I): input0
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    launcher.add_field(1, FID_DATA);
    // BUGFIX: these regions are needed precisely when the inputs do NOT
    // alias (backward_task reuses in0/in0_grad when they do); the original
    // condition was inverted (==).
    if (inputs[0].region != inputs[1].region) {
      // regions[2](I): input1
      launcher.add_region_requirement(
          RegionRequirement(input_lps[1], 0/*projection id*/,
                            READ_ONLY, EXCLUSIVE, inputs[1].region));
      launcher.add_field(2, FID_DATA);
      // regions[3](I/O): input1_grad
      launcher.add_region_requirement(
          RegionRequirement(input_grad_lps[1], 0/*projection id*/,
                            READ_WRITE, EXCLUSIVE, inputs[1].region_grad));
      launcher.add_field(3, FID_DATA);
    }
  } else {
    // regions[0](I): output_grad
    launcher.add_region_requirement(
        RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
    launcher.add_field(0, FID_DATA);
    // regions[1](I): input0
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    launcher.add_field(1, FID_DATA);
    // regions[2](I/O): input0_grad
    launcher.add_region_requirement(
        RegionRequirement(input_grad_lps[0], 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
    launcher.add_field(2, FID_DATA);
    // BUGFIX: same inverted condition as the inplace branch.
    if (inputs[0].region != inputs[1].region) {
      // regions[3](I): input1
      launcher.add_region_requirement(
          RegionRequirement(input_lps[1], 0/*projection id*/,
                            READ_ONLY, EXCLUSIVE, inputs[1].region));
      launcher.add_field(3, FID_DATA);
      // regions[4](I/O): input1_grad
      launcher.add_region_requirement(
          RegionRequirement(input_grad_lps[1], 0/*projection id*/,
                            READ_WRITE, EXCLUSIVE, inputs[1].region_grad));
      launcher.add_field(4, FID_DATA);
    }
  }
  runtime->execute_index_space(ctx, launcher);
}
// Per-device metadata: owns the cuDNN tensor/op descriptors used by the
// forward/backward kernels.  Descriptors are configured later (op_type
// starts as the OP_ANY placeholder).
ElementBinaryMeta::ElementBinaryMeta(FFHandler handler)
: OpMeta(handler)
{
  checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
  checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
  op_type = OP_ANY;
}
// Simulator cost model: allocate simulated buffers for the sub-tensors of
// this parallel config (+ gradients when training), configure the shared
// cuDNN descriptors, and time forward/backward via inner_measure_operator_cost.
// Returns false if any sub-tensor cannot be formed for this config.
bool ElementBinary::measure_operator_cost(Simulator* sim,
                                          const ParallelConfig& pc,
                                          CostMetrics& cost_metrics)
{
  Tensor sub_output, sub_input1, sub_input0;
  if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type))
    return false;
  if (!inputs[0].get_input_sub_tensor(pc, sub_input0, op_type))
    return false;
  if (!inputs[1].get_input_sub_tensor(pc, sub_input1, op_type))
    return false;
  ElementBinaryMeta* m = sim->ele_binary_meta;
  m->op_type = op_type;
  // Same descriptor setup as the real init path: ADD/SUB share the
  // CUDNN_OP_TENSOR_ADD mode, MUL uses CUDNN_OP_TENSOR_MUL.
  cudnnOpTensorOp_t mode;
  switch (op_type) {
    case OP_EW_ADD:
    case OP_EW_SUB:
      mode = CUDNN_OP_TENSOR_ADD;
      break;
    case OP_EW_MUL:
      mode = CUDNN_OP_TENSOR_MUL;
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnSetOpTensorDescriptor(m->opDesc, mode,
      CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN));
  Domain input_domain = sub_input0.get_domain();
  Domain output_domain = sub_output.get_domain();
  checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->inputTensor, input_domain));
  checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->outputTensor, output_domain));
  sim->free_all();
  float* input0_ptr = (float*)sim->allocate(sub_input0.get_volume(), DT_FLOAT);
  assert(input0_ptr != NULL);
  float* input1_ptr = (float*)sim->allocate(sub_input1.get_volume(), DT_FLOAT);
  assert(input1_ptr != NULL);
  float* output_ptr = NULL;
  if (inplace_a) {
    // In-place: output writes over input0's buffer.
    output_ptr = input0_ptr;
  } else {
    output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
  }
  assert(output_ptr != NULL);
  std::function<void()> forward, backward;
  forward = [&] {
    forward_kernel(m, input0_ptr, input1_ptr, output_ptr);
  };
  if (sim->computationMode == COMP_MODE_TRAINING) {
    float* input0_grad_ptr = (float*)sim->allocate(sub_input0.get_volume(), DT_FLOAT);
    assert(input0_grad_ptr != NULL);
    // BUGFIX: size input1's gradient by sub_input1 (was sub_input0).
    float* input1_grad_ptr = (float*)sim->allocate(sub_input1.get_volume(), DT_FLOAT);
    assert(input1_grad_ptr != NULL);
    float* output_grad_ptr = NULL;
    if (inplace_a) {
      output_grad_ptr = input0_grad_ptr;
    } else {
      output_grad_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
    }
    assert(output_grad_ptr != NULL);
    // BUGFIX: capture by value.  The grad pointers are locals of this block,
    // so a by-reference capture dangles once the block exits, and the lambda
    // is only invoked later inside inner_measure_operator_cost.
    backward = [=] {
      backward_kernel(m, output_grad_ptr, input0_ptr, input1_ptr, input0_grad_ptr, input1_grad_ptr);
    };
  }
  inner_measure_operator_cost(sim, forward, backward, cost_metrics);
  if (sim->computationMode == COMP_MODE_TRAINING) {
    printf("[Measure Elewise Binary] name(%s) num_elements(%zu) forward_time(%.4lf) backward_time(%.4lf)\n",
        name, sub_output.get_volume(),
        cost_metrics.forward_time,
        cost_metrics.backward_time);
  } else {
    printf("[Measure Elewise Binary] name(%s) num_elements(%zu) forward_time(%.4lf)\n",
        name, sub_output.get_volume(),
        cost_metrics.forward_time);
  }
  return true;
}
| b378ffb2c7b5d211208d3d618947d866d160c130.cu | /* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// Append an elementwise binary operator (add/sub/mul/div) to the model and
// return its output tensor.  `inplace_a` requests that the output share
// input 0's buffer when the op supports it.
Tensor FFModel::binary(OperatorType op,
                       const Tensor& in1,
                       const Tensor& in2,
                       bool inplace_a,
                       char const *name)
{
  ElementBinary *ele = new ElementBinary(*this, op, in1, in2, inplace_a, name);
  layers.push_back(ele);
  return ele->outputs[0];
}

// Convenience wrappers: each forwards to binary() with the matching op type.
Tensor FFModel::add(const Tensor& in1,
                    const Tensor& in2,
                    bool inplace_a,
                    char const *name)
{
  return this->binary(OP_EW_ADD, in1, in2, inplace_a, name);
}

Tensor FFModel::subtract(const Tensor& in1,
                         const Tensor& in2,
                         bool inplace_a,
                         char const *name)
{
  return this->binary(OP_EW_SUB, in1, in2, inplace_a, name);
}

Tensor FFModel::multiply(const Tensor& in1,
                         const Tensor& in2,
                         bool inplace_a,
                         char const *name)
{
  return this->binary(OP_EW_MUL, in1, in2, inplace_a, name);
}

Tensor FFModel::divide(const Tensor& in1,
                       const Tensor& in2,
                       bool inplace_a,
                       char const *name)
{
  return this->binary(OP_EW_DIV, in1, in2, inplace_a, name);
}
// Constructor: requires the two inputs to have identical shapes (broadcast
// is not implemented yet) and sizes the single output to match them.
ElementBinary::ElementBinary(FFModel& model,
                             OperatorType _op_type,
                             const Tensor& in1,
                             const Tensor& in2,
                             bool _inplace_a,
                             const char* name)
: Op(
    model,
    _op_type,
    name,
    in1,
    in2
),
inplace_a(_inplace_a)
{
  //TODO: implement broadcast op
  numOutputs = 1;
  numWeights = 0;
  assert(in1.numDim == in2.numDim);
  int dim = in1.numDim;
  outputs[0].numDim = in1.numDim;
  for (int i = 0; i < dim; i++) {
    assert(in1.adim[i] == in2.adim[i]);
    outputs[0].adim[i] = in1.adim[i];
  }
}

// Only add and multiply may write their output over input 0 in place.
bool ElementBinary::can_inplace_output(void)
{
  if (op_type == OP_EW_ADD)
    return true;
  if (op_type == OP_EW_MUL)
    return true;
  return false;
}

// Whether this instance is currently configured to run in place.
bool ElementBinary::has_inplace_output(void)
{
  return inplace_a;
}

// Switch this operator to in-place mode (output shares input 0's region).
void ElementBinary::do_inplace_output(void)
{
  inplace_a = true;
}

// Elementwise binary ops have no trainable weights.
void ElementBinary::create_weights(FFModel& model)
{
  // Do nothing
}
// Dispatch output/partition creation on the tensor dimensionality.
void ElementBinary::create_output_and_partition(FFModel& model)
{
  //TODO: implement broadcast op
  assert(inputs[0].numDim == inputs[1].numDim);
  int dim = inputs[0].numDim;
  for (int i = 0; i < dim; i++)
    assert(inputs[0].adim[i] == inputs[1].adim[i]);
  switch (dim) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      create_output_and_partition_with_dim<DIM>(model); \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
    {
      // Unsupported dim for ElementWiseBinary operator
      assert(false);
    }
  }
}

// Create (or, for in-place, alias) the output tensor and obtain logical
// partitions of both inputs that match this operator's task index space.
template<int NDIM>
void ElementBinary::create_output_and_partition_with_dim(FFModel& model)
{
  // Retrive the task indexspace for the op
  task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
  Context ctx = model.config.lg_ctx;
  Runtime* runtime = model.config.lg_hlr;
  Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
  if (inplace_a) {
    // In-place: the output IS input 0's tensor, re-owned by this op.
    outputs[0] = inputs[0];
    outputs[0].owner_op = this;
    outputs[0].owner_idx = 0;
    for (int i = 0; i < 2; i++) {
      Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
          ctx, inputs[i].part.get_index_partition());
      // inplace_a require part_rect == inputs[0].part_rect
      if (i == 0)
        assert(input_rect == part_rect);
      if (input_rect == part_rect) {
        // Existing input partition already matches the launch space.
        input_lps[i] = inputs[i].part;
        input_grad_lps[i] = inputs[i].part_grad;
      } else {
        model.create_disjoint_partition<NDIM>(
            inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]);
      }
    }
    return;
  }
  // Non-inplace: allocate a fresh output tensor with the inputs' shape
  // (adim is stored reversed relative to dims).
  int dims[NDIM];
  for (int i = 0; i < NDIM; i++)
    dims[i] = inputs[0].adim[NDIM-1-i];
  outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
  outputs[0].owner_op = this;
  outputs[0].owner_idx = 0;
  for (int i = 0; i < 2; i++) {
    Rect<NDIM> input_rect = runtime->get_index_partition_color_space(
        ctx, inputs[i].part.get_index_partition());
    if (input_rect == part_rect) {
      input_lps[i] = inputs[i].part;
      input_grad_lps[i] = inputs[i].part_grad;
    } else {
      model.create_disjoint_partition<NDIM>(
          inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]);
    }
  }
}
// Per-device init task: build an ElementBinaryMeta with cuDNN descriptors
// configured for this operator instance and return it to the caller.
__host__
OpMeta* ElementBinary::init_task(const Task* task,
                                 const std::vector<PhysicalRegion> &regions,
                                 Context ctx, Runtime* runtime)
{
  ElementBinary* eb = (ElementBinary*) task->args;
  FFHandler handle = *((FFHandler*) task->local_args);
  ElementBinaryMeta* m = new ElementBinaryMeta(handle);
  m->op_type = eb->op_type;
  m->profiling = eb->profiling;
  m->inplace_a = eb->inplace_a;
  Domain input_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Domain output_domain;
  if (m->inplace_a) {
    // In-place launch supplies only the two inputs; the output aliases in0.
    assert(regions.size() == 2);
    assert(task->regions.size() == regions.size());
    output_domain = runtime->get_index_space_domain(
        ctx, task->regions[1].region.get_index_space());
    assert(output_domain == input_domain);
  } else {
    assert(regions.size() == 3);
    assert(task->regions.size() == regions.size());
    output_domain = runtime->get_index_space_domain(
        ctx, task->regions[2].region.get_index_space());
    assert(output_domain == input_domain);
  }
  // ADD and SUB share CUDNN_OP_TENSOR_ADD (SUB is realized with a negative
  // alpha at call time); MUL uses CUDNN_OP_TENSOR_MUL.
  cudnnOpTensorOp_t mode;
  switch (eb->op_type) {
    case OP_EW_ADD:
    case OP_EW_SUB:
      mode = CUDNN_OP_TENSOR_ADD;
      break;
    case OP_EW_MUL:
      mode = CUDNN_OP_TENSOR_MUL;
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnSetOpTensorDescriptor(m->opDesc, mode,
      CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN));
  checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->inputTensor, input_domain));
  checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->outputTensor, output_domain));
  return m;
}

// Launch the init index task: one point task per device, each receiving its
// FFHandler; collect the returned per-device OpMeta pointers into meta[].
void ElementBinary::init(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Domain domain = runtime->get_index_space_domain(ctx, task_is);
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      ParallelConfig pc; \
      std::string pcname = name; \
      ff.config.find_parallel_config(DIM, pcname, pc); \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
        argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }
  IndexLauncher launcher(ELEMENTBINARY_INIT_TASK_ID, task_is,
      TaskArgument(this, sizeof(ElementBinary)), argmap,
      Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
      FFConfig::get_hash_id(std::string(name)));
  launcher.add_region_requirement(
      RegionRequirement(input_lps[0], 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, inputs[0].region));
  launcher.add_field(0, FID_DATA);
  launcher.add_region_requirement(
      RegionRequirement(input_lps[1], 0/*projection id*/,
                        READ_WRITE, EXCLUSIVE, inputs[1].region));
  launcher.add_field(1, FID_DATA);
  if (!inplace_a) {
    launcher.add_region_requirement(
        RegionRequirement(outputs[0].part, 0/*projection id*/,
                          WRITE_ONLY, EXCLUSIVE, outputs[0].region));
    launcher.add_field(2, FID_DATA);
  } else {
    // In-place: output must already alias input 0, so no third region.
    assert(outputs[0].part == input_lps[0]);
    assert(outputs[0].region == inputs[0].region);
  }
  //launcher.add_region_requirement(
  //  RegionRequirement(input_grad_lps[0], 0/*projection id*/,
  //                    WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad));
  //launcher.add_field(3, FID_DATA);
  //if (inputs[0].region_grad != inputs[1].region_grad) {
  // regions[4](I/O): input1_grad
  //  launcher.add_region_requirement(
  //    RegionRequirement(input_grad_lps[1], 0/*projection id*/,
  //                      WRITE_ONLY, EXCLUSIVE, inputs[1].region_grad));
  //  launcher.add_field(4, FID_DATA);
  //}
  FutureMap fm = runtime->execute_index_space(ctx, launcher);
  fm.wait_all_results();
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        meta[idx++] = fm.get_result<OpMeta*>(*it); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }
}
// Reference CUDA kernel for the elementwise forward pass:
//   out = alpha * (in1 OP in2) + beta * out
// No call site is visible in this file; forward_task uses the cuDNN path
// (forward_kernel) instead.
__global__
void elewise_binary_forward_kernel(coord_t volume,
                                   const float alpha,
                                   const float beta,
                                   OperatorType type,
                                   const float* in1,
                                   const float* in2,
                                   float* out)
{
  switch (type) {
    case OP_EW_ADD:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        out[i] = alpha * (in1[i] + in2[i]) + beta * out[i];
      }
      break;
    }
    case OP_EW_SUB:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        out[i] = alpha * (in1[i] - in2[i]) + beta * out[i];
      }
      break;
    }
    case OP_EW_MUL:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        out[i] = alpha * in1[i] * in2[i] + beta * out[i];
      }
      break;
    }
    case OP_EW_DIV:
    {
      CUDA_KERNEL_LOOP(i, volume)
      {
        out[i] = alpha * (in1[i] / in2[i]) + beta * out[i];
      }
      break;
    }
    default:
      assert(false);
  }
}

/*static*/
// Forward via cuDNN OpTensor: out = (alpha1*in1) OP (alpha2*in2); beta = 0
// overwrites the output.  SUB is expressed as ADD with alpha2 = -1 because
// m->opDesc uses CUDNN_OP_TENSOR_ADD for both ADD and SUB.
void ElementBinary::forward_kernel(const ElementBinaryMeta* m,
                                   const float* in1_ptr,
                                   const float* in2_ptr,
                                   float* out_ptr)
{
  float alpha1 = 1.0f, alpha2 = 1.0f, beta = 0.0f;
  switch (m->op_type) {
    case OP_EW_SUB:
      alpha2 = -1.0f;
      break;
    case OP_EW_ADD:
    case OP_EW_MUL:
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnOpTensor(m->handle.dnn, m->opDesc,
      &alpha1, m->inputTensor, in1_ptr,
      &alpha2, m->inputTensor, in2_ptr,
      &beta, m->outputTensor, out_ptr));
}
/*
  regions[0](I): in1
  regions[1](I): in2
  regions[2](O): output
*/
// Forward task body: decode regions (for inplace_a, regions[0] is both in1
// and the output), optionally time the kernel, and run forward_kernel.
__host__
void ElementBinary::forward_task(const Task* task,
                                 const std::vector<PhysicalRegion> &regions,
                                 Context ctx, Runtime* runtime)
{
  //const ElementBinary* ele = (const ElementBinary*) task->args;
  const ElementBinaryMeta* m = *((ElementBinaryMeta**) task->local_args);
  Domain in1_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  Domain in2_domain = runtime->get_index_space_domain(
      ctx, task->regions[1].region.get_index_space());
  assert(in1_domain == in2_domain);
  const float* in1_ptr = NULL, *in2_ptr = NULL;
  float *out_ptr = NULL;
  if (m->inplace_a) {
    // In-place: regions[0] is read-write and serves as in1 AND the output.
    assert(regions.size() == 2);
    assert(task->regions.size() == 2);
    out_ptr = helperGetTensorPointerRW<float>(
        regions[0], task->regions[0], FID_DATA, ctx, runtime);
    in2_ptr = helperGetTensorPointerRO<float>(
        regions[1], task->regions[1], FID_DATA, ctx, runtime);
    in1_ptr = out_ptr;
  } else {
    assert(regions.size() == 3);
    assert(task->regions.size() == 3);
    Domain out_domain = runtime->get_index_space_domain(
        ctx, task->regions[2].region.get_index_space());
    assert(out_domain == in1_domain);
    in1_ptr = helperGetTensorPointerRO<float>(
        regions[0], task->regions[0], FID_DATA, ctx, runtime);
    in2_ptr = helperGetTensorPointerRO<float>(
        regions[1], task->regions[1], FID_DATA, ctx, runtime);
    out_ptr = helperGetTensorPointerWO<float>(
        regions[2], task->regions[2], FID_DATA, ctx, runtime);
  }
  // Optional kernel timing via CUDA events when profiling is enabled.
  cudaEvent_t t_start, t_end;
  if (m->profiling) {
    cudaEventCreate(&t_start);
    cudaEventCreate(&t_end);
    cudaEventRecord(t_start);
  }
#ifndef DISABLE_LEGION_CUDA_HIJACK
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  checkCUDA(cublasSetStream(m->handle.blas, stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
  //print_tensor<float>(in1_ptr, in1_domain.get_volume(), "input1:");
  //print_tensor<float>(in2_ptr, in2_domain.get_volume(), "input2:");
  forward_kernel(m, in1_ptr, in2_ptr, out_ptr);
  //print_tensor<float>(out_ptr, in1_domain.get_volume(), "output:");
  if (m->profiling) {
    cudaEventRecord(t_end);
    checkCUDA(cudaEventSynchronize(t_end));
    float elapsed = 0;
    checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
    cudaEventDestroy(t_start);
    cudaEventDestroy(t_end);
    char const *opName;
    switch (m->op_type) {
      case OP_EW_ADD:
        opName = "Add";
        break;
      case OP_EW_SUB:
        opName = "Sub";
        break;
      case OP_EW_MUL:
        opName = "Mul";
        break;
      case OP_EW_DIV:
        opName = "Div";
        break;
      default:
        assert(false);
    }
    printf("[%s] forward time (CF) = %.2fms\n", opName, elapsed);
  }
}

// Launch the forward index task with the region layout forward_task expects
// (2 regions in-place, 3 otherwise).
void ElementBinary::forward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Domain domain = runtime->get_index_space_domain(ctx, task_is);
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        OpMeta* mp = meta[idx++]; \
        argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }
  IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is,
      TaskArgument(NULL, 0), argmap,
      Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
      FFConfig::get_hash_id(std::string(name)));
  if (inplace_a) {
    // In-place: input 0's region is read-write and doubles as the output.
    assert(outputs[0].part == input_lps[0]);
    assert(outputs[0].region == inputs[0].region);
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, inputs[0].region));
    launcher.add_field(0, FID_DATA);
    launcher.add_region_requirement(
        RegionRequirement(input_lps[1], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[1].region));
    launcher.add_field(1, FID_DATA);
  } else {
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    launcher.add_field(0, FID_DATA);
    launcher.add_region_requirement(
        RegionRequirement(input_lps[1], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[1].region));
    launcher.add_field(1, FID_DATA);
    launcher.add_region_requirement(
        RegionRequirement(outputs[0].part, 0/*projection id*/,
                          WRITE_ONLY, EXCLUSIVE, outputs[0].region));
    launcher.add_field(2, FID_DATA);
  }
  runtime->execute_index_space(ctx, launcher);
}
// Reference CUDA kernel for the elementwise backward pass (covers all four
// ops including DIV).  Commented out at the call site in favor of the cuDNN
// path in backward_kernel below.
__global__
void elewise_binary_backward_kernel(coord_t volume,
                                    const float alpha,
                                    const float beta,
                                    OperatorType type,
                                    const float* out_grad,
                                    const float* in1,
                                    const float* in2,
                                    float* in1_grad,
                                    float* in2_grad)
{
  CUDA_KERNEL_LOOP(i, volume)
  {
    switch (type) {
      case OP_EW_ADD:
      {
        in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
        in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i];
        break;
      }
      case OP_EW_SUB:
      {
        in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
        in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i];
        break;
      }
      case OP_EW_MUL:
      {
        in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i];
        in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i];
        break;
      }
      case OP_EW_DIV:
      {
        in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i];
        in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i];
        break;
      }
      default:
        assert(false);
    }
  }
}

/*static*/
// Accumulate input gradients via cuDNN.  m->opDesc is CUDNN_OP_TENSOR_ADD
// for ADD/SUB and CUDNN_OP_TENSOR_MUL for MUL; beta == 1 accumulates into
// the existing gradient buffers.  OP_EW_DIV has no cuDNN path here.
void ElementBinary::backward_kernel(const ElementBinaryMeta* m,
                                    const float* out_grad_ptr,
                                    const float* in1_ptr,
                                    const float* in2_ptr,
                                    float* in1_grad_ptr,
                                    float* in2_grad_ptr)
{
  float alpha1 = 1.0f, alpha2 = 1.0f, beta = 1.0f;
  // First call: gradient w.r.t. in2 (sign flips for SUB; out_grad*in1 for MUL).
  switch (m->op_type) {
    case OP_EW_ADD:
      alpha1 = 1.0f;
      alpha2 = 0.0f;
      break;
    case OP_EW_SUB:
      alpha1 = -1.0f;
      alpha2 = 0.0f;
      break;
    case OP_EW_MUL:
      alpha1 = 1.0f;
      alpha2 = 1.0f;
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnOpTensor(m->handle.dnn, m->opDesc,
      &alpha1, m->outputTensor, out_grad_ptr,
      &alpha2, m->inputTensor, in1_ptr,
      &beta, m->inputTensor, in2_grad_ptr));
  // Second call: gradient w.r.t. in1 (+out_grad for ADD/SUB, out_grad*in2 for MUL).
  switch (m->op_type) {
    case OP_EW_ADD:
    case OP_EW_SUB:
      alpha1 = 1.0f;
      alpha2 = 0.0f;
      break;
    case OP_EW_MUL:
      alpha1 = 1.0f;
      alpha2 = 1.0f;
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnOpTensor(m->handle.dnn, m->opDesc,
      &alpha1, m->outputTensor, out_grad_ptr,
      &alpha2, m->inputTensor, in2_ptr,
      &beta, m->inputTensor, in1_grad_ptr));
}
/*
  regions[0](I or I/O): out_grad (I/O if inplace_a)
  regions[1](I): in0
  regions[2](I/O): in0_grad (Missing if in0_grad = out_grad)
  regions[3](I): in1 (Missing if in0 = in1)
  regions[4](I/O): in1_grad (Missing if in0 = in1)
*/
// Backward task body: decode the physical regions per the layout above,
// then run backward_kernel.  When inplace_a, regions[0] doubles as both
// out_grad and in0_grad; when the two inputs alias, in1/in1_grad reuse
// in0/in0_grad.
void ElementBinary::backward_task(const Task *task,
                                  const std::vector<PhysicalRegion> &regions,
                                  Context ctx, Runtime* runtime)
{
  //const ElementBinary* ele = (const ElementBinary*) task->args;
  const ElementBinaryMeta* m = *((ElementBinaryMeta**) task->local_args);
  const float *in0_ptr = NULL, *in1_ptr = NULL, *out_grad_ptr = NULL;
  float *in0_grad_ptr = NULL, *in1_grad_ptr = NULL;
  Domain out_grad_domain = runtime->get_index_space_domain(
      ctx, task->regions[0].region.get_index_space());
  if (m->inplace_a) {
    in0_grad_ptr = helperGetTensorPointerRW<float>(
        regions[0], task->regions[0], FID_DATA, ctx, runtime);
    // BUGFIX: this was `if (regions.size() == 2 || regions.size() == 4);` --
    // an empty statement that validated nothing.  Assert the region count,
    // mirroring the non-inplace branch below.
    assert(regions.size() == 2 || regions.size() == 4);
    assert(task->regions.size() == regions.size());
    if (regions.size() == 2) {
      // in0 == in1: one input region; every gradient aliases regions[0].
      Domain in0_domain = runtime->get_index_space_domain(
          ctx, task->regions[1].region.get_index_space());
      assert(in0_domain == out_grad_domain);
      in0_ptr = helperGetTensorPointerRO<float>(
          regions[1], task->regions[1], FID_DATA, ctx, runtime);
      in1_ptr = in0_ptr;
      in1_grad_ptr = in0_grad_ptr;
      out_grad_ptr = in0_grad_ptr;
    } else {
      Domain in0_domain = runtime->get_index_space_domain(
          ctx, task->regions[1].region.get_index_space());
      Domain in1_domain = runtime->get_index_space_domain(
          ctx, task->regions[2].region.get_index_space());
      assert(in0_domain == out_grad_domain);
      assert(in1_domain == out_grad_domain);
      in0_ptr = helperGetTensorPointerRO<float>(
          regions[1], task->regions[1], FID_DATA, ctx, runtime);
      in1_ptr = helperGetTensorPointerRO<float>(
          regions[2], task->regions[2], FID_DATA, ctx, runtime);
      in1_grad_ptr = helperGetTensorPointerRW<float>(
          regions[3], task->regions[3], FID_DATA, ctx, runtime);
      out_grad_ptr = in0_grad_ptr;
    }
  } else {
    assert(regions.size() == 3 || regions.size() == 5);
    assert(task->regions.size() == regions.size());
    out_grad_ptr = helperGetTensorPointerRO<float>(
        regions[0], task->regions[0], FID_DATA, ctx, runtime);
    Domain in0_domain = runtime->get_index_space_domain(
        ctx, task->regions[1].region.get_index_space());
    Domain in0_grad_domain = runtime->get_index_space_domain(
        ctx, task->regions[2].region.get_index_space());
    assert(out_grad_domain == in0_grad_domain);
    assert(out_grad_domain == in0_domain);
    in0_ptr = helperGetTensorPointerRO<float>(
        regions[1], task->regions[1], FID_DATA, ctx, runtime);
    in0_grad_ptr = helperGetTensorPointerRW<float>(
        regions[2], task->regions[2], FID_DATA, ctx, runtime);
    if (regions.size() == 3) {
      // in0 == in1
      in1_ptr = in0_ptr;
      in1_grad_ptr = in0_grad_ptr;
    } else {
      Domain in1_domain = runtime->get_index_space_domain(
          ctx, task->regions[3].region.get_index_space());
      Domain in1_grad_domain = runtime->get_index_space_domain(
          ctx, task->regions[4].region.get_index_space());
      assert(out_grad_domain == in1_domain);
      assert(out_grad_domain == in1_grad_domain);
      in1_ptr = helperGetTensorPointerRO<float>(
          regions[3], task->regions[3], FID_DATA, ctx, runtime);
      in1_grad_ptr = helperGetTensorPointerRW<float>(
          regions[4], task->regions[4], FID_DATA, ctx, runtime);
    }
  }
#ifndef DISABLE_LEGION_CUDA_HIJACK
  cudaStream_t stream;
  checkCUDA(cudaStreamCreate(&stream));
  checkCUDA(cublasSetStream(m->handle.blas, stream));
  checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
  backward_kernel(m, out_grad_ptr, in0_ptr, in1_ptr, in0_grad_ptr, in1_grad_ptr);
  //elewise_binary_backward_kernel<<<GET_BLOCKS(out_grad_domain.get_volume()), CUDA_NUM_THREADS>>>(
    //out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr,
    //in1_grad_ptr, in2_grad_ptr);
}
// Launch the backward index task.  The region layout must match
// backward_task: the in1/in1_grad requirements exist only when the second
// input is a DIFFERENT region from the first (when the inputs alias, the
// task reuses in0/in0_grad for in1/in1_grad).
void ElementBinary::backward(const FFModel& ff)
{
  ArgumentMap argmap;
  Context ctx = ff.config.lg_ctx;
  Runtime* runtime = ff.config.lg_hlr;
  Domain domain = runtime->get_index_space_domain(ctx, task_is);
  // Attach each point's per-device OpMeta* as its task argument.
  switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
    case DIM: \
    { \
      Rect<DIM> rect = domain; \
      int idx = 0; \
      for (PointInRectIterator<DIM> it(rect); it(); it++) { \
        OpMeta* mp = meta[idx++]; \
        argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
      } \
      break; \
    }
    LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
    default:
      assert(false);
  }
  IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is,
      TaskArgument(NULL, 0), argmap,
      Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
      FFConfig::get_hash_id(std::string(name)));
  if (inplace_a) {
    // regions[0](I/O): output_grad (doubles as input0_grad)
    launcher.add_region_requirement(
        RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, outputs[0].region_grad));
    launcher.add_field(0, FID_DATA);
    // regions[1](I): input0
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    launcher.add_field(1, FID_DATA);
    // BUGFIX: these regions are needed precisely when the inputs do NOT
    // alias (backward_task reuses in0/in0_grad when they do); the original
    // condition was inverted (==).
    if (inputs[0].region != inputs[1].region) {
      // regions[2](I): input1
      launcher.add_region_requirement(
          RegionRequirement(input_lps[1], 0/*projection id*/,
                            READ_ONLY, EXCLUSIVE, inputs[1].region));
      launcher.add_field(2, FID_DATA);
      // regions[3](I/O): input1_grad
      launcher.add_region_requirement(
          RegionRequirement(input_grad_lps[1], 0/*projection id*/,
                            READ_WRITE, EXCLUSIVE, inputs[1].region_grad));
      launcher.add_field(3, FID_DATA);
    }
  } else {
    // regions[0](I): output_grad
    launcher.add_region_requirement(
        RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
    launcher.add_field(0, FID_DATA);
    // regions[1](I): input0
    launcher.add_region_requirement(
        RegionRequirement(input_lps[0], 0/*projection id*/,
                          READ_ONLY, EXCLUSIVE, inputs[0].region));
    launcher.add_field(1, FID_DATA);
    // regions[2](I/O): input0_grad
    launcher.add_region_requirement(
        RegionRequirement(input_grad_lps[0], 0/*projection id*/,
                          READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
    launcher.add_field(2, FID_DATA);
    // BUGFIX: same inverted condition as the inplace branch.
    if (inputs[0].region != inputs[1].region) {
      // regions[3](I): input1
      launcher.add_region_requirement(
          RegionRequirement(input_lps[1], 0/*projection id*/,
                            READ_ONLY, EXCLUSIVE, inputs[1].region));
      launcher.add_field(3, FID_DATA);
      // regions[4](I/O): input1_grad
      launcher.add_region_requirement(
          RegionRequirement(input_grad_lps[1], 0/*projection id*/,
                            READ_WRITE, EXCLUSIVE, inputs[1].region_grad));
      launcher.add_field(4, FID_DATA);
    }
  }
  runtime->execute_index_space(ctx, launcher);
}
// Per-device metadata: owns the cuDNN tensor/op descriptors used by the
// forward/backward kernels.  Descriptors are configured later (op_type
// starts as the OP_ANY placeholder).
ElementBinaryMeta::ElementBinaryMeta(FFHandler handler)
: OpMeta(handler)
{
  checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor));
  checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor));
  checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc));
  op_type = OP_ANY;
}
// Simulator cost model: allocate simulated buffers for the sub-tensors of
// this parallel config (+ gradients when training), configure the shared
// cuDNN descriptors, and time forward/backward via inner_measure_operator_cost.
// Returns false if any sub-tensor cannot be formed for this config.
bool ElementBinary::measure_operator_cost(Simulator* sim,
                                          const ParallelConfig& pc,
                                          CostMetrics& cost_metrics)
{
  Tensor sub_output, sub_input1, sub_input0;
  if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type))
    return false;
  if (!inputs[0].get_input_sub_tensor(pc, sub_input0, op_type))
    return false;
  if (!inputs[1].get_input_sub_tensor(pc, sub_input1, op_type))
    return false;
  ElementBinaryMeta* m = sim->ele_binary_meta;
  m->op_type = op_type;
  // Same descriptor setup as the real init path: ADD/SUB share the
  // CUDNN_OP_TENSOR_ADD mode, MUL uses CUDNN_OP_TENSOR_MUL.
  cudnnOpTensorOp_t mode;
  switch (op_type) {
    case OP_EW_ADD:
    case OP_EW_SUB:
      mode = CUDNN_OP_TENSOR_ADD;
      break;
    case OP_EW_MUL:
      mode = CUDNN_OP_TENSOR_MUL;
      break;
    default:
      assert(false);
  }
  checkCUDNN(cudnnSetOpTensorDescriptor(m->opDesc, mode,
      CUDNN_DATA_FLOAT, CUDNN_PROPAGATE_NAN));
  Domain input_domain = sub_input0.get_domain();
  Domain output_domain = sub_output.get_domain();
  checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->inputTensor, input_domain));
  checkCUDNN(cudnnSetTensorDescriptorFromDomain(m->outputTensor, output_domain));
  sim->free_all();
  float* input0_ptr = (float*)sim->allocate(sub_input0.get_volume(), DT_FLOAT);
  assert(input0_ptr != NULL);
  float* input1_ptr = (float*)sim->allocate(sub_input1.get_volume(), DT_FLOAT);
  assert(input1_ptr != NULL);
  float* output_ptr = NULL;
  if (inplace_a) {
    // In-place: output writes over input0's buffer.
    output_ptr = input0_ptr;
  } else {
    output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
  }
  assert(output_ptr != NULL);
  std::function<void()> forward, backward;
  forward = [&] {
    forward_kernel(m, input0_ptr, input1_ptr, output_ptr);
  };
  if (sim->computationMode == COMP_MODE_TRAINING) {
    float* input0_grad_ptr = (float*)sim->allocate(sub_input0.get_volume(), DT_FLOAT);
    assert(input0_grad_ptr != NULL);
    // BUGFIX: size input1's gradient by sub_input1 (was sub_input0).
    float* input1_grad_ptr = (float*)sim->allocate(sub_input1.get_volume(), DT_FLOAT);
    assert(input1_grad_ptr != NULL);
    float* output_grad_ptr = NULL;
    if (inplace_a) {
      output_grad_ptr = input0_grad_ptr;
    } else {
      output_grad_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT);
    }
    assert(output_grad_ptr != NULL);
    // BUGFIX: capture by value.  The grad pointers are locals of this block,
    // so a by-reference capture dangles once the block exits, and the lambda
    // is only invoked later inside inner_measure_operator_cost.
    backward = [=] {
      backward_kernel(m, output_grad_ptr, input0_ptr, input1_ptr, input0_grad_ptr, input1_grad_ptr);
    };
  }
  inner_measure_operator_cost(sim, forward, backward, cost_metrics);
  if (sim->computationMode == COMP_MODE_TRAINING) {
    printf("[Measure Elewise Binary] name(%s) num_elements(%zu) forward_time(%.4lf) backward_time(%.4lf)\n",
        name, sub_output.get_volume(),
        cost_metrics.forward_time,
        cost_metrics.backward_time);
  } else {
    printf("[Measure Elewise Binary] name(%s) num_elements(%zu) forward_time(%.4lf)\n",
        name, sub_output.get_volume(),
        cost_metrics.forward_time);
  }
  return true;
}
|
e061128ec0eb2cacf1a6d323848e771615dcafb2.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdio.h>
#include<vector>
#include<array>
#include<cassert>
#include<chrono>
#include<cmath>
#include "hip/hip_runtime_api.h"
#include "matrix.h"
#include "ReadWriteData.h"
#include "header.h"
#include "factorization.h"
#include "parILU_0.h"
namespace{
// One Jacobi-style ParILU(0) sweep over all nnz nonzeros of a single batch
// entry.  Threads of the block pick up nonzeros in a blockDim.x-strided
// fashion.
//
// Layout of the per-nonzero record dependencies[nz_ptrs[el] .. nz_ptrs[el+1]-1]:
//   [0]       index in L_vals or U_vals where the result is written
//   [1..2k]   pairs (L index, U index) whose products are accumulated
//   [last]    only when the target lies in L: index of the U diagonal entry
//             used as the divisor
// An even record length therefore means "write into L" (diagonal appended),
// an odd record length means "write into U" (divisor stays 1).
__device__ __forceinline__ void parilu0_sweep_for_batch_entry_approach2(const int nnz, const double* const A_vals , double* const L_vals , double* const U_vals ,
const int* const dependencies , const int* const nz_ptrs)
{
for(int el = threadIdx.x ; el < nnz; el+= blockDim.x) //non-coalesced access but some data locality
{
double diag_val = 1;
int st = nz_ptrs[el];
int end = nz_ptrs[el + 1] - 1;
// Record-length parity decides whether this nonzero belongs to L or U.
bool has_diag_dependency = (end + 1- st )%2 == 0 ? true : false;
double sum = 0;
// Accumulate sum_k L(row,k) * U(k,col) over the precomputed index pairs.
for(int idx = st + 1; idx <= end - 1; idx += 2)
{
sum += L_vals[dependencies[idx]] * U_vals[dependencies[idx + 1]];
}
if(has_diag_dependency == true)
{
diag_val = U_vals[dependencies[end]];
}
// Fixed-point update: (A - sum) / diag  (diag == 1 for U targets).
double to_write = (A_vals[el] - sum)/diag_val;
if(has_diag_dependency == true)
{
L_vals[dependencies[st]] = to_write;
}
else
{
U_vals[dependencies[st]] = to_write;
}
}
}
// Batched ParILU(0) kernel: grid-strides over pages (batch entries), one
// thread block working on one page at a time.  The page's L and U values
// are staged into dynamic shared memory -- (L_nnz + U_nnz) doubles, sized
// at launch -- swept num_iterations times, then written back to global
// memory.  `dependencies` / `nz_ptrs` carry no page index: the sparsity
// pattern is shared by every page of the batch.
__global__ void compute_parilu_0_approach2_kernel(const int npages , const int nrows, const int nnz,
const double* const values, const int L_nnz, double* const L_values,
const int U_nnz, double* const U_values , const int num_iterations , const int* const dependencies, const int* const nz_ptrs)
{
for(int page_id = blockIdx.x; page_id < npages; page_id += gridDim.x)
{
extern __shared__ double shared_mem[];
// Partition the dynamic shared buffer: first L_nnz doubles for L,
// the following U_nnz doubles for U.
double* L_values_sh = shared_mem;
double* U_values_sh = L_values_sh + L_nnz;
// __shared__ double L_values_sh[MAX_NUM_NZ];
// __shared__ double U_values_sh[MAX_NUM_NZ]; or MAX_NUM_NZ + MAX_NUM_ROWS to account for diagonal addition in case there are some missing diagonal elements.
// Stage the current L/U guesses for this page into shared memory.
for(int i = threadIdx.x ; i < L_nnz ; i += blockDim.x)
{
L_values_sh[i] = *(L_values + page_id * L_nnz + i);
}
for(int i = threadIdx.x ; i < U_nnz ; i += blockDim.x)
{
U_values_sh[i] = *(U_values + page_id * U_nnz + i);
}
__syncthreads(); // staging must complete before the first sweep reads it
for(int iter = 0; iter < num_iterations; iter++)
{
// parilu0_sweep_for_batch_entry_approach2(nnz, values + page_id * nnz , L_values + page_id * L_nnz , U_values + page_id * U_nnz ,
// dependencies , nz_ptrs);
parilu0_sweep_for_batch_entry_approach2(nnz, values + page_id * nnz , L_values_sh , U_values_sh,
dependencies , nz_ptrs);
__syncthreads(); // finish this sweep before the next one starts
}
// Write the swept factors back to global memory.
for(int i = threadIdx.x ; i < L_nnz ; i += blockDim.x)
{
*(L_values + page_id * L_nnz + i) = L_values_sh[i];
}
for(int i = threadIdx.x ; i < U_nnz ; i += blockDim.x)
{
*(U_values + page_id * U_nnz + i) = U_values_sh[i];
}
}
}
// Host-side construction of the flattened ParILU(0) dependency graph that
// the sweep kernel consumes.  For every nonzero (row,col) of A one record
// is appended to `dependencies`:
//   write target : value index of the entry in L (row > col) or U (row <= col)
//   pairs        : for every k <= min(row,col)-1 with L(row,k) present and
//                  U(k,col) present, the pair of their value indices
//   divisor      : for L targets only, the location of U's diagonal entry
//                  in row `col` (U_row_ptrs[col], i.e. the first entry of
//                  that U row)
// `nz_ptrs` receives the exclusive prefix sum of record lengths, so record
// `loc` occupies dependencies[nz_ptrs[loc] .. nz_ptrs[loc+1]-1].
// The CSR structure is copied from the device once; it is shared by all
// pages of the batch.
// NOTE(review): hipMemcpy return codes are not checked here -- TODO confirm
// error handling policy.
void create_dependency_graph_parilu(const PagedCSRMatrices & A_pages, std::vector<int> & dependencies , std::vector<int> & nz_ptrs,
const PagedCSRMatrices & L_pages , const PagedCSRMatrices & U_pages)
{
const int nrows = A_pages.GetNumRows();
// Host copies of the CSR patterns of A, L and U.
int* const row_ptrs = new int[A_pages.GetNumRows() + 1];
int* const col_idxs = new int[A_pages.GetNumNz()];
int* const L_row_ptrs = new int[L_pages.GetNumRows() + 1];
int* const L_col_idxs = new int[L_pages.GetNumNz()];
int* const U_row_ptrs = new int[U_pages.GetNumRows() + 1];
int* const U_col_idxs = new int[U_pages.GetNumNz()];
hipMemcpy(row_ptrs , A_pages.GetPtrToGpuRowPtrs(), sizeof(int) * (A_pages.GetNumRows() + 1) , hipMemcpyDeviceToHost);
hipMemcpy(col_idxs , A_pages.GetPtrToGpuColInd() , sizeof(int) * A_pages.GetNumNz(), hipMemcpyDeviceToHost );
hipMemcpy(L_row_ptrs , L_pages.GetPtrToGpuRowPtrs(), sizeof(int) * (L_pages.GetNumRows() + 1) , hipMemcpyDeviceToHost );
hipMemcpy(L_col_idxs , L_pages.GetPtrToGpuColInd() , sizeof(int) * L_pages.GetNumNz() , hipMemcpyDeviceToHost );
hipMemcpy(U_row_ptrs , U_pages.GetPtrToGpuRowPtrs(), sizeof(int) * (U_pages.GetNumRows() + 1) , hipMemcpyDeviceToHost );
hipMemcpy(U_col_idxs , U_pages.GetPtrToGpuColInd() , sizeof(int) * U_pages.GetNumNz() , hipMemcpyDeviceToHost );
nz_ptrs[0] = 0;
for(int row_index = 0; row_index < nrows ; row_index++ )
{
const int row_start = row_ptrs[row_index];
const int row_end = row_ptrs[row_index + 1];
for(int loc = row_start; loc < row_end; loc++)
{
const int col_index = col_idxs[loc];
if(row_index > col_index)
{
//find corr. index in L
// Strictly-lower entry: L shares A's leading entries of the row,
// so the offset within the row carries over.
const int L_idx = loc - row_start + L_row_ptrs[row_index];
dependencies.push_back(L_idx);
// printf("\n write in L: %d \n", L_idx);
}
else
{
//find corr. index in U
// Upper entry: align from the end of the row instead.
const int U_idx = ( U_row_ptrs[row_index + 1] - 1) - (row_end -1 - loc );
dependencies.push_back(U_idx);
// printf("\n write in U: %d , U_row_ptrs[row_index + 1] : %d , row_end -1 : %d , loc: %d \n", U_idx, U_row_ptrs[row_index + 1] , row_end -1 , loc );
}
// Only k strictly below both row and column contribute L(row,k)*U(k,col).
const int k_max = ::min(row_index , col_index) - 1;
int num_dependencies = 0;
for(int l_idx = L_row_ptrs[row_index]; l_idx < L_row_ptrs[row_index + 1]; l_idx++)
{
const int k = L_col_idxs[l_idx];
if(k > k_max)
{
continue;
}
//find corresponding u at position k,col_index
for(int u_idx = U_row_ptrs[k]; u_idx < U_row_ptrs[k + 1]; u_idx++)
{
if(U_col_idxs[u_idx] == col_index)
{
dependencies.push_back(l_idx);
dependencies.push_back(u_idx);
num_dependencies += 2;
}
}
}
if(row_index > col_index)
{
// L target: append the U diagonal used as divisor (first entry of
// U row col_index).
const int diag_loc = U_row_ptrs[col_index];
//std::cout << "line 346: " << col_index << std::endl;
dependencies.push_back(diag_loc);
num_dependencies++;
}
// +1 accounts for the write-target entry at the head of the record.
nz_ptrs[loc + 1] = nz_ptrs[loc] + num_dependencies + 1;
}
}
delete[] row_ptrs;
delete[] col_idxs;
delete[] L_row_ptrs;
delete[] L_col_idxs;
delete[] U_row_ptrs;
delete[] U_col_idxs;
}
// Debug helper: dump the ParILU(0) dependency graph, one record per
// nonzero location.  An even record length marks a write into L (the U
// diagonal divisor is appended), an odd length marks a write into U.
//
// Fix vs. the original: the loop bound was `nz_ptrs_cpu.size() - 1` with
// size() unsigned, so an empty vector underflowed to SIZE_MAX and the loop
// read far out of bounds.  The count is now computed in signed arithmetic,
// which makes an empty (or one-element) vector print nothing.
void Print_Parilu_Dep_Graph(const std::vector<int> & dependencies_cpu , const std::vector<int> & nz_ptrs_cpu)
{
    const long long num_locs = (long long)nz_ptrs_cpu.size() - 1;
    for(long long loc = 0; loc < num_locs; loc++)
    {
        const int start = nz_ptrs_cpu[loc];
        const int end = nz_ptrs_cpu[loc + 1];
        printf("\n\n Dependencies for element at loc = %d are: ", (int)loc);
        if( (end - start)%2 == 0)
        {
            printf("\nwrite in L\n");
        }
        else
        {
            printf("\n write in U \n");
        }
        printf("\n To write at idx: %d \n", dependencies_cpu[start]);
        for(int i = start + 1; i < end; i++)
        {
            printf("\n %d ", dependencies_cpu[i]);
        }
    }
}
// Driver for "approach 2": build the dependency graph on the host, upload
// it, then launch the shared-memory sweep kernel with one block per page
// and (L_nnz + U_nnz) doubles of dynamic shared memory.
// NOTE(review): hipMalloc/hipMemcpy/launch errors are not checked, and the
// dynamic shared-memory request can exceed the per-block limit for large
// factors -- TODO confirm on target hardware.
void ParILU0_Approach2(const PagedCSRMatrices & A_sorted_Pages, const PagedCSRMatrices & L_pages, const PagedCSRMatrices & U_pages, const int num_iterations )
{
std::vector<int> dependencies_cpu;
// One record pointer per nonzero, plus the leading 0.
std::vector<int > nz_ptrs_cpu(A_sorted_Pages.GetNumNz() + 1);
create_dependency_graph_parilu(A_sorted_Pages, dependencies_cpu, nz_ptrs_cpu, L_pages , U_pages);
int* dependencies = nullptr;
int* nz_ptrs = nullptr;
hipMalloc((void**)&dependencies , dependencies_cpu.size() * sizeof(int));
hipMemcpy(dependencies , dependencies_cpu.data() , dependencies_cpu.size() * sizeof(int) , hipMemcpyHostToDevice );
hipMalloc((void**)&nz_ptrs , nz_ptrs_cpu.size() * sizeof(int) );
hipMemcpy( nz_ptrs , nz_ptrs_cpu.data() , nz_ptrs_cpu.size() * sizeof(int) , hipMemcpyHostToDevice );
//Print_Parilu_Dep_Graph(dependencies_cpu , nz_ptrs_cpu);
dim3 block(THREADS_PER_BLOCK);
// One block per page; the kernel grid-strides, so fewer blocks would
// still be correct.
int grid_dim = A_sorted_Pages.GetNumPages();
dim3 grid( grid_dim );
const int dynamic_shared_mem_size = sizeof(double) * ( L_pages.GetNumNz() + U_pages.GetNumNz());
hipLaunchKernelGGL(( compute_parilu_0_approach2_kernel) , dim3(grid) , dim3(block), dynamic_shared_mem_size , 0, A_sorted_Pages.GetNumPages(), A_sorted_Pages.GetNumRows(), A_sorted_Pages.GetNumNz(),
A_sorted_Pages.GetPtrToGpuValues(), L_pages.GetNumNz(), L_pages.GetPtrToGpuValues(),
U_pages.GetNumNz(), U_pages.GetPtrToGpuValues() , num_iterations , dependencies, nz_ptrs );
hipFree(dependencies);
hipFree(nz_ptrs);
}
} //unnamed namespace
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
// Batched ILU(0) factorization of A_pages via parallel fixed-point sweeps,
// writing the factors into L_pages / U_pages.
// Pipeline: copy A -> insert any missing diagonal entries -> locate the
// diagonals -> build L/U row pointers and the initial guess -> run the
// iterative sweep kernel for num_iterations iterations.
void ParILU_0_Factorization_Gpu(const PagedCSRMatrices & A_pages , PagedCSRMatrices & L_pages, PagedCSRMatrices & U_pages, const int num_iterations)
{
//first assert matrix is square
assert(A_pages.GetNumCols() == A_pages.GetNumRows());
PagedCSRMatrices A_sorted_Pages;
//We would want to use copy assignment here... or even a copy constructor. implement it later...
//copy A to A_sorted
Copy_Gpu_PagedCSRMatrices(A_pages , A_sorted_Pages); //TODO: avoid an extra copy here... if matrix is already sorted.
//SortCSRMatrix(A_sorted_Pages); if unsorted, pls sort the paged matrix before proceeding. (All these matrices are already sorted.(sorted while storing))
// Per-row diagonal bookkeeping, reused for both the missing-diagonal scan
// and the diagonal-location pass below.
int* diag_info = nullptr;
hipMalloc((void**)&diag_info, sizeof(int) * A_sorted_Pages.GetNumRows());
int num_missing_diagonal_eles = Count_Missing_Diagonal_Elements(A_sorted_Pages , diag_info);
if(num_missing_diagonal_eles > 0)
{
// ILU(0) needs every diagonal present; insert explicit (zero) diagonals.
PagedCSRMatrices New_A_sorted_Pages;
Add_Missing_Diagonal_Elements(New_A_sorted_Pages, A_sorted_Pages, diag_info , num_missing_diagonal_eles);
Copy_Gpu_PagedCSRMatrices(New_A_sorted_Pages , A_sorted_Pages); //TODO: avoid an extra copy here
}
// std::cout << "\n\nMATRIX AFTER ADDITION OF DIAGONAL ELEMENTS: " << std::endl;
// PrintPagedCSRMatrix(A_sorted_Pages);
//continue to use A_sorted here...
Find_locations_of_diagonal_elements(A_sorted_Pages, diag_info);
//std::cout << "\n\nLocn of diagonal elements:" << std::endl;
//print_kernel<<< 1, 1 >>>(A_sorted_Pages.GetNumRows(), diag_info);
//hipDeviceSynchronize();
Update_row_pointers_L_and_U_and_Allocate_Memory(A_sorted_Pages , diag_info, L_pages, U_pages);
Fill_L_and_U_col_idxs_and_vals(A_sorted_Pages, L_pages, U_pages);
//Now L_pages and U_pages are initialized... (Initial guess is ready)
hipProfilerStart();
//approach 2
ParILU0_Approach2(A_sorted_Pages , L_pages , U_pages , num_iterations);
hipProfilerStop();
// std::cout << "\n\nMATRIX L: " << std::endl;
// PrintPagedCSRMatrix(L_pages);
// std::cout << "\n\nMATRIX U: " << std::endl;
// PrintPagedCSRMatrix(U_pages);
hipFree(diag_info);
hipDeviceSynchronize(); //for timing purpose
}
| e061128ec0eb2cacf1a6d323848e771615dcafb2.cu | #include<iostream>
#include<stdio.h>
#include<vector>
#include<array>
#include<cassert>
#include<chrono>
#include<cmath>
#include "cuda_profiler_api.h"
#include "matrix.h"
#include "ReadWriteData.h"
#include "header.h"
#include "factorization.h"
#include "parILU_0.h"
namespace{
// One Jacobi-style ParILU(0) sweep over all nnz nonzeros of a single batch
// entry.  Threads of the block pick up nonzeros in a blockDim.x-strided
// fashion.
//
// Layout of the per-nonzero record dependencies[nz_ptrs[el] .. nz_ptrs[el+1]-1]:
//   [0]       index in L_vals or U_vals where the result is written
//   [1..2k]   pairs (L index, U index) whose products are accumulated
//   [last]    only when the target lies in L: index of the U diagonal entry
//             used as the divisor
// An even record length therefore means "write into L" (diagonal appended),
// an odd record length means "write into U" (divisor stays 1).
__device__ __forceinline__ void parilu0_sweep_for_batch_entry_approach2(const int nnz, const double* const A_vals , double* const L_vals , double* const U_vals ,
const int* const dependencies , const int* const nz_ptrs)
{
for(int el = threadIdx.x ; el < nnz; el+= blockDim.x) //non-coalesced access but some data locality
{
double diag_val = 1;
int st = nz_ptrs[el];
int end = nz_ptrs[el + 1] - 1;
// Record-length parity decides whether this nonzero belongs to L or U.
bool has_diag_dependency = (end + 1- st )%2 == 0 ? true : false;
double sum = 0;
// Accumulate sum_k L(row,k) * U(k,col) over the precomputed index pairs.
for(int idx = st + 1; idx <= end - 1; idx += 2)
{
sum += L_vals[dependencies[idx]] * U_vals[dependencies[idx + 1]];
}
if(has_diag_dependency == true)
{
diag_val = U_vals[dependencies[end]];
}
// Fixed-point update: (A - sum) / diag  (diag == 1 for U targets).
double to_write = (A_vals[el] - sum)/diag_val;
if(has_diag_dependency == true)
{
L_vals[dependencies[st]] = to_write;
}
else
{
U_vals[dependencies[st]] = to_write;
}
}
}
// Batched ParILU(0) kernel: grid-strides over pages (batch entries), one
// thread block working on one page at a time.  The page's L and U values
// are staged into dynamic shared memory -- (L_nnz + U_nnz) doubles, sized
// at launch -- swept num_iterations times, then written back to global
// memory.  `dependencies` / `nz_ptrs` carry no page index: the sparsity
// pattern is shared by every page of the batch.
__global__ void compute_parilu_0_approach2_kernel(const int npages , const int nrows, const int nnz,
const double* const values, const int L_nnz, double* const L_values,
const int U_nnz, double* const U_values , const int num_iterations , const int* const dependencies, const int* const nz_ptrs)
{
for(int page_id = blockIdx.x; page_id < npages; page_id += gridDim.x)
{
extern __shared__ double shared_mem[];
// Partition the dynamic shared buffer: first L_nnz doubles for L,
// the following U_nnz doubles for U.
double* L_values_sh = shared_mem;
double* U_values_sh = L_values_sh + L_nnz;
// __shared__ double L_values_sh[MAX_NUM_NZ];
// __shared__ double U_values_sh[MAX_NUM_NZ]; or MAX_NUM_NZ + MAX_NUM_ROWS to account for diagonal addition in case there are some missing diagonal elements.
// Stage the current L/U guesses for this page into shared memory.
for(int i = threadIdx.x ; i < L_nnz ; i += blockDim.x)
{
L_values_sh[i] = *(L_values + page_id * L_nnz + i);
}
for(int i = threadIdx.x ; i < U_nnz ; i += blockDim.x)
{
U_values_sh[i] = *(U_values + page_id * U_nnz + i);
}
__syncthreads(); // staging must complete before the first sweep reads it
for(int iter = 0; iter < num_iterations; iter++)
{
// parilu0_sweep_for_batch_entry_approach2(nnz, values + page_id * nnz , L_values + page_id * L_nnz , U_values + page_id * U_nnz ,
// dependencies , nz_ptrs);
parilu0_sweep_for_batch_entry_approach2(nnz, values + page_id * nnz , L_values_sh , U_values_sh,
dependencies , nz_ptrs);
__syncthreads(); // finish this sweep before the next one starts
}
// Write the swept factors back to global memory.
for(int i = threadIdx.x ; i < L_nnz ; i += blockDim.x)
{
*(L_values + page_id * L_nnz + i) = L_values_sh[i];
}
for(int i = threadIdx.x ; i < U_nnz ; i += blockDim.x)
{
*(U_values + page_id * U_nnz + i) = U_values_sh[i];
}
}
}
// Host-side construction of the flattened ParILU(0) dependency graph that
// the sweep kernel consumes.  For every nonzero (row,col) of A one record
// is appended to `dependencies`:
//   write target : value index of the entry in L (row > col) or U (row <= col)
//   pairs        : for every k <= min(row,col)-1 with L(row,k) present and
//                  U(k,col) present, the pair of their value indices
//   divisor      : for L targets only, the location of U's diagonal entry
//                  in row `col` (U_row_ptrs[col], i.e. the first entry of
//                  that U row)
// `nz_ptrs` receives the exclusive prefix sum of record lengths, so record
// `loc` occupies dependencies[nz_ptrs[loc] .. nz_ptrs[loc+1]-1].
// The CSR structure is copied from the device once; it is shared by all
// pages of the batch.
// NOTE(review): cudaMemcpy return codes are not checked here -- TODO
// confirm error handling policy.
void create_dependency_graph_parilu(const PagedCSRMatrices & A_pages, std::vector<int> & dependencies , std::vector<int> & nz_ptrs,
const PagedCSRMatrices & L_pages , const PagedCSRMatrices & U_pages)
{
const int nrows = A_pages.GetNumRows();
// Host copies of the CSR patterns of A, L and U.
int* const row_ptrs = new int[A_pages.GetNumRows() + 1];
int* const col_idxs = new int[A_pages.GetNumNz()];
int* const L_row_ptrs = new int[L_pages.GetNumRows() + 1];
int* const L_col_idxs = new int[L_pages.GetNumNz()];
int* const U_row_ptrs = new int[U_pages.GetNumRows() + 1];
int* const U_col_idxs = new int[U_pages.GetNumNz()];
cudaMemcpy(row_ptrs , A_pages.GetPtrToGpuRowPtrs(), sizeof(int) * (A_pages.GetNumRows() + 1) , cudaMemcpyDeviceToHost);
cudaMemcpy(col_idxs , A_pages.GetPtrToGpuColInd() , sizeof(int) * A_pages.GetNumNz(), cudaMemcpyDeviceToHost );
cudaMemcpy(L_row_ptrs , L_pages.GetPtrToGpuRowPtrs(), sizeof(int) * (L_pages.GetNumRows() + 1) , cudaMemcpyDeviceToHost );
cudaMemcpy(L_col_idxs , L_pages.GetPtrToGpuColInd() , sizeof(int) * L_pages.GetNumNz() , cudaMemcpyDeviceToHost );
cudaMemcpy(U_row_ptrs , U_pages.GetPtrToGpuRowPtrs(), sizeof(int) * (U_pages.GetNumRows() + 1) , cudaMemcpyDeviceToHost );
cudaMemcpy(U_col_idxs , U_pages.GetPtrToGpuColInd() , sizeof(int) * U_pages.GetNumNz() , cudaMemcpyDeviceToHost );
nz_ptrs[0] = 0;
for(int row_index = 0; row_index < nrows ; row_index++ )
{
const int row_start = row_ptrs[row_index];
const int row_end = row_ptrs[row_index + 1];
for(int loc = row_start; loc < row_end; loc++)
{
const int col_index = col_idxs[loc];
if(row_index > col_index)
{
//find corr. index in L
// Strictly-lower entry: L shares A's leading entries of the row,
// so the offset within the row carries over.
const int L_idx = loc - row_start + L_row_ptrs[row_index];
dependencies.push_back(L_idx);
// printf("\n write in L: %d \n", L_idx);
}
else
{
//find corr. index in U
// Upper entry: align from the end of the row instead.
const int U_idx = ( U_row_ptrs[row_index + 1] - 1) - (row_end -1 - loc );
dependencies.push_back(U_idx);
// printf("\n write in U: %d , U_row_ptrs[row_index + 1] : %d , row_end -1 : %d , loc: %d \n", U_idx, U_row_ptrs[row_index + 1] , row_end -1 , loc );
}
// Only k strictly below both row and column contribute L(row,k)*U(k,col).
const int k_max = std::min(row_index , col_index) - 1;
int num_dependencies = 0;
for(int l_idx = L_row_ptrs[row_index]; l_idx < L_row_ptrs[row_index + 1]; l_idx++)
{
const int k = L_col_idxs[l_idx];
if(k > k_max)
{
continue;
}
//find corresponding u at position k,col_index
for(int u_idx = U_row_ptrs[k]; u_idx < U_row_ptrs[k + 1]; u_idx++)
{
if(U_col_idxs[u_idx] == col_index)
{
dependencies.push_back(l_idx);
dependencies.push_back(u_idx);
num_dependencies += 2;
}
}
}
if(row_index > col_index)
{
// L target: append the U diagonal used as divisor (first entry of
// U row col_index).
const int diag_loc = U_row_ptrs[col_index];
//std::cout << "line 346: " << col_index << std::endl;
dependencies.push_back(diag_loc);
num_dependencies++;
}
// +1 accounts for the write-target entry at the head of the record.
nz_ptrs[loc + 1] = nz_ptrs[loc] + num_dependencies + 1;
}
}
delete[] row_ptrs;
delete[] col_idxs;
delete[] L_row_ptrs;
delete[] L_col_idxs;
delete[] U_row_ptrs;
delete[] U_col_idxs;
}
// Debug helper: dump the ParILU(0) dependency graph, one record per
// nonzero location.  An even record length marks a write into L (the U
// diagonal divisor is appended), an odd length marks a write into U.
//
// Fix vs. the original: the loop bound was `nz_ptrs_cpu.size() - 1` with
// size() unsigned, so an empty vector underflowed to SIZE_MAX and the loop
// read far out of bounds.  The count is now computed in signed arithmetic,
// which makes an empty (or one-element) vector print nothing.
void Print_Parilu_Dep_Graph(const std::vector<int> & dependencies_cpu , const std::vector<int> & nz_ptrs_cpu)
{
    const long long num_locs = (long long)nz_ptrs_cpu.size() - 1;
    for(long long loc = 0; loc < num_locs; loc++)
    {
        const int start = nz_ptrs_cpu[loc];
        const int end = nz_ptrs_cpu[loc + 1];
        printf("\n\n Dependencies for element at loc = %d are: ", (int)loc);
        if( (end - start)%2 == 0)
        {
            printf("\nwrite in L\n");
        }
        else
        {
            printf("\n write in U \n");
        }
        printf("\n To write at idx: %d \n", dependencies_cpu[start]);
        for(int i = start + 1; i < end; i++)
        {
            printf("\n %d ", dependencies_cpu[i]);
        }
    }
}
// Driver for "approach 2": build the dependency graph on the host, upload
// it, then launch the shared-memory sweep kernel with one block per page
// and (L_nnz + U_nnz) doubles of dynamic shared memory.
// NOTE(review): cudaMalloc/cudaMemcpy/launch errors are not checked, and
// the dynamic shared-memory request can exceed the per-block limit for
// large factors -- TODO confirm on target hardware.
void ParILU0_Approach2(const PagedCSRMatrices & A_sorted_Pages, const PagedCSRMatrices & L_pages, const PagedCSRMatrices & U_pages, const int num_iterations )
{
std::vector<int> dependencies_cpu;
// One record pointer per nonzero, plus the leading 0.
std::vector<int > nz_ptrs_cpu(A_sorted_Pages.GetNumNz() + 1);
create_dependency_graph_parilu(A_sorted_Pages, dependencies_cpu, nz_ptrs_cpu, L_pages , U_pages);
int* dependencies = nullptr;
int* nz_ptrs = nullptr;
cudaMalloc((void**)&dependencies , dependencies_cpu.size() * sizeof(int));
cudaMemcpy(dependencies , dependencies_cpu.data() , dependencies_cpu.size() * sizeof(int) , cudaMemcpyHostToDevice );
cudaMalloc((void**)&nz_ptrs , nz_ptrs_cpu.size() * sizeof(int) );
cudaMemcpy( nz_ptrs , nz_ptrs_cpu.data() , nz_ptrs_cpu.size() * sizeof(int) , cudaMemcpyHostToDevice );
//Print_Parilu_Dep_Graph(dependencies_cpu , nz_ptrs_cpu);
dim3 block(THREADS_PER_BLOCK);
// One block per page; the kernel grid-strides, so fewer blocks would
// still be correct.
int grid_dim = A_sorted_Pages.GetNumPages();
dim3 grid( grid_dim );
const int dynamic_shared_mem_size = sizeof(double) * ( L_pages.GetNumNz() + U_pages.GetNumNz());
compute_parilu_0_approach2_kernel <<< grid , block, dynamic_shared_mem_size >>>(A_sorted_Pages.GetNumPages(), A_sorted_Pages.GetNumRows(), A_sorted_Pages.GetNumNz(),
A_sorted_Pages.GetPtrToGpuValues(), L_pages.GetNumNz(), L_pages.GetPtrToGpuValues(),
U_pages.GetNumNz(), U_pages.GetPtrToGpuValues() , num_iterations , dependencies, nz_ptrs );
cudaFree(dependencies);
cudaFree(nz_ptrs);
}
} //unnamed namespace
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------
// Batched ILU(0) factorization of A_pages via parallel fixed-point sweeps,
// writing the factors into L_pages / U_pages.
// Pipeline: copy A -> insert any missing diagonal entries -> locate the
// diagonals -> build L/U row pointers and the initial guess -> run the
// iterative sweep kernel for num_iterations iterations.
void ParILU_0_Factorization_Gpu(const PagedCSRMatrices & A_pages , PagedCSRMatrices & L_pages, PagedCSRMatrices & U_pages, const int num_iterations)
{
//first assert matrix is square
assert(A_pages.GetNumCols() == A_pages.GetNumRows());
PagedCSRMatrices A_sorted_Pages;
//We would want to use copy assignment here... or even a copy constructor. implement it later...
//copy A to A_sorted
Copy_Gpu_PagedCSRMatrices(A_pages , A_sorted_Pages); //TODO: avoid an extra copy here... if matrix is already sorted.
//SortCSRMatrix(A_sorted_Pages); if unsorted, pls sort the paged matrix before proceeding. (All these matrices are already sorted.(sorted while storing))
// Per-row diagonal bookkeeping, reused for both the missing-diagonal scan
// and the diagonal-location pass below.
int* diag_info = nullptr;
cudaMalloc((void**)&diag_info, sizeof(int) * A_sorted_Pages.GetNumRows());
int num_missing_diagonal_eles = Count_Missing_Diagonal_Elements(A_sorted_Pages , diag_info);
if(num_missing_diagonal_eles > 0)
{
// ILU(0) needs every diagonal present; insert explicit (zero) diagonals.
PagedCSRMatrices New_A_sorted_Pages;
Add_Missing_Diagonal_Elements(New_A_sorted_Pages, A_sorted_Pages, diag_info , num_missing_diagonal_eles);
Copy_Gpu_PagedCSRMatrices(New_A_sorted_Pages , A_sorted_Pages); //TODO: avoid an extra copy here
}
// std::cout << "\n\nMATRIX AFTER ADDITION OF DIAGONAL ELEMENTS: " << std::endl;
// PrintPagedCSRMatrix(A_sorted_Pages);
//continue to use A_sorted here...
Find_locations_of_diagonal_elements(A_sorted_Pages, diag_info);
//std::cout << "\n\nLocn of diagonal elements:" << std::endl;
//print_kernel<<< 1, 1 >>>(A_sorted_Pages.GetNumRows(), diag_info);
//cudaDeviceSynchronize();
Update_row_pointers_L_and_U_and_Allocate_Memory(A_sorted_Pages , diag_info, L_pages, U_pages);
Fill_L_and_U_col_idxs_and_vals(A_sorted_Pages, L_pages, U_pages);
//Now L_pages and U_pages are initialized... (Initial guess is ready)
cudaProfilerStart();
//approach 2
ParILU0_Approach2(A_sorted_Pages , L_pages , U_pages , num_iterations);
cudaProfilerStop();
// std::cout << "\n\nMATRIX L: " << std::endl;
// PrintPagedCSRMatrix(L_pages);
// std::cout << "\n\nMATRIX U: " << std::endl;
// PrintPagedCSRMatrix(U_pages);
cudaFree(diag_info);
cudaDeviceSynchronize(); //for timing purpose
}
|
6d6a799cefc069a2fbe1a82e3e044d997a897515.hip | // !!! This is a file automatically generated by hipify!!!
/* Example sobel code for ECE574 -- Spring 2017 */
/* By Vince Weaver <vincent.weaver@maine.edu> */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <math.h>
#include <jpeglib.h>
#include <hip/hip_runtime.h>
#include <papi.h>
/* Filters */
static int sobel_x_filter[3][3]={{-1,0,+1},{-2,0,+2},{-1,0,+1}};
static int sobel_y_filter[3][3]={{-1,-2,-1},{0,0,0},{1,2,+1}};
/* Structure describing the image */
// In-memory image: interleaved channel data, row-major, `depth` bytes per
// pixel (pixels[y * x * depth + x_i * depth + channel]).
struct image_t {
int x;
int y;
int depth; /* bytes */
unsigned char *pixels;
};
// Work description handed to generic_convolve: input/output images, the
// 3x3 filter to apply, and the [ystart, yend) row range to process.
struct convolve_data_t {
struct image_t *old;
struct image_t *newt;
int (*filter)[3][3];
int ystart;
int yend;
};
#if 0
/* For the generic convolve, you will also need to upload the sobelx and sobely matrices to the
device. A simple array of 9 ints is probably best. */
__global__ //fine grained
void cuda_generic_convolve (int n, char *in, int *matrix, char *out) {
//Can get block number with blockIdx.x and thread index with threadIdx.x
}
#endif
/* How to get the grid/block/thread count right:
int blockId = blockIdx.y* gridDim.x+ blockIdx.x;
int i = blockId * blockDim.x + threadIdx.x; */
__global__ //coarse grained
/* Combine the two sobel gradient images into the output magnitude image:
 * out[i] = clamp(sqrt(x^2 + y^2), 0, 255), one thread per byte, n bytes.
 *
 * Fixes vs. the original:
 *  - bounds check: the host rounds the block count up, so the last block
 *    contains threads with i >= n; without the `i < n` guard those threads
 *    wrote past the end of the allocations.
 *  - the magnitude is clamped BEFORE the narrowing store.  Storing into an
 *    unsigned char first truncates the value modulo 256, so the old
 *    "if (out[i] > 255)" test could never fire and bright edges wrapped
 *    around instead of saturating. */
void cuda_combine (int n, unsigned char *in_x,unsigned char *in_y,unsigned char *out) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;	/* guard the ragged last block */

	double mag = sqrt(double(
			(in_x[i] * in_x[i]) +
			(in_y[i] * in_y[i])
		));
	if (mag > 255.0) mag = 255.0;	/* saturate before the 8-bit store */
	out[i] = (unsigned char)mag;
}
/* 3x3 convolution over one channel-interleaved image (reference CPU path).
 * `argument` is a convolve_data_t describing input, output, filter and the
 * [ystart, yend) row range; the one-pixel border the stencil cannot reach
 * is skipped and results are clamped to 0..255.
 *
 * Improvement vs. the original: the loop nest is now y (row), then x
 * (column), then d (channel), so the innermost iterations walk
 * old->pixels / newt->pixels in memory order (the buffer is row-major
 * [y][x][d]).  The previous d/x/y nesting strided through the whole image
 * per channel and was needlessly cache-hostile.  Every output pixel
 * depends only on the input image, so the reordering cannot change
 * results. */
static void *generic_convolve(void *argument) {

	int x, y, k, l, d;
	uint32_t color;
	int sum, depth, width;

	struct image_t *old;
	struct image_t *newt;
	int (*filter)[3][3];
	struct convolve_data_t *data;
	int ystart, yend;

	/* Convert from void pointer to the actual data type */
	data = (struct convolve_data_t *)argument;
	old = data->old;
	newt = data->newt;
	filter = data->filter;

	ystart = data->ystart;
	yend = data->yend;

	depth = old->depth;
	width = old->x * old->depth;

	/* Shrink the range so the 3x3 stencil never reads off the edge. */
	if (ystart == 0) ystart = 1;
	if (yend == old->y) yend = old->y - 1;

	for (y = ystart; y < yend; y++) {
		for (x = 1; x < old->x - 1; x++) {
			for (d = 0; d < 3; d++) {
				sum = 0;
				for (k = -1; k < 2; k++) {
					for (l = -1; l < 2; l++) {
						color = old->pixels[((y + l) * width) + (x * depth + d + k * depth)];
						sum += color * (*filter)[k + 1][l + 1];
					}
				}

				/* Clamp to the 8-bit output range. */
				if (sum < 0) sum = 0;
				if (sum > 255) sum = 255;

				newt->pixels[(y * width) + x * depth + d] = sum;
			}
		}
	}

	return NULL;
}
// static int combine(struct image_t *s_x,
// struct image_t *s_y,
// struct image_t *newt) {
// int i;
// int out;
//
// for(i=0;i<( s_x->depth * s_x->x * s_x->y );i++) {
//
// out=sqrt(
// (s_x->pixels[i]*s_x->pixels[i])+
// (s_y->pixels[i]*s_y->pixels[i])
// );
// if (out>255) out=255;
// if (out<0) out=0;
// newt->pixels[i]=out;
// }
//
// return 0;
// }
/* Decode a JPEG file into image->pixels (malloc'd here; caller owns it).
 * Fills image->x / y / depth from the decompressor output parameters.
 * Returns 0 on success, -1 if the file cannot be opened.
 * NOTE(review): decode errors go through libjpeg's default error handler,
 * which exit()s the process -- TODO confirm that is acceptable. */
static int load_jpeg(char *filename, struct image_t *image) {

FILE *fff;
struct jpeg_decompress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPROW output_data;
unsigned int scanline_len;
int scanline_count=0;

fff=fopen(filename,"rb");
if (fff==NULL) {
fprintf(stderr, "Could not load %s: %s\n",
filename, strerror(errno));
return -1;
}

/* set up jpeg error routines */
cinfo.err = jpeg_std_error(&jerr);

/* Initialize cinfo */
jpeg_create_decompress(&cinfo);

/* Set input file */
jpeg_stdio_src(&cinfo, fff);

/* read header */
jpeg_read_header(&cinfo, TRUE);

/* Start decompressor */
jpeg_start_decompress(&cinfo);

printf("output_width=%d, output_height=%d, output_components=%d\n",
cinfo.output_width,
cinfo.output_height,
cinfo.output_components);

image->x=cinfo.output_width;
image->y=cinfo.output_height;
image->depth=cinfo.output_components;

scanline_len = cinfo.output_width * cinfo.output_components;
image->pixels=(unsigned char *)malloc(cinfo.output_width * cinfo.output_height * cinfo.output_components);

/* Decode one scanline at a time straight into the pixel buffer. */
while (scanline_count < cinfo.output_height) {
output_data = (image->pixels + (scanline_count * scanline_len));
jpeg_read_scanlines(&cinfo, &output_data, 1);
scanline_count++;
}

/* Finish decompressing */
jpeg_finish_decompress(&cinfo);

jpeg_destroy_decompress(&cinfo);

fclose(fff);

return 0;
}
/* Encode image->pixels as a quality-90 RGB JPEG at `filename`.
 * Returns 0 on success, -1 if the file cannot be opened.
 * Assumes image->depth matches an RGB layout (JCS_RGB is hard-coded) --
 * TODO confirm for non-3-channel inputs. */
static int store_jpeg(const char *filename, struct image_t *image) {

struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
int quality=90; /* % */
int i;
FILE *fff;
JSAMPROW row_pointer[1];
int row_stride;

/* setup error handler */
cinfo.err = jpeg_std_error(&jerr);

/* initialize jpeg compression object */
jpeg_create_compress(&cinfo);

/* Open file */
fff = fopen(filename, "wb");
if (fff==NULL) {
fprintf(stderr, "can't open %s: %s\n",
filename,strerror(errno));
return -1;
}

jpeg_stdio_dest(&cinfo, fff);

/* Set compression parameters */
cinfo.image_width = image->x;
cinfo.image_height = image->y;
cinfo.input_components = image->depth;
cinfo.in_color_space = JCS_RGB;
jpeg_set_defaults(&cinfo);
jpeg_set_quality(&cinfo, quality, TRUE);

/* start compressing */
jpeg_start_compress(&cinfo, TRUE);

row_stride=image->x*image->depth;

/* Feed the encoder one scanline at a time. */
for(i=0;i<image->y;i++) {
row_pointer[0] = & image->pixels[i * row_stride];
jpeg_write_scanlines(&cinfo, row_pointer, 1);
}

/* finish compressing */
jpeg_finish_compress(&cinfo);

/* close file */
fclose(fff);

/* clean up */
jpeg_destroy_compress(&cinfo);

return 0;
}
/* Sobel edge detector driver: load a JPEG, run the two 3x3 sobel
 * convolutions on the CPU, combine the gradient images into an edge
 * magnitude image on the GPU, and write "out.jpg".  Each phase is timed
 * with PAPI microsecond timestamps and reported at the end. */
int main(int argc, char **argv) {

struct image_t image,sobel_x,sobel_y,new_image;
struct convolve_data_t sobel_data[2];
long long start_time,load_time,convolve_time;
long long combine_after,combine_before;
long long copy_before,copy_after,copy2_before,copy2_after;
long long store_after,store_before;
long long cudaMalloc_after,cudaMalloc_before;
unsigned char *dev_x, *dev_y,*out;// Pointer to host & device arrays
// Number of bytes in the picture (x * y * depth; sizeof(char) == 1).
// NOTE(review): declared long long but passed to cuda_combine's int
// parameter -- overflows for images over 2 GiB. TODO confirm input bounds.
long long n;// Number of pixels in a picture

/* Check command line usage */
if (argc<2) {
fprintf(stderr,"Usage: %s image_file\n",argv[0]);
return -1;
}

PAPI_library_init(PAPI_VER_CURRENT);

start_time=PAPI_get_real_usec();

/* Load an image */
load_jpeg(argv[1],&image);

load_time=PAPI_get_real_usec();

/* Allocate device buffers for sobelx, sobely, and the output using hipMalloc() */

/* Allocate space for output image */
new_image.x=image.x;
new_image.y=image.y;
new_image.depth=image.depth;
new_image.pixels=(unsigned char *)malloc(image.x*image.y*image.depth*sizeof(char));
// new_image.pixels=(unsigned char *)hipMalloc(image.x*image.y*image.depth*sizeof(char));

/* Allocate space for output image */
sobel_x.x=image.x;
sobel_x.y=image.y;
sobel_x.depth=image.depth;
sobel_x.pixels=(unsigned char *)malloc(image.x*image.y*image.depth*sizeof(char));
// sobel_x.pixels=(unsigned char *)hipMalloc(image.x*image.y*image.depth*sizeof(char));

/* Allocate space for output image */
sobel_y.x=image.x;
sobel_y.y=image.y;
sobel_y.depth=image.depth;
sobel_y.pixels=(unsigned char *)malloc(image.x*image.y*image.depth*sizeof(char));
// sobel_y.pixels=(unsigned char *)hipMalloc(image.x*image.y*image.depth*sizeof(char));

n=image.x*image.y*image.depth*sizeof(char);//number of pixels of the picture

/* PERFORM KERNEL: cuda_generic_convolve */
/* convolution */
// CPU sobel-x pass over the full row range.
sobel_data[0].old=&image;
sobel_data[0].newt=&sobel_x;
sobel_data[0].filter=&sobel_x_filter;
sobel_data[0].ystart=0;
sobel_data[0].yend=image.y;
generic_convolve((void *)&sobel_data[0]);
//cuda_generic_convolve (int n, char *in, int *matrix, char *out)
// first inside brackets is number of blocks, second is threads per block
// cuda_generic_convolve<<<dimGrid, dimBlock>>>(int n, char *in, int *matrix, char *out);

// CPU sobel-y pass over the full row range.
sobel_data[1].old=&image;
sobel_data[1].newt=&sobel_y;
sobel_data[1].filter=&sobel_y_filter;
sobel_data[1].ystart=0;
sobel_data[1].yend=image.y;
generic_convolve((void *)&sobel_data[1]);
// cuda_generic_convolve<<<dimGrid, dimBlock>>>(int n, char *in, int *matrix, char *out);

// make the host block until the device is finished
hipDeviceSynchronize();

convolve_time=PAPI_get_real_usec();

/* Allocate arrays on GPU */
cudaMalloc_before=PAPI_get_real_usec();
hipMalloc((void**)&dev_x,n*sizeof(unsigned char));
hipMalloc((void**)&dev_y,n*sizeof(unsigned char));
hipMalloc((void**)&out,n*sizeof(unsigned char));
cudaMalloc_after=PAPI_get_real_usec();

/* Copy the local sobel_x.pixels and sobel_y.pixels to the device using hipMemcpy() */
copy_before=PAPI_get_real_usec();
hipMemcpy(dev_x,sobel_x.pixels,n*sizeof(unsigned char),hipMemcpyHostToDevice);
hipMemcpy(dev_y,sobel_y.pixels,n*sizeof(unsigned char),hipMemcpyHostToDevice);
copy_after=PAPI_get_real_usec();

/* Some hints: to debug that your kernel works, you can first set all output to 0xff and verify you get an all-white image back. */
// new_image.pixels=0xff;
/* Combine to form output */
// combine(&sobel_x,&sobel_y,&new_image);
// cuda_combine (int n, unsigned char *in_x, unsigned char *in_y, unsigned char *out)
// first inside brackets is number of blocks, second is threads per block
// NOTE(review): (n+256)/256 launches one whole extra block whenever n is a
// multiple of 256 (ceil-div is (n+255)/256), and cuda_combine has no
// i < n guard, so the trailing threads write past the allocations --
// TODO fix guard and/or ceil-div together.
combine_before=PAPI_get_real_usec();
hipLaunchKernelGGL(( cuda_combine), dim3((n+256)/256), dim3(256), 0, 0, n,dev_x,dev_y,out);
combine_after=PAPI_get_real_usec();

/* Copy the results back into new_image.pixels using hipMemcpy() (be sure to get the direction right) */
copy2_before=PAPI_get_real_usec();
hipMemcpy(new_image.pixels,out,n*sizeof(unsigned char),hipMemcpyDeviceToHost);
copy2_after=PAPI_get_real_usec();

/* REPLACE THE ABOVE WITH YOUR CODE */
/* IT SHOULD ALLOCATE SPACE ON DEVICE */
/* COPY SOBEL_X and SOBEL_Y data to device */
/* RUN THE KERNEL */
/* THEN COPY THE RESULTS BACK */

/* Write data back out to disk */
store_before=PAPI_get_real_usec();
store_jpeg("out.jpg",&new_image);
store_after=PAPI_get_real_usec();

/* Print timing results */
printf("Load time: %lld\n",load_time-start_time);
printf("Convolve time: %lld\n",convolve_time-load_time);
printf("hipMalloc time: %lld\n",cudaMalloc_after-cudaMalloc_before);
printf("Copy time: %lld\n",(copy_after-copy_before)+(copy2_after-copy2_before));
printf("Combine time: %lld\n",combine_after-combine_before);
printf("Store time: %lld\n",store_after-store_before);
printf("Total time = %lld\n",store_after-start_time);

hipFree(dev_x);//hipFree device name
hipFree(dev_y);
hipFree(out);

return 0;
}
| 6d6a799cefc069a2fbe1a82e3e044d997a897515.cu | /* Example sobel code for ECE574 -- Spring 2017 */
/* By Vince Weaver <vincent.weaver@maine.edu> */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <math.h>
#include <jpeglib.h>
#include <cuda.h>
#include <papi.h>
/* Filters */
static int sobel_x_filter[3][3]={{-1,0,+1},{-2,0,+2},{-1,0,+1}};
static int sobel_y_filter[3][3]={{-1,-2,-1},{0,0,0},{1,2,+1}};
/* Structure describing the image */
// In-memory image: interleaved channel data, row-major, `depth` bytes per
// pixel (pixels[y * x * depth + x_i * depth + channel]).
struct image_t {
int x;
int y;
int depth; /* bytes */
unsigned char *pixels;
};
// Work description handed to generic_convolve: input/output images, the
// 3x3 filter to apply, and the [ystart, yend) row range to process.
struct convolve_data_t {
struct image_t *old;
struct image_t *newt;
int (*filter)[3][3];
int ystart;
int yend;
};
#if 0
/* For the generic convolve, you will also need to upload the sobelx and sobely matrices to the
device. A simple array of 9 ints is probably best. */
__global__ //fine grained
void cuda_generic_convolve (int n, char *in, int *matrix, char *out) {
//Can get block number with blockIdx.x and thread index with threadIdx.x
}
#endif
/* How to get the grid/block/thread count right:
int blockId = blockIdx.y* gridDim.x+ blockIdx.x;
int i = blockId * blockDim.x + threadIdx.x; */
__global__ //coarse grained
/* Combine the two sobel gradient images into the output magnitude image:
 * out[i] = clamp(sqrt(x^2 + y^2), 0, 255), one thread per byte, n bytes.
 *
 * Fixes vs. the original:
 *  - bounds check: the host rounds the block count up, so the last block
 *    contains threads with i >= n; without the `i < n` guard those threads
 *    wrote past the end of the allocations.
 *  - the magnitude is clamped BEFORE the narrowing store.  Storing into an
 *    unsigned char first truncates the value modulo 256, so the old
 *    "if (out[i] > 255)" test could never fire and bright edges wrapped
 *    around instead of saturating. */
void cuda_combine (int n, unsigned char *in_x,unsigned char *in_y,unsigned char *out) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;	/* guard the ragged last block */

	double mag = sqrt(double(
			(in_x[i] * in_x[i]) +
			(in_y[i] * in_y[i])
		));
	if (mag > 255.0) mag = 255.0;	/* saturate before the 8-bit store */
	out[i] = (unsigned char)mag;
}
/* 3x3 convolution over one channel-interleaved image (reference CPU path).
 * `argument` is a convolve_data_t describing input, output, filter and the
 * [ystart, yend) row range; the one-pixel border the stencil cannot reach
 * is skipped and results are clamped to 0..255.
 *
 * Improvement vs. the original: the loop nest is now y (row), then x
 * (column), then d (channel), so the innermost iterations walk
 * old->pixels / newt->pixels in memory order (the buffer is row-major
 * [y][x][d]).  The previous d/x/y nesting strided through the whole image
 * per channel and was needlessly cache-hostile.  Every output pixel
 * depends only on the input image, so the reordering cannot change
 * results. */
static void *generic_convolve(void *argument) {

	int x, y, k, l, d;
	uint32_t color;
	int sum, depth, width;

	struct image_t *old;
	struct image_t *newt;
	int (*filter)[3][3];
	struct convolve_data_t *data;
	int ystart, yend;

	/* Convert from void pointer to the actual data type */
	data = (struct convolve_data_t *)argument;
	old = data->old;
	newt = data->newt;
	filter = data->filter;

	ystart = data->ystart;
	yend = data->yend;

	depth = old->depth;
	width = old->x * old->depth;

	/* Shrink the range so the 3x3 stencil never reads off the edge. */
	if (ystart == 0) ystart = 1;
	if (yend == old->y) yend = old->y - 1;

	for (y = ystart; y < yend; y++) {
		for (x = 1; x < old->x - 1; x++) {
			for (d = 0; d < 3; d++) {
				sum = 0;
				for (k = -1; k < 2; k++) {
					for (l = -1; l < 2; l++) {
						color = old->pixels[((y + l) * width) + (x * depth + d + k * depth)];
						sum += color * (*filter)[k + 1][l + 1];
					}
				}

				/* Clamp to the 8-bit output range. */
				if (sum < 0) sum = 0;
				if (sum > 255) sum = 255;

				newt->pixels[(y * width) + x * depth + d] = sum;
			}
		}
	}

	return NULL;
}
// static int combine(struct image_t *s_x,
// struct image_t *s_y,
// struct image_t *newt) {
// int i;
// int out;
//
// for(i=0;i<( s_x->depth * s_x->x * s_x->y );i++) {
//
// out=sqrt(
// (s_x->pixels[i]*s_x->pixels[i])+
// (s_y->pixels[i]*s_y->pixels[i])
// );
// if (out>255) out=255;
// if (out<0) out=0;
// newt->pixels[i]=out;
// }
//
// return 0;
// }
/* Decode a JPEG file into *image (pixels tightly packed, row-major,
 * image->depth bytes per pixel).  The pixel buffer is malloc()ed here
 * and is owned by the caller.  Returns 0 on success, -1 on error. */
static int load_jpeg(char *filename, struct image_t *image) {

	FILE *fff;
	struct jpeg_decompress_struct cinfo;
	struct jpeg_error_mgr jerr;
	JSAMPROW output_data;
	unsigned int scanline_len;
	int scanline_count=0;

	fff=fopen(filename,"rb");
	if (fff==NULL) {
		fprintf(stderr, "Could not load %s: %s\n",
			filename, strerror(errno));
		return -1;
	}

	/* set up jpeg error routines */
	cinfo.err = jpeg_std_error(&jerr);
	/* Initialize cinfo */
	jpeg_create_decompress(&cinfo);
	/* Set input file */
	jpeg_stdio_src(&cinfo, fff);
	/* read header */
	jpeg_read_header(&cinfo, TRUE);
	/* Start decompressor */
	jpeg_start_decompress(&cinfo);

	printf("output_width=%d, output_height=%d, output_components=%d\n",
		cinfo.output_width,
		cinfo.output_height,
		cinfo.output_components);

	image->x=cinfo.output_width;
	image->y=cinfo.output_height;
	image->depth=cinfo.output_components;
	scanline_len = cinfo.output_width * cinfo.output_components;

	/* Allocate the pixel buffer.  The old code did not check this and
	   would have handed a NULL buffer to jpeg_read_scanlines(). */
	image->pixels=(unsigned char *)malloc((size_t)cinfo.output_height * scanline_len);
	if (image->pixels==NULL) {
		fprintf(stderr, "Could not allocate pixels for %s\n", filename);
		jpeg_destroy_decompress(&cinfo);
		fclose(fff);
		return -1;
	}

	/* Read the image one scanline at a time */
	while (scanline_count < cinfo.output_height) {
		output_data = (image->pixels + (scanline_count * scanline_len));
		jpeg_read_scanlines(&cinfo, &output_data, 1);
		scanline_count++;
	}

	/* Finish decompressing */
	jpeg_finish_decompress(&cinfo);
	jpeg_destroy_decompress(&cinfo);

	fclose(fff);

	return 0;
}
/* Write *image to filename as a JPEG at quality 90.
 * Returns 0 on success, -1 if the output file cannot be opened.
 * NOTE(review): in_color_space is hard-coded to JCS_RGB, so this
 * assumes image->depth == 3 -- confirm for non-RGB inputs. */
static int store_jpeg(const char *filename, struct image_t *image) {

	struct jpeg_compress_struct cinfo;
	struct jpeg_error_mgr jerr;
	int quality=90; /* % */
	int i;
	FILE *fff;
	JSAMPROW row_pointer[1];
	int row_stride;

	/* Open the output file first.  The old code created the compress
	   object before this check and leaked it when fopen failed. */
	fff = fopen(filename, "wb");
	if (fff==NULL) {
		fprintf(stderr, "can't open %s: %s\n",
			filename,strerror(errno));
		return -1;
	}

	/* setup error handler */
	cinfo.err = jpeg_std_error(&jerr);
	/* initialize jpeg compression object */
	jpeg_create_compress(&cinfo);
	jpeg_stdio_dest(&cinfo, fff);

	/* Set compression parameters */
	cinfo.image_width = image->x;
	cinfo.image_height = image->y;
	cinfo.input_components = image->depth;
	cinfo.in_color_space = JCS_RGB;
	jpeg_set_defaults(&cinfo);
	jpeg_set_quality(&cinfo, quality, TRUE);

	/* start compressing */
	jpeg_start_compress(&cinfo, TRUE);

	/* Write one scanline at a time */
	row_stride=image->x*image->depth;
	for(i=0;i<image->y;i++) {
		row_pointer[0] = & image->pixels[i * row_stride];
		jpeg_write_scanlines(&cinfo, row_pointer, 1);
	}

	/* finish compressing */
	jpeg_finish_compress(&cinfo);
	/* close file */
	fclose(fff);
	/* clean up */
	jpeg_destroy_compress(&cinfo);

	return 0;
}
/* Sobel edge-detect driver.
 * Pipeline: load JPEG -> convolve with the sobel X/Y filters on the
 * CPU -> combine the two gradient images on the GPU -> store JPEG.
 * PAPI timestamps bracket each stage and are printed at the end. */
int main(int argc, char **argv) {

	struct image_t image,sobel_x,sobel_y,new_image;
	struct convolve_data_t sobel_data[2];
	long long start_time,load_time,convolve_time;
	long long combine_after,combine_before;
	long long copy_before,copy_after,copy2_before,copy2_after;
	long long store_after,store_before;
	long long cudaMalloc_after,cudaMalloc_before;
	unsigned char *dev_x, *dev_y, *out;	/* device buffers */
	long long n;				/* image size in bytes */

	/* Check command line usage */
	if (argc<2) {
		fprintf(stderr,"Usage: %s image_file\n",argv[0]);
		return -1;
	}

	PAPI_library_init(PAPI_VER_CURRENT);
	start_time=PAPI_get_real_usec();

	/* Load an image; bail out if it cannot be read (the old code
	   ignored the error return and carried on with garbage). */
	if (load_jpeg(argv[1],&image)) {
		return -1;
	}
	load_time=PAPI_get_real_usec();

	/* Allocate host buffers for the two gradient images and the
	   output, all with the same geometry as the input image. */
	new_image.x=image.x;
	new_image.y=image.y;
	new_image.depth=image.depth;
	new_image.pixels=(unsigned char *)malloc(image.x*image.y*image.depth*sizeof(char));

	sobel_x.x=image.x;
	sobel_x.y=image.y;
	sobel_x.depth=image.depth;
	sobel_x.pixels=(unsigned char *)malloc(image.x*image.y*image.depth*sizeof(char));

	sobel_y.x=image.x;
	sobel_y.y=image.y;
	sobel_y.depth=image.depth;
	sobel_y.pixels=(unsigned char *)malloc(image.x*image.y*image.depth*sizeof(char));

	/* Total number of bytes in the image (depth bytes per pixel).
	   NOTE(review): cuda_combine takes an int, so an image of 2 GB or
	   more would truncate n -- fine for assignment-sized inputs. */
	n=image.x*image.y*image.depth*sizeof(char);

	/* Convolve with the X filter (CPU) */
	sobel_data[0].old=&image;
	sobel_data[0].newt=&sobel_x;
	sobel_data[0].filter=&sobel_x_filter;
	sobel_data[0].ystart=0;
	sobel_data[0].yend=image.y;
	generic_convolve((void *)&sobel_data[0]);

	/* Convolve with the Y filter (CPU) */
	sobel_data[1].old=&image;
	sobel_data[1].newt=&sobel_y;
	sobel_data[1].filter=&sobel_y_filter;
	sobel_data[1].ystart=0;
	sobel_data[1].yend=image.y;
	generic_convolve((void *)&sobel_data[1]);

	convolve_time=PAPI_get_real_usec();

	/* Allocate arrays on GPU */
	cudaMalloc_before=PAPI_get_real_usec();
	cudaMalloc((void**)&dev_x,n*sizeof(unsigned char));
	cudaMalloc((void**)&dev_y,n*sizeof(unsigned char));
	cudaMalloc((void**)&out,n*sizeof(unsigned char));
	cudaMalloc_after=PAPI_get_real_usec();

	/* Copy both gradient images to the device */
	copy_before=PAPI_get_real_usec();
	cudaMemcpy(dev_x,sobel_x.pixels,n*sizeof(unsigned char),cudaMemcpyHostToDevice);
	cudaMemcpy(dev_y,sobel_y.pixels,n*sizeof(unsigned char),cudaMemcpyHostToDevice);
	copy_after=PAPI_get_real_usec();

	/* Combine the gradients on the GPU.  Use a proper ceiling division
	   for the grid size: the old (n+256)/256 launched one whole extra
	   block whenever n was a multiple of 256.  Synchronize before
	   reading the clock -- kernel launches are asynchronous, so the
	   old code only timed the launch overhead. */
	combine_before=PAPI_get_real_usec();
	cuda_combine<<<(n+255)/256, 256>>>(n,dev_x,dev_y,out);
	cudaDeviceSynchronize();
	combine_after=PAPI_get_real_usec();

	/* Copy the result back into new_image.pixels */
	copy2_before=PAPI_get_real_usec();
	cudaMemcpy(new_image.pixels,out,n*sizeof(unsigned char),cudaMemcpyDeviceToHost);
	copy2_after=PAPI_get_real_usec();

	/* Write data back out to disk */
	store_before=PAPI_get_real_usec();
	store_jpeg("out.jpg",&new_image);
	store_after=PAPI_get_real_usec();

	/* Print timing results */
	printf("Load time: %lld\n",load_time-start_time);
	printf("Convolve time: %lld\n",convolve_time-load_time);
	printf("cudaMalloc time: %lld\n",cudaMalloc_after-cudaMalloc_before);
	printf("Copy time: %lld\n",(copy_after-copy_before)+(copy2_after-copy2_before));
	printf("Combine time: %lld\n",combine_after-combine_before);
	printf("Store time: %lld\n",store_after-store_before);
	printf("Total time = %lld\n",store_after-start_time);

	/* Release device and host memory */
	cudaFree(dev_x);
	cudaFree(dev_y);
	cudaFree(out);
	free(image.pixels);
	free(sobel_x.pixels);
	free(sobel_y.pixels);
	free(new_image.pixels);

	return 0;
}
|
c5b7bbcb54f0e698cac1372c876ff48b70701675.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
 * Entry-point kernel for element-wise vector addition:
 * ptrDevW[i] = ptrDevV1[i] + ptrDevV2[i] for i in [0, n).
 * (Kernel return type must be void.)  Delegates to secondaire().
 */
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // not necessary, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/**
 * Device helper doing the actual vector add: each thread handles
 * elements TID, TID+NB_THREAD, TID+2*NB_THREAD, ... (interleaved
 * pattern), so any launch configuration covers all n elements.
 */
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
// Debug
if (TID == 0)
{
printf("Coucou from device tid%d", TID); //requires Device::synchronize(); after the kernel call for output to flush
}
// Interleaved pattern: stride over the array by NB_THREAD
int s = TID;
while (s < n)
{
ptrDevW[s] = ptrDevV1[s] + ptrDevV2[s];
s += NB_THREAD;
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| c5b7bbcb54f0e698cac1372c876ff48b70701675.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW,int n);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
 * Entry-point kernel for element-wise vector addition:
 * ptrDevW[i] = ptrDevV1[i] + ptrDevV2[i] for i in [0, n).
 * (Kernel return type must be void.)  Delegates to secondaire().
 */
__global__ void addVector(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
secondaire(ptrDevV1, ptrDevV2, ptrDevW, n); // not necessary, just for fun
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/**
 * Device helper doing the actual vector add.  Threads walk the array
 * in an interleaved pattern (thread t touches t, t+step, t+2*step, ...),
 * so every element in [0, n) is covered regardless of launch size.
 */
__device__ void secondaire(float* ptrDevV1, float* ptrDevV2, float* ptrDevW, int n)
{
	const int step = Indice2D::nbThread();
	const int tid = Indice2D::tid();

	// Debug print from a single thread (needs a host-side synchronize
	// after the kernel call for the output to appear).
	if (tid == 0)
	{
		printf("Coucou from device tid%d", tid);
	}

	// Interleaved traversal of the n elements.
	for (int idx = tid; idx < n; idx += step)
	{
		ptrDevW[idx] = ptrDevV1[idx] + ptrDevV2[idx];
	}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
8b780d1f9c729d15b9912d6cad1e95b7bb54d760.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
/* Element-wise less-than-scalar test:
   result[id] = 1.0f if x[id] < y, else 0.0f, for id in [0, n).
   One thread per element; threads with id >= n do nothing.
   NOTE(review): id is a signed int while n is size_t, so n >= 2^31
   would overflow the index -- confirm callers keep n below INT_MAX. */
__global__ void vec_ltScalarf (size_t n, float *result, float *x, float y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × p .
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of l o g e ( 1 + x ) .
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × p .
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value afer argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
/* Writes a 0/1 float mask: result[i] = 1.0f where x[i] < y, else 0.0f.
   One thread per element; threads past the end of the vector exit. */
__global__ void vec_ltScalarf (size_t n, float *result, float *x, float y)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
    {
        float flag = 0.0f;
        if (x[idx] < y)
        {
            flag = 1.0f;
        }
        result[idx] = flag;
    }
}
72324e739c0616d0a4c8b43d7b51495cf4aa8733.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/chemv.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ahmad Abdelfattah
* @date 2017-11-13
**/
#include "syhemv_core.cuh"
#if(SM >= 30)
#define chemv_upper_bs (32)
#define chemv_upper_ty (2)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (2)
#define chemv_lower_by (2)
#else
#define chemv_upper_bs (64)
#define chemv_upper_ty (8)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#define chemv_lower_by (2)
#endif
/*
 * Driver for the complex Hermitian matrix-vector product (CHEMV):
 * selects the upper- or lower-triangular kernel pair based on uplo,
 * derives the launch geometry from m and the per-case tuning macros,
 * and enqueues the kernels on the given stream.
 * Returns 0 on success, -1 for an unsupported uplo or an
 * unexpected irregular-tail size.
 */
int kblas_chemv_driver( char uplo,
int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
hipStream_t stream)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
/**
* If you change the configuration parameters,
* you must revise the case statement of the upper case
* to make sure it covers all the possible cases
**/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
/* mod = size of the partial (irregular) tail block, if any */
int mod = m % chemv_bs;
int blocks = m / chemv_bs + (mod != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
if(mod == 0)
{
/* _d / _nd kernel pair -- presumably diagonal and off-diagonal
   contributions; confirm in syhemv_core.cuh */
hipLaunchKernelGGL(( syhemvu_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy);
hipLaunchKernelGGL(( syhemvu_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy);
}
else
{
hipLaunchKernelGGL(( syhemvu_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod);
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
const int irregular_part = mod % elements_per_thread;
switch(irregular_part)
{
case 0:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 1:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 2:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 3:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 4:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 5:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 6:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 7:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 8:hipLaunchKernelGGL(( syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
int mod = m % chemv_bs;
int blocks = m / chemv_bs + (mod != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks,chemv_lower_by);
if(mod == 0)
{
hipLaunchKernelGGL(( syhemvl_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy);
hipLaunchKernelGGL(( syhemvl_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy);
}
else
{
hipLaunchKernelGGL(( syhemvl_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod);
hipLaunchKernelGGL(( syhemvl_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid_), dim3(dimBlock), 0, stream, m, alpha, dA, lda, dX, incx, beta, dY, incy, mod);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/* Public C entry point: CHEMV enqueued on the default (0) stream. */
extern "C"
int kblas_chemv( char uplo,
int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy)
{
return kblas_chemv_driver( uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}
/* Public C entry point: CHEMV enqueued on a caller-supplied stream. */
extern "C"
int kblas_chemv_async( char uplo,
int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy, hipStream_t stream)
{
return kblas_chemv_driver( uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
}
| 72324e739c0616d0a4c8b43d7b51495cf4aa8733.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/chemv.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 2.0.0
* @author Ahmad Abdelfattah
* @date 2017-11-13
**/
#include "syhemv_core.cuh"
#if(SM >= 30)
#define chemv_upper_bs (32)
#define chemv_upper_ty (2)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (2)
#define chemv_lower_by (2)
#else
#define chemv_upper_bs (64)
#define chemv_upper_ty (8)
#define chemv_upper_by (2)
#define chemv_lower_bs (32)
#define chemv_lower_ty (4)
#define chemv_lower_by (2)
#endif
/*
 * Driver for the complex Hermitian matrix-vector product (CHEMV):
 * selects the upper- or lower-triangular kernel pair based on uplo,
 * derives the launch geometry from m and the per-case tuning macros,
 * and enqueues the kernels on the given stream.
 * Returns 0 on success, -1 for an unsupported uplo or an
 * unexpected irregular-tail size.
 */
int kblas_chemv_driver( char uplo,
int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy,
cudaStream_t stream)
{
// handle the case when incx and/or incy is -ve
if(incx < 0) dX -= (m-1) * incx;
if(incy < 0) dY -= (m-1) * incy;
if(uplo == 'U' || uplo == 'u')
{
/** configuration params **/
/**
* If you change the configuration parameters,
* you must revise the case statement of the upper case
* to make sure it covers all the possible cases
**/
const int chemv_bs = chemv_upper_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_upper_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
/* mod = size of the partial (irregular) tail block, if any */
int mod = m % chemv_bs;
int blocks = m / chemv_bs + (mod != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks, chemv_upper_by);
if(mod == 0)
{
/* _d / _nd kernel pair -- presumably diagonal and off-diagonal
   contributions; confirm in syhemv_core.cuh */
syhemvu_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy);
syhemvu_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy);
}
else
{
syhemvu_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod);
/**
* The upper case kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
const int irregular_part = mod % elements_per_thread;
switch(irregular_part)
{
case 0: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 1: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 2: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 3: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 4: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 5: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 6: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 7: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
case 8: syhemvu_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid_, dimBlock, 0, stream>>>( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod); break;
// return error otherwise:
default: printf("CHEMV-UPPER ERROR: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else if(uplo == 'L' || uplo == 'l')
{
/** configuration params **/
const int chemv_bs = chemv_lower_bs;
const int thread_x = chemv_bs;
const int thread_y = chemv_lower_ty;
const int elements_per_thread = (chemv_bs/(2*thread_y)) ;
/** end configuration params **/
int mod = m % chemv_bs;
int blocks = m / chemv_bs + (mod != 0);
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks,1);
dim3 dimGrid_(blocks,chemv_lower_by);
if(mod == 0)
{
syhemvl_special_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy);
syhemvl_special_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy);
}
else
{
syhemvl_generic_d<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod);
syhemvl_generic_nd<cuFloatComplex, chemv_bs, thread_x, thread_y, elements_per_thread><<<dimGrid_, dimBlock, 0, stream>>> ( m, alpha, dA, lda, dX, incx, beta, dY, incy, mod);
}
}
else{printf("Upper/Lower mode %c is not supported \n", uplo); return -1;}
return 0;
}
/* Synchronous Hermitian matrix-vector multiply, y = alpha*A*x + beta*y,
 * for single-precision complex data.  Delegates to the driver on the
 * default (NULL) stream. */
extern "C"
int kblas_chemv( char uplo,
int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy)
{
/* 0 == default stream: the call completes before control returns to the host queue. */
return kblas_chemv_driver( uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, 0);
}
/* Asynchronous variant of kblas_chemv: identical math, but all kernels are
 * enqueued on the caller-supplied stream.  The caller must synchronize the
 * stream before reading dY. */
extern "C"
int kblas_chemv_async( char uplo,
int m, cuFloatComplex alpha, cuFloatComplex *dA, int lda,
cuFloatComplex *dX, int incx,
cuFloatComplex beta, cuFloatComplex *dY, int incy, cudaStream_t stream)
{
return kblas_chemv_driver( uplo, m, alpha, dA, lda, dX, incx, beta, dY, incy, stream);
}
|
cb8dc1aaf5ae6549ec8e6ae4941a67933ef160c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "constants.h"
#include "pass_types.h"
extern "C" {
#include "bsparseconv.h"
}
/* Sparse cyclic convolution: for each output coefficient `thread` in
 * [0, PASS_N), accumulates c[thread] +/- a[(thread - k) mod PASS_N] over the
 * PASS_b nonzero positions k = b->ind[i] of the sparse polynomial b, with the
 * sign taken from b's coefficient value.  Each thread touches only its own
 * c[thread], so no synchronization or atomics are needed.
 * Expects a launch with at least PASS_N total threads. */
__global__ void bsparseconv_kernel(int64 *c, const int64 *a, const b_sparse_poly *b)
{
int64 i = 0;
int64 k = 0;
/* Flat global thread id; one thread per output coefficient. */
int thread = threadIdx.x + blockDim.x*blockIdx.x;
if(thread < PASS_N)
{
for (i = 0; i < PASS_b; i++) {
k = b->ind[i];
/* NOTE(review): the sign test indexes val by the *position* k, while the
 * else-branch comment below says "b->val[i] == -1", which suggests val is
 * parallel to ind and should be indexed by i.  Verify against the
 * b_sparse_poly definition -- if val has only PASS_b entries this reads
 * out of bounds whenever k >= PASS_b. */
if(b->val[k] > 0) {
/* Positive coefficient: add, wrapping the index mod PASS_N. */
if(thread < k)
{
c[(thread)] += a[thread - k + PASS_N];
}
else //(thread >= k)
{
c[thread] += a[thread-k];
}
}
else
{ /* b->val[i] == -1 */
/* Negative coefficient: subtract, same cyclic indexing. */
if(thread < k)
{
c[thread] -= a[thread - k + PASS_N];
}
else //(thread > k)
{
c[thread] -= a[thread-k];
}
}
}
}
//return 0;
}
/* Host launcher: runs bsparseconv_kernel as a single block of PASS_N threads.
 * NOTE(review): if PASS_N exceeds the device's max-threads-per-block limit
 * (typically 1024) the launch fails silently -- there is no error check after
 * the launch and no synchronization, so the caller must sync (or issue a
 * blocking copy) before reading c. */
extern "C" void bsparseconv_gpu(int64 *c, const int64 *a, const b_sparse_poly *b)
{
int msg_count=1;
unsigned int num_blocks = msg_count ;
unsigned int num_threads = PASS_N;
/* z = y += f*c */
hipLaunchKernelGGL(( bsparseconv_kernel), dim3(num_blocks),dim3(num_threads), 0, 0, c, a, b);
/* No modular reduction required. */
}
| cb8dc1aaf5ae6549ec8e6ae4941a67933ef160c1.cu | #include "constants.h"
#include "pass_types.h"
extern "C" {
#include "bsparseconv.h"
}
/* Sparse cyclic convolution: for each output coefficient `thread` in
 * [0, PASS_N), accumulates c[thread] +/- a[(thread - k) mod PASS_N] over the
 * PASS_b nonzero positions k = b->ind[i] of the sparse polynomial b, with the
 * sign taken from b's coefficient value.  Each thread touches only its own
 * c[thread], so no synchronization or atomics are needed.
 * Expects a launch with at least PASS_N total threads. */
__global__ void bsparseconv_kernel(int64 *c, const int64 *a, const b_sparse_poly *b)
{
int64 i = 0;
int64 k = 0;
/* Flat global thread id; one thread per output coefficient. */
int thread = threadIdx.x + blockDim.x*blockIdx.x;
if(thread < PASS_N)
{
for (i = 0; i < PASS_b; i++) {
k = b->ind[i];
/* NOTE(review): the sign test indexes val by the *position* k, while the
 * else-branch comment below says "b->val[i] == -1", which suggests val is
 * parallel to ind and should be indexed by i.  Verify against the
 * b_sparse_poly definition -- if val has only PASS_b entries this reads
 * out of bounds whenever k >= PASS_b. */
if(b->val[k] > 0) {
/* Positive coefficient: add, wrapping the index mod PASS_N. */
if(thread < k)
{
c[(thread)] += a[thread - k + PASS_N];
}
else //(thread >= k)
{
c[thread] += a[thread-k];
}
}
else
{ /* b->val[i] == -1 */
/* Negative coefficient: subtract, same cyclic indexing. */
if(thread < k)
{
c[thread] -= a[thread - k + PASS_N];
}
else //(thread > k)
{
c[thread] -= a[thread-k];
}
}
}
}
//return 0;
}
/* Host launcher: runs bsparseconv_kernel as a single block of PASS_N threads.
 * NOTE(review): if PASS_N exceeds the device's max-threads-per-block limit
 * (typically 1024) the launch fails silently -- there is no cudaGetLastError()
 * check after the launch and no synchronization, so the caller must sync (or
 * issue a blocking copy) before reading c. */
extern "C" void bsparseconv_gpu(int64 *c, const int64 *a, const b_sparse_poly *b)
{
int msg_count=1;
unsigned int num_blocks = msg_count ;
unsigned int num_threads = PASS_N;
/* z = y += f*c */
bsparseconv_kernel<<<num_blocks,num_threads>>>(c, a, b);
/* No modular reduction required. */
}
|
54d1a24375704cc76d59a56ca2e975c0517fe410.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "sys/time.h"
using namespace std;
// Elapsed wall-clock time between two gettimeofday() samples, in seconds.
double timeInSeconds (timeval& starttime, timeval& stopstime) {
    const double sec_delta  = 1e6 * (stopstime.tv_sec - starttime.tv_sec);
    const double usec_delta = stopstime.tv_usec - starttime.tv_usec;
    return 1e-6 * (sec_delta + usec_delta);
}
//__device__ double* dev_vector1 = 0;
//__device__ double* dev_vector2 = 0;
//__device__ double* dev_results = 0;
// Element-wise product: res[i] = v1[i] * v2[i], one thread per element.
// Only threadIdx.x is used, so the caller must launch a single block with
// exactly as many threads as elements (main does <<<1, sizeOfVector>>>).
// Fix: removed the stray factor of 2, which made the GPU dot product twice
// the CPU reference -- the original comment ("IMPLEMENT ME 6") asks for a
// plain element-by-element multiply.
__global__ void device_vector_mult (double* v1, double* v2, double* res) {
    res[threadIdx.x] = v1[threadIdx.x] * v2[threadIdx.x];
}
// In-place tree sum of results[0 .. *length-1]; the total ends in results[0].
// Each pass folds the upper half onto the lower half, with the half size
// rounded up so odd lengths keep their middle element for the next pass.
// Requirements: a SINGLE block with blockDim.x >= ceil(len/2), and `length`
// must be a DEVICE pointer -- it is dereferenced on the GPU.
// NOTE(review): main() passes &sizeOfVector, a host stack address, which is
// an illegal access on the device; allocate a device int instead.
// The barrier is safe: len is uniform across threads, so every thread
// executes the same number of loop iterations and reaches __syncthreads().
__global__ void device_vector_reduce (double* results, int* length) {
int len = *length;
while (len > 1) {
// Writers cover [0, floor(len/2)); they read from [ceil(len/2), len),
// so reads and writes within one pass never overlap.
if (threadIdx.x < (len - (len % 2)) / 2) {
results[threadIdx.x] += results[threadIdx.x + (len + (len % 2)) / 2];
}
len = (len + (len % 2)) / 2;
__syncthreads();
}
}
// Debug stub: thread 0 writes the sentinel value 2 into res[0]; `vec` is
// ignored.  The only call site in main() is commented out.
__global__ void device_vector_simpleAdd (double* vec, double* res) {
if (threadIdx.x == 0) {
res[0] = 2;
}
}
int main (int argc, char** argv) {
int sizeOfVector = 100;
if (argc > 1) sizeOfVector = atoi(argv[1]);
// Declare and fill host-side arrays of doubles.
double* vector1 = new double[sizeOfVector];
double* vector2 = new double[sizeOfVector];
double* results = new double[sizeOfVector];
double* gpuresults = new double[sizeOfVector];
double* gpuAddresults = new double[sizeOfVector];
srand(42);
for (int i = 0; i < sizeOfVector; ++i) {
vector1[i] = rand() % 100;
vector2[i] = rand() % 100;
results[i] = 0;
gpuresults[i] = 0;
gpuAddresults[i] = 0;
}
timeval startTime;
timeval interTime;
timeval stopsTime;
gettimeofday(&startTime, NULL);
// Use the CPU for this part.
// IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding
// element in vector2 and store in results.
for (int i = 0; i < sizeOfVector; ++i) {
results[i] = vector1[i] * vector2[i];
}
gettimeofday(&interTime, NULL);
double total = 0;
// IMPLEMENT ME 2: Sum the results array and store the sum in total.
for (int i = 0; i < sizeOfVector; ++i) {
total += results[i];
}
gettimeofday(&stopsTime, NULL);
cout << "Dot product is: " << total << endl;
// IMPLEMENT ME 3: Time the above operations together and separately
// using 'gettimeofday'.
cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
double* dev_vector1 = 0;
double* dev_vector2 = 0;
double* dev_results = 0;
int sizeInBytes = sizeOfVector * sizeof(double);
hipMalloc((void**) &dev_vector1, sizeInBytes);
hipMalloc((void**) &dev_vector2, sizeInBytes);
hipMalloc((void**) &dev_results, sizeInBytes);
hipMemcpy(dev_vector1, vector1, sizeInBytes, hipMemcpyHostToDevice);
hipMemcpy(dev_vector2, vector2, sizeInBytes, hipMemcpyHostToDevice);
gettimeofday(&startTime, NULL);
hipLaunchKernelGGL(( device_vector_mult), dim3(1), dim3(sizeOfVector), 0, 0, dev_vector1, dev_vector2, dev_results);
double gputotal = 0;
hipMemcpy(gpuresults, dev_results, sizeInBytes, hipMemcpyDeviceToHost);
gettimeofday(&interTime, NULL);
for (int i = 0; i < sizeOfVector; ++i) {
gputotal += gpuresults[i];
}
gettimeofday(&stopsTime, NULL);
cout << "GPU-mult Dot product is: " << gputotal << endl;
cout << "GPU-mult Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "GPU-mult Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "GPU-mult Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
double * dev_added = 0;
hipMalloc((void**) &dev_added, sizeof(double));
//device_vector_simpleAdd<<<1, sizeOfVector>>>(dev_results, dev_added);
hipLaunchKernelGGL(( device_vector_reduce), dim3(1), dim3(sizeOfVector), 0, 0, dev_results, &sizeOfVector);
double host_added = 2;
//hipMemcpy(&host_added, &dev_added[0], sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&host_added, &dev_results[0], sizeof(double), hipMemcpyDeviceToHost);
cout <<"GPU-full Dot product is: " << host_added << endl;
cout << "Size of Vectors is: " << sizeOfVector << endl;
return 0;
}
| 54d1a24375704cc76d59a56ca2e975c0517fe410.cu | #include <iostream>
#include "sys/time.h"
using namespace std;
// Elapsed wall-clock time between two gettimeofday() samples, in seconds.
double timeInSeconds (timeval& starttime, timeval& stopstime) {
    const double sec_delta  = 1e6 * (stopstime.tv_sec - starttime.tv_sec);
    const double usec_delta = stopstime.tv_usec - starttime.tv_usec;
    return 1e-6 * (sec_delta + usec_delta);
}
//__device__ double* dev_vector1 = 0;
//__device__ double* dev_vector2 = 0;
//__device__ double* dev_results = 0;
// Element-wise product: res[i] = v1[i] * v2[i], one thread per element.
// Only threadIdx.x is used, so the caller must launch a single block with
// exactly as many threads as elements (main does <<<1, sizeOfVector>>>).
// Fix: removed the stray factor of 2, which made the GPU dot product twice
// the CPU reference -- the original comment ("IMPLEMENT ME 6") asks for a
// plain element-by-element multiply.
__global__ void device_vector_mult (double* v1, double* v2, double* res) {
    res[threadIdx.x] = v1[threadIdx.x] * v2[threadIdx.x];
}
// In-place tree sum of results[0 .. *length-1]; the total ends in results[0].
// Each pass folds the upper half onto the lower half, with the half size
// rounded up so odd lengths keep their middle element for the next pass.
// Requirements: a SINGLE block with blockDim.x >= ceil(len/2), and `length`
// must be a DEVICE pointer -- it is dereferenced on the GPU.
// NOTE(review): main() passes &sizeOfVector, a host stack address, which is
// an illegal access on the device; allocate a device int instead.
// The barrier is safe: len is uniform across threads, so every thread
// executes the same number of loop iterations and reaches __syncthreads().
__global__ void device_vector_reduce (double* results, int* length) {
int len = *length;
while (len > 1) {
// Writers cover [0, floor(len/2)); they read from [ceil(len/2), len),
// so reads and writes within one pass never overlap.
if (threadIdx.x < (len - (len % 2)) / 2) {
results[threadIdx.x] += results[threadIdx.x + (len + (len % 2)) / 2];
}
len = (len + (len % 2)) / 2;
__syncthreads();
}
}
// Debug stub: thread 0 writes the sentinel value 2 into res[0]; `vec` is
// ignored.  The only call site in main() is commented out.
__global__ void device_vector_simpleAdd (double* vec, double* res) {
if (threadIdx.x == 0) {
res[0] = 2;
}
}
// Computes a dot product three ways -- all on CPU, multiply on GPU + sum on
// CPU, and fully on GPU -- timing and printing each variant.
// NOTE: both kernels are launched as ONE block of sizeOfVector threads, so
// sizeOfVector must not exceed the device max-threads-per-block (typ. 1024).
int main (int argc, char** argv) {
int sizeOfVector = 100;
if (argc > 1) sizeOfVector = atoi(argv[1]);
// Declare and fill host-side arrays of doubles.
double* vector1 = new double[sizeOfVector];
double* vector2 = new double[sizeOfVector];
double* results = new double[sizeOfVector];
double* gpuresults = new double[sizeOfVector];
double* gpuAddresults = new double[sizeOfVector];  // never read afterwards
srand(42);  // fixed seed for reproducible runs
for (int i = 0; i < sizeOfVector; ++i) {
vector1[i] = rand() % 100;
vector2[i] = rand() % 100;
results[i] = 0;
gpuresults[i] = 0;
gpuAddresults[i] = 0;
}
timeval startTime;
timeval interTime;
timeval stopsTime;
gettimeofday(&startTime, NULL);
// Use the CPU for this part.
// IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding
// element in vector2 and store in results.
for (int i = 0; i < sizeOfVector; ++i) {
results[i] = vector1[i] * vector2[i];
}
gettimeofday(&interTime, NULL);
double total = 0;
// IMPLEMENT ME 2: Sum the results array and store the sum in total.
for (int i = 0; i < sizeOfVector; ++i) {
total += results[i];
}
gettimeofday(&stopsTime, NULL);
cout << "Dot product is: " << total << endl;
// IMPLEMENT ME 3: Time the above operations together and separately
// using 'gettimeofday'.
cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
// GPU path: copy inputs over, multiply on device, sum back on the host.
double* dev_vector1 = 0;
double* dev_vector2 = 0;
double* dev_results = 0;
int sizeInBytes = sizeOfVector * sizeof(double);
cudaMalloc((void**) &dev_vector1, sizeInBytes);
cudaMalloc((void**) &dev_vector2, sizeInBytes);
cudaMalloc((void**) &dev_results, sizeInBytes);
cudaMemcpy(dev_vector1, vector1, sizeInBytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_vector2, vector2, sizeInBytes, cudaMemcpyHostToDevice);
gettimeofday(&startTime, NULL);
device_vector_mult<<<1, sizeOfVector>>>(dev_vector1, dev_vector2, dev_results);
double gputotal = 0;
// The blocking copy-back also synchronizes with the kernel above.
cudaMemcpy(gpuresults, dev_results, sizeInBytes, cudaMemcpyDeviceToHost);
gettimeofday(&interTime, NULL);
for (int i = 0; i < sizeOfVector; ++i) {
gputotal += gpuresults[i];
}
gettimeofday(&stopsTime, NULL);
cout << "GPU-mult Dot product is: " << gputotal << endl;
cout << "GPU-mult Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "GPU-mult Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "GPU-mult Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
double * dev_added = 0;
cudaMalloc((void**) &dev_added, sizeof(double));
//device_vector_simpleAdd<<<1, sizeOfVector>>>(dev_results, dev_added);
// BUG(review): &sizeOfVector is a HOST stack address, but the kernel does
// `*length` on the DEVICE -- an illegal device access.  Stage the length
// through a device int (cudaMalloc + cudaMemcpy) and pass that pointer.
device_vector_reduce<<<1, sizeOfVector>>>(dev_results, &sizeOfVector);
double host_added = 2;  // sentinel; should be overwritten by the copy below
//cudaMemcpy(&host_added, &dev_added[0], sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&host_added, &dev_results[0], sizeof(double), cudaMemcpyDeviceToHost);
cout <<"GPU-full Dot product is: " << host_added << endl;
cout << "Size of Vectors is: " << sizeOfVector << endl;
// NOTE(review): device allocations and the new[] host buffers leak here.
return 0;
}
|
60dda855c6badfcce5366db4c7c07763c7c9e639.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
char* concat(char *s1, char *s2);
// AXPY-style update: x[i] += alpha * r[i] for i in [0, size), with the
// scalar alpha read from r_squared[0] (the same device value for every
// thread).  One thread per element; the guard handles grid overshoot.
__global__ void x_calculation(float * x ,float * r,float * r_squared ,int size)
{
int index = blockDim.x * blockIdx.x + threadIdx.x ;
if (index < size)
{
float alpha = r_squared[0] ;
x[index] = x[index] + alpha * r[index] ;
}
}
char* concat(char *s1, char *s2);
__global__ void x_calculation(float * x ,float * r,float * r_squared ,int size)
{
int index = blockDim.x * blockIdx.x + threadIdx.x ;
if (index < size)
{
float alpha = r_squared[0] ;
x[index] = x[index] + alpha * r[index] ;
}
} |
d5390c6b2416ce9862621063aa1d51ec7c70b7a9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096
#define NJ 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/* CPU reference: applies a fixed 3x3 stencil to the interior of A
 * (NI x NJ, row-major) and writes into B.  The one-cell border of B is
 * left untouched -- compareResults() only checks the interior. */
void conv2D(DATA_TYPE* A, DATA_TYPE* B)
{
	/* Stencil coefficients, one column of the 3x3 kernel per line. */
	DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
	c11 = +0.2;  c21 = +0.5;  c31 = -0.8;
	c12 = -0.3;  c22 = +0.6;  c32 = -0.9;
	c13 = +0.4;  c23 = +0.7;  c33 = +0.10;

	for (int row = 1; row < NI - 1; ++row)
	{
		for (int col = 1; col < NJ - 1; ++col)
		{
			/* Same 9-term left-to-right sum as the GPU kernel, kept as a
			 * single expression so the floating-point evaluation order is
			 * identical to the original. */
			B[row*NJ + col] = c11 * A[(row - 1)*NJ + (col - 1)] + c12 * A[(row + 0)*NJ + (col - 1)] + c13 * A[(row + 1)*NJ + (col - 1)]
				+ c21 * A[(row - 1)*NJ + (col + 0)] + c22 * A[(row + 0)*NJ + (col + 0)] + c23 * A[(row + 1)*NJ + (col + 0)]
				+ c31 * A[(row - 1)*NJ + (col + 1)] + c32 * A[(row + 0)*NJ + (col + 1)] + c33 * A[(row + 1)*NJ + (col + 1)];
		}
	}
}
/* Fills the NI x NJ matrix A with pseudo-random values in [0, 1].
 * rand() is never seeded in this program, so the data is identical on
 * every run. */
void init(DATA_TYPE* A)
{
int i, j;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
A[i*NJ + j] = (float)rand()/RAND_MAX;
}
}
}
/* Counts interior elements where the CPU result B and the GPU result differ
 * by more than PERCENT_DIFF_ERROR_THRESHOLD percent (percentDiff comes from
 * polybenchUtilFuncts.h).  The border is skipped because conv2D only writes
 * the interior.  Prints the mismatch count rather than failing. */
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=1; i < (NI-1); i++)
{
for (j=1; j < (NJ-1); j++)
{
if (percentDiff(B[i*NJ + j], B_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
/* Selects the compile-time default device GPU_DEVICE and prints its name.
 * Currently unused: the call in main() is commented out in favor of the
 * CLI-selected device. */
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
/* One thread per output element of the 3x3 convolution: the x dimension maps
 * to the column j (bound NJ) and the y dimension to the row i (bound NI).
 * The guard restricts writes to the interior, matching the CPU reference. */
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
/* Same coefficients as conv2D(). */
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
{
B[i * NJ + j] = c11 * A[(i - 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
+ c12 * A[(i + 0) * NJ + (j - 1)] + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
+ c13 * A[(i + 1) * NJ + (j - 1)] + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
}
}
/* Runs the 3x3 convolution on the GPU: uploads A, launches the kernel,
 * times it (kernel only, bracketed by a device sync), and downloads the
 * result into B_outputFromGpu.  The B argument is unused; it is kept for
 * signature compatibility with existing callers. */
void convolution2DCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
	double t_start, t_end;

	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
	hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
	hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	/* Fix: the kernel maps x -> column j (bound NJ) and y -> row i (bound
	 * NI), so grid.x must tile NJ and grid.y must tile NI.  The original
	 * had the two swapped, which only worked because NI == NJ. */
	dim3 grid((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );

	t_start = rtclock();
	hipLaunchKernelGGL(( Convolution2D_kernel), dim3(grid),dim3(block), 0, 0, A_gpu,B_gpu);
	hipDeviceSynchronize();  /* ensure the kernel finished before stopping the clock */
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

	hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyDeviceToHost);

	hipFree(A_gpu);
	hipFree(B_gpu);
}
/* Entry point: selects the device given as the (optional) first CLI
 * argument, runs the convolution on GPU then on CPU, and reports how many
 * interior elements disagree beyond the tolerance. */
int main(int argc, char *argv[])
{
int devID = 0;
if(argc == 2) {
devID = atoi(argv[1]);
}
printf("select device : %d\n", devID);
hipSetDevice(devID);
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess){
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
}else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
A = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
//initialize the arrays
init(A);
//GPU_argv_init();
// GPU first (B is ignored by convolution2DCuda), then the CPU reference.
convolution2DCuda(A, B, B_outputFromGpu);
t_start = rtclock();
conv2D(A, B);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);//);
compareResults(B, B_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
return 0;
}
| d5390c6b2416ce9862621063aa1d51ec7c70b7a9.cu | /**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096
#define NJ 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void conv2D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
+ c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
+ c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
}
}
}
void init(DATA_TYPE* A)
{
int i, j;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
A[i*NJ + j] = (float)rand()/RAND_MAX;
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, fail;
fail = 0;
// Compare a and b
for (i=1; i < (NI-1); i++)
{
for (j=1; j < (NJ-1); j++)
{
if (percentDiff(B[i*NJ + j], B_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
{
B[i * NJ + j] = c11 * A[(i - 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
+ c12 * A[(i + 0) * NJ + (j - 1)] + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
+ c13 * A[(i + 1) * NJ + (j - 1)] + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
}
}
/* Runs the 3x3 convolution on the GPU: uploads A, launches the kernel,
 * times it (kernel only, bracketed by a device sync), and downloads the
 * result into B_outputFromGpu.  The B argument is unused; it is kept for
 * signature compatibility with existing callers. */
void convolution2DCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
	double t_start, t_end;

	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;
	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
	cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	/* Fix: the kernel maps x -> column j (bound NJ) and y -> row i (bound
	 * NI), so grid.x must tile NJ and grid.y must tile NI.  The original
	 * had the two swapped, which only worked because NI == NJ. */
	dim3 grid((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );

	t_start = rtclock();
	Convolution2D_kernel<<<grid,block>>>(A_gpu,B_gpu);
	cudaDeviceSynchronize();  /* cudaThreadSynchronize() is deprecated */
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

	cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyDeviceToHost);

	cudaFree(A_gpu);
	cudaFree(B_gpu);
}
int main(int argc, char *argv[])
{
int devID = 0;
if(argc == 2) {
devID = atoi(argv[1]);
}
printf("select device : %d\n", devID);
cudaSetDevice(devID);
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess){
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
}else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
A = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
//initialize the arrays
init(A);
//GPU_argv_init();
convolution2DCuda(A, B, B_outputFromGpu);
t_start = rtclock();
conv2D(A, B);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);//);
compareResults(B, B_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
return 0;
}
|
39f08d1f98a8324ebc21562c90f06b1fdbf602fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Filter.cuh"
#ifdef USE_ROCM
#include "Filter.hpp"
#include "Image.hpp"
#include "Pixel.hpp"
#include "Exception.hpp"
#include <iostream>
#define CUDA_BLOCK_COUNT 128
#define CUDA_THREAD_COUNT 128
// Convolves `image` (imageWidth x imageHeight RGB pixels, row-major) with
// `kernel` (kernelWidth x kernelHeight floats, row-major), clamps each
// channel to [0, 255], and writes into `res`.  Out-of-range taps wrap
// around (toroidal boundary).  Blocks stride over rows and threads over
// columns, so the fixed <<<CUDA_BLOCK_COUNT, CUDA_THREAD_COUNT>>> launch
// covers any image size.
__global__
void ApplyFilterCuda(void* kernel, uint16_t kernelWidth, uint16_t kernelHeight,
                     void* image, uint32_t imageWidth, uint32_t imageHeight,
                     void* res)
{
    auto imageData  = static_cast<RGBPixel*>(image);
    auto result     = static_cast<RGBPixel*>(res);
    auto kernelData = static_cast<float*>(kernel);

    for (uint32_t row = blockIdx.x; row < imageHeight; row += CUDA_BLOCK_COUNT)       // y
    {
        for (uint32_t col = threadIdx.x; col < imageWidth; col += CUDA_THREAD_COUNT)  // x
        {
            float red = 0;
            float green = 0;
            float blue = 0;
            for (int c = 0; c < kernelHeight; c++) // kernel y
            {
                for (int d = 0; d < kernelWidth; d++) // kernel x
                {
                    // Fix: wrap with the IMAGE period.  The original added
                    // kernelHeight/kernelWidth before the modulo, which
                    // shifted every tap by the kernel size and translated
                    // the whole output image.
                    int dy = c - kernelHeight / 2;
                    uint32_t y = (imageHeight + row + dy) % imageHeight;
                    int dx = d - kernelWidth / 2;
                    uint32_t x = (imageWidth + col + dx) % imageWidth;
                    auto kernelVal = kernelData[c * kernelWidth + d];
                    auto imageVal = imageData[y * imageWidth + x];
                    // Fix: weight every channel unconditionally.  The
                    // original guarded only the red accumulation with
                    // `if (kernelVal > 0)` (missing braces), so negative
                    // kernel weights were dropped from red but applied to
                    // green and blue, skewing the output color.
                    red += kernelVal * imageVal.red;
                    green += kernelVal * imageVal.green;
                    blue += kernelVal * imageVal.blue;
                }
            }
            // Clamp to [0, 255] before narrowing to 8-bit channels.
            red = red > 0 ? (red > 255 ? 255 : red) : 0;
            green = green > 0 ? (green > 255 ? 255 : green) : 0;
            blue = blue > 0 ? (blue > 255 ? 255 : blue) : 0;
            result[row * imageWidth + col].red = static_cast<uint8_t>(red);
            result[row * imageWidth + col].green = static_cast<uint8_t>(green);
            result[row * imageWidth + col].blue = static_cast<uint8_t>(blue);
        }
    }
}
/* Allocates `size` bytes of device memory and copies `data` into it.
 * Returns the device pointer, or nullptr on any failure (the error is
 * logged and nothing is left allocated; hipFree(nullptr) is a no-op). */
void* CopyToGRAM(uint32_t size, void* data) noexcept
{
void* gpuData = nullptr;
hipError_t error;
// Allocate GPU memory
error = hipMalloc(&gpuData, size);
if (error != hipSuccess)
{
std::cerr << "Error: Unable to allocate GRAM of size: " << size << std::endl;
hipFree(gpuData);
return nullptr;
}
hipDeviceSynchronize();
// Copy Data to GPU memory (hipMemcpy host->device blocks; the extra
// syncs around it are belt-and-braces)
error = hipMemcpy(gpuData, data, size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
std::cerr << "Error: Unable to copy data to GRAM of size: " << size << std::endl;
hipFree(gpuData);
return nullptr;
}
hipDeviceSynchronize();
return gpuData;
}
void* CopyToGRAM(const Image& data) noexcept
{
auto size = data.GetWidth() * data.GetHeight();
std::unique_ptr<RGBPixel[]> imageData = std::unique_ptr<RGBPixel[]>(new RGBPixel[size]);
// Prepare data
uint64_t location = 0;
for (uint32_t i = 0; i < data.GetHeight(); i++)
{
for (uint32_t j = 0; j < data.GetWidth(); j++)
{
auto pixel = data.GetPixel(j, i).ToRGB();
imageData[location].red = pixel.red;
imageData[location].green = pixel.green;
imageData[location].blue = pixel.blue;
location++;
}
}
// Copy to GRAM
return CopyToGRAM(static_cast<uint32_t>(size * 3), imageData.get());
}
void* CopyToGRAM(const Kernel& data) noexcept
{
auto size = data.GetWidth() * data.GetHeight();
std::unique_ptr<float[]> kernelData = std::unique_ptr<float[]>(new float[size]);
uint32_t location = 0;
for (uint16_t i = 0; i < data.GetHeight(); i++)
{
for (uint16_t j = 0; j < data.GetWidth(); j++)
{
kernelData[location++] = data.Get(j, i);
}
}
// Copy to GRAM
return CopyToGRAM(static_cast<uint32_t>(size * sizeof(float)), kernelData.get());
}
std::shared_ptr<Image> Filter::ApplyFilter()
{
bool error = false;
std::shared_ptr<Image> result = std::shared_ptr<Image>(new Image(this->image->GetWidth(), this->image->GetHeight()));
// Copy data to GPU memory
void* cudaKernel = CopyToGRAM(this->kernel);
void* cudaImage = CopyToGRAM(*this->image.get());
void* cudaResult = nullptr;
hipMalloc(&cudaResult, this->image->GetHeight() * this->image->GetWidth() * 3);
hipDeviceSynchronize();
if ((cudaKernel != nullptr) && (cudaImage != nullptr) && (cudaResult!=nullptr))
{
hipLaunchKernelGGL(( ApplyFilterCuda), dim3(CUDA_BLOCK_COUNT), dim3(CUDA_THREAD_COUNT) , 0, 0,
cudaKernel, this->kernel.GetWidth(), this->kernel.GetHeight(),
cudaImage, this->image->GetWidth(), this->image->GetHeight(),
cudaResult);
hipDeviceSynchronize();
// Copy result image back from gram
auto imageSize = this->image->GetWidth() * this->image->GetHeight();
std::unique_ptr<RGBPixel[]> imageData =
std::unique_ptr<RGBPixel[]>(new RGBPixel[imageSize]);
hipMemcpy(imageData.get(), cudaResult, imageSize * 3, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// Postprocess data
uint64_t loc = 0;
for (uint32_t i = 0; i < this->image->GetHeight(); i++)
{
for (uint32_t j = 0; j < this->image->GetWidth(); j++)
{
// Set Pixel
Pixel pixel = { imageData[loc++] };
result->SetPixel(j, i, pixel);
}
}
}
else
{
error = true;
}
// Cleanup
if (cudaKernel != nullptr)
{
hipFree(cudaKernel);
}
if (cudaImage != nullptr)
{
hipFree(cudaImage);
}
if (cudaResult != nullptr)
{
hipFree(cudaResult);
}
if (error)
{
throw RuntimeException("Error: Unable to allocate data on GPU!");
}
return result;
}
#endif /* USE_ROCM */ | 39f08d1f98a8324ebc21562c90f06b1fdbf602fe.cu | #include "Filter.cuh"
#ifdef USE_CUDA
#include "Filter.hpp"
#include "Image.hpp"
#include "Pixel.hpp"
#include "Exception.hpp"
#include <iostream>
#define CUDA_BLOCK_COUNT 128
#define CUDA_THREAD_COUNT 128
// Convolves `image` (imageWidth x imageHeight RGB pixels, row-major) with
// `kernel` (kernelWidth x kernelHeight floats, row-major), clamps each
// channel to [0, 255], and writes into `res`.  Out-of-range taps wrap
// around (toroidal boundary).  Blocks stride over rows and threads over
// columns, so the fixed <<<CUDA_BLOCK_COUNT, CUDA_THREAD_COUNT>>> launch
// covers any image size.
__global__
void ApplyFilterCuda(void* kernel, uint16_t kernelWidth, uint16_t kernelHeight,
                     void* image, uint32_t imageWidth, uint32_t imageHeight,
                     void* res)
{
    auto imageData  = static_cast<RGBPixel*>(image);
    auto result     = static_cast<RGBPixel*>(res);
    auto kernelData = static_cast<float*>(kernel);

    for (uint32_t row = blockIdx.x; row < imageHeight; row += CUDA_BLOCK_COUNT)       // y
    {
        for (uint32_t col = threadIdx.x; col < imageWidth; col += CUDA_THREAD_COUNT)  // x
        {
            float red = 0;
            float green = 0;
            float blue = 0;
            for (int c = 0; c < kernelHeight; c++) // kernel y
            {
                for (int d = 0; d < kernelWidth; d++) // kernel x
                {
                    // Fix: wrap with the IMAGE period.  The original added
                    // kernelHeight/kernelWidth before the modulo, which
                    // shifted every tap by the kernel size and translated
                    // the whole output image.
                    int dy = c - kernelHeight / 2;
                    uint32_t y = (imageHeight + row + dy) % imageHeight;
                    int dx = d - kernelWidth / 2;
                    uint32_t x = (imageWidth + col + dx) % imageWidth;
                    auto kernelVal = kernelData[c * kernelWidth + d];
                    auto imageVal = imageData[y * imageWidth + x];
                    // Fix: weight every channel unconditionally.  The
                    // original guarded only the red accumulation with
                    // `if (kernelVal > 0)` (missing braces), so negative
                    // kernel weights were dropped from red but applied to
                    // green and blue, skewing the output color.
                    red += kernelVal * imageVal.red;
                    green += kernelVal * imageVal.green;
                    blue += kernelVal * imageVal.blue;
                }
            }
            // Clamp to [0, 255] before narrowing to 8-bit channels.
            red = red > 0 ? (red > 255 ? 255 : red) : 0;
            green = green > 0 ? (green > 255 ? 255 : green) : 0;
            blue = blue > 0 ? (blue > 255 ? 255 : blue) : 0;
            result[row * imageWidth + col].red = static_cast<uint8_t>(red);
            result[row * imageWidth + col].green = static_cast<uint8_t>(green);
            result[row * imageWidth + col].blue = static_cast<uint8_t>(blue);
        }
    }
}
void* CopyToGRAM(uint32_t size, void* data) noexcept
{
void* gpuData = nullptr;
cudaError_t error;
// Alocate GPU memory
error = cudaMalloc(&gpuData, size);
if (error != cudaSuccess)
{
std::cerr << "Error: Unable to allocate GRAM of size: " << size << std::endl;
cudaFree(gpuData);
return nullptr;
}
cudaDeviceSynchronize();
// Copy Data to GPU memory
error = cudaMemcpy(gpuData, data, size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
std::cerr << "Error: Unable to copy data to GRAM of size: " << size << std::endl;
cudaFree(gpuData);
return nullptr;
}
cudaDeviceSynchronize();
return gpuData;
}
void* CopyToGRAM(const Image& data) noexcept
{
auto size = data.GetWidth() * data.GetHeight();
std::unique_ptr<RGBPixel[]> imageData = std::unique_ptr<RGBPixel[]>(new RGBPixel[size]);
// Prepare data
uint64_t location = 0;
for (uint32_t i = 0; i < data.GetHeight(); i++)
{
for (uint32_t j = 0; j < data.GetWidth(); j++)
{
auto pixel = data.GetPixel(j, i).ToRGB();
imageData[location].red = pixel.red;
imageData[location].green = pixel.green;
imageData[location].blue = pixel.blue;
location++;
}
}
// Copy to GRAM
return CopyToGRAM(static_cast<uint32_t>(size * 3), imageData.get());
}
void* CopyToGRAM(const Kernel& data) noexcept
{
auto size = data.GetWidth() * data.GetHeight();
std::unique_ptr<float[]> kernelData = std::unique_ptr<float[]>(new float[size]);
uint32_t location = 0;
for (uint16_t i = 0; i < data.GetHeight(); i++)
{
for (uint16_t j = 0; j < data.GetWidth(); j++)
{
kernelData[location++] = data.Get(j, i);
}
}
// Copy to GRAM
return CopyToGRAM(static_cast<uint32_t>(size * sizeof(float)), kernelData.get());
}
// Runs the convolution filter on the GPU and returns the filtered image.
//
// Uploads the kernel and the source image, launches ApplyFilterCuda, copies
// the raw RGB result back, and repacks it into a new Image.  Throws
// RuntimeException if any of the three device allocations failed.
// NOTE(review): kernel-launch and cudaMemcpy errors are not checked, so a
// failed launch silently yields an uninitialized result image.
std::shared_ptr<Image> Filter::ApplyFilter()
{
bool error = false;
std::shared_ptr<Image> result = std::shared_ptr<Image>(new Image(this->image->GetWidth(), this->image->GetHeight()));
// Copy data to GPU memory
void* cudaKernel = CopyToGRAM(this->kernel);
void* cudaImage = CopyToGRAM(*this->image.get());
void* cudaResult = nullptr;
// 3 bytes per pixel -- assumes the device result is tightly packed RGB
// (matches RGBPixel's red/green/blue byte layout; TODO confirm no padding).
cudaMalloc(&cudaResult, this->image->GetHeight() * this->image->GetWidth() * 3);
cudaDeviceSynchronize();
if ((cudaKernel != nullptr) && (cudaImage != nullptr) && (cudaResult!=nullptr))
{
ApplyFilterCuda<<<CUDA_BLOCK_COUNT, CUDA_THREAD_COUNT >>>
(cudaKernel, this->kernel.GetWidth(), this->kernel.GetHeight(),
cudaImage, this->image->GetWidth(), this->image->GetHeight(),
cudaResult);
cudaDeviceSynchronize();
// Copy result image back from gram
auto imageSize = this->image->GetWidth() * this->image->GetHeight();
std::unique_ptr<RGBPixel[]> imageData =
std::unique_ptr<RGBPixel[]>(new RGBPixel[imageSize]);
cudaMemcpy(imageData.get(), cudaResult, imageSize * 3, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Postprocess data
// Repack the flat row-major RGB buffer into the result Image.
uint64_t loc = 0;
for (uint32_t i = 0; i < this->image->GetHeight(); i++)
{
for (uint32_t j = 0; j < this->image->GetWidth(); j++)
{
// Set Pixel
Pixel pixel = { imageData[loc++] };
result->SetPixel(j, i, pixel);
}
}
}
else
{
error = true;
}
// Cleanup
// Device buffers are freed on both the success and failure paths.
if (cudaKernel != nullptr)
{
cudaFree(cudaKernel);
}
if (cudaImage != nullptr)
{
cudaFree(cudaImage);
}
if (cudaResult != nullptr)
{
cudaFree(cudaResult);
}
if (error)
{
throw RuntimeException("Error: Unable to allocate data on GPU!");
}
return result;
}
#endif /* USE_CUDA */ |
29e497762cbb8f77e08bded0d0f06a784eb29f02.hip | // !!! This is a file automatically generated by hipify!!!
#include "../radial_user.h"
//#include <fc2d_cudaclaw5.h>
#include <fclaw_base.h> /* Needed for SC_MIN, SC_MAX */
//#include <cassert>
typedef double real;
static __device__ real claw_zero = 0.0;
static __device__ real rho = 1.0;
static __device__ real bulk = 4.0;
/* Pointwise normal Riemann solver for 2D acoustics (cudaclaw5 rpn2 form).
 *
 * Given the left/right states ql/qr, fills the flattened wave array
 * (layout wave[mwaves][meqn], hard-coded here for meqn == 3), the two
 * speeds s[0] = -c, s[1] = +c, and the left/right fluctuations amdq/apdq.
 * Presumably q = (pressure, x-velocity, y-velocity) -- the normal velocity
 * component is q[1] in x sweeps and q[2] in y sweeps.
 * Material constants bulk and rho are the file-scope __device__ globals.
 */
__device__ void radial_rpn2acoustics(int idir, int meqn, int mwaves,
                                     int maux, double ql[], double qr[],
                                     double auxl[], double auxr[],
                                     double wave[], double s[],
                                     double amdq[], double apdq[])
{
    real c = sqrt(bulk/rho);   /* sound speed */
    real z = c*rho;            /* acoustic impedance */

    if (idir == 0 || idir == 1)
    {
        /* index of the velocity component normal to the interface */
        int mu = idir + 1;

        s[0] = -c;
        s[1] = c;

        real a1 = ( ql[0] - qr[0] + z*(qr[mu] - ql[mu])) / (2*z);
        real a2 = ( qr[0] - ql[0] + z*(qr[mu] - ql[mu])) / (2*z);

        /* left-going wave: components 0..2 of the flattened array */
        wave[0] = -a1*z;
        wave[mu] = a1;
        wave[3 - mu] = claw_zero;

        /* right-going wave: components 3..5 */
        wave[3] = a2*z;
        wave[3 + mu] = a2;
        wave[3 + (3 - mu)] = claw_zero;
    }
    else
    {
        printf("Invalid value for idir in riemann solver\n");
    }

    /* Fluctuations: the left wave feeds amdq, the right wave apdq.
     * NOTE: for an invalid idir, s and wave are used uninitialized below,
     * matching the pre-existing behavior. */
    for (int mq = 0; mq < meqn; mq++)
    {
        amdq[mq] = s[0]*wave[mq];
        apdq[mq] = s[1]*wave[meqn+mq];
    }
}
/* Device-resident pointer to the Riemann solver so the host can fetch it. */
__device__ cudaclaw5_cuda_rpn2_t radial_rpn2 = radial_rpn2acoustics;

/* Copies the device function pointer `radial_rpn2` into *rpn2 on the host.
 * Terminates the program via exit(0) if the symbol copy fails. */
void radial_assign_rpn2(cudaclaw5_cuda_rpn2_t *rpn2)
{
    hipError_t status = hipMemcpyFromSymbol(rpn2, radial_rpn2, sizeof(cudaclaw5_cuda_rpn2_t));
    if (status == hipSuccess)
        return;

    fclaw_global_essentialf("ERROR (radial_rpn2adv): %s\n",hipGetErrorString(status));
    exit(0);
}
| 29e497762cbb8f77e08bded0d0f06a784eb29f02.cu | #include "../radial_user.h"
//#include <fc2d_cudaclaw5.h>
#include <fclaw_base.h> /* Needed for SC_MIN, SC_MAX */
//#include <cassert>
typedef double real;
static __device__ real claw_zero = 0.0;
static __device__ real rho = 1.0;
static __device__ real bulk = 4.0;
/* Pointwise normal Riemann solver for 2D acoustics (cudaclaw5 rpn2 form).
 * Fills the flattened wave array (wave[mwaves][meqn], hard-coded for
 * meqn == 3), speeds s[0] = -c / s[1] = +c, and fluctuations amdq/apdq.
 * Presumably q = (pressure, x-velocity, y-velocity) -- TODO confirm. */
__device__ void radial_rpn2acoustics(int idir, int meqn, int mwaves,
int maux, double ql[], double qr[],
double auxl[], double auxr[],
double wave[], double s[],
double amdq[], double apdq[])
{
/* wave[mwaves][meqn] */
/* idir in 0,1 : needed to get correct */
// TODO: this should be replaced with acoustics riemann solver
//FCLAW_ASSERT(mwaves == 2);
//FCLAW_ASSERT(meqn == 3);
// TODO: pass in bulk and rho
// Sound speed and impedance from the file-scope material constants.
real c = sqrt(bulk/rho);
real z = c*rho;
// if we use template, we don't have to have this branching here
if (0 == idir) // x-direction
{
s[0] = -c;
s[1] = c;
// Wave strengths from the jumps in q[0] and the normal velocity q[1].
real alpha1 = ( ql[0] - qr[0] + z*(qr[1] - ql[1])) / (2*z);
real alpha2 = ( qr[0] - ql[0] + z*(qr[1] - ql[1])) / (2*z);
// TODO: might want to replace double[] wave in argument list with
// double* wave
// Left-going wave occupies wave[0..2], right-going wave wave[3..5].
wave[0] = -alpha1*z;
wave[1] = alpha1;
wave[2] = claw_zero;
wave[3] = alpha2*z;
wave[4] = alpha2;
wave[5] = claw_zero;
}
else if (1 == idir) // y-direction
{
s[0] = -c;
s[1] = c;
// Same as above with the normal velocity now being q[2].
real alpha1 = ( ql[0] - qr[0] + z*(qr[2] - ql[2])) / (2*z);
real alpha2 = ( qr[0] - ql[0] + z*(qr[2] - ql[2])) / (2*z);
wave[0] = -alpha1*z;
wave[1] = claw_zero;
wave[2] = alpha1;
wave[3] = alpha2*z;
wave[4] = claw_zero;
wave[5] = alpha2;
}
else printf("Invalid value for idir in riemann solver\n");
// Fluctuations: left wave feeds amdq, right wave apdq.
// NOTE(review): for an invalid idir, s and wave are used uninitialized here.
for (int mq = 0; mq < meqn; mq++)
{
amdq[mq] = s[0]*wave[mq];
apdq[mq] = s[1]*wave[meqn+mq];
}
}
// Device-resident pointer to the solver so the host can fetch it by symbol.
__device__ cudaclaw5_cuda_rpn2_t radial_rpn2 = radial_rpn2acoustics;
// Copies the device function pointer `radial_rpn2` into *rpn2 on the host;
// terminates the program via exit(0) if the symbol copy fails.
void radial_assign_rpn2(cudaclaw5_cuda_rpn2_t *rpn2)
{
cudaError_t ce = cudaMemcpyFromSymbol(rpn2, radial_rpn2, sizeof(cudaclaw5_cuda_rpn2_t));
if(ce != cudaSuccess)
{
fclaw_global_essentialf("ERROR (radial_rpn2adv): %s\n",cudaGetErrorString(ce));
exit(0);
}
}
|
DistributionExponentialKernel.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {

// Dispatch target for `Tensor.exponential_` on CUDA/HIP tensors: resolves
// the request's generator (falling back to the default CUDA generator) and
// forwards to the shared templated implementation.
void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
  auto cuda_gen = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
  at::native::templates::cuda::exponential_kernel(iter, lambda, cuda_gen);
}

// Register this function as the CUDA implementation of exponential_stub.
REGISTER_DISPATCH(exponential_stub, &exponential_kernel);

}} // namespace at::native
| DistributionExponentialKernel.cu | #include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
#include <utility>
#include <functional>
#include <ATen/native/Distributions.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <cstdint>
#include <limits>
#include <utility>
#include <type_traits>
namespace at { namespace native {
// Dispatch target for `Tensor.exponential_` on CUDA tensors: resolves the
// request's generator (falling back to the default CUDA generator) and
// forwards to the shared templated implementation.
void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
}
// Register this function as the CUDA implementation of exponential_stub.
REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
}} // namespace at::native
|
4bd652d474cb575024e8b7b35027842bd5bd345b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GpuFunctions.h"
#include "BcMacros.h"
#include "GpuConstants.h"
/*
 * Recomputes the macroscopic fields (density rho_d, velocities u_d/v_d) of
 * each lattice node from its nine distribution values.
 *
 * One thread handles one node `ind`; the distribution for direction k lives
 * at f_d[ind + k*ms] (structure-of-arrays layout), weighted by the
 * file-scope direction constants cx_d/cy_d.  Only fluid nodes
 * (fluid_d[ind] == 1) are updated.  East-outlet nodes get their transverse
 * velocity forced to zero, and nodes on the drag/lift boundary id get
 * their drag/lift contributions refreshed.
 */
__global__ void gpu_update_new(int *fluid_d, FLOAT_TYPE* rho_d, FLOAT_TYPE* u_d, FLOAT_TYPE* v_d,
                               int *bcMask_d, FLOAT_TYPE* drag_d, FLOAT_TYPE* lift_d,
                               FLOAT_TYPE* coordX_d, FLOAT_TYPE* coordY_d, FLOAT_TYPE* f_d)
{
    int ind = threadIdx.x + blockIdx.x*blockDim.x;
    int ms = width_d*height_d;

    if (ind >= ms)
        return;                 // thread beyond the lattice
    if (fluid_d[ind] != 1)
        return;                 // not a fluid node: nothing to update

    // Accumulate the moments in registers and write each field once,
    // instead of read-modify-writing global memory per direction.
    FLOAT_TYPE r = 0;
    FLOAT_TYPE u = 0;
    FLOAT_TYPE v = 0;
    for (int dir = 0; dir < 9; dir++)
    {
        FLOAT_TYPE f = f_d[ind + dir*ms];
        r += f;
        u += f * cx_d[dir];
        v += f * cy_d[dir];
    }
    rho_d[ind] = r;
    u_d[ind] = u / r;
    v_d[ind] = v / r;

    if ((bcMask_d[ind] & BC_OUTL_E) == BC_OUTL_E) // for outlet on the right
    {
        ///@todo code: probably should handle outlet on other sides
        v_d[ind] = 0.0;
    }

    // DRAG/LIFT FORCE
    if (dlBoundaryId_d != 0 && (bcMask_d[ind] & BND_ID_ALL) == BOUND_ID(dlBoundaryId_d))
    {
        drag_d[ind] = 0.33333333*r*(20-coordX_d[ind])*0.2;
        lift_d[ind] = 0.33333333*r*(20-coordY_d[ind])*0.2;
    }
}
// Optimized macroscopic update: same result as gpu_update_new, but the
// nine-direction loop is unrolled into closed-form moment sums and each
// field is written to global memory exactly once.  The inlined signs
// presumably mirror the cx_d/cy_d direction constants -- verify if either
// changes.
__global__ void gpuUpdateMacro(int *fluid_d, FLOAT_TYPE* rho_d, FLOAT_TYPE* u_d, FLOAT_TYPE* v_d,
int *bcMask_d, FLOAT_TYPE* drag_d, FLOAT_TYPE* lift_d,
FLOAT_TYPE* coordX_d, FLOAT_TYPE* coordY_d, FLOAT_TYPE* f_d)
{
int ind = threadIdx.x + blockIdx.x*blockDim.x;
int ms = width_d*height_d;
FLOAT_TYPE r,u,v;
if (ind < ms)
{
if (fluid_d[ind]==1)
{
r = u = v = 0.0;
// Density: sum of all nine distribution values (SoA layout, stride ms).
r = f_d[ind ] + f_d[ind+ ms] + f_d[ind+2*ms] + f_d[ind+3*ms] + f_d[ind+4*ms] + f_d[ind+5*ms] +
f_d[ind+6*ms] + f_d[ind+7*ms] + f_d[ind+8*ms];
// Momentum components with the lattice direction signs inlined.
u = f_d[ind+ ms] - f_d[ind+3*ms] + f_d[ind+5*ms] - f_d[ind+6*ms] - f_d[ind+7*ms] + f_d[ind+8*ms];
v = f_d[ind+2*ms] - f_d[ind+4*ms] + f_d[ind+5*ms] + f_d[ind+6*ms] - f_d[ind+7*ms] - f_d[ind+8*ms];
rho_d[ind] = r;
u_d[ind] = u / r;
///@todo code: probably should handle outlet on other sides
v_d[ind] = ((bcMask_d[ind] & BC_OUTL_E) == BC_OUTL_E) ? 0.0 : v / r;
// DRAG/LIFT FORCE
if (dlBoundaryId_d != 0 && (bcMask_d[ind] & BND_ID_ALL) == BOUND_ID(dlBoundaryId_d))
{
// printf("draglift: %d\n",ind);
drag_d[ind] = 0.33333333*r*(20-coordX_d[ind])*0.2;
lift_d[ind] = 0.33333333*r*(20-coordY_d[ind])*0.2;
}
}
}
}
// Legacy macroscopic update keyed on the older per-component bcId_d /
// boundaryId_d arrays instead of the packed bcMask_d used by the two
// kernels above.  Logic otherwise matches gpu_update_new.
__global__ void gpu_update_macro(int* fluid_d, FLOAT_TYPE* rho_d, FLOAT_TYPE* u_d, FLOAT_TYPE* v_d,
int* bcId_d, int* boundaryId_d, FLOAT_TYPE* drag_d,
FLOAT_TYPE* lift_d, FLOAT_TYPE* coordX_d, FLOAT_TYPE* coordY_d, FLOAT_TYPE* f_d)
{
int k;
int bidx=blockIdx.x;
int tidx=threadIdx.x;
int ind = tidx + bidx*blockDim.x;
int ms = width_d*height_d;
if (ind<(width_d*height_d))
{
if (fluid_d[ind]==1)
{
// Update macroscopic
// Moments are accumulated directly in global memory (one thread owns
// each ind, so there is no race -- just extra global traffic).
rho_d[ind]=0;
u_d[ind]=0;
v_d[ind]=0;
for (k=0; k<9; k++)
{
rho_d[ind] = rho_d[ind] + f_d[ind+k*ms];
u_d[ind] = u_d[ind] + f_d[ind+k*ms]*cx_d[k];
v_d[ind]= v_d[ind] + f_d[ind+k*ms]*cy_d[k];
}
u_d[ind] = u_d[ind] / rho_d[ind];
v_d[ind] = v_d[ind] / rho_d[ind];
// bcId_d is indexed per component; value 3 presumably means "outlet".
if (bcId_d[ind+ms]==3) // for outlet on the right
{
///@todo code: probably should handle outlet on other sides
v_d[ind]=0.0;
}
// DRAG/LIFT FORCE
if (dlBoundaryId_d != 0 && boundaryId_d[ind]==dlBoundaryId_d)
{
drag_d[ind] = 0.33333333*rho_d[ind]*(20-coordX_d[ind])*0.2;
lift_d[ind] = 0.33333333*rho_d[ind]*(20-coordY_d[ind])*0.2;
}
}
}
} | 4bd652d474cb575024e8b7b35027842bd5bd345b.cu | #include "GpuFunctions.h"
#include "BcMacros.h"
#include "GpuConstants.h"
// Recomputes the macroscopic fields (density rho_d, velocities u_d/v_d) of
// each lattice node from its nine distribution values f_d[ind + k*ms]
// (structure-of-arrays layout, weighted by the cx_d/cy_d constants).
// Fluid nodes only; east-outlet nodes get v forced to zero; nodes on the
// drag/lift boundary id get their drag/lift contributions refreshed.
__global__ void gpu_update_new(int *fluid_d, FLOAT_TYPE* rho_d, FLOAT_TYPE* u_d, FLOAT_TYPE* v_d,
int *bcMask_d, FLOAT_TYPE* drag_d, FLOAT_TYPE* lift_d,
FLOAT_TYPE* coordX_d, FLOAT_TYPE* coordY_d, FLOAT_TYPE* f_d)
{
int k;
int bidx=blockIdx.x;
int tidx=threadIdx.x;
int ind = tidx + bidx*blockDim.x;
int ms = width_d*height_d;
if (ind<(width_d*height_d))
{
if (fluid_d[ind]==1)
{
// Update macroscopic
// Moments accumulate directly in global memory; one thread owns each
// ind, so there is no race -- just extra global traffic.
rho_d[ind]=0;
u_d[ind]=0;
v_d[ind]=0;
for (k=0; k<9; k++)
{
rho_d[ind] = rho_d[ind] + f_d[ind+k*ms];
u_d[ind] = u_d[ind] + f_d[ind+k*ms]*cx_d[k];
v_d[ind]= v_d[ind] + f_d[ind+k*ms]*cy_d[k];
}
u_d[ind] = u_d[ind] / rho_d[ind];
v_d[ind] = v_d[ind] / rho_d[ind];
if ((bcMask_d[ind] & BC_OUTL_E) == BC_OUTL_E) // for outlet on the right
{
///@todo code: probably should handle outlet on other sides
v_d[ind]=0.0;
}
// DRAG/LIFT FORCE
if (dlBoundaryId_d != 0 && (bcMask_d[ind] & BND_ID_ALL) == BOUND_ID(dlBoundaryId_d))
{
drag_d[ind] = 0.33333333*rho_d[ind]*(20-coordX_d[ind])*0.2;
lift_d[ind] = 0.33333333*rho_d[ind]*(20-coordY_d[ind])*0.2;
}
}
}
}
// Optimized macroscopic update: same result as gpu_update_new, but the
// nine-direction loop is unrolled into closed-form moment sums and each
// field is written to global memory exactly once.  The inlined signs
// presumably mirror the cx_d/cy_d direction constants -- verify if either
// changes.
__global__ void gpuUpdateMacro(int *fluid_d, FLOAT_TYPE* rho_d, FLOAT_TYPE* u_d, FLOAT_TYPE* v_d,
int *bcMask_d, FLOAT_TYPE* drag_d, FLOAT_TYPE* lift_d,
FLOAT_TYPE* coordX_d, FLOAT_TYPE* coordY_d, FLOAT_TYPE* f_d)
{
int ind = threadIdx.x + blockIdx.x*blockDim.x;
int ms = width_d*height_d;
FLOAT_TYPE r,u,v;
if (ind < ms)
{
if (fluid_d[ind]==1)
{
r = u = v = 0.0;
// Density: sum of all nine distribution values (SoA layout, stride ms).
r = f_d[ind ] + f_d[ind+ ms] + f_d[ind+2*ms] + f_d[ind+3*ms] + f_d[ind+4*ms] + f_d[ind+5*ms] +
f_d[ind+6*ms] + f_d[ind+7*ms] + f_d[ind+8*ms];
// Momentum components with the lattice direction signs inlined.
u = f_d[ind+ ms] - f_d[ind+3*ms] + f_d[ind+5*ms] - f_d[ind+6*ms] - f_d[ind+7*ms] + f_d[ind+8*ms];
v = f_d[ind+2*ms] - f_d[ind+4*ms] + f_d[ind+5*ms] + f_d[ind+6*ms] - f_d[ind+7*ms] - f_d[ind+8*ms];
rho_d[ind] = r;
u_d[ind] = u / r;
///@todo code: probably should handle outlet on other sides
v_d[ind] = ((bcMask_d[ind] & BC_OUTL_E) == BC_OUTL_E) ? 0.0 : v / r;
// DRAG/LIFT FORCE
if (dlBoundaryId_d != 0 && (bcMask_d[ind] & BND_ID_ALL) == BOUND_ID(dlBoundaryId_d))
{
// printf("draglift: %d\n",ind);
drag_d[ind] = 0.33333333*r*(20-coordX_d[ind])*0.2;
lift_d[ind] = 0.33333333*r*(20-coordY_d[ind])*0.2;
}
}
}
}
// Legacy macroscopic update keyed on the older per-component bcId_d /
// boundaryId_d arrays instead of the packed bcMask_d used above.
__global__ void gpu_update_macro(int* fluid_d, FLOAT_TYPE* rho_d, FLOAT_TYPE* u_d, FLOAT_TYPE* v_d,
int* bcId_d, int* boundaryId_d, FLOAT_TYPE* drag_d,
FLOAT_TYPE* lift_d, FLOAT_TYPE* coordX_d, FLOAT_TYPE* coordY_d, FLOAT_TYPE* f_d)
{
int k;
int bidx=blockIdx.x;
int tidx=threadIdx.x;
int ind = tidx + bidx*blockDim.x;
int ms = width_d*height_d;
if (ind<(width_d*height_d))
{
if (fluid_d[ind]==1)
{
// Update macroscopic
rho_d[ind]=0;
u_d[ind]=0;
v_d[ind]=0;
for (k=0; k<9; k++)
{
rho_d[ind] = rho_d[ind] + f_d[ind+k*ms];
u_d[ind] = u_d[ind] + f_d[ind+k*ms]*cx_d[k];
v_d[ind]= v_d[ind] + f_d[ind+k*ms]*cy_d[k];
}
u_d[ind] = u_d[ind] / rho_d[ind];
v_d[ind] = v_d[ind] / rho_d[ind];
// bcId_d is indexed per component; value 3 presumably means "outlet".
if (bcId_d[ind+ms]==3) // for outlet on the right
{
///@todo code: probably should handle outlet on other sides
v_d[ind]=0.0;
}
// DRAG/LIFT FORCE
if (dlBoundaryId_d != 0 && boundaryId_d[ind]==dlBoundaryId_d)
{
drag_d[ind] = 0.33333333*rho_d[ind]*(20-coordX_d[ind])*0.2;
lift_d[ind] = 0.33333333*rho_d[ind]*(20-coordY_d[ind])*0.2;
}
}
}
}
bb86ae4ebae794d8b366f0ef197b036094854bd8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "solveFull.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: times 1000 launches of solveFull for
// every (matrix size, launch configuration) pair and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// NOTE(review): argv[1] is read without checking argc (crashes when run
// with no arguments); hipMalloc sizes are XSIZE*YSIZE *bytes* although the
// buffers are treated as double arrays; the per-iteration buffers are
// never freed; and the timed loop is not synchronized before `end`, so it
// measures launch overhead plus whatever queued work completes.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
double *inv1 = NULL;
hipMalloc(&inv1, XSIZE*YSIZE);
double *inv2 = NULL;
hipMalloc(&inv2, XSIZE*YSIZE);
const int nx = 1;
const int nBatch = 1;
// Round the grid up so BLOCKX x BLOCKY tiles cover XSIZE x YSIZE.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up: one launch plus ten more before the timed section.
hipFree(0);hipLaunchKernelGGL((
solveFull), dim3(gridBlock),dim3(threadBlock), 0, 0, data,inv1,inv2,nx,nBatch);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
solveFull), dim3(gridBlock),dim3(threadBlock), 0, 0, data,inv1,inv2,nx,nBatch);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
solveFull), dim3(gridBlock),dim3(threadBlock), 0, 0, data,inv1,inv2,nx,nBatch);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bb86ae4ebae794d8b366f0ef197b036094854bd8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "solveFull.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: times 1000 launches of solveFull for
// every (matrix size, launch configuration) pair and prints
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// Usage: prog <matrix_count>   (number of rows of matrices_ to sweep).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Fix: the original dereferenced argv[1] unconditionally, which is
    // undefined behavior when no argument is supplied.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Fix: allocate XSIZE*YSIZE doubles.  The original passed
            // XSIZE*YSIZE *bytes*, under-allocating the double buffers 8x
            // and letting the kernel write out of bounds.
            double *data = NULL;
            cudaMalloc(&data, XSIZE * YSIZE * sizeof(double));
            double *inv1 = NULL;
            cudaMalloc(&inv1, XSIZE * YSIZE * sizeof(double));
            double *inv2 = NULL;
            cudaMalloc(&inv2, XSIZE * YSIZE * sizeof(double));
            const int nx = 1;
            const int nBatch = 1;
            // Round the grid up so BLOCKX x BLOCKY tiles cover XSIZE x YSIZE.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Warm-up: one synchronized launch plus ten more.
            cudaFree(0);
            solveFull<<<gridBlock,threadBlock>>>(data,inv1,inv2,nx,nBatch);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                solveFull<<<gridBlock,threadBlock>>>(data,inv1,inv2,nx,nBatch);
            }
            // Timed section (launch overhead; launches are asynchronous).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                solveFull<<<gridBlock,threadBlock>>>(data,inv1,inv2,nx,nBatch);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release the per-configuration buffers; the original
            // leaked them across all 20 x matrix_len iterations.
            cudaFree(data);
            cudaFree(inv1);
            cudaFree(inv2);
        }
    }
}
b88bfc4d59f8cc2116dd52d4c76de2ff9e4eb434.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <cassert>
#include "support.h"
#include "kernel.hip"
using namespace std;
// Plays up to 100 checkers moves from the default starting board using the
// GPU search in kernel.cu (getOptimalNextBoard), dumping each board to
// stdout, and reports the total runtime from both the Timer helper and
// clock().  Stops early once player 1 has no pieces left.
int main(int argc, char* argv[])
{
Timer timer;
CheckersBoard nextBoard;
CheckersBoard startBoard;//(0x00002DFF,0xFF930000,0);
// Print the initial position from player 1's perspective, then restore
// the side to move before searching.
if(!startBoard.getP1Up())
startBoard.swapPlayers();
startBoard.dump(cout);
if(startBoard.getP1Up())
startBoard.swapPlayers();
cout<<endl<<endl;
startTime(&timer);
clock_t startClock = clock();
for(int i = 0; i < 100 && startBoard.p1NumPieces()>0; i++)
{
nextBoard = getOptimalNextBoard(startBoard);
startBoard = nextBoard;
cout<<i<<" Player 1 up = "<<nextBoard.getP1Up();
// Normalize the displayed copy so piece counts/dump are player-1 relative.
if(!nextBoard.getP1Up())
nextBoard.swapPlayers();
cout<<" Player 1 pieces "<<nextBoard.p1NumPieces()<<" vs "<<nextBoard.p2NumPieces()<<endl;
nextBoard.dump(cout);
cout<<endl<<endl;
}
clock_t stopClock = clock();
stopTime(&timer); printf("Total Run time = %f s vs %f\n", elapsedTime(timer),(float)(stopClock-startClock)/CLOCKS_PER_SEC);
return 0;
}
| b88bfc4d59f8cc2116dd52d4c76de2ff9e4eb434.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include <cassert>
#include "support.h"
#include "kernel.cu"
using namespace std;
// Plays up to 100 checkers moves from the default starting board using the
// GPU search in kernel.cu (getOptimalNextBoard), dumping each board to
// stdout, and reports the total runtime from both the Timer helper and
// clock().  Stops early once player 1 has no pieces left.
int main(int argc, char* argv[])
{
Timer timer;
CheckersBoard nextBoard;
CheckersBoard startBoard;//(0x00002DFF,0xFF930000,0);
// Print the initial position from player 1's perspective, then restore
// the side to move before searching.
if(!startBoard.getP1Up())
startBoard.swapPlayers();
startBoard.dump(cout);
if(startBoard.getP1Up())
startBoard.swapPlayers();
cout<<endl<<endl;
startTime(&timer);
clock_t startClock = clock();
for(int i = 0; i < 100 && startBoard.p1NumPieces()>0; i++)
{
nextBoard = getOptimalNextBoard(startBoard);
startBoard = nextBoard;
cout<<i<<" Player 1 up = "<<nextBoard.getP1Up();
// Normalize the displayed copy so piece counts/dump are player-1 relative.
if(!nextBoard.getP1Up())
nextBoard.swapPlayers();
cout<<" Player 1 pieces "<<nextBoard.p1NumPieces()<<" vs "<<nextBoard.p2NumPieces()<<endl;
nextBoard.dump(cout);
cout<<endl<<endl;
}
clock_t stopClock = clock();
stopTime(&timer); printf("Total Run time = %f s vs %f\n", elapsedTime(timer),(float)(stopClock-startClock)/CLOCKS_PER_SEC);
return 0;
}
|
434f10cd0e9300c8369b9428e9132b8611116b30.hip | // !!! This is a file automatically generated by hipify!!!
/*
We use a term *tile* to identify the rectangular submatrices of the image.
Not to be confused with the blocks of threads.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define DSM_MAX_TILES_PER_BLOCK 500
#define DSM_MAX_TILES_PER_THREAD 500
// threads per block
#define TPB_1D 16
#define TPB (TPB_1D * TPB_1D)
// satellite pixels per thread
#define SAT_PPT_1D 2
#define SAT_PPT (SAT_PPT_1D * SAT_PPT_1D)
// satellite pixels per block
#define SAT_PPB_1D (SAT_PPT_1D * TPB_1D)
#define SAT_PPB (SAT_PPB_1D * SAT_PPB_1D)
// DSM pixels per thread
#define DSM_PPT_1D 1
#define DSM_PPT (DSM_PPT_1D * DSM_PPT_1D)
// DSM pixels per block
#define DSM_PPB_1D (DSM_PPT_1D * TPB_1D)
// #define DSM_PPB (DSM_PPB_1D * DSM_PPB_1D)
// this needs to be large negative number
#define DSM_IGNORE_VALUE -1E5
// extern const float DSM_IGNORE_VALUE;
#define EPS 1E-3
#define DTYPE float
// Axis-aligned bounding-box overlap test.  Boxes are stored as
// [min0, min1, max0, max1]; the boxes intersect unless one lies strictly
// beyond the other along some axis (touching edges count as intersecting).
__device__ bool d_rectanglesIntersect(DTYPE* bbox1, DTYPE* bbox2) {
    bool separated = (bbox2[0] > bbox1[2]) ||
                     (bbox2[1] > bbox1[3]) ||
                     (bbox1[0] > bbox2[2]) ||
                     (bbox1[1] > bbox2[3]);
    return !separated;
}
// Area of the triangle (x1,y1)-(x2,y2)-(x3,y3) via the shoelace formula
// (absolute value, so vertex winding does not matter).
__device__ DTYPE d_area(DTYPE x1, DTYPE y1,
                        DTYPE x2, DTYPE y2,
                        DTYPE x3, DTYPE y3) {
    DTYPE twiceSigned = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2);
    return abs(twiceSigned) / 2;
}
// Barycentric interpolation: value at (x, y) from the three triangle
// vertices (xi, yi) carrying values vi.  No clamping is performed, so a
// point outside the triangle extrapolates.
__device__ DTYPE d_interpolate_three(DTYPE x, DTYPE y,
                                     DTYPE x1, DTYPE y1, DTYPE v1,
                                     DTYPE x2, DTYPE y2, DTYPE v2,
                                     DTYPE x3, DTYPE y3, DTYPE v3) {
    DTYPE det = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3);
    DTYPE l1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / det;
    DTYPE l2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / det;
    DTYPE l3 = 1. - l1 - l2;   // weights sum to one by construction
    return (l1 * v1 + l2 * v2 + l3 * v3);
}
// Point-in-triangle test: (x, y) lies inside (or on the edge of) the
// triangle iff the three sub-triangle areas sum to the full area within
// the EPS tolerance.
__device__ bool d_inside_triangle(DTYPE x, DTYPE y,
                                  DTYPE x1, DTYPE y1,
                                  DTYPE x2, DTYPE y2,
                                  DTYPE x3, DTYPE y3) {
    DTYPE full = d_area(x1, y1, x2, y2, x3, y3);
    DTYPE sub12 = d_area(x, y, x1, y1, x2, y2);
    DTYPE sub31 = d_area(x, y, x3, y3, x1, y1);
    DTYPE sub23 = d_area(x, y, x2, y2, x3, y3);
    return (abs(sub12 + sub31 + sub23 - full) < EPS);
}
// Rasterization pass 1: one thread per triangular DSM face counts how many
// satellite pixels the face covers and writes it to dsmPixelCounts[iface]
// (the buffer must be zero-initialized by the caller).  This loop must stay
// identical to the one in kernelGetPoints so both passes agree.
__global__ void kernelComputePointsNum(DTYPE* pX, DTYPE* pY, DTYPE* pZ,
int* dsmPixelCounts,
int nfaces, int dsm_width,
int sat_width, int sat_height) {
int iface = blockIdx.x * blockDim.x + threadIdx.x;
if (iface < nfaces) {
// Each DSM cell contributes two faces; recover the cell row/column and
// the grid index of the cell's top-left node.
int faces_per_row = 2 * (dsm_width - 1);
int irow = iface / faces_per_row;
int icol = (iface % faces_per_row) / 2;
int idx = irow * dsm_width + icol;
int idx1, idx2, idx3;
if (iface % 2 == 0) {
// **
// *
idx1 = idx;
idx2 = idx + 1;
idx3 = idx + dsm_width;
} else {
// *
// **
idx1 = idx + 1;
idx2 = idx + dsm_width;
idx3 = idx + dsm_width + 1;
}
// Skip faces that touch a no-data node (elevation at/below the sentinel).
if (pZ[idx1] < DSM_IGNORE_VALUE + 1 ||
pZ[idx2] < DSM_IGNORE_VALUE + 1 ||
pZ[idx3] < DSM_IGNORE_VALUE + 1) { return; }
float x1, y1, x2, y2, x3, y3;
x1 = pX[idx1];
y1 = pY[idx1];
x2 = pX[idx2];
y2 = pY[idx2];
x3 = pX[idx3];
y3 = pY[idx3];
// Integer pixel bounding box of the face, clipped to the satellite image.
int ymin = static_cast<int>( ceilf(fminf(fminf(y1, y2), y3)) );
int xmin = static_cast<int>( ceilf(fminf(fminf(x1, x2), x3)) );
int ymax = static_cast<int>( floorf(fmaxf(fmaxf(y1, y2), y3)) );
int xmax = static_cast<int>( floorf(fmaxf(fmaxf(x1, x2), x3)) );
ymin = fmaxf(0, ymin);
xmin = fmaxf(0, xmin);
ymax = fminf(sat_height - 1, ymax);
xmax = fminf(sat_width - 1, xmax);
//if ((xmax - xmin) * (ymax - ymin) > 100) {
// dsmPixelCounts[iface] = -1;
//} else {
{
// Count bounding-box pixels inside the triangle.  Coordinates are
// shifted by vertex 1 -- presumably for numerical robustness.
for (int x = xmin; x <= xmax; ++x) {
for (int y = ymin; y <= ymax; ++y) {
if (d_inside_triangle((float) x - x1, (float) y - y1,
0, 0, x2-x1, y2-y1, x3-x1, y3-y1)) {
// Only this thread touches dsmPixelCounts[iface]; no atomics needed.
dsmPixelCounts[iface] += 1;
}
}
}
}
}
}
// Rasterization pass 2: after the host has exclusive-scanned
// dsmPixelCounts into per-face start offsets, each thread re-rasterizes its
// face and emits one (faceIDs, pixelIDs) pair per covered satellite pixel,
// starting at its offset.  The geometry/coverage logic must stay identical
// to kernelComputePointsNum or the offsets will not line up.
__global__ void kernelGetPoints(DTYPE* pX, DTYPE* pY, DTYPE* pZ,
int* dsmPixelCounts,
int* faceIDs, int* pixelIDs,
int nfaces, int dsm_width,
int sat_width, int sat_height) {
int iface = blockIdx.x * blockDim.x + threadIdx.x;
if (iface < nfaces) {
// Start offset of this face's output slots (from the exclusive scan).
int curIdx = dsmPixelCounts[iface];
int faces_per_row = 2 * (dsm_width - 1);
int irow = iface / faces_per_row;
int icol = (iface % faces_per_row) / 2;
int idx = irow * dsm_width + icol;
int idx1, idx2, idx3;
if (iface % 2 == 0) {
// **
// *
idx1 = idx;
idx2 = idx + 1;
idx3 = idx + dsm_width;
} else {
// *
// **
idx1 = idx + 1;
idx2 = idx + dsm_width;
idx3 = idx + dsm_width + 1;
}
// Skip faces that touch a no-data node (same rule as pass 1).
if (pZ[idx1] < DSM_IGNORE_VALUE + 1 ||
pZ[idx2] < DSM_IGNORE_VALUE + 1 ||
pZ[idx3] < DSM_IGNORE_VALUE + 1) { return; }
float x1, y1, x2, y2, x3, y3;
x1 = pX[idx1];
y1 = pY[idx1];
x2 = pX[idx2];
y2 = pY[idx2];
x3 = pX[idx3];
y3 = pY[idx3];
// Integer pixel bounding box of the face, clipped to the satellite image.
int ymin = static_cast<int>( ceilf(fminf(fminf(y1, y2), y3)) );
int xmin = static_cast<int>( ceilf(fminf(fminf(x1, x2), x3)) );
int ymax = static_cast<int>( floorf(fmaxf(fmaxf(y1, y2), y3)) );
int xmax = static_cast<int>( floorf(fmaxf(fmaxf(x1, x2), x3)) );
ymin = fmaxf(0, ymin);
xmin = fmaxf(0, xmin);
ymax = fminf(sat_height - 1, ymax);
xmax = fminf(sat_width - 1, xmax);
//if ((xmax - xmin) * (ymax - ymin) > 100) {
// dsmPixelCounts[iface] = -1;
//} else {
{
for (int x = xmin; x <= xmax; ++x) {
for (int y = ymin; y <= ymax; ++y) {
if (d_inside_triangle((float) x - x1, (float) y - y1,
0, 0, x2-x1, y2-y1, x3-x1, y3-y1)) {
// Record the covering face and the flat pixel index.
faceIDs[curIdx] = iface;
pixelIDs[curIdx] = y * sat_width + x;
curIdx++;
}
}
}
}
}
}
// For each run of equal values in the *sorted* `ids` array, records the
// run's [start, end) element range into limits[id*2] / limits[id*2 + 1].
// `limits` must be zero-initialized so ids with no entries read as empty
// ranges (the host memsets it before launching).
__global__ void kernelFindLimits(int* ids, int* limits, int num) {
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos >= num) return;

    int id = ids[pos];
    // Run start: first element overall, or a value change from the left.
    if (pos == 0 || ids[pos - 1] != id) {
        limits[id * 2 + 0] = pos;
    }
    // Run end (exclusive): last element overall, or a change to the right.
    if (pos == num - 1 || ids[pos + 1] != id) {
        limits[id * 2 + 1] = pos + 1;
    }
}
// Final pass: one thread per satellite pixel walks the faces covering that
// pixel (via the pixelIDsLimits ranges over the sorted faceIDs array),
// interpolates each face's elevation at the pixel center, and keeps the
// maximum in pOut.  Each pixel is owned by exactly one thread, so the
// read-modify-write on pOut needs no atomics.
__global__ void kernelDraw(int* faceIDs, int* pixelIDsLimits,
float* pX, float* pY, float* pZ,
float* pOut,
int sat_npixels, int dsm_width, int sat_width) {
int ipixel = blockIdx.x * blockDim.x + threadIdx.x;
if (ipixel < sat_npixels) {
int faces_per_row = 2 * (dsm_width - 1);
// [start, end) range of (face, pixel) pairs belonging to this pixel.
for (int i = pixelIDsLimits[2 * ipixel + 0];
i < pixelIDsLimits[2 * ipixel + 1]; ++i) {
int iface = faceIDs[i];
// Recover the face's three DSM node indices (same scheme as the
// rasterization kernels above).
int irow = iface / faces_per_row;
int icol = (iface % faces_per_row) / 2;
int idx = irow * dsm_width + icol;
int idx1, idx2, idx3;
if (iface % 2 == 0) {
// **
// *
idx1 = idx;
idx2 = idx + 1;
idx3 = idx + dsm_width;
} else {
// *
// **
idx1 = idx + 1;
idx2 = idx + dsm_width;
idx3 = idx + dsm_width + 1;
}
float x1, y1, elev1, x2, y2, elev2, x3, y3, elev3;
x1 = pX[idx1];
y1 = pY[idx1];
elev1 = pZ[idx1];
x2 = pX[idx2];
y2 = pY[idx2];
elev2 = pZ[idx2];
x3 = pX[idx3];
y3 = pY[idx3];
elev3 = pZ[idx3];
// Pixel center in image coordinates (flat index -> x, y).
float x = static_cast<float>(ipixel % sat_width);
float y = static_cast<float>(ipixel / sat_width);
float elev = d_interpolate_three(x, y,
x1, y1, elev1,
x2, y2, elev2,
x3, y3, elev3);
// Z-buffer style max-compositing.
if (elev > pOut[ipixel]) {
pOut[ipixel] = elev;
}
}
}
}
// Renders a maximum-elevation view of a DSM surface into a satellite image.
//
// pX/pY give each DSM grid node's (sub-)pixel position in the satellite
// image, pZ its elevation.  The grid is triangulated (two faces per cell),
// every face is rasterized onto the pixels it covers, and each covered
// pixel keeps the maximum interpolated elevation.  pOut must be pre-filled
// by the caller with the background/minimum value and receives the result.
//
// Pipeline: count covered pixels per face -> exclusive scan into offsets ->
// emit (face, pixel) pairs -> sort by pixel -> find per-pixel ranges ->
// max-composite.  Defect fixed: the eight device allocations were never
// released, leaking GPU memory on every call.
void cudaRenderSatElevation(DTYPE * pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut,
        int dsm_width, int dsm_height, int sat_width, int sat_height) {
    int dsm_npixels = dsm_width * dsm_height;
    int sat_npixels = sat_width * sat_height;

    // Upload the DSM node data and the caller-initialized output buffer.
    DTYPE* d_pX;
    DTYPE* d_pY;
    DTYPE* d_pZ;
    DTYPE* d_pOut;
    hipMalloc((void **)&d_pX, sizeof(DTYPE) * dsm_npixels);
    hipMalloc((void **)&d_pY, sizeof(DTYPE) * dsm_npixels);
    hipMalloc((void **)&d_pZ, sizeof(DTYPE) * dsm_npixels);
    hipMalloc((void **)&d_pOut, sizeof(DTYPE) * sat_npixels);
    hipMemcpy(d_pX, pX, sizeof(DTYPE) * dsm_npixels, hipMemcpyHostToDevice);
    hipMemcpy(d_pY, pY, sizeof(DTYPE) * dsm_npixels, hipMemcpyHostToDevice);
    hipMemcpy(d_pZ, pZ, sizeof(DTYPE) * dsm_npixels, hipMemcpyHostToDevice);
    // output memory on host contains all min values
    hipMemcpy(d_pOut, pOut, sizeof(DTYPE) * sat_npixels, hipMemcpyHostToDevice);

    // Two triangular faces per DSM cell.
    int nfaces = 2 * (dsm_height - 1) * (dsm_width - 1);
    int nblocks = (nfaces + TPB - 1) / TPB;

    // Pass 1: per-face covered-pixel counts.
    int* dsmPixelCounts;
    hipMalloc((void **)&dsmPixelCounts, sizeof(int) * nfaces);
    hipMemset(dsmPixelCounts, 0, sizeof(int) * nfaces);
    hipLaunchKernelGGL(( kernelComputePointsNum), dim3(nblocks), dim3(TPB), 0, 0, d_pX, d_pY, d_pZ,
            dsmPixelCounts, nfaces,
            dsm_width, sat_width, sat_height);
    hipDeviceSynchronize();
    if ( hipSuccess != hipGetLastError() )
        printf( "Error in CUDA kernel attempting to compute number of points "
                "for each thread!\n" );

    // Keep the last count before the scan overwrites it, then turn the
    // counts into per-face start offsets.
    int numPixelsLast;
    hipMemcpy(&numPixelsLast, dsmPixelCounts + nfaces - 1, sizeof(int),
            hipMemcpyDeviceToHost);
    thrust::exclusive_scan(thrust::device, dsmPixelCounts,
            dsmPixelCounts + nfaces, dsmPixelCounts);
    // total pairs = last offset + last count
    int numPixelsTotal;
    hipMemcpy(&numPixelsTotal, dsmPixelCounts + nfaces - 1, sizeof(int),
            hipMemcpyDeviceToHost);
    numPixelsTotal += numPixelsLast;
    printf("================= %d\n", numPixelsTotal);

    // Pass 2: emit one (face, pixel) pair per covered pixel.
    int* faceIDs;
    int* pixelIDs;
    hipMalloc((void **)&faceIDs, sizeof(int) * numPixelsTotal);
    hipMalloc((void **)&pixelIDs, sizeof(int) * numPixelsTotal);
    hipLaunchKernelGGL(( kernelGetPoints), dim3(nblocks), dim3(TPB), 0, 0, d_pX, d_pY, d_pZ,
            dsmPixelCounts,
            faceIDs, pixelIDs,
            nfaces,
            dsm_width, sat_width, sat_height);
    hipDeviceSynchronize();
    if ( hipSuccess != hipGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "get points ids for each face!\n" );

    // Group the pairs by satellite pixel.
    thrust::sort_by_key(thrust::device, pixelIDs, pixelIDs + numPixelsTotal,
            faceIDs);
    hipDeviceSynchronize();
    if ( hipSuccess != hipGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "sort!\n" );

    // [start, end) pair range for every satellite pixel (zeroed => empty).
    int* pixelIDsLimits;
    hipMalloc((void **)&pixelIDsLimits, 2 * sizeof(int) * sat_npixels);
    hipMemset(pixelIDsLimits, 0, 2 * sizeof(int) * sat_npixels);
    nblocks = (numPixelsTotal + TPB - 1) / TPB;
    hipLaunchKernelGGL(( kernelFindLimits), dim3(nblocks), dim3(TPB), 0, 0, pixelIDs, pixelIDsLimits, numPixelsTotal);
    hipDeviceSynchronize();
    if ( hipSuccess != hipGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "find start and end positions for each pixel!\n" );

    // Final pass: keep the maximum interpolated elevation per pixel.
    nblocks = (sat_npixels + TPB - 1) / TPB;
    hipLaunchKernelGGL(( kernelDraw), dim3(nblocks), dim3(TPB), 0, 0, faceIDs, pixelIDsLimits, d_pX, d_pY, d_pZ,
            d_pOut,
            sat_npixels, dsm_width, sat_width);
    hipDeviceSynchronize();
    if ( hipSuccess != hipGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "draw satellite elevation!\n" );

    hipMemcpy(pOut, d_pOut, sizeof(DTYPE) * sat_npixels,
            hipMemcpyDeviceToHost);

    // Release all device buffers (previously leaked on every call).
    hipFree(d_pX);
    hipFree(d_pY);
    hipFree(d_pZ);
    hipFree(d_pOut);
    hipFree(dsmPixelCounts);
    hipFree(faceIDs);
    hipFree(pixelIDs);
    hipFree(pixelIDsLimits);
}
| 434f10cd0e9300c8369b9428e9132b8611116b30.cu | /*
We use a term *tile* to identify the rectangular submatrices of the image.
Not to be confused with the blocks of threads.
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define DSM_MAX_TILES_PER_BLOCK 500
#define DSM_MAX_TILES_PER_THREAD 500
// threads per block
#define TPB_1D 16
#define TPB (TPB_1D * TPB_1D)
// satellite pixels per thread
#define SAT_PPT_1D 2
#define SAT_PPT (SAT_PPT_1D * SAT_PPT_1D)
// satellite pixels per block
#define SAT_PPB_1D (SAT_PPT_1D * TPB_1D)
#define SAT_PPB (SAT_PPB_1D * SAT_PPB_1D)
// DSM pixels per thread
#define DSM_PPT_1D 1
#define DSM_PPT (DSM_PPT_1D * DSM_PPT_1D)
// DSM pixels per block
#define DSM_PPB_1D (DSM_PPT_1D * TPB_1D)
// #define DSM_PPB (DSM_PPB_1D * DSM_PPB_1D)
// this needs to be large negative number
#define DSM_IGNORE_VALUE -1E5
// extern const float DSM_IGNORE_VALUE;
#define EPS 1E-3
#define DTYPE float
// Overlap test for two axis-aligned bounding boxes, each stored as
// {min0, min1, max0, max1}.  Touching edges count as intersecting
// (the separation comparisons are strict).
__device__ bool d_rectanglesIntersect(DTYPE* bbox1, DTYPE* bbox2) {
    // The boxes are disjoint iff one lies strictly beyond the other
    // along either axis; otherwise they overlap.
    bool separated = bbox2[0] > bbox1[2] ||
                     bbox2[1] > bbox1[3] ||
                     bbox1[0] > bbox2[2] ||
                     bbox1[1] > bbox2[3];
    return !separated;
}
// Area of the triangle (x1,y1)-(x2,y2)-(x3,y3) via the shoelace formula;
// the absolute value makes the result independent of winding order.
__device__ DTYPE d_area(DTYPE x1, DTYPE y1,
                        DTYPE x2, DTYPE y2,
                        DTYPE x3, DTYPE y3) {
    DTYPE twice_signed_area = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2);
    return abs(twice_signed_area) / 2;
}
// Barycentric interpolation: returns the value at (x, y) interpolated from
// the three triangle vertices (x1,y1,v1), (x2,y2,v2), (x3,y3,v3).
// If (x, y) lies outside the triangle the weights go negative and the
// result is an extrapolation; callers are expected to test containment
// first (see d_inside_triangle).
__device__ DTYPE d_interpolate_three(DTYPE x, DTYPE y,
DTYPE x1, DTYPE y1, DTYPE v1,
DTYPE x2, DTYPE y2, DTYPE v2,
DTYPE x3, DTYPE y3, DTYPE v3) {
// denom is twice the signed triangle area; zero for a degenerate
// (collinear) triangle, which would divide by zero here.
DTYPE denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3);
DTYPE w1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom;
DTYPE w2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom;
// Weights sum to one by construction.
DTYPE w3 = 1. - w1 - w2;
return (w1 * v1 + w2 * v2 + w3 * v3);
}
// Point-in-triangle test: (x, y) is inside (or on the boundary of) the
// triangle iff the three sub-triangle areas formed with the query point
// sum to the full triangle's area, within an EPS tolerance.
__device__ bool d_inside_triangle(DTYPE x, DTYPE y,
DTYPE x1, DTYPE y1,
DTYPE x2, DTYPE y2,
DTYPE x3, DTYPE y3) {
// Full triangle area, then the three areas that partition it when the
// point lies inside: (p,1,2), (p,3,1) and (p,2,3).
DTYPE A = d_area(x1, y1, x2, y2, x3, y3);
DTYPE A1 = d_area(x, y, x1, y1, x2, y2);
DTYPE A2 = d_area(x, y, x3, y3, x1, y1);
DTYPE A3 = d_area(x, y, x2, y2, x3, y3);
// EPS absorbs float round-off; for points outside, the sum exceeds A.
return (abs(A1 + A2 + A3 - A) < EPS);
}
// Pass 1 of the rasterizer: one thread per DSM triangle face.  Counts how
// many satellite-image pixel centers fall inside the face's projected
// triangle and accumulates the count into dsmPixelCounts[iface].
// The DSM grid of dsm_width columns yields 2*(dsm_width-1) faces per row:
// each grid cell is split into an upper-left (even iface) and lower-right
// (odd iface) triangle.  pX/pY give each DSM vertex's projected position in
// satellite-pixel coordinates; pZ holds elevations, with values at or below
// DSM_IGNORE_VALUE marking vertices to skip.
// dsmPixelCounts must be zero-initialized by the caller (counts are +='d).
__global__ void kernelComputePointsNum(DTYPE* pX, DTYPE* pY, DTYPE* pZ,
int* dsmPixelCounts,
int nfaces, int dsm_width,
int sat_width, int sat_height) {
int iface = blockIdx.x * blockDim.x + threadIdx.x;
if (iface < nfaces) {
// Map the flat face index to its DSM cell and the cell's top-left vertex.
int faces_per_row = 2 * (dsm_width - 1);
int irow = iface / faces_per_row;
int icol = (iface % faces_per_row) / 2;
int idx = irow * dsm_width + icol;
int idx1, idx2, idx3;
if (iface % 2 == 0) {
// **
// *
idx1 = idx;
idx2 = idx + 1;
idx3 = idx + dsm_width;
} else {
// *
// **
idx1 = idx + 1;
idx2 = idx + dsm_width;
idx3 = idx + dsm_width + 1;
}
// Skip faces touching any "no data" vertex (ignore value is a large
// negative number, so "< DSM_IGNORE_VALUE + 1" detects it).
if (pZ[idx1] < DSM_IGNORE_VALUE + 1 ||
pZ[idx2] < DSM_IGNORE_VALUE + 1 ||
pZ[idx3] < DSM_IGNORE_VALUE + 1) { return; }
float x1, y1, x2, y2, x3, y3;
x1 = pX[idx1];
y1 = pY[idx1];
x2 = pX[idx2];
y2 = pY[idx2];
x3 = pX[idx3];
y3 = pY[idx3];
// Integer bounding box of the projected triangle (pixel centers only),
// clipped to the satellite image extent.
int ymin = static_cast<int>( ceilf(fminf(fminf(y1, y2), y3)) );
int xmin = static_cast<int>( ceilf(fminf(fminf(x1, x2), x3)) );
int ymax = static_cast<int>( floorf(fmaxf(fmaxf(y1, y2), y3)) );
int xmax = static_cast<int>( floorf(fmaxf(fmaxf(x1, x2), x3)) );
ymin = fmaxf(0, ymin);
xmin = fmaxf(0, xmin);
ymax = fminf(sat_height - 1, ymax);
xmax = fminf(sat_width - 1, xmax);
//if ((xmax - xmin) * (ymax - ymin) > 100) {
// dsmPixelCounts[iface] = -1;
//} else {
{
// Test every candidate pixel center; coordinates are shifted by
// (x1, y1) so the containment test runs with one vertex at the origin.
// This loop must stay in lockstep with kernelGetPoints, which replays
// it to emit (face, pixel) pairs at the counted offsets.
for (int x = xmin; x <= xmax; ++x) {
for (int y = ymin; y <= ymax; ++y) {
if (d_inside_triangle((float) x - x1, (float) y - y1,
0, 0, x2-x1, y2-y1, x3-x1, y3-y1)) {
dsmPixelCounts[iface] += 1;
}
}
}
}
}
}
// Pass 2 of the rasterizer: replays exactly the same traversal as
// kernelComputePointsNum, but instead of counting it writes one
// (faceIDs[k], pixelIDs[k]) pair per covered satellite pixel.
// By the time this runs, dsmPixelCounts has been exclusive-scanned by the
// host (see cudaRenderSatElevation), so dsmPixelCounts[iface] is the start
// offset of face iface's output range in faceIDs/pixelIDs.
__global__ void kernelGetPoints(DTYPE* pX, DTYPE* pY, DTYPE* pZ,
int* dsmPixelCounts,
int* faceIDs, int* pixelIDs,
int nfaces, int dsm_width,
int sat_width, int sat_height) {
int iface = blockIdx.x * blockDim.x + threadIdx.x;
if (iface < nfaces) {
// Per-face write cursor, starting at this face's scanned offset.
int curIdx = dsmPixelCounts[iface];
int faces_per_row = 2 * (dsm_width - 1);
int irow = iface / faces_per_row;
int icol = (iface % faces_per_row) / 2;
int idx = irow * dsm_width + icol;
int idx1, idx2, idx3;
if (iface % 2 == 0) {
// **
// *
idx1 = idx;
idx2 = idx + 1;
idx3 = idx + dsm_width;
} else {
// *
// **
idx1 = idx + 1;
idx2 = idx + dsm_width;
idx3 = idx + dsm_width + 1;
}
// Same "no data" rejection as the counting pass, keeping both passes
// in agreement about which faces emit output.
if (pZ[idx1] < DSM_IGNORE_VALUE + 1 ||
pZ[idx2] < DSM_IGNORE_VALUE + 1 ||
pZ[idx3] < DSM_IGNORE_VALUE + 1) { return; }
float x1, y1, x2, y2, x3, y3;
x1 = pX[idx1];
y1 = pY[idx1];
x2 = pX[idx2];
y2 = pY[idx2];
x3 = pX[idx3];
y3 = pY[idx3];
// Clipped integer bounding box, identical to the counting pass.
int ymin = static_cast<int>( ceilf(fminf(fminf(y1, y2), y3)) );
int xmin = static_cast<int>( ceilf(fminf(fminf(x1, x2), x3)) );
int ymax = static_cast<int>( floorf(fmaxf(fmaxf(y1, y2), y3)) );
int xmax = static_cast<int>( floorf(fmaxf(fmaxf(x1, x2), x3)) );
ymin = fmaxf(0, ymin);
xmin = fmaxf(0, xmin);
ymax = fminf(sat_height - 1, ymax);
xmax = fminf(sat_width - 1, xmax);
//if ((xmax - xmin) * (ymax - ymin) > 100) {
// dsmPixelCounts[iface] = -1;
//} else {
{
for (int x = xmin; x <= xmax; ++x) {
for (int y = ymin; y <= ymax; ++y) {
if (d_inside_triangle((float) x - x1, (float) y - y1,
0, 0, x2-x1, y2-y1, x3-x1, y3-y1)) {
// Record which face covers which flat satellite pixel index.
faceIDs[curIdx] = iface;
pixelIDs[curIdx] = y * sat_width + x;
curIdx++;
}
}
}
}
}
}
// For each run of equal values in the (sorted) array `ids`, records the
// run's half-open range [start, end) into limits[value*2] and
// limits[value*2 + 1].  Values absent from `ids` keep whatever the caller
// initialized their limits entries to.
__global__ void kernelFindLimits(int* ids, int* limits, int num) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= num) {
        return;
    }
    int value = ids[tid];
    // A run boundary exists wherever the neighbor differs (or at the ends).
    bool run_starts_here = (tid == 0) || (ids[tid - 1] != value);
    bool run_ends_here = (tid == num - 1) || (ids[tid + 1] != value);
    if (run_starts_here) {
        limits[value * 2 + 0] = tid;
    }
    if (run_ends_here) {
        limits[value * 2 + 1] = tid + 1;
    }
}
// Final pass: one thread per satellite pixel.  Walks the list of faces
// covering this pixel (the [start, end) range produced by kernelFindLimits
// over the sorted (pixelID, faceID) pairs), interpolates each face's
// elevation at the pixel center, and keeps the maximum — i.e. a max-depth
// buffer, so the highest surface wins.  pOut must be pre-filled by the
// caller with a floor value (the host copies in a buffer of minimum values).
__global__ void kernelDraw(int* faceIDs, int* pixelIDsLimits,
float* pX, float* pY, float* pZ,
float* pOut,
int sat_npixels, int dsm_width, int sat_width) {
int ipixel = blockIdx.x * blockDim.x + threadIdx.x;
if (ipixel < sat_npixels) {
int faces_per_row = 2 * (dsm_width - 1);
// Iterate the contiguous block of faces recorded for this pixel.
for (int i = pixelIDsLimits[2 * ipixel + 0];
i < pixelIDsLimits[2 * ipixel + 1]; ++i) {
int iface = faceIDs[i];
// Recover the face's three DSM vertex indices (same cell/triangle
// layout as kernelComputePointsNum / kernelGetPoints).
int irow = iface / faces_per_row;
int icol = (iface % faces_per_row) / 2;
int idx = irow * dsm_width + icol;
int idx1, idx2, idx3;
if (iface % 2 == 0) {
// **
// *
idx1 = idx;
idx2 = idx + 1;
idx3 = idx + dsm_width;
} else {
// *
// **
idx1 = idx + 1;
idx2 = idx + dsm_width;
idx3 = idx + dsm_width + 1;
}
float x1, y1, elev1, x2, y2, elev2, x3, y3, elev3;
x1 = pX[idx1];
y1 = pY[idx1];
elev1 = pZ[idx1];
x2 = pX[idx2];
y2 = pY[idx2];
elev2 = pZ[idx2];
x3 = pX[idx3];
y3 = pY[idx3];
elev3 = pZ[idx3];
// Pixel center in satellite-image coordinates.
float x = static_cast<float>(ipixel % sat_width);
float y = static_cast<float>(ipixel / sat_width);
// Barycentric elevation at the pixel center.
float elev = d_interpolate_three(x, y,
x1, y1, elev1,
x2, y2, elev2,
x3, y3, elev3);
// Keep only the highest covering surface.
if (elev > pOut[ipixel]) {
pOut[ipixel] = elev;
}
}
}
}
// Renders a satellite-view elevation image from a DSM triangulation.
//
// Pipeline:
//   1. kernelComputePointsNum: count satellite pixels covered per face.
//   2. exclusive scan of the counts -> per-face output offsets.
//   3. kernelGetPoints: emit one (faceID, pixelID) pair per covered pixel.
//   4. sort the pairs by pixelID, then kernelFindLimits records each
//      pixel's [start, end) range in the sorted list.
//   5. kernelDraw: per pixel, max-interpolated elevation over its faces.
//
// pX/pY/pZ: host DSM vertex positions (projected, in satellite pixel
// coordinates) and elevations, dsm_width x dsm_height each.
// pOut: host output buffer of sat_width x sat_height elevations; must be
// pre-filled with minimum values (it seeds the max depth buffer).
//
// Fixed: the original leaked every device allocation (d_pX, d_pY, d_pZ,
// d_pOut, dsmPixelCounts, faceIDs, pixelIDs, pixelIDsLimits); they are now
// released before returning.
void cudaRenderSatElevation(DTYPE * pX, DTYPE* pY, DTYPE* pZ, DTYPE* pOut,
        int dsm_width, int dsm_height, int sat_width, int sat_height) {
    int dsm_npixels = dsm_width * dsm_height;
    int sat_npixels = sat_width * sat_height;
    DTYPE* d_pX;
    DTYPE* d_pY;
    DTYPE* d_pZ;
    DTYPE* d_pOut;
    cudaMalloc((void **)&d_pX, sizeof(DTYPE) * dsm_npixels);
    cudaMalloc((void **)&d_pY, sizeof(DTYPE) * dsm_npixels);
    cudaMalloc((void **)&d_pZ, sizeof(DTYPE) * dsm_npixels);
    cudaMalloc((void **)&d_pOut, sizeof(DTYPE) * sat_npixels);
    cudaMemcpy(d_pX, pX, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pY, pY, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pZ, pZ, sizeof(DTYPE) * dsm_npixels, cudaMemcpyHostToDevice);
    // output memory on host contains all min values
    cudaMemcpy(d_pOut, pOut, sizeof(DTYPE) * sat_npixels, cudaMemcpyHostToDevice);
    // Two triangles per DSM cell.
    int nfaces = 2 * (dsm_height - 1) * (dsm_width - 1);
    int nblocks = (nfaces + TPB - 1) / TPB;
    // Step 1: count pixels covered by each face (counts are accumulated,
    // so the buffer must start zeroed).
    int* dsmPixelCounts;
    cudaMalloc((void **)&dsmPixelCounts, sizeof(int) * nfaces);
    cudaMemset(dsmPixelCounts, 0, sizeof(int) * nfaces);
    kernelComputePointsNum<<<nblocks, TPB>>>(d_pX, d_pY, d_pZ,
                                             dsmPixelCounts, nfaces,
                                             dsm_width, sat_width, sat_height);
    cudaDeviceSynchronize();
    if ( cudaSuccess != cudaGetLastError() )
        printf( "Error in CUDA kernel attempting to compute number of points "
                "for each thread!\n" );
    // Keep the last face's count before the in-place exclusive scan
    // overwrites it; total = last offset + last count.
    int numPixelsLast;
    cudaMemcpy(&numPixelsLast, dsmPixelCounts + nfaces - 1, sizeof(int),
               cudaMemcpyDeviceToHost);
    // Step 2: exclusive scan to get a start index for each face.
    thrust::exclusive_scan(thrust::device, dsmPixelCounts,
                           dsmPixelCounts + nfaces, dsmPixelCounts);
    int numPixelsTotal;
    cudaMemcpy(&numPixelsTotal, dsmPixelCounts + nfaces - 1, sizeof(int),
               cudaMemcpyDeviceToHost);
    numPixelsTotal += numPixelsLast;
    printf("================= %d\n", numPixelsTotal);
    // Step 3: emit the (face, pixel) pairs at the scanned offsets.
    int* faceIDs;
    int* pixelIDs;
    cudaMalloc((void **)&faceIDs, sizeof(int) * numPixelsTotal);
    cudaMalloc((void **)&pixelIDs, sizeof(int) * numPixelsTotal);
    kernelGetPoints<<<nblocks, TPB>>>(d_pX, d_pY, d_pZ,
                                      dsmPixelCounts,
                                      faceIDs, pixelIDs,
                                      nfaces,
                                      dsm_width, sat_width, sat_height);
    cudaDeviceSynchronize();
    if ( cudaSuccess != cudaGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "get points ids for each face!\n" );
    // Step 4: group the pairs by satellite pixel.
    thrust::sort_by_key(thrust::device, pixelIDs, pixelIDs + numPixelsTotal,
                        faceIDs);
    cudaDeviceSynchronize();
    if ( cudaSuccess != cudaGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "sort!\n" );
    // Find the [start, end) range of pairs for each pixel; zeroed so that
    // uncovered pixels read an empty range.
    int* pixelIDsLimits;
    cudaMalloc((void **)&pixelIDsLimits, 2 * sizeof(int) * sat_npixels);
    cudaMemset(pixelIDsLimits, 0, 2 * sizeof(int) * sat_npixels);
    nblocks = (numPixelsTotal + TPB - 1) / TPB;
    kernelFindLimits<<<nblocks, TPB>>>(pixelIDs, pixelIDsLimits, numPixelsTotal);
    cudaDeviceSynchronize();
    if ( cudaSuccess != cudaGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "find start and end positions for each pixel!\n" );
    // Step 5: draw the max elevation per pixel.
    nblocks = (sat_npixels + TPB - 1) / TPB;
    kernelDraw<<<nblocks, TPB>>>(faceIDs, pixelIDsLimits, d_pX, d_pY, d_pZ,
                                 d_pOut,
                                 sat_npixels, dsm_width, sat_width);
    cudaDeviceSynchronize();
    if ( cudaSuccess != cudaGetLastError() )
        printf( "Error in CUDA kernel attempting to "
                "draw satellite elevation!\n" );
    cudaMemcpy(pOut, d_pOut, sizeof(DTYPE) * sat_npixels,
               cudaMemcpyDeviceToHost);
    // Release all device allocations (the original leaked every buffer).
    cudaFree(d_pX);
    cudaFree(d_pY);
    cudaFree(d_pZ);
    cudaFree(d_pOut);
    cudaFree(dsmPixelCounts);
    cudaFree(faceIDs);
    cudaFree(pixelIDs);
    cudaFree(pixelIDsLimits);
}
|
cf99c16de533125aebe16e98c3835767b47d7079.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_scales_dropblock_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps block shapes x matrix shapes and
// times 1000 launches of set_scales_dropblock_kernel per configuration.
// argv[1] selects how many entries of matrices_ to sweep.
//
// Fixes:
//  * hipMalloc was given XSIZE*YSIZE as a byte count for a float buffer the
//    kernel indexes as floats — the allocation is now XSIZE*YSIZE*sizeof(float).
//  * drop_blocks_scale was allocated on every inner iteration and never
//    freed; it is now released at the end of each configuration.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *drop_blocks_scale = NULL;
            // FIX: element count times element size, not a raw element count.
            hipMalloc(&drop_blocks_scale, XSIZE * YSIZE * sizeof(float));
            int block_size_w = XSIZE * YSIZE;
            int block_size_h = XSIZE * YSIZE;
            int outputs = 1;
            int batch = 2;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            // One synchronized launch, then a short untimed warm-up.
            hipLaunchKernelGGL(set_scales_dropblock_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, drop_blocks_scale, block_size_w, block_size_h, outputs, batch);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(set_scales_dropblock_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, drop_blocks_scale, block_size_w, block_size_h, outputs, batch);
            }
            // Timed region: 1000 back-to-back (unsynchronized) launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(set_scales_dropblock_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, drop_blocks_scale, block_size_w, block_size_h, outputs, batch);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release the per-configuration buffer (previously leaked).
            hipFree(drop_blocks_scale);
        }
}} | cf99c16de533125aebe16e98c3835767b47d7079.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_scales_dropblock_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps block shapes x matrix shapes and
// times 1000 launches of set_scales_dropblock_kernel per configuration.
// argv[1] selects how many entries of matrices_ to sweep.
//
// Fixes:
//  * cudaMalloc was given XSIZE*YSIZE as a byte count for a float buffer the
//    kernel indexes as floats — the allocation is now XSIZE*YSIZE*sizeof(float).
//  * drop_blocks_scale was allocated on every inner iteration and never
//    freed; it is now released at the end of each configuration.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *drop_blocks_scale = NULL;
            // FIX: element count times element size, not a raw element count.
            cudaMalloc(&drop_blocks_scale, XSIZE * YSIZE * sizeof(float));
            int block_size_w = XSIZE * YSIZE;
            int block_size_h = XSIZE * YSIZE;
            int outputs = 1;
            int batch = 2;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            // One synchronized launch, then a short untimed warm-up.
            set_scales_dropblock_kernel<<<gridBlock,threadBlock>>>(drop_blocks_scale,block_size_w,block_size_h,outputs,batch);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                set_scales_dropblock_kernel<<<gridBlock,threadBlock>>>(drop_blocks_scale,block_size_w,block_size_h,outputs,batch);
            }
            // Timed region: 1000 back-to-back (unsynchronized) launches.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                set_scales_dropblock_kernel<<<gridBlock,threadBlock>>>(drop_blocks_scale,block_size_w,block_size_h,outputs,batch);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release the per-configuration buffer (previously leaked).
            cudaFree(drop_blocks_scale);
        }
}} |
8cc08770f0c84392e1ef6898a0a901322c764666.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Block-level sum reduction with interleaved addressing; each block's
// partial sum is folded into *g_odata with a single atomicAdd from lane 0.
// Assumes blockDim.x is a power of two and <= 256 (sdata is fixed-size),
// and that the grid covers exactly the input — there is no bounds check on
// g_idata[i].  NOTE(review): confirm those launch-site invariants.
__global__ void reduce(int *g_idata, int *g_odata) {
__shared__ int sdata[256];
// each thread loads one element from global to shared mem
// note use of 1D thread indices (only) in this kernel
int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[threadIdx.x] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (int s = 1; s < blockDim.x; s *= 2)
{
// Interleaved addressing: at stride s, thread t combines elements
// 2*s*t and 2*s*t + s; threads whose index falls outside the block idle.
int index = 2 * s * threadIdx.x;;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
// Barrier outside the if: every thread must reach it each iteration.
__syncthreads();
}
// write result for this block to global mem
if (threadIdx.x == 0)
atomicAdd(g_odata, sdata[0]);
} | 8cc08770f0c84392e1ef6898a0a901322c764666.cu | #include "includes.h"
// Block-level sum reduction with interleaved addressing; each block's
// partial sum is folded into *g_odata with a single atomicAdd from lane 0.
// Assumes blockDim.x is a power of two and <= 256 (sdata is fixed-size),
// and that the grid covers exactly the input — there is no bounds check on
// g_idata[i].  NOTE(review): confirm those launch-site invariants.
__global__ void reduce(int *g_idata, int *g_odata) {
__shared__ int sdata[256];
// each thread loads one element from global to shared mem
// note use of 1D thread indices (only) in this kernel
int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[threadIdx.x] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (int s = 1; s < blockDim.x; s *= 2)
{
// Interleaved addressing: at stride s, thread t combines elements
// 2*s*t and 2*s*t + s; threads whose index falls outside the block idle.
int index = 2 * s * threadIdx.x;;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
// Barrier outside the if: every thread must reach it each iteration.
__syncthreads();
}
// write result for this block to global mem
if (threadIdx.x == 0)
atomicAdd(g_odata, sdata[0]);
} |
2a659e5ae3fb9367a338d6fbf6c9afd415ca6824.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define THREADS_PER_BLOCK 256
// Computes Pd = Md * Nd for square WIDTH x WIDTH row-major matrices, one
// thread per output element.
//
// Fixed: the original accumulated with `+=` directly into Pd, so the
// result included whatever Pd held before the launch — and the host copies
// an *uninitialized* buffer into Pd, making the output garbage.
// Accumulating in a register and storing once makes the kernel independent
// of Pd's prior contents (and avoids a global read-modify-write per step).
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    int COL = threadIdx.x + blockIdx.x * blockDim.x;
    int ROW = threadIdx.y + blockIdx.y * blockDim.y;
    if (ROW < WIDTH && COL < WIDTH) {
        float acc = 0.0f;  // per-thread accumulator in a register
        for (int i = 0; i < WIDTH; i++) {
            acc += Md[ROW * WIDTH + i] * Nd[i * WIDTH + COL];
        }
        Pd[ROW * WIDTH + COL] = acc;
    }
}
// Times one WIDTH x WIDTH matrix product on the GPU; WIDTH comes from
// arg1[1].  Prints only the elapsed kernel time in seconds.
int main(int arg0, char *arg1[]){
hipDeviceSynchronize();
int WIDTH = atoi(arg1[1]);
// Square thread blocks of sqrt(256) x sqrt(256); round the grid up when
// WIDTH is not a multiple of the block edge.
int sqrtThreads = sqrt(THREADS_PER_BLOCK);
int nBlocks = WIDTH/sqrtThreads;
if (WIDTH % sqrtThreads != 0)
{
nBlocks++;
}
dim3 grid(nBlocks, nBlocks, 1);
dim3 block(sqrtThreads, sqrtThreads, 1);
float *a_h, *b_h, *c_h, *d_h, *a_d, *b_d, *c_d;
int size;
hipEvent_t start;
hipEvent_t stop;
float elapsed1;
// NOTE(review): `size` is an int; WIDTH*WIDTH*sizeof(float) overflows for
// large WIDTH — confirm expected input range.
size = WIDTH * WIDTH * sizeof(float);
a_h = (float*) malloc(size);
b_h = (float*) malloc(size);
c_h = (float*) malloc(size);
// d_h is allocated and freed but otherwise unused.
d_h = (float*) malloc(size);
// A[i][j] = B[i][j] = i.
for (int i = 0; i < WIDTH; i++)
{
for (int j = 0; j < WIDTH; j++)
{
a_h[i * WIDTH + j] = i;
b_h[i * WIDTH + j] = i;
}
}
hipMalloc((void**)&a_d, size);
hipMalloc((void**)&b_d, size);
hipMalloc((void**)&c_d, size);
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
// NOTE(review): c_h is uninitialized when copied into c_d — verify the
// kernel fully overwrites Pd, otherwise results depend on garbage.
hipMemcpy(c_d, c_h, size, hipMemcpyHostToDevice);
// Time the kernel with a pair of events bracketing the launch.
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MatrixMul), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, WIDTH);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed1, start, stop);
// elapsed1 is in milliseconds; print seconds.
printf("%f\n", elapsed1/1000);
hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);
free(a_h);
free(b_h);
free(c_h);
free(d_h);
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
| 2a659e5ae3fb9367a338d6fbf6c9afd415ca6824.cu | #include <stdio.h>
#include <math.h>
#define THREADS_PER_BLOCK 256
// Computes Pd = Md * Nd for square WIDTH x WIDTH row-major matrices, one
// thread per output element.
//
// Fixed: the original accumulated with `+=` directly into Pd, so the
// result included whatever Pd held before the launch — and the host copies
// an *uninitialized* buffer into Pd, making the output garbage.
// Accumulating in a register and storing once makes the kernel independent
// of Pd's prior contents (and avoids a global read-modify-write per step).
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    int COL = threadIdx.x + blockIdx.x * blockDim.x;
    int ROW = threadIdx.y + blockIdx.y * blockDim.y;
    if (ROW < WIDTH && COL < WIDTH) {
        float acc = 0.0f;  // per-thread accumulator in a register
        for (int i = 0; i < WIDTH; i++) {
            acc += Md[ROW * WIDTH + i] * Nd[i * WIDTH + COL];
        }
        Pd[ROW * WIDTH + COL] = acc;
    }
}
// Times one WIDTH x WIDTH matrix product on the GPU; WIDTH comes from
// arg1[1].  Prints only the elapsed kernel time in seconds.
int main(int arg0, char *arg1[]){
// NOTE(review): cudaThreadSynchronize is a deprecated alias of
// cudaDeviceSynchronize.
cudaThreadSynchronize();
int WIDTH = atoi(arg1[1]);
// Square thread blocks of sqrt(256) x sqrt(256); round the grid up when
// WIDTH is not a multiple of the block edge.
int sqrtThreads = sqrt(THREADS_PER_BLOCK);
int nBlocks = WIDTH/sqrtThreads;
if (WIDTH % sqrtThreads != 0)
{
nBlocks++;
}
dim3 grid(nBlocks, nBlocks, 1);
dim3 block(sqrtThreads, sqrtThreads, 1);
float *a_h, *b_h, *c_h, *d_h, *a_d, *b_d, *c_d;
int size;
cudaEvent_t start;
cudaEvent_t stop;
float elapsed1;
// NOTE(review): `size` is an int; WIDTH*WIDTH*sizeof(float) overflows for
// large WIDTH — confirm expected input range.
size = WIDTH * WIDTH * sizeof(float);
a_h = (float*) malloc(size);
b_h = (float*) malloc(size);
c_h = (float*) malloc(size);
// d_h is allocated and freed but otherwise unused.
d_h = (float*) malloc(size);
// A[i][j] = B[i][j] = i.
for (int i = 0; i < WIDTH; i++)
{
for (int j = 0; j < WIDTH; j++)
{
a_h[i * WIDTH + j] = i;
b_h[i * WIDTH + j] = i;
}
}
cudaMalloc((void**)&a_d, size);
cudaMalloc((void**)&b_d, size);
cudaMalloc((void**)&c_d, size);
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
// NOTE(review): c_h is uninitialized when copied into c_d — verify the
// kernel fully overwrites Pd, otherwise results depend on garbage.
cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);
// Time the kernel with a pair of events bracketing the launch.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
MatrixMul<<<grid, block>>>(a_d, b_d, c_d, WIDTH);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed1, start, stop);
// elapsed1 is in milliseconds; print seconds.
printf("%f\n", elapsed1/1000);
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
free(a_h);
free(b_h);
free(c_h);
free(d_h);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
ad2215550dd5435d4fdf888dfc060dd9e8d811c6.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Aborts the process with a file/line diagnostic if a runtime API call
// failed.  Invoked through the checkCudaErrors() macro above, which
// supplies __FILE__ and __LINE__.
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
    if (err == hipSuccess) {
        return;
    }
    fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
            file, line, (int)err, hipGetErrorString(err));
    exit(-1);
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Fetches (and clears) the last runtime error and aborts with a diagnostic
// if one is pending; used after kernel launches, which report failures
// asynchronously.  Invoked through the getLastCudaError() macro above.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Power microbenchmark kernel: only one lane per warp (i % 32 == 0) runs a
// long unrolled chain of dependent integer multiplies for `iterations`
// rounds; the other lanes idle.  C[i] receives the final accumulator so
// the compiler cannot eliminate the work.
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=1;
unsigned Value2=A[i];
unsigned Value3=B[i];
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Exercise the integer multiply units heavily on one lane per warp.
if((i%32)==0){
#pragma unroll 100
for(unsigned k=0; k<iterations;k++) {
// Each statement depends on the previous ones, keeping the pipeline busy.
Value2= I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value3*=Value1;
Value2*=Value3;
Value1*=Value3;
}
}
__syncthreads();
// Publish the result so the loop cannot be optimized away.
Value=Value1;
C[i]=Value;
__syncthreads();
}
// Driver for the power microbenchmark: argv[1] gives the iteration count
// passed to PowerKernal2.  Allocates fixed-size input/output vectors, times
// the single kernel launch with events, and cleans up.
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
// NOTE(review): CleanupResources does not exit(), so on a failed malloc
// execution continues with a NULL pointer — confirm intended behavior.
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
// dimGrid2/dimBlock2 are declared but unused.
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Time the single benchmark launch with a pair of events.
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
// elapsedTime is in milliseconds; print seconds.
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
// Releases every device and host buffer that was allocated; the pointers
// are module-level globals and NULL ones are skipped.
void CleanupResources(void)
{
    // Device buffers first.
    if (d_A) {
        hipFree(d_A);
    }
    if (d_B) {
        hipFree(d_B);
    }
    if (d_C) {
        hipFree(d_C);
    }
    // Then the matching host buffers.
    if (h_A) {
        free(h_A);
    }
    if (h_B) {
        free(h_B);
    }
    if (h_C) {
        free(h_C);
    }
}
// Allocates an array with random unsigned entries.
// Fills data[0..n) with pseudo-random values in [0, RAND_MAX].
//
// Fixed two defects:
//  * srand() was called inside the loop with the same time(0) seed, which
//    restarted the generator every iteration and made all elements equal;
//  * rand() / RAND_MAX is *integer* division, collapsing every element to
//    0 (or 1 in the single case rand() == RAND_MAX).
// Seeding once and storing rand() directly restores the intent of varied
// random contents.
void RandomInit(unsigned* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand();
}
} | ad2215550dd5435d4fdf888dfc060dd9e8d811c6.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// Aborts the process with a file/line diagnostic if a runtime API call
// failed.  Invoked through the checkCudaErrors() macro above, which
// supplies __FILE__ and __LINE__.
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
// Fetches (and clears) the last runtime error and aborts with a diagnostic
// if one is pending; used after kernel launches, which report failures
// asynchronously.  Invoked through the getLastCudaError() macro above.
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Power microbenchmark kernel: only one lane per warp (i % 32 == 0) runs a
// long unrolled chain of dependent integer multiplies for `iterations`
// rounds; the other lanes idle.  C[i] receives the final accumulator so
// the compiler cannot eliminate the work.
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=1;
unsigned Value2=A[i];
unsigned Value3=B[i];
unsigned Value;
unsigned I1=A[i];
unsigned I2=B[i];
// Exercise the integer multiply units heavily on one lane per warp.
if((i%32)==0){
#pragma unroll 100
for(unsigned k=0; k<iterations;k++) {
// Each statement depends on the previous ones, keeping the pipeline busy.
Value2= I1*Value1;
Value3=I2*Value3;
Value1*=Value2;
Value3*=Value1;
Value2*=Value3;
Value1*=Value3;
}
}
__syncthreads();
// Publish the result so the loop cannot be optimized away.
Value=Value1;
C[i]=Value;
__syncthreads();
}
// Driver for the power microbenchmark: argv[1] gives the iteration count
// passed to PowerKernal2.  Allocates fixed-size input/output vectors, times
// the single kernel launch with events, and cleans up.
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
// NOTE(review): CleanupResources does not exit(), so on a failed malloc
// execution continues with a NULL pointer — confirm intended behavior.
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
// dimGrid2/dimBlock2 are declared but unused.
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Time the single benchmark launch with a pair of events.
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
// elapsedTime is in milliseconds; print seconds.
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
// NOTE(review): cudaThreadSynchronize is a deprecated alias of
// cudaDeviceSynchronize.
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
// Releases every device and host buffer that was allocated; the pointers
// are module-level globals and NULL ones are skipped.
void CleanupResources(void)
{
    // Device buffers first.
    if (d_A) {
        cudaFree(d_A);
    }
    if (d_B) {
        cudaFree(d_B);
    }
    if (d_C) {
        cudaFree(d_C);
    }
    // Then the matching host buffers.
    if (h_A) {
        free(h_A);
    }
    if (h_B) {
        free(h_B);
    }
    if (h_C) {
        free(h_C);
    }
}
// Allocates an array with random unsigned entries.
// Fills data[0..n) with pseudo-random values in [0, RAND_MAX].
//
// Fixed two defects:
//  * srand() was called inside the loop with the same time(0) seed, which
//    restarted the generator every iteration and made all elements equal;
//  * rand() / RAND_MAX is *integer* division, collapsing every element to
//    0 (or 1 in the single case rand() == RAND_MAX).
// Seeding once and storing rand() directly restores the intent of varied
// random contents.
void RandomInit(unsigned* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand();
}
} |
9ec4071d33bdd72601134402332388d779cb9524.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* MULTI-NODE AND PARALLEL MATRIX-MATRIX PRODUCT WITH MPI AND CUDA */
/* */
/* File: mmpmpicuda.cu */
/* Author: Alberto Pou Quirs (Github: bertini36) */
/* Description: This program performs a matrix product (A * B = C) */
/* distributing the computation between multiple nodes */
/* with MPI technology and parallelizing the computation in */
/* every node with Nvidia CUDA technology */
/* Compilation: nvcc -I/opt/mpi/bullxmpi/1.2.9.1/include */
/* -L/opt/mpi/bullxmpi/1.2.9.1/lib -lmpi -ldl -lm -lnuma */
/* -lrt -lnsl -lutil -lm -ldl mmpmpicuda.cu -o mmpmpicuda */
/* Strategy: */
/* Example 16x16 matrices with 4 nodes: */
/* _________________16________________ */
/* | | */
/* | NODE 1 | 4 */
/* |_________________________________| */
/* | | */
/* | NODE 2 | 4 */
/* C = |_________________________________| 16 */
/* | | */
/* | NODE 3 | 4 */
/* |_________________________________| */
/* | | */
/* | NODE 4 | 4 */
/* |_________________________________| */
/* */
/* Node 1 computes 4 rows of result matrix: */
/* __________________________________ */
/* | | */
/* | 4x16 CUDA block | */
/* |_________________________________| */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <mpi.h>
#define N 1024 # It has to be 32 multiple. Min 32 * Number of nodes.
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
struct timeval start_time, end_time;
// Terminates the process via the err() macro when a runtime API call did
// not return success.
inline void checkCuda(hipError_t e) {
    if (e == hipSuccess) {
        return;
    }
    err("CUDA Error %d: %s\n", e, hipGetErrorString(e));
}
// One thread per output element of this node's slice: computes row
// (row + from) of A times column col of B into matrix_c[row * width + col].
// `from` is the node's first assigned row; matrix_c is the node-local
// (N/comm_sz) x N output buffer.
// There is no bounds guard: the caller sizes the grid to exactly cover the
// slice (N is checked to be divisible by 32 * comm_sz on the host side).
//
// Improved: the original zeroed the output cell and then accumulated into
// it through global memory on every k; accumulating in a register and
// storing once yields the same values with a single global write.
__global__ void matrixProduct(double *matrix_a, double *matrix_b, double *matrix_c, int width, int from, int my_rank) {
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    double acc = 0.0;  // register accumulator
    for (int k = 0; k < width; k++) {
        acc += matrix_a[((row + from) * width) + k] * matrix_b[k * width + col];
    }
    matrix_c[row * width + col] = acc;
}
// Fills both N x N matrices with pseudo-random values; the generator is
// re-seeded from the wall clock on every call.
void initializeMatrices(double matrix_a[N][N], double matrix_b[N][N]) {
    srand(time(NULL));
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            // Draw A's entry first, then B's, matching the original
            // consumption order of the rand() stream.
            matrix_a[row][col] = rand();
            matrix_b[row][col] = rand();
        }
    }
}
// Debug helper: prints A, B and the result matrix C in full,
// comma-separated, one matrix row per output line.
void showMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N]) {
int i, j;
// NOTE(review): this srand call has no effect here — nothing below uses
// rand(); likely copied from initializeMatrices.
srand(time(NULL));
printf("***** MATRIX A ***** \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
// Newline after the last column of each row, comma otherwise.
(j % N == N-1) ? printf("%.1f \n", matrix_a[i][j]) : printf("%.1f,", matrix_a[i][j]);
}
}
printf("***** MATRIX B ***** \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
(j % N == N-1) ? printf("%.1f \n", matrix_b[i][j]) : printf("%.1f,", matrix_b[i][j]);
}
}
printf("***** RESULT MATRIX ***** \n");
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
(j % N == N-1) ? printf("%f \n", matrix_c[i][j]) : printf("%f,", matrix_c[i][j]);
}
}
}
// Verification helper: recomputes A * B on the CPU into matrix_testc and
// prints the top-left 32 x 32 corners of both the GPU result (matrix_c)
// and the CPU reference for visual comparison.
//
// Fixes:
//  * matrix_testc was accumulated with `+=` without being zeroed first —
//    the caller passes an uninitialized buffer, so the reference values
//    were garbage; it is now cleared before the accumulation.
//  * the obfuscated loop condition `i < 32 == 1` (parsed as (i < 32) == 1)
//    is written plainly as `i < 32`; behavior is unchanged.
void checkMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N], double matrix_testc[N][N]) {
    int i, j, k;
    // Zero the reference buffer before accumulating into it.
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            matrix_testc[i][j] = 0.0;
    // Naive O(N^3) CPU reference product.
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            for (k = 0; k < N; k++)
            {
                matrix_testc[i][j] += matrix_a[i][k] * matrix_b[k][j];
            }
    // Print the 32 x 32 top-left corner of the GPU result...
    for (i = 0; i < 32; i++) {
        for (j = 0; j < 32; j++) {
            printf("%.1f ", (matrix_c[i][j]));
        }
        printf("\n");
    }
    printf("\n\n\n");
    // ...and of the CPU reference.
    for (i = 0; i < 32; i++) {
        for (j = 0; j < 32; j++) {
            printf("%.1f ", (matrix_testc[i][j]));
        }
        printf("\n");
    }
}
// Distributed matrix product: every MPI rank receives full copies of A and
// B, computes its contiguous band of N/comm_sz rows of C on its GPU, and
// rank 0 gathers the bands and reports the compute time.
int main(int argc, char *argv[]) {
// NOTE(review): four N x N double arrays (8 MB each at N=1024) live on the
// stack here — confirm the stack limit accommodates ~32 MB.
double A[N][N], B[N][N], C[N][N], C_TEST[N][N];
double *d_a, *d_b, *d_c;
int my_rank, comm_sz, from, to, nrows;
// MPI initialization
MPI_Init (&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Process id
MPI_Comm_size(MPI_COMM_WORLD, &comm_sz); // Number of processors
// The row decomposition below requires N to divide evenly.
if (N % comm_sz != 0) {
if (my_rank == 0) printf("Matrix size not divisible by number of processors \n");
MPI_Finalize();
exit(-1);
}
// Calculate interval of lines (rows) to compute per node
from = my_rank * N / comm_sz;
to = (my_rank + 1) * N / comm_sz;
nrows = to - from;
// Only rank 0 generates the inputs; everyone gets them via broadcast.
if (my_rank == 0) { initializeMatrices(A, B); }
// Send A and B to every node
MPI_Bcast(A, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Allocate memory on the device: full A and B, but only this rank's
// slice of C.
checkCuda(hipMalloc((void **) &d_a, N*N*sizeof(double)));
checkCuda(hipMalloc((void **) &d_b, N*N*sizeof(double)));
checkCuda(hipMalloc((void **) &d_c, (N*N/comm_sz)*sizeof(double)));
// Copy the inputs to the device
checkCuda(hipMemcpy(d_a, A, N*N*sizeof(double), hipMemcpyHostToDevice));
checkCuda(hipMemcpy(d_b, B, N*N*sizeof(double), hipMemcpyHostToDevice));
// CUDA threads structure definition: 32x32 blocks tiling this rank's band.
dim3 dimGrid(N/32, N/(32*comm_sz));
dim3 dimBlock(32, 32); // MAX BLOCK SIZE
// Synchronize ranks so rank 0's timer covers the slowest node.
MPI_Barrier(MPI_COMM_WORLD);
if (my_rank == 0) { gettimeofday(&start_time, NULL); }
// Kernel launch
hipLaunchKernelGGL(( matrixProduct), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, N, from, my_rank);
checkCuda(hipDeviceSynchronize());
checkCuda(hipGetLastError());
// Calculate compute time
MPI_Barrier(MPI_COMM_WORLD);
if (my_rank == 0) {
gettimeofday(&end_time, NULL);
printf("Compute time: %.1f ms \n", (float) (end_time.tv_sec - start_time.tv_sec) * 1000 + (end_time.tv_usec - start_time.tv_usec) / 1000);
}
// Get this rank's band of results back from the device
checkCuda(hipMemcpy(C[from], d_c, (nrows)*N*sizeof(double), hipMemcpyDeviceToHost));
// Unify results from nodes
// NOTE(review): on rank 0 the send buffer C[from] aliases the receive
// buffer C — consider MPI_IN_PLACE; verify against the MPI standard.
MPI_Gather(C[from], N*N/comm_sz, MPI_DOUBLE, C, N*N/comm_sz, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// if (my_rank == 0) { showMatrices(A, B, C); }
checkCuda(hipFree(d_a));
checkCuda(hipFree(d_b));
checkCuda(hipFree(d_c));
MPI_Finalize();
return 0;
} | 9ec4071d33bdd72601134402332388d779cb9524.cu | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* MULTI-NODE AND PARALLEL MATRIX-MATRIX PRODUCT WITH MPI AND CUDA */
/* */
/* File: mmpmpicuda.cu */
/* Author: Alberto Pou Quirós (Github: bertini36) */
/* Description: This program performs a matrix product (A * B = C) */
/* distributing the computation between multiple nodes */
/* with MPI technology and parallelizing the computation in */
/* every node with Nvidia CUDA technology */
/* Compilation: nvcc -I/opt/mpi/bullxmpi/1.2.9.1/include */
/* -L/opt/mpi/bullxmpi/1.2.9.1/lib -lmpi -ldl -lm -lnuma */
/* -lrt -lnsl -lutil -lm -ldl mmpmpicuda.cu -o mmpmpicuda */
/* Strategy: */
/* Example 16x16 matrices with 4 nodes: */
/* _________________16________________ */
/* | | */
/* | NODE 1 | 4 */
/* |_________________________________| */
/* | | */
/* | NODE 2 | 4 */
/* C = |_________________________________| 16 */
/* | | */
/* | NODE 3 | 4 */
/* |_________________________________| */
/* | | */
/* | NODE 4 | 4 */
/* |_________________________________| */
/* */
/* Node 1 computes 4 rows of result matrix: */
/* __________________________________ */
/* | | */
/* | 4x16 CUDA block | */
/* |_________________________________| */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <mpi.h>
#define N 1024 # It has to be 32 multiple. Min 32 * Number of nodes.
#define err(format, ...) do { fprintf(stderr, format, ##__VA_ARGS__); exit(1); } while (0)
struct timeval start_time, end_time;
inline void checkCuda(cudaError_t e) {
if (e != cudaSuccess) {
err("CUDA Error %d: %s\n", e, cudaGetErrorString(e));
}
}
__global__ void matrixProduct(double *matrix_a, double *matrix_b, double *matrix_c, int width, int from, int my_rank) {
int row = threadIdx.y + blockDim.y * blockIdx.y;
int col = threadIdx.x + blockDim.x * blockIdx.x;
matrix_c[row * width + col] = 0;
for (int k=0; k<width; k++) {
matrix_c[row * width + col] += matrix_a[((row + from) * width) + k] * matrix_b[k * width + col];
}
}
void initializeMatrices(double matrix_a[N][N], double matrix_b[N][N]) {
int i, j;
srand(time(NULL));
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
matrix_a[i][j] = rand();
matrix_b[i][j] = rand();
}
}
}
void showMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N]) {
int i, j;
srand(time(NULL));
printf("***** MATRIX A ***** \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
(j % N == N-1) ? printf("%.1f \n", matrix_a[i][j]) : printf("%.1f,", matrix_a[i][j]);
}
}
printf("***** MATRIX B ***** \n");
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
(j % N == N-1) ? printf("%.1f \n", matrix_b[i][j]) : printf("%.1f,", matrix_b[i][j]);
}
}
printf("***** RESULT MATRIX ***** \n");
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
(j % N == N-1) ? printf("%f \n", matrix_c[i][j]) : printf("%f,", matrix_c[i][j]);
}
}
}
void checkMatrices(double matrix_a[N][N], double matrix_b[N][N], double matrix_c[N][N], double matrix_testc[N][N]) {
int i, j, k;
for(i = 0; i < N; i++)
for(j = 0; j < N; j++)
for(k = 0; k < N; k++)
{
matrix_testc[i][j] += matrix_a[i][k] * matrix_b[k][j];
}
for(i = 0; i < 32 == 1; i++) {
for(j = 0; j < 32; j++){
printf("%.1f ", (matrix_c[i][j]));
}
printf("\n");
}
printf("\n\n\n");
for(i = 0; i < 32 == 1; i++) {
for(j = 0; j < 32; j++){
printf("%.1f ", (matrix_testc[i][j]));
}
printf("\n");
}
}
int main(int argc, char *argv[]) {
double A[N][N], B[N][N], C[N][N], C_TEST[N][N];
double *d_a, *d_b, *d_c;
int my_rank, comm_sz, from, to, nrows;
// MPI initialization
MPI_Init (&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Process id
MPI_Comm_size(MPI_COMM_WORLD, &comm_sz); // Number of processors
if (N % comm_sz != 0) {
if (my_rank == 0) printf("Matrix size not divisible by number of processors \n");
MPI_Finalize();
exit(-1);
}
// Calculate interval lines to compute per node
from = my_rank * N / comm_sz;
to = (my_rank + 1) * N / comm_sz;
nrows = to - from;
if (my_rank == 0) { initializeMatrices(A, B); }
// Send A y B to every node
MPI_Bcast(A, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(B, N*N, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// Allocate memory in the device
checkCuda(cudaMalloc((void **) &d_a, N*N*sizeof(double)));
checkCuda(cudaMalloc((void **) &d_b, N*N*sizeof(double)));
checkCuda(cudaMalloc((void **) &d_c, (N*N/comm_sz)*sizeof(double)));
// Copy the information in the device
checkCuda(cudaMemcpy(d_a, A, N*N*sizeof(double), cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(d_b, B, N*N*sizeof(double), cudaMemcpyHostToDevice));
// CUDA threads structure definition
dim3 dimGrid(N/32, N/(32*comm_sz));
dim3 dimBlock(32, 32); // MAX BLOCK SIZE
MPI_Barrier(MPI_COMM_WORLD);
if (my_rank == 0) { gettimeofday(&start_time, NULL); }
// Kernel launch
matrixProduct<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N, from, my_rank);
checkCuda(cudaDeviceSynchronize());
checkCuda(cudaGetLastError());
// Calculate compute time
MPI_Barrier(MPI_COMM_WORLD);
if (my_rank == 0) {
gettimeofday(&end_time, NULL);
printf("Compute time: %.1f ms \n", (float) (end_time.tv_sec - start_time.tv_sec) * 1000 + (end_time.tv_usec - start_time.tv_usec) / 1000);
}
// Get results from device
checkCuda(cudaMemcpy(C[from], d_c, (nrows)*N*sizeof(double), cudaMemcpyDeviceToHost));
// Unify results from nodes
MPI_Gather(C[from], N*N/comm_sz, MPI_DOUBLE, C, N*N/comm_sz, MPI_DOUBLE, 0, MPI_COMM_WORLD);
// if (my_rank == 0) { showMatrices(A, B, C); }
checkCuda(cudaFree(d_a));
checkCuda(cudaFree(d_b));
checkCuda(cudaFree(d_c));
MPI_Finalize();
return 0;
} |
020506a495ec4f38d3baf948dcb685fe488f1c73.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "initialSpikeIndCopyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned short *pLastSpikeInd = NULL;
hipMalloc(&pLastSpikeInd, XSIZE*YSIZE);
const unsigned int noReal = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
initialSpikeIndCopyKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, pLastSpikeInd,noReal);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
initialSpikeIndCopyKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, pLastSpikeInd,noReal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
initialSpikeIndCopyKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, pLastSpikeInd,noReal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 020506a495ec4f38d3baf948dcb685fe488f1c73.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "initialSpikeIndCopyKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned short *pLastSpikeInd = NULL;
cudaMalloc(&pLastSpikeInd, XSIZE*YSIZE);
const unsigned int noReal = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
initialSpikeIndCopyKernel<<<gridBlock,threadBlock>>>(pLastSpikeInd,noReal);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
initialSpikeIndCopyKernel<<<gridBlock,threadBlock>>>(pLastSpikeInd,noReal);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
initialSpikeIndCopyKernel<<<gridBlock,threadBlock>>>(pLastSpikeInd,noReal);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
78973da9a9e7870cbf5e81ce386466c6692fb24d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
// Kernel addition on GPU
__global__ void add (int N, float a, float b, float c, float* A)
{
int i = threadIdx.x;
A[i] = (A[i] + a) * b + c;
}
void init (float* A, int N)
{
for (int i = 0; i < N; i++)
{
A[i] = (float)i;
// printf ("%d: %.2f\n", i, A[i]);
}
}
// Main function on the host
int main()
{
int N = 20;
float *A, *dev_A;
A = (float*) malloc(sizeof(float) * N);
printf ("Initializing array\n");
init(A, N); // Initialize the array
printf ("Initialization complete\n");
hipMalloc((void **) &dev_A, sizeof(float) * N);
printf ("Device memory allocated\n");
hipMemcpy(dev_A, A, sizeof(float) * N, hipMemcpyHostToDevice);
printf ("Data moved to device\n");
hipLaunchKernelGGL(( add) , dim3(1), dim3(N) , 0, 0, N, 3.0f, 4.0f, -2.0f, dev_A);
hipMemcpy(A, dev_A, sizeof(float) * N, hipMemcpyDeviceToHost);
printf ("Data moved to host\n");
hipFree(dev_A);
printf ("Device memory released\n");
for (int i = 0; i < N; i++)
{
printf ("%.2f ", A[i]);
}
printf ("\n");
return 0;
} | 78973da9a9e7870cbf5e81ce386466c6692fb24d.cu | #include "stdio.h"
// Kernel addition on GPU
__global__ void add (int N, float a, float b, float c, float* A)
{
int i = threadIdx.x;
A[i] = (A[i] + a) * b + c;
}
void init (float* A, int N)
{
for (int i = 0; i < N; i++)
{
A[i] = (float)i;
// printf ("%d: %.2f\n", i, A[i]);
}
}
// Main function on the host
int main()
{
int N = 20;
float *A, *dev_A;
A = (float*) malloc(sizeof(float) * N);
printf ("Initializing array\n");
init(A, N); // Initialize the array
printf ("Initialization complete\n");
cudaMalloc((void **) &dev_A, sizeof(float) * N);
printf ("Device memory allocated\n");
cudaMemcpy(dev_A, A, sizeof(float) * N, cudaMemcpyHostToDevice);
printf ("Data moved to device\n");
add <<< 1, N >>> (N, 3.0f, 4.0f, -2.0f, dev_A);
cudaMemcpy(A, dev_A, sizeof(float) * N, cudaMemcpyDeviceToHost);
printf ("Data moved to host\n");
cudaFree(dev_A);
printf ("Device memory released\n");
for (int i = 0; i < N; i++)
{
printf ("%.2f ", A[i]);
}
printf ("\n");
return 0;
} |
a52bf0493a3a4ab73e164eef8d1ea93b455800a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 %s --std=c++11 -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - -verify
// Note: This test won't work with -fsyntax-only, because some of these errors
// are emitted during codegen.
#include "Inputs/cuda.h"
extern "C" void host_fn() {}
// expected-note@-1 {{'host_fn' declared here}}
// expected-note@-2 {{'host_fn' declared here}}
// expected-note@-3 {{'host_fn' declared here}}
// expected-note@-4 {{'host_fn' declared here}}
// expected-note@-5 {{'host_fn' declared here}}
// expected-note@-6 {{'host_fn' declared here}}
// expected-note@-7 {{'host_fn' declared here}}
struct Dummy {};
struct S {
S() {}
// expected-note@-1 {{'S' declared here}}
// expected-note@-2 {{'S' declared here}}
~S() { host_fn(); }
// expected-note@-1 {{'~S' declared here}}
int x;
};
struct T {
__host__ __device__ void hd() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
// No error; this is (implicitly) inline and is never called, so isn't
// codegen'ed.
__host__ __device__ void hd2() { host_fn(); }
__host__ __device__ void hd3();
void h() {}
// expected-note@-1 {{'h' declared here}}
void operator+();
// expected-note@-1 {{'operator+' declared here}}
void operator-(const T&) {}
// expected-note@-1 {{'operator-' declared here}}
operator Dummy() { return Dummy(); }
// expected-note@-1 {{'operator Dummy' declared here}}
};
__host__ __device__ void T::hd3() {
host_fn();
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}
template <typename T> __host__ __device__ void hd2() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__global__ void kernel() { hd2<int>(); }
__host__ __device__ void hd() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
template <typename T> __host__ __device__ void hd3() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__device__ void device_fn() { hd3<int>(); }
// No error because this is never instantiated.
template <typename T> __host__ __device__ void hd4() { host_fn(); }
__host__ __device__ void local_var() {
S s;
// expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}
__host__ __device__ void placement_new(char *ptr) {
::new(ptr) S();
// expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}
__host__ __device__ void explicit_destructor(S *s) {
s->~S();
// expected-error@-1 {{reference to __host__ function '~S' in __host__ __device__ function}}
}
__host__ __device__ void hd_member_fn() {
T t;
// Necessary to trigger an error on T::hd. It's (implicitly) inline, so
// isn't codegen'ed until we call it.
t.hd();
}
__host__ __device__ void h_member_fn() {
T t;
t.h();
// expected-error@-1 {{reference to __host__ function 'h' in __host__ __device__ function}}
}
__host__ __device__ void fn_ptr() {
auto* ptr = &host_fn;
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}
template <typename T>
__host__ __device__ void fn_ptr_template() {
auto* ptr = &host_fn; // Not an error because the template isn't instantiated.
}
__host__ __device__ void unaryOp() {
T t;
(void) +t; // expected-error {{reference to __host__ function 'operator+' in __host__ __device__ function}}
}
__host__ __device__ void binaryOp() {
T t;
(void) (t - t); // expected-error {{reference to __host__ function 'operator-' in __host__ __device__ function}}
}
__host__ __device__ void implicitConversion() {
T t;
Dummy d = t; // expected-error {{reference to __host__ function 'operator Dummy' in __host__ __device__ function}}
}
template <typename T>
struct TmplStruct {
template <typename U> __host__ __device__ void fn() {}
};
template <>
template <>
__host__ __device__ void TmplStruct<int>::fn<int>() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__device__ void double_specialization() { TmplStruct<int>().fn<int>(); }
| a52bf0493a3a4ab73e164eef8d1ea93b455800a5.cu | // RUN: %clang_cc1 %s --std=c++11 -triple nvptx-unknown-unknown -fcuda-is-device -emit-llvm -o - -verify
// Note: This test won't work with -fsyntax-only, because some of these errors
// are emitted during codegen.
#include "Inputs/cuda.h"
extern "C" void host_fn() {}
// expected-note@-1 {{'host_fn' declared here}}
// expected-note@-2 {{'host_fn' declared here}}
// expected-note@-3 {{'host_fn' declared here}}
// expected-note@-4 {{'host_fn' declared here}}
// expected-note@-5 {{'host_fn' declared here}}
// expected-note@-6 {{'host_fn' declared here}}
// expected-note@-7 {{'host_fn' declared here}}
struct Dummy {};
struct S {
S() {}
// expected-note@-1 {{'S' declared here}}
// expected-note@-2 {{'S' declared here}}
~S() { host_fn(); }
// expected-note@-1 {{'~S' declared here}}
int x;
};
struct T {
__host__ __device__ void hd() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
// No error; this is (implicitly) inline and is never called, so isn't
// codegen'ed.
__host__ __device__ void hd2() { host_fn(); }
__host__ __device__ void hd3();
void h() {}
// expected-note@-1 {{'h' declared here}}
void operator+();
// expected-note@-1 {{'operator+' declared here}}
void operator-(const T&) {}
// expected-note@-1 {{'operator-' declared here}}
operator Dummy() { return Dummy(); }
// expected-note@-1 {{'operator Dummy' declared here}}
};
__host__ __device__ void T::hd3() {
host_fn();
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}
template <typename T> __host__ __device__ void hd2() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__global__ void kernel() { hd2<int>(); }
__host__ __device__ void hd() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
template <typename T> __host__ __device__ void hd3() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__device__ void device_fn() { hd3<int>(); }
// No error because this is never instantiated.
template <typename T> __host__ __device__ void hd4() { host_fn(); }
__host__ __device__ void local_var() {
S s;
// expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}
__host__ __device__ void placement_new(char *ptr) {
::new(ptr) S();
// expected-error@-1 {{reference to __host__ function 'S' in __host__ __device__ function}}
}
__host__ __device__ void explicit_destructor(S *s) {
s->~S();
// expected-error@-1 {{reference to __host__ function '~S' in __host__ __device__ function}}
}
__host__ __device__ void hd_member_fn() {
T t;
// Necessary to trigger an error on T::hd. It's (implicitly) inline, so
// isn't codegen'ed until we call it.
t.hd();
}
__host__ __device__ void h_member_fn() {
T t;
t.h();
// expected-error@-1 {{reference to __host__ function 'h' in __host__ __device__ function}}
}
__host__ __device__ void fn_ptr() {
auto* ptr = &host_fn;
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
}
template <typename T>
__host__ __device__ void fn_ptr_template() {
auto* ptr = &host_fn; // Not an error because the template isn't instantiated.
}
__host__ __device__ void unaryOp() {
T t;
(void) +t; // expected-error {{reference to __host__ function 'operator+' in __host__ __device__ function}}
}
__host__ __device__ void binaryOp() {
T t;
(void) (t - t); // expected-error {{reference to __host__ function 'operator-' in __host__ __device__ function}}
}
__host__ __device__ void implicitConversion() {
T t;
Dummy d = t; // expected-error {{reference to __host__ function 'operator Dummy' in __host__ __device__ function}}
}
template <typename T>
struct TmplStruct {
template <typename U> __host__ __device__ void fn() {}
};
template <>
template <>
__host__ __device__ void TmplStruct<int>::fn<int>() { host_fn(); }
// expected-error@-1 {{reference to __host__ function 'host_fn' in __host__ __device__ function}}
__device__ void double_specialization() { TmplStruct<int>().fn<int>(); }
|
b75b3e4d0484e07bd38c617687f77f741c6358ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "OCTCUDAHeader.cuh"
using namespace std;
int gnMode = -1;
int gnRawLineLength;
int gnRawNumberLines;
int gnCalibrationNumberLines;
int gnProcessNumberLines;
int gnProcessedNumberLines;
int gnPerpendicular;
int gnAllocationStatus = 0;
int gnMidLength;
float* gpfRawCalibration;
float* gpfProcessCalibration;
size_t gnProcessCalibrationPitch;
// reference
float* gpfReferenceEven;
float* gpfReferenceOdd;
// fft
hipfftComplex* gpcProcessDepthProfile;
size_t gnProcessDepthProfilePitch;
hipfftHandle gchForward;
// calibration mask
int gnCalibrationStart;
int gnCalibrationStop;
int gnCalibrationRound;
float* gpfCalibrationMask;
// reverse fft
hipfftComplex* gpcProcessSpectrum;
size_t gnProcessSpectrumPitch;
hipfftHandle gchReverse;
// phase
float* gpfProcessPhase;
size_t gnProcessPhasePitch;
int main()
{
hipError_t cudaStatus;
int nStatus;
// get device count
int m_nDeviceCount;
nStatus = getDeviceCount(&m_nDeviceCount);
if (nStatus == -1) {
fprintf(stderr, "getDeviceCount failed!");
return 1;
} // if (nStatus
if (m_nDeviceCount == 1)
fprintf(stdout, "%d device found.\n", m_nDeviceCount);
else
fprintf(stdout, "%d devices found.\n", m_nDeviceCount);
fprintf(stdout, "\n");
// loop through number of devices and get names
char* m_strDeviceName;
m_strDeviceName = (char*)malloc(256 * sizeof(char));
for (int nDevice = 0; nDevice < m_nDeviceCount; nDevice++) {
nStatus = getDeviceName(nDevice, m_strDeviceName);
if (nStatus != 0) {
fprintf(stderr, "can't get device name");
return 1;
} // if (nStatus
fprintf(stdout, "device %d : %s\n", nDevice, m_strDeviceName);
} // for (int nDevice
free(m_strDeviceName);
fprintf(stdout, "\n");
// initialization
initialize(1, 1024, 2048, 1024, 2048); // int nMode, int nRawLineLength, int nRawNumberLines, int nProcessNumberLines, int nProcessedNumberLines
// read data from binary file
short* pnParallel;
readPSSDOCTFile(&pnParallel);
fprintf(stdout, "initialization complete\n");
// change array type while copying into host memory array (need to do this in C#)
std::copy(pnParallel, pnParallel + gnCalibrationNumberLines * gnRawLineLength, gpfRawCalibration);
fprintf(stdout, "initialization complete\n");
// start C++ clock
auto t_start = std::chrono::high_resolution_clock::now();
// start timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipDeviceSynchronize();
// pause
// std::this_thread::sleep_for(std::chrono::milliseconds(1000));
// int nNumberReps = 1000;
int nNumberReps = 1;
//// set up number of threads per block
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
for (int nRep = 0; nRep < nNumberReps; nRep++) {
// loop through in chunks that can be processed
int nAline, nNumberLinesInChunk;
for (nAline = 0; nAline < gnCalibrationNumberLines; nAline += gnProcessNumberLines) {
// copy chunk of data for processing
nNumberLinesInChunk = nAline + gnProcessNumberLines - gnCalibrationNumberLines;
if (nNumberLinesInChunk <= 0)
nNumberLinesInChunk = gnProcessNumberLines;
printf("nAline = %d nNumberLinesInChunk = %d \n", nAline, nNumberLinesInChunk);
// copy every other line starting at 0
gpuErrchk(hipMemcpy2D(gpfProcessCalibration, gnProcessCalibrationPitch, gpfRawCalibration + (nAline + 0) * gnRawLineLength, 2 * gnProcessCalibrationPitch, gnProcessCalibrationPitch, nNumberLinesInChunk >> 1, hipMemcpyHostToDevice));
//float dSum;
//float* pCurrent;
//for (int nPoint = 0; nPoint < gnRawLineLength; nPoint++) {
// dSum = 0;
// pCurrent = gpfRawCalibration + nAline * gnRawLineLength + nPoint;
// for (int nLine = 0; nLine < nNumberLinesInChunk >> 1; nLine++) {
// dSum += *(pCurrent);
// pCurrent += gnRawLineLength;
// }
// gpfReferenceEven[nPoint] = 2 * dSum / (nNumberLinesInChunk >> 1);
//}
// calculate reference
chrono::high_resolution_clock::time_point t1 = chrono::high_resolution_clock::now();
// old version
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
hipLaunchKernelGGL(( calculateMean), dim3(d3Blocks), dim3(d3Threads), 0, 0, gpfProcessCalibration, gpfReferenceEven, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
chrono::high_resolution_clock::time_point t2 = chrono::high_resolution_clock::now();
double dTime = std::chrono::duration<double, std::milli>(t2 - t1).count();
fprintf(stdout, "Old kernel calculateMean time = %0.5f ms\n", dTime);
// JL version
// subtract reference
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
hipLaunchKernelGGL(( subtractMean), dim3(d3Blocks), dim3(d3Threads), 0, 0, gpfProcessCalibration, gpfReferenceEven, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
// forward fft
gpuErrchk(hipMemset2D(gpcProcessDepthProfile, gnProcessDepthProfilePitch, 0.0, gnProcessDepthProfilePitch, gnProcessNumberLines >> 1));
hipfftExecR2C(gchForward, gpfProcessCalibration, gpcProcessDepthProfile);
// calculate mask
nThreadsPerBlock = 512;
hipLaunchKernelGGL(( calculateMask), dim3(gnRawLineLength / nThreadsPerBlock), dim3(nThreadsPerBlock), 0, 0, gpfCalibrationMask, gnRawLineLength, 50, 100, 16);
// apply mask
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
hipLaunchKernelGGL(( applyMask), dim3(d3Blocks), dim3(d3Threads), 0, 0, gpcProcessDepthProfile, gpfCalibrationMask, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
// reverse fft
hipfftExecC2C(gchReverse, gpcProcessDepthProfile, gpcProcessSpectrum, HIPFFT_BACKWARD);
// calculate phase
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = (gnProcessNumberLines >> 1) / d3Threads.y;
d3Blocks.z = 1;
hipLaunchKernelGGL(( calculatePhase), dim3(d3Blocks), dim3(d3Threads), 0, 0, gpcProcessSpectrum, gpfProcessPhase, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(hipPeekAtLastError());
} // for (nAline
hipDeviceSynchronize();
} // for (int nRep
// stop C++ timer
auto t_end = std::chrono::high_resolution_clock::now();
// stop timer
hipEventRecord(stop);
// can do transfers back to host memory here
hipEventSynchronize(stop);
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
fprintf(stdout, "time in milliseconds = %f\n", elapsed_time_ms / nNumberReps);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
fprintf(stdout, "time in milliseconds = %f\n", milliseconds / nNumberReps);
hipEventDestroy(start);
hipEventDestroy(stop);
fprintf(stdout, "\n");
// free memory
cleanup();
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| b75b3e4d0484e07bd38c617687f77f741c6358ee.cu | #include "OCTCUDAHeader.cuh"
using namespace std;
int gnMode = -1;
int gnRawLineLength;
int gnRawNumberLines;
int gnCalibrationNumberLines;
int gnProcessNumberLines;
int gnProcessedNumberLines;
int gnPerpendicular;
int gnAllocationStatus = 0;
int gnMidLength;
float* gpfRawCalibration;
float* gpfProcessCalibration;
size_t gnProcessCalibrationPitch;
// reference
float* gpfReferenceEven;
float* gpfReferenceOdd;
// fft
cufftComplex* gpcProcessDepthProfile;
size_t gnProcessDepthProfilePitch;
cufftHandle gchForward;
// calibration mask
int gnCalibrationStart;
int gnCalibrationStop;
int gnCalibrationRound;
float* gpfCalibrationMask;
// reverse fft
cufftComplex* gpcProcessSpectrum;
size_t gnProcessSpectrumPitch;
cufftHandle gchReverse;
// phase
float* gpfProcessPhase;
size_t gnProcessPhasePitch;
int main()
{
cudaError_t cudaStatus;
int nStatus;
// get device count
int m_nDeviceCount;
nStatus = getDeviceCount(&m_nDeviceCount);
if (nStatus == -1) {
fprintf(stderr, "getDeviceCount failed!");
return 1;
} // if (nStatus
if (m_nDeviceCount == 1)
fprintf(stdout, "%d device found.\n", m_nDeviceCount);
else
fprintf(stdout, "%d devices found.\n", m_nDeviceCount);
fprintf(stdout, "\n");
// loop through number of devices and get names
char* m_strDeviceName;
m_strDeviceName = (char*)malloc(256 * sizeof(char));
for (int nDevice = 0; nDevice < m_nDeviceCount; nDevice++) {
nStatus = getDeviceName(nDevice, m_strDeviceName);
if (nStatus != 0) {
fprintf(stderr, "can't get device name");
return 1;
} // if (nStatus
fprintf(stdout, "device %d : %s\n", nDevice, m_strDeviceName);
} // for (int nDevice
free(m_strDeviceName);
fprintf(stdout, "\n");
// initialization
initialize(1, 1024, 2048, 1024, 2048); // int nMode, int nRawLineLength, int nRawNumberLines, int nProcessNumberLines, int nProcessedNumberLines
// read data from binary file
short* pnParallel;
readPSSDOCTFile(&pnParallel);
fprintf(stdout, "initialization complete\n");
// change array type while copying into host memory array (need to do this in C#)
std::copy(pnParallel, pnParallel + gnCalibrationNumberLines * gnRawLineLength, gpfRawCalibration);
fprintf(stdout, "initialization complete\n");
// start C++ clock
auto t_start = std::chrono::high_resolution_clock::now();
// start timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaDeviceSynchronize();
// pause
// std::this_thread::sleep_for(std::chrono::milliseconds(1000));
// int nNumberReps = 1000;
int nNumberReps = 1;
//// set up number of threads per block
int nThreadsPerBlock;
dim3 d3Threads;
dim3 d3Blocks;
for (int nRep = 0; nRep < nNumberReps; nRep++) {
// loop through in chunks that can be processed
int nAline, nNumberLinesInChunk;
for (nAline = 0; nAline < gnCalibrationNumberLines; nAline += gnProcessNumberLines) {
// copy chunk of data for processing
nNumberLinesInChunk = nAline + gnProcessNumberLines - gnCalibrationNumberLines;
if (nNumberLinesInChunk <= 0)
nNumberLinesInChunk = gnProcessNumberLines;
printf("nAline = %d nNumberLinesInChunk = %d \n", nAline, nNumberLinesInChunk);
// copy every other line starting at 0
gpuErrchk(cudaMemcpy2D(gpfProcessCalibration, gnProcessCalibrationPitch, gpfRawCalibration + (nAline + 0) * gnRawLineLength, 2 * gnProcessCalibrationPitch, gnProcessCalibrationPitch, nNumberLinesInChunk >> 1, cudaMemcpyHostToDevice));
//float dSum;
//float* pCurrent;
//for (int nPoint = 0; nPoint < gnRawLineLength; nPoint++) {
// dSum = 0;
// pCurrent = gpfRawCalibration + nAline * gnRawLineLength + nPoint;
// for (int nLine = 0; nLine < nNumberLinesInChunk >> 1; nLine++) {
// dSum += *(pCurrent);
// pCurrent += gnRawLineLength;
// }
// gpfReferenceEven[nPoint] = 2 * dSum / (nNumberLinesInChunk >> 1);
//}
// calculate reference
chrono::high_resolution_clock::time_point t1 = chrono::high_resolution_clock::now();
// old version
d3Threads.x = 128;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
calculateMean<<<d3Blocks, d3Threads>>>(gpfProcessCalibration, gpfReferenceEven, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
chrono::high_resolution_clock::time_point t2 = chrono::high_resolution_clock::now();
double dTime = std::chrono::duration<double, std::milli>(t2 - t1).count();
fprintf(stdout, "Old kernel calculateMean time = %0.5f ms\n", dTime);
// JL version
// subtract reference
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
subtractMean<<<d3Blocks, d3Threads>>>(gpfProcessCalibration, gpfReferenceEven, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
// forward fft
gpuErrchk(cudaMemset2D(gpcProcessDepthProfile, gnProcessDepthProfilePitch, 0.0, gnProcessDepthProfilePitch, gnProcessNumberLines >> 1));
cufftExecR2C(gchForward, gpfProcessCalibration, gpcProcessDepthProfile);
// calculate mask
nThreadsPerBlock = 512;
calculateMask<<<gnRawLineLength / nThreadsPerBlock, nThreadsPerBlock>>>(gpfCalibrationMask, gnRawLineLength, 50, 100, 16);
// apply mask
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnProcessNumberLines / d3Threads.x;
d3Blocks.y = 1;
d3Blocks.z = 1;
applyMask<<<d3Blocks, d3Threads>>>(gpcProcessDepthProfile, gpfCalibrationMask, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
// reverse fft
cufftExecC2C(gchReverse, gpcProcessDepthProfile, gpcProcessSpectrum, CUFFT_INVERSE);
// calculate phase
d3Threads.x = 32;
d3Threads.y = 1024 / d3Threads.x;
d3Threads.z = 1;
d3Blocks.x = gnRawLineLength / d3Threads.x;
d3Blocks.y = (gnProcessNumberLines >> 1) / d3Threads.y;
d3Blocks.z = 1;
calculatePhase<<<d3Blocks, d3Threads>>>(gpcProcessSpectrum, gpfProcessPhase, nNumberLinesInChunk >> 1, gnRawLineLength);
gpuErrchk(cudaPeekAtLastError());
} // for (nAline
cudaDeviceSynchronize();
} // for (int nRep
// stop C++ timer
auto t_end = std::chrono::high_resolution_clock::now();
// stop timer
cudaEventRecord(stop);
// can do transfers back to host memory here
cudaEventSynchronize(stop);
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
fprintf(stdout, "time in milliseconds = %f\n", elapsed_time_ms / nNumberReps);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
fprintf(stdout, "time in milliseconds = %f\n", milliseconds / nNumberReps);
cudaEventDestroy(start);
cudaEventDestroy(stop);
fprintf(stdout, "\n");
// free memory
cleanup();
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
7eb61b2c785b483520dc3729104aae37159e8774.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* -----------------------------------------------------------------
* Programmer(s): Slaven Peles @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This example is based on cvAdvDiff_bnd
* example by Scott D. Cohen, Alan C.
* Hindmarsh and Radu Serban @ LLNL
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* with the program for its solution by CVODE.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the CVBAND band linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */
#include <cvode/cvode_spils.h> /* access to CVSpils interface */
#include <sundials/sundials_types.h> /* definition of type realtype */
#include <sundials/sundials_math.h> /* definition of ABS and EXP */
#include <nvector/nvector_cuda.h>
/* Real Constants */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
/*
* CUDA kernels
*/
/*
 * Device kernel: evaluate the semi-discrete RHS of the 2-D
 * advection-diffusion equation, udot = u_xx + .5*u_x + u_yy, using the
 * central-difference 5-point stencil on the MX x MY interior grid (see
 * the file header).  One thread handles one grid point; the launch in
 * f() uses a 1-D grid sized to cover MX*MY threads, so the tid bound
 * check guards the grid tail.  hordc/verdc are the horizontal/vertical
 * diffusion coefficients and horac the advection coefficient, as set up
 * in SetUserData().
 */
__global__ void fKernel(const realtype *u, realtype *udot,
sunindextype MX, sunindextype MY,
realtype hordc, realtype horac, realtype verdc)
{
realtype uij, udn, uup, ult, urt, hdiff, hadv, vdiff;
sunindextype i, j, tid;
/* Loop over all grid points. */
tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < MX*MY) {
/* i indexes x (stride MY in the flat array), j indexes y. */
i = tid/MY;
j = tid%MY;
uij = u[tid];
/* Homogeneous Dirichlet boundaries: off-grid neighbors contribute ZERO. */
udn = (j == 0) ? ZERO : u[tid - 1];
uup = (j == MY-1) ? ZERO : u[tid + 1];
ult = (i == 0) ? ZERO : u[tid - MY];
urt = (i == MX-1) ? ZERO : u[tid + MY];
/* Set diffusion and advection terms and load into udot */
hdiff = hordc*(ult - TWO*uij + urt);
hadv = horac*(urt - ult);
vdiff = verdc*(uup - TWO*uij + udn);
udot[tid] = hdiff + hadv + vdiff;
}
}
/*
 * Device kernel: form the Jacobian-vector product Jv for the same
 * 5-point stencil as fKernel.  Each thread computes one entry of Jv:
 * the diagonal term plus the coupling to each of the four neighbors
 * that exists (boundary neighbors are skipped, matching the homogeneous
 * Dirichlet treatment in fKernel).  Launched from jtv() with a 1-D grid
 * covering MX*MY threads; the tid bound check guards the tail.
 */
__global__ void jtvKernel(const realtype *vdata, realtype *Jvdata,
sunindextype MX, sunindextype MY,
realtype hordc, realtype horac, realtype verdc)
{
sunindextype i, j, tid;
/* Loop over all grid points. */
tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < MX*MY) {
/* i indexes x (stride MY in the flat array), j indexes y. */
i = tid/MY;
j = tid%MY;
/* set the tid-th element of Jv: diagonal first, then in-range neighbors */
Jvdata[tid] = -TWO*(verdc+hordc) * vdata[tid];
if (i != 0) Jvdata[tid] += (hordc - horac) * vdata[tid-MY];
if (i != MX-1) Jvdata[tid] += (hordc + horac) * vdata[tid+MY];
if (j != 0) Jvdata[tid] += verdc * vdata[tid-1];
if (j != MY-1) Jvdata[tid] += verdc * vdata[tid+1];
}
}
/* Type : _UserData (contains model and discretization parameters).
 * Allocated and filled by SetUserData(); passed to the solver callbacks
 * through CVodeSetUserData(). */
struct _UserData {
sunindextype MX, MY, NEQ; /* interior mesh dimensions; NEQ = MX*MY unknowns */
realtype dx, dy, XMAX, YMAX; /* grid spacings and domain upper bounds */
realtype hdcoef, hacoef, vdcoef; /* horiz. diffusion, advection, vert. diffusion coefficients */
};
typedef _UserData *UserData;
/* Problem setup and initialization functions */
static UserData SetUserData(int argc, char** argv);
static void SetIC(N_Vector u, UserData data);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int jtv(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *user_data, N_Vector tmp);
/* Private Helper Functions */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
/*
 * Driver: allocate user data, create the GPU-backed solution vector,
 * configure CVODE (BDF method, SPGMR linear solver without
 * preconditioning, user-supplied Jacobian-times-vector routine) and
 * integrate over NOUT output times, printing max.norm(u) at each.
 * Returns 0 on success, 1 on any setup failure.
 */
int main(int argc, char** argv)
{
  realtype reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, retval;
  long int nst;

  u = NULL;
  data = NULL;
  LS = NULL;
  cvode_mem = NULL;

  /* Set model parameters */
  data = SetUserData(argc, argv);
  if(check_retval((void *)data, "malloc", 2)) return(1);

  reltol = ZERO;  /* Set the tolerances */
  abstol = ATOL;

  /* Create a CUDA vector with initial values */
  u = N_VNew_Cuda(data->NEQ);  /* Allocate u vector */
  if(check_retval((void*)u, "N_VNew_Cuda", 0)) return(1);

  SetIC(u, data);  /* Initialize u vector */

  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula */
  cvode_mem = CVodeCreate(CV_BDF);
  if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);

  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the initial time T0, and
   * the initial dependent variable vector u. */
  retval = CVodeInit(cvode_mem, f, T0, u);
  if(check_retval(&retval, "CVodeInit", 1)) return(1);

  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  retval = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);

  /* Set the pointer to user-defined data */
  retval = CVodeSetUserData(cvode_mem, data);
  if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);

  /* Create SPGMR solver structure without preconditioning
   * and the default maximum Krylov dimension */
  LS = SUNSPGMR(u, PREC_NONE, 0);
  /* BUGFIX: SUNSPGMR returns a pointer, not a status code; the original
   * code checked the stale integer 'retval' from the previous call.
   * Check the returned pointer (opt 0) instead. */
  if(check_retval((void *)LS, "SUNSPGMR", 0)) return(1);

  /* Set CVSpils linear solver to LS */
  retval = CVSpilsSetLinearSolver(cvode_mem, LS);
  if(check_retval(&retval, "CVSpilsSetLinearSolver", 1)) return(1);

  /* Set the Jacobian-times-vector function */
  retval = CVSpilsSetJacTimes(cvode_mem, NULL, jtv);
  if(check_retval(&retval, "CVSpilsSetJacTimesVecFn", 1)) return(1);

  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax, data);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_retval(&retval, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    retval = CVodeGetNumSteps(cvode_mem, &nst);
    check_retval(&retval, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }

  PrintFinalStats(cvode_mem);  /* Print some final statistics */

  N_VDestroy(u);          /* Free the u vector */
  CVodeFree(&cvode_mem);  /* Free the integrator memory */
  SUNLinSolFree(LS);      /* BUGFIX: free the linear solver (was leaked) */
  free(data);             /* Free the user data */

  return(0);
}
/*
*-------------------------------------------
* Problem setup and initialization functions
*-------------------------------------------
*/
/* Allocate and populate the model/discretization parameters.
 * Fixes the interior mesh at 10 x 5 on the domain [0,2] x [0,1] and
 * derives the grid spacings and finite-difference coefficients used by
 * the kernels.  argc/argv are currently unused.  Returns NULL if the
 * allocation fails (after printing an error via check_retval). */
UserData SetUserData(int argc, char *argv[])
{
  UserData ud;

  /* Allocate the user data structure */
  ud = (UserData) malloc(sizeof *ud);
  if(check_retval((void*) ud, "AllocUserData", 2)) return(NULL);

  /* Mesh size and domain boundaries */
  ud->MX   = 10;
  ud->MY   = 5;
  ud->NEQ  = ud->MX * ud->MY;
  ud->XMAX = RCONST(2.0);
  ud->YMAX = RCONST(1.0);

  /* Grid spacings (MX+2 x MY+2 grid with boundaries eliminated) */
  ud->dx = ud->XMAX/(ud->MX + 1);
  ud->dy = ud->YMAX/(ud->MY + 1);

  /* Central-difference coefficients: horizontal diffusion, advection,
     vertical diffusion */
  ud->hdcoef = ONE/(ud->dx*ud->dx);
  ud->hacoef = HALF/(TWO*ud->dx);
  ud->vdcoef = ONE/(ud->dy*ud->dy);

  return ud;
}
/* Set initial conditions in u vector.
 * Writes u(x,y) = x(XMAX-x) y(YMAX-y) exp(5xy) at every interior grid
 * point into the host-side array of the CUDA vector, then copies the
 * result to the device. */
static void SetIC(N_Vector u, UserData data)
{
  /* Host-side view of the solution vector */
  realtype *host = N_VGetHostArrayPointer_Cuda(u);
  sunindextype idx;

  for (idx = 0; idx < data->NEQ; idx++) {
    /* Flat index -> (x,y) coordinates; +1 skips the eliminated boundary */
    realtype x = (idx / data->MY + 1) * data->dx;
    realtype y = (idx % data->MY + 1) * data->dy;
    host[idx] = x*(data->XMAX - x)*y*(data->YMAX - y)*SUNRexp(FIVE*x*y);
  }

  /* Push the initial profile to device memory */
  N_VCopyToDevice_Cuda(u);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u).
 * Launches fKernel with a 1-D grid covering all MX*MY unknowns (the
 * kernel bounds-checks the tail).  Returns 0 on success, or -1
 * (unrecoverable failure, per the CVODE RHS convention) if the kernel
 * launch was rejected. */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract device pointers to vector data */
  const realtype *udata = N_VGetDeviceArrayPointer_Cuda(u);
  realtype *dudata = N_VGetDeviceArrayPointer_Cuda(udot);

  unsigned block = 256;
  unsigned grid = (MX*MY + block - 1) / block;  /* ceil-div */
  hipLaunchKernelGGL(( fKernel), dim3(grid),dim3(block), 0, 0, udata, dudata, MX, MY, hordc, horac, verdc);

  /* BUGFIX: surface kernel-launch errors instead of silently ignoring
   * them; a negative return tells CVODE the RHS evaluation failed. */
  if (hipGetLastError() != hipSuccess) return(-1);

  return(0);
}
/* Jacobian-times-vector routine.
 * Zeroes Jv, then launches jtvKernel with a 1-D grid covering all MX*MY
 * unknowns.  Returns 0 on success, or -1 (unrecoverable failure) if the
 * kernel launch was rejected. */
static int jtv(N_Vector v, N_Vector Jv, realtype t,
               N_Vector u, N_Vector fu,
               void *user_data, N_Vector tmp)
{
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract device pointers to vector data */
  const realtype *vdata = N_VGetDeviceArrayPointer_Cuda(v);
  realtype *Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);

  unsigned block = 256;
  unsigned grid = (MX*MY + block - 1) / block;  /* ceil-div */

  /* Defensive clear; the kernel overwrites every entry it owns. */
  N_VConst(ZERO, Jv);

  hipLaunchKernelGGL(( jtvKernel), dim3(grid),dim3(block), 0, 0, vdata, Jvdata, MX, MY, hordc, horac, verdc);

  /* BUGFIX: surface kernel-launch errors instead of silently ignoring
   * them; a negative return tells CVODE the Jv evaluation failed. */
  if (hipGetLastError() != hipSuccess) return(-1);

  return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Print first lines of output (problem description).
 * BUGFIX: sunindextype may be configured as a 64-bit integer in SUNDIALS
 * builds, in which case printing it with %d is undefined behavior.  The
 * mesh sizes are cast to long and printed with %ld instead. */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %ld X %ld\n", (long) data->MX, (long) data->MY);
  printf("Total system size = %ld\n", (long) data->NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n",
         reltol, abstol);
  printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n",
         reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif
  return;
}
/* Print the current time, solution max norm and cumulative step count.
 * The three preprocessor branches differ only in the printf length
 * modifiers required by the configured realtype precision. */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
return;
}
/* Get and print some final statistics.
 * Queries the CVODE core and the CVSpils (SPGMR) interface for workspace
 * sizes and cumulative counters (steps, RHS evaluations, nonlinear and
 * linear iterations, failures) and prints them as a fixed table.  Each
 * query's status is checked, but a failed query only prints a message;
 * the remaining queries are still attempted. */
static void PrintFinalStats(void *cvode_mem)
{
long lenrw, leniw ;
long lenrwLS, leniwLS;
long int nst, nfe, nsetups, nni, ncfn, netf;
long int nli, npe, nps, ncfl, nfeLS;
int retval;
/* Integrator workspace and counters */
retval = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw);
check_retval(&retval, "CVodeGetWorkSpace", 1);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
/* Linear-solver (CVSpils/SPGMR) workspace and counters */
retval = CVSpilsGetWorkSpace(cvode_mem, &lenrwLS, &leniwLS);
check_retval(&retval, "CVSpilsGetWorkSpace", 1);
retval = CVSpilsGetNumLinIters(cvode_mem, &nli);
check_retval(&retval, "CVSpilsGetNumLinIters", 1);
retval = CVSpilsGetNumPrecEvals(cvode_mem, &npe);
check_retval(&retval, "CVSpilsGetNumPrecEvals", 1);
retval = CVSpilsGetNumPrecSolves(cvode_mem, &nps);
check_retval(&retval, "CVSpilsGetNumPrecSolves", 1);
retval = CVSpilsGetNumConvFails(cvode_mem, &ncfl);
check_retval(&retval, "CVSpilsGetNumConvFails", 1);
retval = CVSpilsGetNumRhsEvals(cvode_mem, &nfeLS);
check_retval(&retval, "CVSpilsGetNumRhsEvals", 1);
/* Summary table */
printf("\nFinal Statistics.. \n\n");
printf("lenrw = %5ld leniw = %5ld\n", lenrw, leniw);
printf("lenrwLS = %5ld leniwLS = %5ld\n", lenrwLS, leniwLS);
printf("nst = %5ld\n" , nst);
printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS);
printf("nni = %5ld nli = %5ld\n" , nni, nli);
printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf);
printf("npe = %5ld nps = %5ld\n" , npe, nps);
printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl);
return;
}
/* Check a SUNDIALS-style return value.
 * opt == 0 : returnvalue is a pointer from a SUNDIALS constructor;
 *            NULL means allocation failed.
 * opt == 1 : returnvalue points to an int status; negative means failure.
 * opt == 2 : returnvalue is a pointer from a plain allocator (malloc);
 *            NULL means out of memory.
 * Prints a diagnostic to stderr and returns 1 on failure, 0 otherwise. */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
  /* SUNDIALS constructor returned NULL - no memory allocated */
  if (opt == 0 && returnvalue == NULL) {
    fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  /* Integer status: negative values signal failure */
  if (opt == 1) {
    int *retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
    return(0);
  }

  /* Plain allocation returned NULL - no memory allocated */
  if (opt == 2 && returnvalue == NULL) {
    fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
            funcname);
    return(1);
  }

  return(0);
}
| 7eb61b2c785b483520dc3729104aae37159e8774.cu | /*
* -----------------------------------------------------------------
* Programmer(s): Slaven Peles @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This example is based on cvAdvDiff_bnd
* example by Scott D. Cohen, Alan C.
* Hindmarsh and Radu Serban @ LLNL
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem with a banded Jacobian,
* with the program for its solution by CVODE.
* The problem is the semi-discrete form of the advection-diffusion
* equation in 2-D:
* du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2
* on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time
* interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions
* are posed, and the initial condition is
* u(x,y,t=0) = x(2-x)y(1-y)exp(5xy).
* The PDE is discretized on a uniform MX+2 by MY+2 grid with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX*MY.
* This program solves the problem with the BDF method, Newton
* iteration with the CVBAND band linear solver, and a user-supplied
* Jacobian routine.
* It uses scalar relative and absolute tolerances.
* Output is printed at t = .1, .2, ..., 1.
* Run statistics (optional outputs) are printed at the end.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <sunlinsol/sunlinsol_spgmr.h> /* access to SPGMR SUNLinearSolver */
#include <cvode/cvode_spils.h> /* access to CVSpils interface */
#include <sundials/sundials_types.h> /* definition of type realtype */
#include <sundials/sundials_math.h> /* definition of ABS and EXP */
#include <nvector/nvector_cuda.h>
/* Real Constants */
#define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.1) /* first output time */
#define DTOUT RCONST(0.1) /* output time increment */
#define NOUT 10 /* number of output times */
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define TWO RCONST(2.0)
#define FIVE RCONST(5.0)
/*
* CUDA kernels
*/
/* Device kernel: evaluate the semi-discrete RHS udot = u_xx + .5*u_x + u_yy
 * with the central-difference 5-point stencil on the MX x MY interior grid.
 * One thread per grid point; the launcher (f) supplies a 1-D grid covering
 * MX*MY threads, so out-of-range threads simply return. */
__global__ void fKernel(const realtype *u, realtype *udot,
                        sunindextype MX, sunindextype MY,
                        realtype hordc, realtype horac, realtype verdc)
{
  sunindextype tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= MX*MY) return;   /* guard the grid tail */

  sunindextype i = tid/MY;    /* x index (stride MY in the flat array) */
  sunindextype j = tid%MY;    /* y index */

  realtype uij = u[tid];
  /* Homogeneous Dirichlet boundaries: off-grid neighbors are ZERO. */
  realtype udn = (j == 0)    ? ZERO : u[tid - 1];
  realtype uup = (j == MY-1) ? ZERO : u[tid + 1];
  realtype ult = (i == 0)    ? ZERO : u[tid - MY];
  realtype urt = (i == MX-1) ? ZERO : u[tid + MY];

  /* Horizontal diffusion + advection + vertical diffusion */
  realtype hdiff = hordc*(ult - TWO*uij + urt);
  realtype hadv  = horac*(urt - ult);
  realtype vdiff = verdc*(uup - TWO*uij + udn);
  udot[tid] = hdiff + hadv + vdiff;
}
/* Device kernel: form the Jacobian-vector product Jv for the same 5-point
 * stencil as fKernel.  Each thread owns one entry: the diagonal term plus
 * the coupling to each neighbor that exists (boundary neighbors skipped).
 * Launched by jtv() with a 1-D grid covering MX*MY threads. */
__global__ void jtvKernel(const realtype *vdata, realtype *Jvdata,
                          sunindextype MX, sunindextype MY,
                          realtype hordc, realtype horac, realtype verdc)
{
  sunindextype tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= MX*MY) return;   /* guard the grid tail */

  sunindextype i = tid/MY;    /* x index */
  sunindextype j = tid%MY;    /* y index */

  /* Accumulate in a register, then store once (thread owns this entry). */
  realtype sum = -TWO*(verdc+hordc) * vdata[tid];
  if (i != 0)    sum += (hordc - horac) * vdata[tid-MY];
  if (i != MX-1) sum += (hordc + horac) * vdata[tid+MY];
  if (j != 0)    sum += verdc * vdata[tid-1];
  if (j != MY-1) sum += verdc * vdata[tid+1];
  Jvdata[tid] = sum;
}
/* Type : _UserData (contains model and discretization parameters).
 * Allocated and filled by SetUserData(); passed to the solver callbacks
 * through CVodeSetUserData(). */
struct _UserData {
sunindextype MX, MY, NEQ; /* interior mesh dimensions; NEQ = MX*MY unknowns */
realtype dx, dy, XMAX, YMAX; /* grid spacings and domain upper bounds */
realtype hdcoef, hacoef, vdcoef; /* horiz. diffusion, advection, vert. diffusion coefficients */
};
typedef _UserData *UserData;
/* Problem setup and initialization functions */
static UserData SetUserData(int argc, char** argv);
static void SetIC(N_Vector u, UserData data);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
static int jtv(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *user_data, N_Vector tmp);
/* Private Helper Functions */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data);
static void PrintOutput(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/*
*-------------------------------
* Main Program
*-------------------------------
*/
/*
 * Driver: allocate user data, create the CUDA-backed solution vector,
 * configure CVODE (BDF method, SPGMR linear solver without
 * preconditioning, user-supplied Jacobian-times-vector routine) and
 * integrate over NOUT output times, printing max.norm(u) at each.
 * Returns 0 on success, 1 on any setup failure.
 */
int main(int argc, char** argv)
{
  realtype reltol, abstol, t, tout, umax;
  N_Vector u;
  UserData data;
  SUNLinearSolver LS;
  void *cvode_mem;
  int iout, retval;
  long int nst;

  u = NULL;
  data = NULL;
  LS = NULL;
  cvode_mem = NULL;

  /* Set model parameters */
  data = SetUserData(argc, argv);
  if(check_retval((void *)data, "malloc", 2)) return(1);

  reltol = ZERO;  /* Set the tolerances */
  abstol = ATOL;

  /* Create a CUDA vector with initial values */
  u = N_VNew_Cuda(data->NEQ);  /* Allocate u vector */
  if(check_retval((void*)u, "N_VNew_Cuda", 0)) return(1);

  SetIC(u, data);  /* Initialize u vector */

  /* Call CVodeCreate to create the solver memory and specify the
   * Backward Differentiation Formula */
  cvode_mem = CVodeCreate(CV_BDF);
  if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);

  /* Call CVodeInit to initialize the integrator memory and specify the
   * user's right hand side function in u'=f(t,u), the initial time T0, and
   * the initial dependent variable vector u. */
  retval = CVodeInit(cvode_mem, f, T0, u);
  if(check_retval(&retval, "CVodeInit", 1)) return(1);

  /* Call CVodeSStolerances to specify the scalar relative tolerance
   * and scalar absolute tolerance */
  retval = CVodeSStolerances(cvode_mem, reltol, abstol);
  if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);

  /* Set the pointer to user-defined data */
  retval = CVodeSetUserData(cvode_mem, data);
  if(check_retval(&retval, "CVodeSetUserData", 1)) return(1);

  /* Create SPGMR solver structure without preconditioning
   * and the default maximum Krylov dimension */
  LS = SUNSPGMR(u, PREC_NONE, 0);
  /* BUGFIX: SUNSPGMR returns a pointer, not a status code; the original
   * code checked the stale integer 'retval' from the previous call.
   * Check the returned pointer (opt 0) instead. */
  if(check_retval((void *)LS, "SUNSPGMR", 0)) return(1);

  /* Set CVSpils linear solver to LS */
  retval = CVSpilsSetLinearSolver(cvode_mem, LS);
  if(check_retval(&retval, "CVSpilsSetLinearSolver", 1)) return(1);

  /* Set the Jacobian-times-vector function */
  retval = CVSpilsSetJacTimes(cvode_mem, NULL, jtv);
  if(check_retval(&retval, "CVSpilsSetJacTimesVecFn", 1)) return(1);

  /* In loop over output points: call CVode, print results, test for errors */
  umax = N_VMaxNorm(u);
  PrintHeader(reltol, abstol, umax, data);
  for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
    retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
    if(check_retval(&retval, "CVode", 1)) break;
    umax = N_VMaxNorm(u);
    retval = CVodeGetNumSteps(cvode_mem, &nst);
    check_retval(&retval, "CVodeGetNumSteps", 1);
    PrintOutput(t, umax, nst);
  }

  PrintFinalStats(cvode_mem);  /* Print some final statistics */

  N_VDestroy(u);          /* Free the u vector */
  CVodeFree(&cvode_mem);  /* Free the integrator memory */
  SUNLinSolFree(LS);      /* BUGFIX: free the linear solver (was leaked) */
  free(data);             /* Free the user data */

  return(0);
}
/*
*-------------------------------------------
* Problem setup and initialization functions
*-------------------------------------------
*/
/* Set model and discretization parameters.
 * Allocates the UserData struct and fills in the interior mesh size
 * (10 x 5), domain bounds ([0,2] x [0,1]), grid spacings and the
 * finite-difference coefficients used by the kernels.  argc/argv are
 * currently unused.  Returns NULL on allocation failure (after printing
 * an error via check_retval). */
UserData SetUserData(int argc, char *argv[])
{
const sunindextype MX = 10;
const sunindextype MY = 5;
const realtype XMAX = RCONST(2.0); /* domain boundaries */
const realtype YMAX = RCONST(1.0);
/* Allocate user data structure */
UserData ud = (UserData) malloc(sizeof *ud);
if(check_retval((void*) ud, "AllocUserData", 2)) return(NULL);
ud->MX = MX;
ud->MY = MY;
ud->NEQ = MX*MY;
ud->XMAX = XMAX;
ud->YMAX = YMAX;
ud->dx = XMAX/(MX+1); /* Set grid coefficients in data */
ud->dy = YMAX/(MY+1);
ud->hdcoef = ONE/(ud->dx*ud->dx); /* horizontal diffusion coefficient */
ud->hacoef = HALF/(TWO*ud->dx); /* advection coefficient (.5 du/dx, central diff) */
ud->vdcoef = ONE/(ud->dy*ud->dy); /* vertical diffusion coefficient */
return ud;
}
/* Set initial conditions in u vector.
 * Writes u(x,y,t=0) = x(XMAX-x) y(YMAX-y) exp(5xy) at every interior
 * grid point into the host-side array of the CUDA vector, then copies
 * the result to the device. */
static void SetIC(N_Vector u, UserData data)
{
/* Extract needed constants from data */
const realtype dx = data->dx;
const realtype dy = data->dy;
const realtype xmax = data->XMAX;
const realtype ymax = data->YMAX;
const sunindextype MY = data->MY;
const sunindextype NEQ = data->NEQ;
/* Extract pointer to solution vector data on the host */
realtype *udata = N_VGetHostArrayPointer_Cuda(u);
sunindextype i, j, tid;
realtype x, y;
/* Load initial profile into u vector */
for (tid=0; tid < NEQ; tid++) {
/* Flat index -> (x,y); the +1 skips the eliminated boundary layer. */
i = tid / MY;
j = tid % MY;
x = (i+1)*dx;
y = (j+1)*dy;
udata[tid] = x*(xmax - x)*y*(ymax - y)*SUNRexp(FIVE*x*y);
}
/* Push the initial profile to device memory */
N_VCopyToDevice_Cuda(u);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* f routine. Compute f(t,u).
 * Launches fKernel with a 1-D grid covering all MX*MY unknowns (the
 * kernel bounds-checks the tail).  Returns 0 on success, or -1
 * (unrecoverable failure, per the CVODE RHS convention) if the kernel
 * launch was rejected. */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract device pointers to vector data */
  const realtype *udata = N_VGetDeviceArrayPointer_Cuda(u);
  realtype *dudata = N_VGetDeviceArrayPointer_Cuda(udot);

  unsigned block = 256;
  unsigned grid = (MX*MY + block - 1) / block;  /* ceil-div */
  fKernel<<<grid,block>>>(udata, dudata, MX, MY, hordc, horac, verdc);

  /* BUGFIX: surface kernel-launch errors instead of silently ignoring
   * them; a negative return tells CVODE the RHS evaluation failed. */
  if (cudaGetLastError() != cudaSuccess) return(-1);

  return(0);
}
/* Jacobian-times-vector routine.
 * Zeroes Jv, then launches jtvKernel with a 1-D grid covering all MX*MY
 * unknowns.  Returns 0 on success, or -1 (unrecoverable failure) if the
 * kernel launch was rejected. */
static int jtv(N_Vector v, N_Vector Jv, realtype t,
               N_Vector u, N_Vector fu,
               void *user_data, N_Vector tmp)
{
  UserData data = (UserData) user_data;

  /* Extract needed constants from data */
  const sunindextype MX = data->MX;
  const sunindextype MY = data->MY;
  const realtype hordc = data->hdcoef;
  const realtype horac = data->hacoef;
  const realtype verdc = data->vdcoef;

  /* Extract device pointers to vector data */
  const realtype *vdata = N_VGetDeviceArrayPointer_Cuda(v);
  realtype *Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);

  unsigned block = 256;
  unsigned grid = (MX*MY + block - 1) / block;  /* ceil-div */

  /* Defensive clear; the kernel overwrites every entry it owns. */
  N_VConst(ZERO, Jv);

  jtvKernel<<<grid,block>>>(vdata, Jvdata, MX, MY, hordc, horac, verdc);

  /* BUGFIX: surface kernel-launch errors instead of silently ignoring
   * them; a negative return tells CVODE the Jv evaluation failed. */
  if (cudaGetLastError() != cudaSuccess) return(-1);

  return(0);
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
/* Print first lines of output (problem description).
 * BUGFIX: sunindextype may be configured as a 64-bit integer in SUNDIALS
 * builds, in which case printing it with %d is undefined behavior.  The
 * mesh sizes are cast to long and printed with %ld instead. */
static void PrintHeader(realtype reltol, realtype abstol, realtype umax, UserData data)
{
  printf("\n2-D Advection-Diffusion Equation\n");
  printf("Mesh dimensions = %ld X %ld\n", (long) data->MX, (long) data->MY);
  printf("Total system size = %ld\n", (long) data->NEQ);
#if defined(SUNDIALS_EXTENDED_PRECISION)
  printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n",
         reltol, abstol);
  printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n",
         reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#else
  printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol);
  printf("At t = %g max.norm(u) =%14.6e \n", T0, umax);
#endif
  return;
}
/* Print the current time, solution max norm and cumulative step count.
 * The three preprocessor branches differ only in the printf length
 * modifiers required by the configured realtype precision. */
static void PrintOutput(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst);
#endif
return;
}
/* Get and print some final statistics.
 * Queries the CVODE core and the CVSpils (SPGMR) interface for workspace
 * sizes and cumulative counters (steps, RHS evaluations, nonlinear and
 * linear iterations, failures) and prints them as a fixed table.  Each
 * query's status is checked, but a failed query only prints a message;
 * the remaining queries are still attempted. */
static void PrintFinalStats(void *cvode_mem)
{
long lenrw, leniw ;
long lenrwLS, leniwLS;
long int nst, nfe, nsetups, nni, ncfn, netf;
long int nli, npe, nps, ncfl, nfeLS;
int retval;
/* Integrator workspace and counters */
retval = CVodeGetWorkSpace(cvode_mem, &lenrw, &leniw);
check_retval(&retval, "CVodeGetWorkSpace", 1);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
/* Linear-solver (CVSpils/SPGMR) workspace and counters */
retval = CVSpilsGetWorkSpace(cvode_mem, &lenrwLS, &leniwLS);
check_retval(&retval, "CVSpilsGetWorkSpace", 1);
retval = CVSpilsGetNumLinIters(cvode_mem, &nli);
check_retval(&retval, "CVSpilsGetNumLinIters", 1);
retval = CVSpilsGetNumPrecEvals(cvode_mem, &npe);
check_retval(&retval, "CVSpilsGetNumPrecEvals", 1);
retval = CVSpilsGetNumPrecSolves(cvode_mem, &nps);
check_retval(&retval, "CVSpilsGetNumPrecSolves", 1);
retval = CVSpilsGetNumConvFails(cvode_mem, &ncfl);
check_retval(&retval, "CVSpilsGetNumConvFails", 1);
retval = CVSpilsGetNumRhsEvals(cvode_mem, &nfeLS);
check_retval(&retval, "CVSpilsGetNumRhsEvals", 1);
/* Summary table */
printf("\nFinal Statistics.. \n\n");
printf("lenrw = %5ld leniw = %5ld\n", lenrw, leniw);
printf("lenrwLS = %5ld leniwLS = %5ld\n", lenrwLS, leniwLS);
printf("nst = %5ld\n" , nst);
printf("nfe = %5ld nfeLS = %5ld\n" , nfe, nfeLS);
printf("nni = %5ld nli = %5ld\n" , nni, nli);
printf("nsetups = %5ld netf = %5ld\n" , nsetups, netf);
printf("npe = %5ld nps = %5ld\n" , npe, nps);
printf("ncfn = %5ld ncfl = %5ld\n\n", ncfn, ncfl);
return;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns an integer value so check if
retval >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
/* Validate a SUNDIALS call result and report failures on stderr.
 *
 * opt == 0 : returnvalue is a pointer from a SUNDIALS allocator; NULL
 *            means the allocation failed.
 * opt == 1 : returnvalue points at an int status code; a negative value
 *            means the call failed.
 * opt == 2 : returnvalue is a pointer from a plain allocator; NULL
 *            means out of memory.
 *
 * Returns 1 on failure (after printing a diagnostic), 0 otherwise. */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
  int errcode;

  switch (opt) {
  case 0:
    /* SUNDIALS allocator: NULL pointer signals failure */
    if (returnvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  case 1:
    /* integer status code: negative signals failure */
    errcode = *(int *) returnvalue;
    if (errcode < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, errcode);
      return(1);
    }
    break;
  case 2:
    /* plain memory allocation: NULL pointer signals failure */
    if (returnvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  }
  return(0);
}
|
95cfca6f556209af878348588160c26d0d0a1dd7.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef CUDA_H
#define CUDA_H
#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include "helper_math.h"
#include "math_constants.h"
#define USE_TEX 0
#if USE_TEX
#define FETCH(t, i) tex1Dfetch(t##Tex, i)
#else
#define FETCH(t, i) t[i]
#endif
#include "hip/hip_vector_types.h"
typedef unsigned int uint;
extern "C"
{
// Fill `points` with numofpoints pseudo-random positions in [0,1], packed
// four floats per point (x, y, z, w) with w = 0. `points` must hold at
// least numofpoints*4 floats. Uses unseeded rand(), so the same sequence
// is produced on every run (the srand call is deliberately commented out).
// NOTE(review): the original comment said "2D points" but three random
// coordinates are written; downstream code appears to use x/y only.
void generate_random_points(float *points, int numofpoints)
{
// sequentially generate some random points in the unit cube
std::cout << "generating points\n" << std::endl;
for(int i = 0; i < numofpoints; ++i)
{
//srand (time(NULL));
points[i*4] = float(rand()) / RAND_MAX;
points[i*4+1] = float(rand()) / RAND_MAX;
points[i*4+2] = float(rand()) / RAND_MAX;
points[i*4+3] = 0;
}
}
// Allocate `size` bytes of device memory into *devPtr; checkCudaErrors
// aborts with a diagnostic on failure. Caller owns the memory (hipFree).
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(hipMalloc(devPtr, size));
}
// Ceiling division for unsigned integers: smallest q with q*b >= a.
// Written with an explicit remainder test so a + b - 1 cannot overflow.
uint iDivUp(uint a, uint b)
{
    uint quotient = a / b;
    if (a % b != 0u)
        ++quotient;
    return quotient;
}
// calculate grid hash value for each particle
// calculate position in uniform grid
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// Compute a launch configuration for n elements: numThreads is the block
// size (capped at n) and numBlocks covers n with ceiling division.
// NOTE(review): n == 0 makes numThreads 0 and iDivUp divides by zero --
// callers must pass n >= 1.
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
// Map a world-space position to integer grid-cell coordinates relative to
// worldOrigin; floor() keeps positions left of the origin in negative
// cells (wrapped later by calcGridHash's masking).
__device__ int3 calcGridPos(float3 p, float3 worldOrigin, float3 cellSize)
{
int3 gridPos;
gridPos.x = floor((p.x - worldOrigin.x) / cellSize.x);
gridPos.y = floor((p.y - worldOrigin.y) / cellSize.y);
gridPos.z = floor((p.z - worldOrigin.z) / cellSize.z);
return gridPos;
}
// Flatten grid-cell coordinates into a linear cell index (z-major).
// Precondition: every gridSize component is a power of two -- the bitwise
// AND below wraps out-of-range (including negative) coordinates around
// the grid instead of clamping them.
__device__ uint calcGridHash(int3 gridPos, uint3 gridSize)
{
gridPos.x = gridPos.x & (gridSize.x-1); // wrap grid, assumes size is power of 2
gridPos.y = gridPos.y & (gridSize.y-1);
gridPos.z = gridPos.z & (gridSize.z-1);
return gridPos.z*gridSize.y*gridSize.x + gridPos.y*gridSize.x + gridPos.x;
}
// Kernel: one thread per particle. Writes each particle's grid-cell hash
// and its own index (an identity permutation that a later sort-by-hash
// turns into the sorted order).
__global__
void calcHashD(uint *gridParticleHash, // output
uint *gridParticleIndex, // output
float4 *pos, // input: positions
uint numParticles,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize)
{
uint index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
// volatile forces a single full float4 load of the position
volatile float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z),worldOrigin, cellSize);
uint hash = calcGridHash(gridPos, gridSize);
// store grid hash and particle index
gridParticleHash[index] = hash;
gridParticleIndex[index] = index;
}
// Host wrapper: launch calcHashD with 256-thread blocks over all
// particles, then surface any launch error via getLastCudaError.
void calcHash(uint *gridParticleHash,
uint *gridParticleIndex,
float *pos,
int numParticles,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads) , 0, 0, gridParticleHash,
gridParticleIndex,
(float4 *) pos,numParticles,worldOrigin,gridSize,cellSize);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// sorting particles by hash
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
//
// Sort particle indices by their cell hash in place on the device
// (thrust radix sort over raw device pointers). After this call the two
// arrays are a matched (hash, original-index) pairing in hash order.
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// rearrange particle data into sorted order, and find the start of each cell
// in the sorted hash array
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// Kernel: given hash-sorted particles, (a) record for every occupied cell
// the [start, end) range of its particles in cellStart/cellEnd, and
// (b) gather positions into sorted order. Cell boundaries are found by
// comparing each particle's hash with its predecessor's, staged through
// shared memory so each thread loads only one hash.
// Launch requirement: dynamic shared memory of (blockDim.x+1)*sizeof(uint).
// cellStart must be pre-filled with 0xffffffff (empty marker) by the host.
__global__
void reorderDataAndFindCellStartD(uint *cellStart, // output: cell start index
uint *cellEnd, // output: cell end index
float4 *sortedPos, // output: sorted positions
// float4 *sortedVel, // output: sorted velocities
uint *gridParticleHash, // input: sorted grid hashes
uint *gridParticleIndex,// input: sorted particle indices
float4 *oldPos, // input: sorted position array
// float4 *oldVel, // input: sorted velocity array
uint numParticles)
{
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = (blockIdx.x*blockDim.x) + threadIdx.x;
uint hash;
// handle case when no. of particles not multiple of block size
if (index < numParticles)
{
hash = gridParticleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two hash values per thread
sharedHash[threadIdx.x+1] = hash;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = gridParticleHash[index-1];
}
}
// barrier is outside the guard above, so all threads reach it
__syncthreads();
if (index < numParticles)
{
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
// Now use the sorted index to reorder the pos and vel data
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex); // macro does either global read or texture fetch
// float4 vel = FETCH(oldVel, sortedIndex); // see particles_kernel.cuh
sortedPos[index] = pos;
// sortedVel[index] = vel;
}
}
// Host wrapper: mark all cells empty (0xffffffff), then launch
// reorderDataAndFindCellStartD with 256-thread blocks and the dynamic
// shared memory it requires ((blockSize+1) uints). The USE_TEX branches
// bind/unbind the position texture when texture fetches are enabled.
void reorderDataAndFindCellStart(uint *cellStart,
uint *cellEnd,
float *sortedPos,
//float *sortedVel,
uint *gridParticleHash,
uint *gridParticleIndex,
float *oldPos,
//float *oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
checkCudaErrors(hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
checkCudaErrors(hipBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
//checkCudaErrors(hipBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
cellStart,
cellEnd,
(float4 *) sortedPos,
//(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
//(float4 *) oldVel,
numParticles);
getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
checkCudaErrors(hipUnbindTexture(oldPosTex));
//checkCudaErrors(hipUnbindTexture(oldVelTex));
#endif
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// finde neighbour cell particles
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// Overlap test for two spheres: returns 1 when the center distance is
// strictly smaller than the sum of the radii, 0 otherwise.
__device__
int collideSpheres(float3 posA, float3 posB,
                   float radiusA, float radiusB)
{
    // |posB - posA| < radiusA + radiusB  (length from helper_math)
    float3 delta = posB - posA;
    float separation = length(delta);
    float touchDist = radiusA + radiusB;
    return (separation < touchDist) ? 1 : 0;
}
/*
__device__
void collideCell(int3 gridPos,
uint index,
float3 pos,
//float3 vel,
float4 *oldPos,
//float4 *oldVel,
uint *cellStart,
uint *cellEnd,
float particleRadius,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize,
uint *d_neighborList)
{
uint gridHash = calcGridHash(gridPos, gridSize);
// get start of bucket for this cell
uint startIndex = FETCH(cellStart, gridHash);
//float3 force = make_float3(0.0f);
if (startIndex != 0xffffffff) // cell is not empty
{
// iterate over particles in this cell
uint endIndex = FETCH(cellEnd, gridHash);
uint a=0;
for (uint j=startIndex; j<endIndex; j++)
{
if (j != index) // check not colliding with self
{
float3 pos2 = make_float3(FETCH(oldPos, j));
//float3 vel2 = make_float3(FETCH(oldVel, j));
// collide two spheres
if(collideSpheres(pos, pos2, particleRadius,particleRadius))
{
d_neighborList[index*10+a]=d_neighborList[index*10+a]+j;
a=a+1;
}
}
}
}
}*/
// Kernel: one thread per (sorted) particle. Scans the 27 cells around the
// particle's cell and records the ORIGINAL indices of every overlapping
// particle into this particle's row of d_neighborList.
//
// Row layout (rows are in SORTED particle order): slots
// [0, MAX_LISTELEM-2] hold neighbor indices; slot MAX_LISTELEM-1 holds
// the neighbor count (saturated when the row fills). Requires
// MAX_LISTELEM >= 1.
//
// BUGFIX: the previous guard `if (a > MAX_LISTELEM)` let the write below
// land at offset index*MAX_LISTELEM + MAX_LISTELEM -- i.e. into the NEXT
// particle's row -- and also clobbered this row's count slot; the early
// return additionally skipped writing the count. The guard now reserves
// the last slot and always stores the count before returning.
__global__
void collideD(//float4 *newVel, // output: new velocity
float4 *oldPos, // input: sorted positions
//float4 *oldVel, // input: sorted velocities
uint *gridParticleIndex, // input: sorted particle indices
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells,
float particleRadius,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize,
uint *d_neighborList,
uint MAX_LISTELEM)
{
    uint index = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (index >= numParticles) return;
    // read particle data from sorted arrays
    float3 pos = make_float3(FETCH(oldPos, index));
    // get address in grid
    int3 gridPos = calcGridPos(pos, worldOrigin, cellSize);
    uint a = 0; // neighbors recorded so far for this particle
    // examine the 3x3x3 block of cells around this particle
    for (int z=-1; z<=1; z++)
    {
        for (int y=-1; y<=1; y++)
        {
            for (int x=-1; x<=1; x++)
            {
                int3 neighbourPos = gridPos + make_int3(x, y, z);
                uint gridHash = calcGridHash(neighbourPos, gridSize);
                // get start of bucket for this cell
                uint startIndex = FETCH(cellStart, gridHash);
                if (startIndex != 0xffffffff) // cell is not empty
                {
                    // iterate over particles in this cell
                    uint endIndex = FETCH(cellEnd, gridHash);
                    for (uint j=startIndex; j<endIndex; j++)
                    {
                        if (j != index) // check not colliding with self
                        {
                            float3 pos2 = make_float3(FETCH(oldPos, j));
                            // collide two spheres
                            if (collideSpheres(pos, pos2, particleRadius, particleRadius))
                            {
                                if (a >= MAX_LISTELEM - 1)
                                {
                                    // row full: store the saturated count and stop
                                    d_neighborList[index*MAX_LISTELEM + MAX_LISTELEM - 1] = a;
                                    return;
                                }
                                d_neighborList[index*MAX_LISTELEM + a] = gridParticleIndex[j];
                                a = a + 1;
                            }
                        }
                    }
                }
            }
        }
    }
    // last slot of the row stores how many neighbors were written
    d_neighborList[index*MAX_LISTELEM + MAX_LISTELEM - 1] = a;
}
// Host wrapper: launch collideD with 64-thread blocks (one thread per
// particle) and surface any launch error. See collideD for the
// d_neighborList row layout.
void collide(//float *newVel,
float *sortedPos,
//float *sortedVel,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells,
float particleRadius,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize,
uint *d_neighborList,
uint MAX_LISTELEM)
{
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( collideD), dim3(numBlocks), dim3(numThreads) , 0, 0, //(float4 *)newVel,
(float4 *)sortedPos,
//(float4 *)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles,
numCells,
particleRadius,
worldOrigin,
gridSize,
cellSize,
d_neighborList,
MAX_LISTELEM);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
/*
 * Build a fixed-width neighbor list on the GPU for n particles with 2-D
 * coordinates (xp, yp). Two particles are neighbors when their centers
 * are closer than `dis` (each is treated as a sphere of radius dis/2).
 *
 * Output: neighborListoutput[p][0 .. MAX_LIST-2] receives the indices
 * (in original input order) of particle p's neighbors, and
 * neighborListoutput[p][MAX_LIST-1] the neighbor count, as produced by
 * collideD. Rows must be preallocated by the caller with MAX_LIST slots.
 *
 * Fixes vs. the previous revision:
 *  - buffers created with new[] are now released with delete[] (they
 *    were handed to free(), which is undefined behavior);
 *  - the redundant neighborList/tempList staging copies, whose per-row
 *    malloc()s were leaked, are removed -- results are scattered straight
 *    from h_neighborList into neighborListoutput;
 *  - dead `if (0)` debug-print blocks and the buffers that existed only
 *    to feed them are dropped.
 */
void find_neighbor_cuda(int n,double *xp,double *yp,double dis,int **neighborListoutput,int MAX_LIST)
{
    int numParticles = n;
    float particleRadius = dis/2.0f;
    // uniform grid: 64^3 cells, cell edge = one particle diameter.
    // 64 is a power of two, as calcGridHash's wrap-around masking requires.
    int GRID_SIZE = 64;
    uint gridDim = GRID_SIZE;
    uint3 gridSize;
    gridSize.x = gridSize.y = gridSize.z = gridDim;
    float3 worldOrigin;
    // assumes input coordinates lie near the unit box -- TODO confirm with callers
    worldOrigin = make_float3(-0.0450f, -0.0450f, -0.0450f);
    float3 cellSize;
    cellSize = make_float3(particleRadius * 2.0f, particleRadius * 2.0f, particleRadius * 2.0f);
    uint numCells;
    numCells = gridSize.x*gridSize.y*gridSize.z;

    // host-side buffers
    float *m_hPos = new float[numParticles*4];          // positions packed as float4
    uint *m_hGridParticleIndex = new uint[numParticles]; // sort permutation

    // device-side buffers
    float *m_dPos;
    float *m_dSortedPos;
    uint *m_dGridParticleHash;  // grid hash value for each particle
    uint *m_dGridParticleIndex; // original index for each particle
    uint *m_dCellStart;         // start of each cell in the sorted list
    uint *m_dCellEnd;           // end of each cell in the sorted list
    unsigned int memSize = sizeof(float) * 4 * numParticles;
    allocateArray((void **)&m_dPos, memSize);
    allocateArray((void **)&m_dSortedPos, memSize);
    allocateArray((void **)&m_dGridParticleHash, numParticles*sizeof(uint));
    allocateArray((void **)&m_dGridParticleIndex, numParticles*sizeof(uint));
    allocateArray((void **)&m_dCellStart, numCells*sizeof(uint));
    allocateArray((void **)&m_dCellEnd, numCells*sizeof(uint));

    // pack the 2-D input into float4 slots (z = w = 0) and upload
    for (int i = 0; i < numParticles; ++i)
    {
        m_hPos[i*4] = xp[i];
        m_hPos[i*4+1] = yp[i];
        m_hPos[i*4+2] = 0;
        m_hPos[i*4+3] = 0;
    }
    checkCudaErrors(hipMemcpy(m_dPos, m_hPos, sizeof(float)*4*numParticles, hipMemcpyHostToDevice));

    // hash every particle to its grid cell
    calcHash(m_dGridParticleHash, m_dGridParticleIndex, m_dPos,
             numParticles, worldOrigin, gridSize, cellSize);

    // sort particles by hash, then gather positions into sorted order and
    // build the per-cell [start, end) table
    sortParticles(m_dGridParticleHash, m_dGridParticleIndex, numParticles);
    reorderDataAndFindCellStart(m_dCellStart, m_dCellEnd, m_dSortedPos,
                                m_dGridParticleHash, m_dGridParticleIndex,
                                m_dPos, numParticles, numCells);

    // neighbor rows: MAX_LISTELEM uints per particle, last slot = count
    uint MAX_LISTELEM = MAX_LIST;
    uint *h_neighborList = new uint[numParticles*MAX_LISTELEM];
    uint *d_neighborList;
    allocateArray((void **)&d_neighborList, numParticles*MAX_LISTELEM*sizeof(uint));
    hipMemset(d_neighborList, 0, MAX_LISTELEM*numParticles*sizeof(uint));

    collide(m_dSortedPos, m_dGridParticleIndex, m_dCellStart, m_dCellEnd,
            numParticles, numCells, particleRadius, worldOrigin,
            gridSize, cellSize, d_neighborList, MAX_LISTELEM);

    // bring the results and the sort permutation back to the host
    checkCudaErrors(hipMemcpy(m_hGridParticleIndex, m_dGridParticleIndex, sizeof(uint)*numParticles, hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(h_neighborList, d_neighborList, sizeof(uint)*numParticles*MAX_LISTELEM, hipMemcpyDeviceToHost));

    // rows are in sorted order; m_hGridParticleIndex[i] is the original
    // index of sorted particle i, so scatter each row back accordingly
    for (int i = 0; i < numParticles; i++) {
        for (int j = 0; j < (int)MAX_LISTELEM; j++) {
            neighborListoutput[m_hGridParticleIndex[i]][j] = h_neighborList[i*MAX_LISTELEM+j];
        }
    }

    // release host buffers (new[] pairs with delete[]) and device buffers
    delete[] m_hPos;
    delete[] m_hGridParticleIndex;
    delete[] h_neighborList;
    hipFree(m_dPos);
    hipFree(m_dGridParticleHash);
    hipFree(m_dGridParticleIndex);
    hipFree(m_dCellStart);
    hipFree(m_dCellEnd);
    hipFree(m_dSortedPos);
    hipFree(d_neighborList);
    return;
}
} // extern "C"
#endif // PARTICLE_H | 95cfca6f556209af878348588160c26d0d0a1dd7.cu |
#ifndef CUDA_H
#define CUDA_H
#include <thrust/device_vector.h>
#include <thrust/tabulate.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <algorithm>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include "helper_math.h"
#include "math_constants.h"
#define USE_TEX 0
#if USE_TEX
#define FETCH(t, i) tex1Dfetch(t##Tex, i)
#else
#define FETCH(t, i) t[i]
#endif
#include "vector_types.h"
typedef unsigned int uint;
extern "C"
{
// Fill `points` with numofpoints pseudo-random positions in [0,1], packed
// four floats per point (x, y, z, w) with w = 0. `points` must hold at
// least numofpoints*4 floats. Uses unseeded rand(), so the same sequence
// is produced on every run (the srand call is deliberately commented out).
// NOTE(review): the original comment said "2D points" but three random
// coordinates are written; downstream code appears to use x/y only.
void generate_random_points(float *points, int numofpoints)
{
// sequentially generate some random points in the unit cube
std::cout << "generating points\n" << std::endl;
for(int i = 0; i < numofpoints; ++i)
{
//srand (time(NULL));
points[i*4] = float(rand()) / RAND_MAX;
points[i*4+1] = float(rand()) / RAND_MAX;
points[i*4+2] = float(rand()) / RAND_MAX;
points[i*4+3] = 0;
}
}
// Allocate `size` bytes of device memory into *devPtr; checkCudaErrors
// aborts with a diagnostic on failure. Caller owns the memory (cudaFree).
void allocateArray(void **devPtr, size_t size)
{
checkCudaErrors(cudaMalloc(devPtr, size));
}
// Ceiling division for unsigned integers: smallest q with q*b >= a.
// Written with an explicit remainder test so a + b - 1 cannot overflow.
uint iDivUp(uint a, uint b)
{
    uint quotient = a / b;
    if (a % b != 0u)
        ++quotient;
    return quotient;
}
// calculate grid hash value for each particle
// calculate position in uniform grid
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// Compute a launch configuration for n elements: numThreads is the block
// size (capped at n) and numBlocks covers n with ceiling division.
// NOTE(review): n == 0 makes numThreads 0 and iDivUp divides by zero --
// callers must pass n >= 1.
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
// Map a world-space position to integer grid-cell coordinates relative to
// worldOrigin; floor() keeps positions left of the origin in negative
// cells (wrapped later by calcGridHash's masking).
__device__ int3 calcGridPos(float3 p, float3 worldOrigin, float3 cellSize)
{
int3 gridPos;
gridPos.x = floor((p.x - worldOrigin.x) / cellSize.x);
gridPos.y = floor((p.y - worldOrigin.y) / cellSize.y);
gridPos.z = floor((p.z - worldOrigin.z) / cellSize.z);
return gridPos;
}
// Flatten grid-cell coordinates into a linear cell index (z-major).
// Precondition: every gridSize component is a power of two -- the bitwise
// AND below wraps out-of-range (including negative) coordinates around
// the grid instead of clamping them.
__device__ uint calcGridHash(int3 gridPos, uint3 gridSize)
{
gridPos.x = gridPos.x & (gridSize.x-1); // wrap grid, assumes size is power of 2
gridPos.y = gridPos.y & (gridSize.y-1);
gridPos.z = gridPos.z & (gridSize.z-1);
return gridPos.z*gridSize.y*gridSize.x + gridPos.y*gridSize.x + gridPos.x;
}
// Kernel: one thread per particle. Writes each particle's grid-cell hash
// and its own index (an identity permutation that a later sort-by-hash
// turns into the sorted order).
__global__
void calcHashD(uint *gridParticleHash, // output
uint *gridParticleIndex, // output
float4 *pos, // input: positions
uint numParticles,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize)
{
uint index = (blockIdx.x*blockDim.x) + threadIdx.x;
if (index >= numParticles) return;
// volatile forces a single full float4 load of the position
volatile float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(make_float3(p.x, p.y, p.z),worldOrigin, cellSize);
uint hash = calcGridHash(gridPos, gridSize);
// store grid hash and particle index
gridParticleHash[index] = hash;
gridParticleIndex[index] = index;
}
// Host wrapper: launch calcHashD with 256-thread blocks over all
// particles, then surface any launch error via getLastCudaError.
void calcHash(uint *gridParticleHash,
uint *gridParticleIndex,
float *pos,
int numParticles,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
calcHashD<<< numBlocks, numThreads >>>(gridParticleHash,
gridParticleIndex,
(float4 *) pos,numParticles,worldOrigin,gridSize,cellSize);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// sorting particles by hash
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
//
// Sort particle indices by their cell hash in place on the device
// (thrust radix sort over raw device pointers). After this call the two
// arrays are a matched (hash, original-index) pairing in hash order.
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// rearrange particle data into sorted order, and find the start of each cell
// in the sorted hash array
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// Kernel: given hash-sorted particles, (a) record for every occupied cell
// the [start, end) range of its particles in cellStart/cellEnd, and
// (b) gather positions into sorted order. Cell boundaries are found by
// comparing each particle's hash with its predecessor's, staged through
// shared memory so each thread loads only one hash.
// Launch requirement: dynamic shared memory of (blockDim.x+1)*sizeof(uint).
// cellStart must be pre-filled with 0xffffffff (empty marker) by the host.
__global__
void reorderDataAndFindCellStartD(uint *cellStart, // output: cell start index
uint *cellEnd, // output: cell end index
float4 *sortedPos, // output: sorted positions
// float4 *sortedVel, // output: sorted velocities
uint *gridParticleHash, // input: sorted grid hashes
uint *gridParticleIndex,// input: sorted particle indices
float4 *oldPos, // input: sorted position array
// float4 *oldVel, // input: sorted velocity array
uint numParticles)
{
extern __shared__ uint sharedHash[]; // blockSize + 1 elements
uint index = (blockIdx.x*blockDim.x) + threadIdx.x;
uint hash;
// handle case when no. of particles not multiple of block size
if (index < numParticles)
{
hash = gridParticleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two hash values per thread
sharedHash[threadIdx.x+1] = hash;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = gridParticleHash[index-1];
}
}
// barrier is outside the guard above, so all threads reach it
__syncthreads();
if (index < numParticles)
{
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
if (index == 0 || hash != sharedHash[threadIdx.x])
{
cellStart[hash] = index;
if (index > 0)
cellEnd[sharedHash[threadIdx.x]] = index;
}
if (index == numParticles - 1)
{
cellEnd[hash] = index + 1;
}
// Now use the sorted index to reorder the pos and vel data
uint sortedIndex = gridParticleIndex[index];
float4 pos = FETCH(oldPos, sortedIndex); // macro does either global read or texture fetch
// float4 vel = FETCH(oldVel, sortedIndex); // see particles_kernel.cuh
sortedPos[index] = pos;
// sortedVel[index] = vel;
}
}
// Host wrapper: mark all cells empty (0xffffffff), then launch
// reorderDataAndFindCellStartD with 256-thread blocks and the dynamic
// shared memory it requires ((blockSize+1) uints). The USE_TEX branches
// bind/unbind the position texture when texture fetches are enabled.
void reorderDataAndFindCellStart(uint *cellStart,
uint *cellEnd,
float *sortedPos,
//float *sortedVel,
uint *gridParticleHash,
uint *gridParticleIndex,
float *oldPos,
//float *oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
checkCudaErrors(cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
checkCudaErrors(cudaBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
//checkCudaErrors(cudaBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
reorderDataAndFindCellStartD<<< numBlocks, numThreads, smemSize>>>(
cellStart,
cellEnd,
(float4 *) sortedPos,
//(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
//(float4 *) oldVel,
numParticles);
getLastCudaError("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
checkCudaErrors(cudaUnbindTexture(oldPosTex));
//checkCudaErrors(cudaUnbindTexture(oldVelTex));
#endif
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// finde neighbour cell particles
//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
// Overlap test for two spheres: returns 1 when the center distance is
// strictly smaller than the sum of the radii, 0 otherwise.
__device__
int collideSpheres(float3 posA, float3 posB,
                   float radiusA, float radiusB)
{
    // |posB - posA| < radiusA + radiusB  (length from helper_math)
    float3 delta = posB - posA;
    float separation = length(delta);
    float touchDist = radiusA + radiusB;
    return (separation < touchDist) ? 1 : 0;
}
/*
__device__
void collideCell(int3 gridPos,
uint index,
float3 pos,
//float3 vel,
float4 *oldPos,
//float4 *oldVel,
uint *cellStart,
uint *cellEnd,
float particleRadius,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize,
uint *d_neighborList)
{
uint gridHash = calcGridHash(gridPos, gridSize);
// get start of bucket for this cell
uint startIndex = FETCH(cellStart, gridHash);
//float3 force = make_float3(0.0f);
if (startIndex != 0xffffffff) // cell is not empty
{
// iterate over particles in this cell
uint endIndex = FETCH(cellEnd, gridHash);
uint a=0;
for (uint j=startIndex; j<endIndex; j++)
{
if (j != index) // check not colliding with self
{
float3 pos2 = make_float3(FETCH(oldPos, j));
//float3 vel2 = make_float3(FETCH(oldVel, j));
// collide two spheres
if(collideSpheres(pos, pos2, particleRadius,particleRadius))
{
d_neighborList[index*10+a]=d_neighborList[index*10+a]+j;
a=a+1;
}
}
}
}
}*/
// Kernel: one thread per (sorted) particle. Scans the 27 cells around the
// particle's cell and records the ORIGINAL indices of every overlapping
// particle into this particle's row of d_neighborList.
//
// Row layout (rows are in SORTED particle order): slots
// [0, MAX_LISTELEM-2] hold neighbor indices; slot MAX_LISTELEM-1 holds
// the neighbor count (saturated when the row fills). Requires
// MAX_LISTELEM >= 1.
//
// BUGFIX: the previous guard `if (a > MAX_LISTELEM)` let the write below
// land at offset index*MAX_LISTELEM + MAX_LISTELEM -- i.e. into the NEXT
// particle's row -- and also clobbered this row's count slot; the early
// return additionally skipped writing the count. The guard now reserves
// the last slot and always stores the count before returning.
__global__
void collideD(//float4 *newVel, // output: new velocity
float4 *oldPos, // input: sorted positions
//float4 *oldVel, // input: sorted velocities
uint *gridParticleIndex, // input: sorted particle indices
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells,
float particleRadius,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize,
uint *d_neighborList,
uint MAX_LISTELEM)
{
    uint index = (blockIdx.x*blockDim.x) + threadIdx.x;
    if (index >= numParticles) return;
    // read particle data from sorted arrays
    float3 pos = make_float3(FETCH(oldPos, index));
    // get address in grid
    int3 gridPos = calcGridPos(pos, worldOrigin, cellSize);
    uint a = 0; // neighbors recorded so far for this particle
    // examine the 3x3x3 block of cells around this particle
    for (int z=-1; z<=1; z++)
    {
        for (int y=-1; y<=1; y++)
        {
            for (int x=-1; x<=1; x++)
            {
                int3 neighbourPos = gridPos + make_int3(x, y, z);
                uint gridHash = calcGridHash(neighbourPos, gridSize);
                // get start of bucket for this cell
                uint startIndex = FETCH(cellStart, gridHash);
                if (startIndex != 0xffffffff) // cell is not empty
                {
                    // iterate over particles in this cell
                    uint endIndex = FETCH(cellEnd, gridHash);
                    for (uint j=startIndex; j<endIndex; j++)
                    {
                        if (j != index) // check not colliding with self
                        {
                            float3 pos2 = make_float3(FETCH(oldPos, j));
                            // collide two spheres
                            if (collideSpheres(pos, pos2, particleRadius, particleRadius))
                            {
                                if (a >= MAX_LISTELEM - 1)
                                {
                                    // row full: store the saturated count and stop
                                    d_neighborList[index*MAX_LISTELEM + MAX_LISTELEM - 1] = a;
                                    return;
                                }
                                d_neighborList[index*MAX_LISTELEM + a] = gridParticleIndex[j];
                                a = a + 1;
                            }
                        }
                    }
                }
            }
        }
    }
    // last slot of the row stores how many neighbors were written
    d_neighborList[index*MAX_LISTELEM + MAX_LISTELEM - 1] = a;
}
// Host wrapper: launch collideD with 64-thread blocks (one thread per
// particle) and surface any launch error. See collideD for the
// d_neighborList row layout.
void collide(//float *newVel,
float *sortedPos,
//float *sortedVel,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
uint numParticles,
uint numCells,
float particleRadius,
float3 worldOrigin,
uint3 gridSize,
float3 cellSize,
uint *d_neighborList,
uint MAX_LISTELEM)
{
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
collideD<<< numBlocks, numThreads >>>(//(float4 *)newVel,
(float4 *)sortedPos,
//(float4 *)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles,
numCells,
particleRadius,
worldOrigin,
gridSize,
cellSize,
d_neighborList,
MAX_LISTELEM);
// check if kernel invocation generated an error
getLastCudaError("Kernel execution failed");
}
//>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
// Builds, for each of the n 2-D particles, a fixed-capacity neighbour list
// using a uniform-grid broad phase evaluated on the GPU.
//
//   n                  - number of particles
//   xp, yp             - particle coordinates (host side)
//   dis                - neighbour search distance (used as particle diameter)
//   neighborListoutput - [n][MAX_LIST] result, indexed by ORIGINAL particle
//                        id; slot MAX_LIST-1 of each row holds the neighbour
//                        count written by the collide kernel
//   MAX_LIST           - capacity of each per-particle list
//
// NOTE(review): the 64^3 grid and the -0.045 world origin below are
// hard-coded for one simulation domain — confirm they still bound the input
// coordinates before reuse.
void find_neighbor_cuda(int n,double *xp,double *yp,double dis,int **neighborListoutput,int MAX_LIST)
{
    int numParticles = n;
    float particleRadius = dis / 2.0f;

    // Uniform grid: 64 cells per axis, cell edge = particle diameter.
    const int GRID_SIZE = 64;
    uint3 gridSize;
    gridSize.x = gridSize.y = gridSize.z = GRID_SIZE;
    float3 worldOrigin = make_float3(-0.0450f, -0.0450f, -0.0450f);
    float3 cellSize = make_float3(particleRadius * 2.0f,
                                  particleRadius * 2.0f,
                                  particleRadius * 2.0f);
    uint numCells = gridSize.x * gridSize.y * gridSize.z;

    // Host buffers.  These are allocated with new[] and must be released with
    // delete[] (the previous free() calls were undefined behaviour).
    float *m_hPos = new float[numParticles * 4];   // xyzw per particle
    uint *m_hGridParticleIndex = new uint[numParticles];

    // Device buffers.
    float *m_dPos;
    float *m_dSortedPos;
    uint *m_dGridParticleHash;  // grid hash value for each particle
    uint *m_dGridParticleIndex; // original particle index for each particle
    uint *m_dCellStart;         // index of start of each cell in sorted list
    uint *m_dCellEnd;           // index of end of each cell
    unsigned int memSize = sizeof(float) * 4 * numParticles;
    allocateArray((void **)&m_dPos, memSize);
    allocateArray((void **)&m_dSortedPos, memSize);
    allocateArray((void **)&m_dGridParticleHash, numParticles*sizeof(uint));
    allocateArray((void **)&m_dGridParticleIndex, numParticles*sizeof(uint));
    allocateArray((void **)&m_dCellStart, numCells*sizeof(uint));
    allocateArray((void **)&m_dCellEnd, numCells*sizeof(uint));

    // Pack the 2-D input into float4 positions (z = w = 0).
    for(int i = 0; i < numParticles; ++i)
    {
        m_hPos[i*4]   = xp[i];
        m_hPos[i*4+1] = yp[i];
        m_hPos[i*4+2] = 0;
        m_hPos[i*4+3] = 0;
    }
    checkCudaErrors(cudaMemcpy(m_dPos, m_hPos, sizeof(float)*4*numParticles, cudaMemcpyHostToDevice));

    // ************** hash every particle into its grid cell **************
    calcHash(
        m_dGridParticleHash,
        m_dGridParticleIndex,
        m_dPos,
        numParticles,
        worldOrigin,
        gridSize,
        cellSize);

    // ********* sort by hash, reorder positions, record cell spans *********
    sortParticles(m_dGridParticleHash, m_dGridParticleIndex, numParticles);
    reorderDataAndFindCellStart(
        m_dCellStart,
        m_dCellEnd,
        m_dSortedPos,
        m_dGridParticleHash,
        m_dGridParticleIndex,
        m_dPos,
        numParticles,
        numCells);

    // ********* build the neighbour list in sorted-particle order *********
    uint MAX_LISTELEM = MAX_LIST;
    uint *h_neighborList = new uint[numParticles*MAX_LISTELEM];
    uint *d_neighborList;
    allocateArray((void **)&d_neighborList, numParticles*MAX_LISTELEM*sizeof(uint));
    cudaMemset(d_neighborList, 0, MAX_LISTELEM*numParticles*sizeof(uint));
    collide(
        m_dSortedPos,
        m_dGridParticleIndex,
        m_dCellStart,
        m_dCellEnd,
        numParticles,
        numCells,
        particleRadius,
        worldOrigin,
        gridSize,
        cellSize,
        d_neighborList,
        MAX_LISTELEM);
    checkCudaErrors(cudaMemcpy(m_hGridParticleIndex, m_dGridParticleIndex, sizeof(uint)*numParticles, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(h_neighborList, d_neighborList, sizeof(uint)*numParticles*MAX_LISTELEM, cudaMemcpyDeviceToHost));

    // Scatter each sorted row back to the particle's original index.  (The
    // original code staged this through two intermediate host copies that
    // were never read anywhere else; the direct copy is equivalent and also
    // removes the leak of the per-row malloc'd scratch lists.)
    for (int i = 0; i < numParticles; i++) {
        for (int j = 0; j < (int)MAX_LISTELEM; j++) {
            neighborListoutput[m_hGridParticleIndex[i]][j] = h_neighborList[i*MAX_LISTELEM + j];
        }
    }

    // Release host memory with delete[] and device memory with cudaFree.
    delete[] m_hPos;
    delete[] m_hGridParticleIndex;
    delete[] h_neighborList;
    cudaFree(m_dPos);
    cudaFree(m_dGridParticleHash);
    cudaFree(m_dGridParticleIndex);
    cudaFree(m_dCellStart);
    cudaFree(m_dCellEnd);
    cudaFree(m_dSortedPos);
    cudaFree(d_neighborList);
    return;
}
} // extern "C"
#endif // PARTICLE_H |
4acf149e26089f5207c6b4b08e19ab1645436f8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <R.h>
#include <hipfft.h>
/* This function is written for R to compute 1D FFT.
n - [IN] the number of complex we want to compute
inverse - [IN] set to 1 if use inverse mode
h_idata_re - [IN] input data from host (R, real part)
h_idata_im - [IN] input data from host (R, imaginary part)
h_odata_re - [OUT] results (real) allocated by caller
h_odata_im - [OUT] results (imaginary) allocated by caller
*/
extern "C"
void cufft(int *n, int *inverse, double *h_idata_re,
double *h_idata_im, double *h_odata_re, double *h_odata_im)
{
hipfftHandle plan;
hipfftDoubleComplex *d_data, *h_data;
hipMalloc((void**)&d_data, sizeof(hipfftDoubleComplex)*(*n));
h_data = (hipfftDoubleComplex *) malloc(sizeof(hipfftDoubleComplex) * (*n));
// Convert data to hipfftDoubleComplex type
for(int i=0; i< *n; i++) {
h_data[i].x = h_idata_re[i];
h_data[i].y = h_idata_im[i];
}
hipMemcpy(d_data, h_data, sizeof(hipfftDoubleComplex) * (*n),
hipMemcpyHostToDevice);
// Use the CUFFT plan to transform the signal in place.
hipfftPlan1d(&plan, *n, HIPFFT_Z2Z, 1);
if (!*inverse ) {
hipfftExecZ2Z(plan, d_data, d_data, HIPFFT_FORWARD);
} else {
hipfftExecZ2Z(plan, d_data, d_data, HIPFFT_BACKWARD);
}
hipMemcpy(h_data, d_data, sizeof(hipfftDoubleComplex) * (*n),
hipMemcpyDeviceToHost);
// split hipfftDoubleComplex to double array
for(int i=0; i<*n; i++) {
h_odata_re[i] = h_data[i].x;
h_odata_im[i] = h_data[i].y;
}
// Destroy the CUFFT plan and free memory.
hipfftDestroy(plan);
hipFree(d_data);
free(h_data);
} | 4acf149e26089f5207c6b4b08e19ab1645436f8c.cu | #include <R.h>
#include <cufft.h>
/* This function is written for R to compute 1D FFT.
n - [IN] the number of complex we want to compute
inverse - [IN] set to 1 if use inverse mode
h_idata_re - [IN] input data from host (R, real part)
h_idata_im - [IN] input data from host (R, imaginary part)
h_odata_re - [OUT] results (real) allocated by caller
h_odata_im - [OUT] results (imaginary) allocated by caller
*/
extern "C"
// 1-D double-precision complex-to-complex FFT, callable from R via .C().
//
//   n          - number of complex samples to transform
//   inverse    - 0 for forward transform, non-zero for inverse
//   h_idata_re - input real parts (host)
//   h_idata_im - input imaginary parts (host)
//   h_odata_re - output real parts, allocated by the caller
//   h_odata_im - output imaginary parts, allocated by the caller
//
// NOTE(review): none of the cudaMalloc/cudaMemcpy/cufft* return codes are
// checked; a failure leaves the output buffers untouched with no diagnostic.
// NOTE(review): CUFFT's inverse transform is unnormalized — presumably the
// R caller divides by n; confirm.
void cufft(int *n, int *inverse, double *h_idata_re,
double *h_idata_im, double *h_odata_re, double *h_odata_im)
{
cufftHandle plan;
cufftDoubleComplex *d_data, *h_data;
// Device buffer for the in-place transform; host staging buffer for the
// interleaved (re, im) representation CUFFT expects.
cudaMalloc((void**)&d_data, sizeof(cufftDoubleComplex)*(*n));
h_data = (cufftDoubleComplex *) malloc(sizeof(cufftDoubleComplex) * (*n));
// Convert data to cufftDoubleComplex type
for(int i=0; i< *n; i++) {
h_data[i].x = h_idata_re[i];
h_data[i].y = h_idata_im[i];
}
cudaMemcpy(d_data, h_data, sizeof(cufftDoubleComplex) * (*n),
cudaMemcpyHostToDevice);
// Use the CUFFT plan to transform the signal in place.
cufftPlan1d(&plan, *n, CUFFT_Z2Z, 1);
if (!*inverse ) {
cufftExecZ2Z(plan, d_data, d_data, CUFFT_FORWARD);
} else {
cufftExecZ2Z(plan, d_data, d_data, CUFFT_INVERSE);
}
// cudaMemcpy is synchronizing, so the transform has completed by the time
// the copy returns.
cudaMemcpy(h_data, d_data, sizeof(cufftDoubleComplex) * (*n),
cudaMemcpyDeviceToHost);
// split cufftDoubleComplex to double array
for(int i=0; i<*n; i++) {
h_odata_re[i] = h_data[i].x;
h_odata_im[i] = h_data[i].y;
}
// Destroy the CUFFT plan and free memory.
cufftDestroy(plan);
cudaFree(d_data);
free(h_data);
} |
3c5228fbfdbbcba708398d8fd4b79407d34b4346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void huber(float *a, const size_t width, const size_t height, const float alpha, const float strength, const size_t pixelsPerThread, float *f)
{
const size_t col = (blockIdx.x * blockDim.x + threadIdx.x) % width;
const size_t crow = (blockIdx.x * blockDim.x + threadIdx.x) / width * pixelsPerThread;
if (col >= width || crow >= height)
return;
const size_t erow = min((unsigned int)(crow + pixelsPerThread), (unsigned int)height);
const float alpha2 = alpha * alpha;
float colF = 0.0f;
for (size_t row = crow; row < erow; ++row)
{
const size_t idx = row * width + col;
// Pseudo-Huber loss function
const float root = sqrtf(1.0f + a[idx]*a[idx] / alpha2);
colF += alpha2 * (root - 1.0f);
a[idx] *= strength / root;
}
colF *= strength;
f[blockIdx.x * blockDim.x + threadIdx.x] = colF;
} | 3c5228fbfdbbcba708398d8fd4b79407d34b4346.cu | #include "includes.h"
// Pseudo-Huber shrinkage over a width x height image.
//
// Each thread processes a vertical strip of `pixelsPerThread` pixels in one
// column: it accumulates the Pseudo-Huber loss alpha^2*(sqrt(1+(a/alpha)^2)-1)
// of its pixels (scaled by `strength`) into f[threadId], and rescales each
// pixel in place by strength/sqrt(1+(a/alpha)^2).
//
// NOTE(review): threads whose starting row falls past the image return early
// and never write their f slot — presumably the caller zero-initializes f or
// sizes the grid exactly; confirm.
__global__ void huber(float *a, const size_t width, const size_t height, const float alpha, const float strength, const size_t pixelsPerThread, float *f)
{
// Map the flat thread id to (column, first row of this thread's strip).
const size_t col = (blockIdx.x * blockDim.x + threadIdx.x) % width;
const size_t crow = (blockIdx.x * blockDim.x + threadIdx.x) / width * pixelsPerThread;
if (col >= width || crow >= height)
return;
// Clamp the strip so the last thread of a column does not run past height.
const size_t erow = min((unsigned int)(crow + pixelsPerThread), (unsigned int)height);
const float alpha2 = alpha * alpha;
float colF = 0.0f;
for (size_t row = crow; row < erow; ++row)
{
const size_t idx = row * width + col;
// Pseudo-Huber loss function
const float root = sqrtf(1.0f + a[idx]*a[idx] / alpha2);
colF += alpha2 * (root - 1.0f);
// In-place shrinkage of the pixel value.
a[idx] *= strength / root;
}
colF *= strength;
// Per-thread partial loss; caller presumably reduces f on the host/device.
f[blockIdx.x * blockDim.x + threadIdx.x] = colF;
} |
99d61288b194fa3cdaff12392b6322168408a170.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
// Sign binarization: binary[i] = +1 when x[i] >= 0, otherwise -1.
// One thread per element over a flattened 2-D grid; extra threads exit.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (idx < n) {
        binary[idx] = (x[idx] >= 0) ? 1 : -1;
    }
}
// Host wrapper: binarizes n device floats and checks for launch errors.
void binarize_gpu(float *x, int n, float *binary)
{
    hipLaunchKernelGGL(binarize_kernel, dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0,
                       x, n, binary);
    check_error(hipPeekAtLastError());
}
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (s >= size) return;
int i = 0;
float mean = 0;
for(i = 0; i < n; ++i){
mean += fabsf(input[i*size + s]);
}
mean = mean / n;
for(i = 0; i < n; ++i){
binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
}
}
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary);
check_error(hipPeekAtLastError());
}
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (f >= n) return;
int i = 0;
float mean = 0;
for(i = 0; i < size; ++i){
mean += fabsf(weights[f*size + i]);
}
mean = mean / size;
for(i = 0; i < size; ++i){
binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
/*binary[f*size + i] = weights[f*size + i];*/
}
}
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary);
check_error(hipPeekAtLastError());
}
void forward_convolutional_layer_gpu(convolutional_layer l, network net)
{
fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
#ifdef CUDNN
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
int i, j;
int m = l.n/l.groups;
int k = l.size*l.size*l.c/l.groups;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
for(j = 0; j < l.groups; ++j){
float *a = l.weights_gpu + j*l.nweights/l.groups;
float *b = net.workspace;
float *c = l.output_gpu + (i*l.groups + j)*n*m;
float *im = net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
if (l.size == 1){
b = im;
} else {
im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
}
gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, net);
} else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if(l.binary || l.xnor) swap_binary(&l);
}
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -(size/2.f);
int h_offset = -(size/2.f);
int out_index = j + w*(i + h*(k + c*b));
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i + l;
int cur_w = w_offset + j + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
}
}
}
extern "C" void smooth_layer(layer l, int size, float rate)
{
int h = l.out_h;
int w = l.out_w;
int c = l.out_c;
size_t n = h*w*c*l.batch;
hipLaunchKernelGGL(( smooth_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu);
check_error(hipPeekAtLastError());
}
void backward_convolutional_layer_gpu(convolutional_layer l, network net)
{
if(l.smooth){
smooth_layer(l, 5, l.smooth);
}
//constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, net);
} else {
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
float *original_input = net.input_gpu;
if(l.xnor) net.input_gpu = l.binary_input_gpu;
#ifdef CUDNN
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
net.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(net.delta_gpu){
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
net.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
net.delta_gpu);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu);
}
#else
int m = l.n/l.groups;
int n = l.size*l.size*l.c/l.groups;
int k = l.out_w*l.out_h;
int i, j;
for(i = 0; i < l.batch; ++i){
for(j = 0; j < l.groups; ++j){
float *a = l.delta_gpu + (i*l.groups + j)*m*k;
float *b = net.workspace;
float *c = l.weight_updates_gpu + j*l.nweights/l.groups;
float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w;
float *imd = net.delta_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w;
im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
if (net.delta_gpu) {
if (l.binary || l.xnor) swap_binary(&l);
a = l.weights_gpu + j*l.nweights/l.groups;
b = l.delta_gpu + (i*l.groups + j)*m*k;
c = net.workspace;
if (l.size == 1) {
c = imd;
}
gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k);
if (l.size != 1) {
col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd);
}
if(l.binary || l.xnor) {
swap_binary(&l);
}
}
if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w);
}
}
#endif
}
void pull_convolutional_layer(layer l)
{
cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_pull_array(l.scales_gpu, l.scales, l.n);
cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
}
void push_convolutional_layer(layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
if (l.batch_normalize){
cuda_push_array(l.scales_gpu, l.scales, l.n);
cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
}
}
void update_convolutional_layer_gpu(layer l, update_args a)
{
float learning_rate = a.learning_rate*l.learning_rate_scale;
float momentum = a.momentum;
float decay = a.decay;
int batch = a.batch;
if(a.adam){
adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
if(l.scales_gpu){
adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
}
}else{
axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1);
axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_gpu(l.n, momentum, l.bias_updates_gpu, 1);
if(l.scales_gpu){
axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_gpu(l.n, momentum, l.scale_updates_gpu, 1);
}
}
if(l.clip){
constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
}
}
| 99d61288b194fa3cdaff12392b6322168408a170.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
// Sign binarization: binary[i] = +1 when x[i] >= 0, otherwise -1.
// One thread per element over a flattened 2-D grid; extra threads exit.
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (idx < n) {
        binary[idx] = (x[idx] >= 0) ? 1 : -1;
    }
}
// Host wrapper: binarizes n device floats and checks for launch errors.
void binarize_gpu(float *x, int n, float *binary)
{
    dim3 grid = cuda_gridsize(n);
    binarize_kernel<<<grid, BLOCK>>>(x, n, binary);
    check_error(cudaPeekAtLastError());
}
// XNOR-net input binarization.  For spatial position s (one thread each),
// computes the mean absolute value of input[.., s] over the n leading slices
// and writes +mean or -mean depending on each input's sign.
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
    int s = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (s >= size) return;

    // First pass: mean of |input| down this column.
    float sumAbs = 0;
    for (int k = 0; k < n; ++k) {
        sumAbs += fabsf(input[k * size + s]);
    }
    float mean = sumAbs / n;

    // Second pass: signed scaling factor per element.
    for (int k = 0; k < n; ++k) {
        binary[k * size + s] = (input[k * size + s] > 0) ? mean : -mean;
    }
}
// Host wrapper: one thread per spatial position (size threads total).
void binarize_input_gpu(float *input, int n, int size, float *binary)
{
    dim3 grid = cuda_gridsize(size);
    binarize_input_kernel<<<grid, BLOCK>>>(input, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// XNOR-net weight binarization.  One thread per filter f: computes the mean
// absolute value of the filter's `size` weights, then writes +mean or -mean
// per weight depending on its sign.
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (f >= n) return;

    // Mean |w| over this filter's weights.
    float sumAbs = 0;
    for (int k = 0; k < size; ++k) {
        sumAbs += fabsf(weights[f * size + k]);
    }
    float mean = sumAbs / size;

    // Signed binarized weight.
    for (int k = 0; k < size; ++k) {
        binary[f * size + k] = (weights[f * size + k] > 0) ? mean : -mean;
    }
}
// Host wrapper: one thread per filter (n threads total).
void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    dim3 grid = cuda_gridsize(n);
    binarize_weights_kernel<<<grid, BLOCK>>>(weights, n, size, binary);
    check_error(cudaPeekAtLastError());
}
// Forward pass of a (possibly grouped, binary/XNOR) convolutional layer on
// the GPU: optional weight/input binarization, convolution via cuDNN or
// im2col+GEMM, then batch-norm or bias, then activation.
// Note: swap_binary exchanges l.weights_gpu with l.binary_weights_gpu in
// place, so the convolution below runs on the binarized weights; the final
// swap_binary restores the originals.
void forward_convolutional_layer_gpu(convolutional_layer l, network net)
{
fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if(l.xnor){
binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu);
swap_binary(&l);
// XNOR mode also binarizes the input and redirects net.input_gpu to it.
binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
#ifdef CUDNN
// NOTE(review): the cudnnConvolutionForward status is not checked.
float one = 1;
cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
#else
// Fallback: per (batch, group) im2col + GEMM.
// m = filters per group, k = weights per filter, n = output spatial size.
int i, j;
int m = l.n/l.groups;
int k = l.size*l.size*l.c/l.groups;
int n = l.out_w*l.out_h;
for(i = 0; i < l.batch; ++i){
for(j = 0; j < l.groups; ++j){
float *a = l.weights_gpu + j*l.nweights/l.groups;
float *b = net.workspace;
float *c = l.output_gpu + (i*l.groups + j)*n*m;
float *im = net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w;
if (l.size == 1){
// 1x1 convolution: the image is already in column layout.
b = im;
} else {
im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
}
gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
#endif
if (l.batch_normalize) {
forward_batchnorm_layer_gpu(l, net);
} else {
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
}
activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
// Restore the real weights after a binary/XNOR forward pass.
if(l.binary || l.xnor) swap_binary(&l);
}
// Smoothness-penalty gradient: for each output element, accumulates into
// delta a term pulling it toward the values in its size x size neighbourhood,
// scaled by rate.  One thread per element of a (b, c, h, w) tensor.
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
// Unflatten the linear id into (b, k=channel, i=row, j=col).
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
// Window origin relative to (i, j); -(size/2.f) truncated to int equals
// the integer -(size/2) for any non-negative size.
int w_offset = -(size/2.f);
int h_offset = -(size/2.f);
int out_index = j + w*(i + h*(k + c*b));
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i + l;
int cur_w = w_offset + j + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
// Skip neighbours that fall outside the image.
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
}
}
}
// Adds the smoothness-penalty gradient over the whole layer output into
// l.delta_gpu (one thread per output element).
extern "C" void smooth_layer(layer l, int size, float rate)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.out_c;
    size_t total = h * w * c * l.batch;

    smooth_kernel<<<cuda_gridsize(total), BLOCK>>>(
        l.output_gpu, total, l.w, l.h, l.c, size, rate, l.delta_gpu);
    check_error(cudaPeekAtLastError());
}
// Backward pass of the convolutional layer: activation gradient, bias or
// batch-norm gradient, weight-update accumulation, and (when net.delta_gpu
// is set) propagation of the delta to the previous layer, via cuDNN or
// explicit GEMM + col2im.
void backward_convolutional_layer_gpu(convolutional_layer l, network net)
{
if(l.smooth){
smooth_layer(l, 5, l.smooth);
}
//constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
// delta *= activation'(output), in place.
gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
if(l.batch_normalize){
backward_batchnorm_layer_gpu(l, net);
} else {
backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
}
// Keep the pre-binarization input around: XNOR mode needs it below for the
// hard-tanh straight-through gradient.
float *original_input = net.input_gpu;
if(l.xnor) net.input_gpu = l.binary_input_gpu;
#ifdef CUDNN
// NOTE(review): cuDNN status codes are not checked here.
float one = 1;
cudnnConvolutionBackwardFilter(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bf_algo,
net.workspace,
l.workspace_size,
&one,
l.dweightDesc,
l.weight_updates_gpu);
if(net.delta_gpu){
// Data gradient must use the binarized weights in binary/XNOR mode,
// hence the surrounding swap_binary pair.
if(l.binary || l.xnor) swap_binary(&l);
cudnnConvolutionBackwardData(cudnn_handle(),
&one,
l.weightDesc,
l.weights_gpu,
l.ddstTensorDesc,
l.delta_gpu,
l.convDesc,
l.bd_algo,
net.workspace,
l.workspace_size,
&one,
l.dsrcTensorDesc,
net.delta_gpu);
if(l.binary || l.xnor) swap_binary(&l);
if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu);
}
#else
// Explicit path, per (batch, group):
// m = filters per group, n = weights per filter, k = output spatial size.
int m = l.n/l.groups;
int n = l.size*l.size*l.c/l.groups;
int k = l.out_w*l.out_h;
int i, j;
for(i = 0; i < l.batch; ++i){
for(j = 0; j < l.groups; ++j){
float *a = l.delta_gpu + (i*l.groups + j)*m*k;
float *b = net.workspace;
float *c = l.weight_updates_gpu + j*l.nweights/l.groups;
float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w;
float *imd = net.delta_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w;
// weight_updates += delta * im2col(input)^T
im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
if (net.delta_gpu) {
if (l.binary || l.xnor) swap_binary(&l);
// input_delta = weights^T * delta, then col2im back to image layout
// (1x1 convolutions write straight into imd, no col2im needed).
a = l.weights_gpu + j*l.nweights/l.groups;
b = l.delta_gpu + (i*l.groups + j)*m*k;
c = net.workspace;
if (l.size == 1) {
c = imd;
}
gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k);
if (l.size != 1) {
col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd);
}
if(l.binary || l.xnor) {
swap_binary(&l);
}
}
if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w);
}
}
#endif
}
// Copies the layer's learnable state (weights, biases, their accumulated
// updates and, when batch-norm is enabled, the norm parameters/statistics)
// from the device into the matching host-side arrays.
void pull_convolutional_layer(layer l)
{
    // Weights and their gradient accumulator.
    cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
    cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
    // Biases and their gradient accumulator.
    cuda_pull_array(l.biases_gpu, l.biases, l.n);
    cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
    if (l.batch_normalize) {
        // Batch-norm scales and running statistics.
        cuda_pull_array(l.scales_gpu, l.scales, l.n);
        cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
        cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
    }
}
// Copies the layer's learnable state (weights, biases, their accumulated
// updates and, when batch-norm is enabled, the norm parameters/statistics)
// from the host-side arrays onto the device.
void push_convolutional_layer(layer l)
{
    // Weights and their gradient accumulator.
    cuda_push_array(l.weights_gpu, l.weights, l.nweights);
    cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
    // Biases and their gradient accumulator.
    cuda_push_array(l.biases_gpu, l.biases, l.n);
    cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
    if (l.batch_normalize) {
        // Batch-norm scales and running statistics.
        cuda_push_array(l.scales_gpu, l.scales, l.n);
        cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
        cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
    }
}
// Applies one optimizer step to the layer's parameters on the GPU, using
// either Adam or SGD with momentum + weight decay.  The axpy/scal ordering
// in the SGD branch is the optimizer math — do not reorder.
void update_convolutional_layer_gpu(layer l, update_args a)
{
float learning_rate = a.learning_rate*l.learning_rate_scale;
float momentum = a.momentum;
float decay = a.decay;
int batch = a.batch;
if(a.adam){
adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
if(l.scales_gpu){
adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
}
}else{
// SGD: fold weight decay into the gradient, take the step, then scale
// the stored gradient by momentum so it acts as a velocity buffer.
axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1);
// Biases (and batch-norm scales) get no weight decay.
axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
scal_gpu(l.n, momentum, l.bias_updates_gpu, 1);
if(l.scales_gpu){
axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
scal_gpu(l.n, momentum, l.scale_updates_gpu, 1);
}
}
// Optional hard clamp of the weights to [-clip, clip].
if(l.clip){
constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
}
}
|
956776edfdacf32783e16360d71ce87b72d444f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_t1*(y))
#define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_t1*(y))
#define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_t1*(y))
#define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_t1*(y))
#define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_t1*(y))
#define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_t1*(y))
//user function
__device__
// Per-point halo update for the "top, depth 1" boundary: for every field
// selected in `fields`, copies the value from one row below (y-1, via the
// OPS_ACC* row-stride macros) into the current halo point.
inline void update_halo_kernel1_t1_gpu(double *density0,
double *energy0, double *energy1,
double *u, double *p,
double *sd , const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(0,-1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(0,-1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(0,-1)];
if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(0,-1)];
if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(0,-1)];
if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(0,-1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
// OPS-generated CUDA wrapper: one thread per (x, y) point of a size0 x size1
// iteration range.  Each thread offsets the six field pointers to its own
// point (row stride = the per-field xdim* constants) and invokes the
// elemental user function; out-of-range threads do nothing.
__global__ void ops_update_halo_kernel1_t1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Advance each base pointer to this thread's point (unit stride in x,
// field-specific xdim stride in y).
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,52)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel1_t1");
OPS_kernels[52].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_t1_h = xdim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_t1_h = xdim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_t1_h = xdim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_t1_h = xdim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_t1_h = xdim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_t1_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[52].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 52;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 52;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
| 956776edfdacf32783e16360d71ce87b72d444f8.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_t1*(y))
#define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_t1*(y))
#define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_t1*(y))
#define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_t1*(y))
#define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_t1*(y))
#define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_t1*(y))
//user function
__device__
inline void update_halo_kernel1_t1_gpu(double *density0,
double *energy0, double *energy1,
double *u, double *p,
double *sd , const int* fields) {
if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(0,-1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(0,-1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(0,-1)];
if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(0,-1)];
if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(0,-1)];
if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(0,-1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_update_halo_kernel1_t1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
const int* __restrict arg6,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,52)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel1_t1");
OPS_kernels[52].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != xdim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_t1_h = xdim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_t1_h = xdim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_t1_h = xdim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_t1_h = xdim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_t1_h = xdim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_t1_h = xdim5;
}
int *arg6h = (int *)arg6.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg6.data = OPS_consts_h + consts_bytes;
arg6.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0)
ops_update_halo_kernel1_t1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(int *)arg6.data_d,x_size, y_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[52].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[52].mpi_time += t2-t1;
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 52;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 52;
for ( int i=0; i<4; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int));
desc->args[6].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(52,"update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
|
7addf5bccb5af8749d5d02aad4c898e7990fc813.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(int argc, char **argv)
{
printf(argv[1]);
printf(argv[2]);
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| 7addf5bccb5af8749d5d02aad4c898e7990fc813.cu | #include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(int argc, char **argv)
{
printf(argv[1]);
printf(argv[2]);
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
1fdc9982623ea49a1bdeb0c68f5e8f2d93036048.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "cudaHelpers.hpp"
__global__ void gpuMinMaxOfEachBlock(float const* const input, float* const output, const int cols)
{
const auto globalThreadPos2d = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const auto globalThreadPos1d = globalThreadPos2d.y * cols + globalThreadPos2d.x;
extern __shared__ float temp[];
auto minTemp = temp;
auto maxTemp = temp + blockDim.x;
minTemp[threadIdx.x] = input[globalThreadPos1d];
maxTemp[threadIdx.x] = input[globalThreadPos1d];
// --- Before going further, we have to make sure that all the shared memory loads have been completed
__syncthreads();
// --- Reduction in shared memory. Only half of the threads contribute to reduction.
for (auto s = blockDim.x / 2; s>0; s >>= 1)
{
if (threadIdx.x < s)
{
minTemp[threadIdx.x] = fminf(minTemp[threadIdx.x], minTemp[threadIdx.x + s]);
maxTemp[threadIdx.x] = fmaxf(maxTemp[threadIdx.x], maxTemp[threadIdx.x + s]);
}
// --- At the end of each iteration loop, we have to make sure that all memory operations have been completed
__syncthreads();
}
if (threadIdx.x == 0)
{
output[blockIdx.y] = minTemp[threadIdx.x];
output[blockIdx.y + gridDim.y] = maxTemp[threadIdx.x];
}
}
std::vector<float> gpuMinMaxImpl(std::vector<float> input, int rows, int cols)
{
auto deviceInput = cudaHelpers::copyToDevice<float>(input);
auto outputSize = rows * 2;
auto deviceBlockOutputs = cudaHelpers::initOnDevice<float>(outputSize);
dim3 blocksInGrid(1, rows);
dim3 threadsInBlock(cols);
int sMemSize = threadsInBlock.x * sizeof(float) * 2;
hipLaunchKernelGGL(( gpuMinMaxOfEachBlock), dim3(blocksInGrid), dim3(threadsInBlock), sMemSize, 0, deviceInput, deviceBlockOutputs, threadsInBlock.x);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
return cudaHelpers::copyFromDevice(deviceBlockOutputs, outputSize);
}
std::vector<float> gpuMinMax(std::vector<float> input, int rows, int cols)
{
auto intermediate = gpuMinMaxImpl(input, rows, cols);
return gpuMinMaxImpl(intermediate, 1, rows*2);
}
| 1fdc9982623ea49a1bdeb0c68f5e8f2d93036048.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "cudaHelpers.hpp"
__global__ void gpuMinMaxOfEachBlock(float const* const input, float* const output, const int cols)
{
const auto globalThreadPos2d = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
const auto globalThreadPos1d = globalThreadPos2d.y * cols + globalThreadPos2d.x;
extern __shared__ float temp[];
auto minTemp = temp;
auto maxTemp = temp + blockDim.x;
minTemp[threadIdx.x] = input[globalThreadPos1d];
maxTemp[threadIdx.x] = input[globalThreadPos1d];
// --- Before going further, we have to make sure that all the shared memory loads have been completed
__syncthreads();
// --- Reduction in shared memory. Only half of the threads contribute to reduction.
for (auto s = blockDim.x / 2; s>0; s >>= 1)
{
if (threadIdx.x < s)
{
minTemp[threadIdx.x] = fminf(minTemp[threadIdx.x], minTemp[threadIdx.x + s]);
maxTemp[threadIdx.x] = fmaxf(maxTemp[threadIdx.x], maxTemp[threadIdx.x + s]);
}
// --- At the end of each iteration loop, we have to make sure that all memory operations have been completed
__syncthreads();
}
if (threadIdx.x == 0)
{
output[blockIdx.y] = minTemp[threadIdx.x];
output[blockIdx.y + gridDim.y] = maxTemp[threadIdx.x];
}
}
std::vector<float> gpuMinMaxImpl(std::vector<float> input, int rows, int cols)
{
auto deviceInput = cudaHelpers::copyToDevice<float>(input);
auto outputSize = rows * 2;
auto deviceBlockOutputs = cudaHelpers::initOnDevice<float>(outputSize);
dim3 blocksInGrid(1, rows);
dim3 threadsInBlock(cols);
int sMemSize = threadsInBlock.x * sizeof(float) * 2;
gpuMinMaxOfEachBlock<<<blocksInGrid, threadsInBlock, sMemSize>>>(deviceInput, deviceBlockOutputs, threadsInBlock.x);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
return cudaHelpers::copyFromDevice(deviceBlockOutputs, outputSize);
}
std::vector<float> gpuMinMax(std::vector<float> input, int rows, int cols)
{
auto intermediate = gpuMinMaxImpl(input, rows, cols);
return gpuMinMaxImpl(intermediate, 1, rows*2);
}
|
c5946fe33cfd301eebdcf2672c1818143935179c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ppm.h"
#include <math.h>
// Constant memory for convolution filter
__constant__ Filter filter_c;
// the black and white kernel, each thread changes a pixel
__global__ void blackAndWhite(PPMPixel *imageData, PPMPixel *outputData, int width, int height) {
int tx = threadIdx.x;
int ty = blockIdx.x;
if(ty < height && tx < width) {
int i = ty*width + tx;
int avg = (imageData[i].red + imageData[i].green + imageData[i].blue) / 3;
outputData[i].red = avg;
outputData[i].green = avg;
outputData[i].blue = avg;
}
}
// the convolution kernel, each thread convolves for a pixel
__global__ void convolution(PPMPixel *imageData, PPMPixel *outputData, int width, int height)
{
__shared__ PPMPixel imageData_s[INPUT_TILE_SIZE][INPUT_TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
// get output tile row and col
int row_o = blockIdx.y * OUTPUT_TILE_SIZE + ty;
int col_o = blockIdx.x * OUTPUT_TILE_SIZE + tx;
// get input tile row and col
int row_i = row_o - FILTER_SIZE / 2;
int col_i = col_o - FILTER_SIZE / 2;
// if input is in bounds read from global to shared memory
if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width))
{
imageData_s[ty][tx] = imageData[row_i * width + col_i];
}
else // set pixel to black (all zero)
{
imageData_s[ty][tx].red = 0;
imageData_s[ty][tx].blue = 0;
imageData_s[ty][tx].green = 0;
}
__syncthreads();
int red = 0, blue = 0, green = 0;
// if in bounds calculate convolution for this pixel
if ((ty < OUTPUT_TILE_SIZE) && (tx < OUTPUT_TILE_SIZE))
{
int i, j;
for (i = 0; i < FILTER_SIZE; i++)
{
for (j = 0; j < FILTER_SIZE; j++)
{
red += filter_c.data[j * FILTER_SIZE + i] * imageData_s[j + ty][i + tx].red;
blue += filter_c.data[j * FILTER_SIZE + i] * imageData_s[j + ty][i + tx].blue;
green += filter_c.data[j * FILTER_SIZE + i] * imageData_s[j + ty][i + tx].green;
}
}
// write value to output, saturate between 0 and 255
if ((row_o < height) && (col_o < width))
{
outputData[row_o * width + col_o].red = min( max( (int)(filter_c.factor * red + filter_c.bias), 0), 255);
outputData[row_o * width + col_o].blue = min( max( (int)(filter_c.factor * blue + filter_c.bias), 0), 255);
outputData[row_o * width + col_o].green = min( max( (int)(filter_c.factor * green + filter_c.bias), 0), 255);
}
}
} | c5946fe33cfd301eebdcf2672c1818143935179c.cu | #include "ppm.h"
#include <math.h>
// Constant memory for convolution filter
__constant__ Filter filter_c;
// the black and white kernel, each thread changes a pixel
__global__ void blackAndWhite(PPMPixel *imageData, PPMPixel *outputData, int width, int height) {
int tx = threadIdx.x;
int ty = blockIdx.x;
if(ty < height && tx < width) {
int i = ty*width + tx;
int avg = (imageData[i].red + imageData[i].green + imageData[i].blue) / 3;
outputData[i].red = avg;
outputData[i].green = avg;
outputData[i].blue = avg;
}
}
// the convolution kernel, each thread convolves for a pixel
__global__ void convolution(PPMPixel *imageData, PPMPixel *outputData, int width, int height)
{
__shared__ PPMPixel imageData_s[INPUT_TILE_SIZE][INPUT_TILE_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
// get output tile row and col
int row_o = blockIdx.y * OUTPUT_TILE_SIZE + ty;
int col_o = blockIdx.x * OUTPUT_TILE_SIZE + tx;
// get input tile row and col
int row_i = row_o - FILTER_SIZE / 2;
int col_i = col_o - FILTER_SIZE / 2;
// if input is in bounds read from global to shared memory
if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width))
{
imageData_s[ty][tx] = imageData[row_i * width + col_i];
}
else // set pixel to black (all zero)
{
imageData_s[ty][tx].red = 0;
imageData_s[ty][tx].blue = 0;
imageData_s[ty][tx].green = 0;
}
__syncthreads();
int red = 0, blue = 0, green = 0;
// if in bounds calculate convolution for this pixel
if ((ty < OUTPUT_TILE_SIZE) && (tx < OUTPUT_TILE_SIZE))
{
int i, j;
for (i = 0; i < FILTER_SIZE; i++)
{
for (j = 0; j < FILTER_SIZE; j++)
{
red += filter_c.data[j * FILTER_SIZE + i] * imageData_s[j + ty][i + tx].red;
blue += filter_c.data[j * FILTER_SIZE + i] * imageData_s[j + ty][i + tx].blue;
green += filter_c.data[j * FILTER_SIZE + i] * imageData_s[j + ty][i + tx].green;
}
}
// write value to output, saturate between 0 and 255
if ((row_o < height) && (col_o < width))
{
outputData[row_o * width + col_o].red = min( max( (int)(filter_c.factor * red + filter_c.bias), 0), 255);
outputData[row_o * width + col_o].blue = min( max( (int)(filter_c.factor * blue + filter_c.bias), 0), 255);
outputData[row_o * width + col_o].green = min( max( (int)(filter_c.factor * green + filter_c.bias), 0), 255);
}
}
} |
4fe00c4bfff09f7cb9f4d73d29c2c959b0d03866.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//$Id: cudaMultiregKnr2.cu,v 1.3 2010/05/15 14:39:39 afs Exp $
//
// !!! Processing in mycudamath functions assumes column order for compatibility with R
//
#include "d_rngMT.cu"
namespace Kmt312 {
using namespace MT312_rng;
// Bayesian univariate-regression Gibbs step: one thread per regression (ti).
// For regression ti it draws a new coefficient vector
//   beta ~ N(btilde, (X'X/sigmasq + A)^-1)
// and a new error variance tau[ti] via a scaled inverse-chi-square draw,
// mirroring the quoted R expressions. Relies on device-global inputs
// d_X, d_XpX, d_Abeta, d_ssq set up by the host; assumes m fits MDIM/XDIM
// and nobs fits OBSDIM -- TODO confirm against the allocating host code.
__global__ void
cudaruniregNRK(float* d_betabar, float* tau, float* y, int nu, int nreg, int nobs, int m, int seed)
{
// One regression per thread; surplus threads exit immediately.
const int ti = blockIdx.x * blockDim.x + threadIdx.x;
if(ti >= nreg) return;
// Posterior degrees of freedom for the variance draw.
const float df = nu+nobs;
// Per-thread RNG seed derived from the host seed plus the thread index.
const ulint seedti = (ulint)((seed >> 1)+ti);
rngGamma drng(df / 2.0f, 0.5f, seedti);
//
// Shared (read-only) device-global inputs: design matrix, its cross
// product, prior precision matrix, and prior scale estimates.
float* X = d_X;
float* XpX = d_XpX;
float* A = d_Abeta;
float* ssq = d_ssq;
const int mxm = m*m;
//
// Current error variance for this regression.
const float sigmasq = tau[ti];
//----------------------------
// IR=backsolve(chol(XpX/sigmasq+A),diag(k))
float IR[MDIM];
{
float tmp0[MDIM];
for(int i=0; i < mxm; i++)
tmp0[i] = XpX[i] / sigmasq + A[i];
mdgbacksolve(tmp0, &m, IR);
}
//----------------------------
// Xpy
float Xpy[XDIM];
float* yblock = &y[ti*nobs];
mvtcrossp(X, yblock, Xpy, &nobs, &m);
//----------------------------
// btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
float* betabar = &d_betabar[ti*m];
float btilde[XDIM];
{
float tmp1[XDIM];
mvprod(A,betabar, tmp1, &m, &m);
// (Xpy/sigmasq+A%*%betabar)
for (int i=0; i<m; i++)
tmp1[i] = Xpy[i] / sigmasq + tmp1[i];
// crossprod(t(IR))
float cIR[MDIM];
mtcrossp(IR, IR, cIR, &m);
mvprod(cIR, tmp1, btilde, &m, &m);
}
//----------------------------
// beta = btilde + IR%*%rnorm(k)
// Update betabar
float beta[XDIM];
{
float tmp1[XDIM];
for(int i=0; i < m; i++) tmp1[i]=drng.d_rnorm();
// d_rnorm(&drng, m, 0., 1., tmp1);
mvprod(IR, tmp1, beta, &m, &m);
for (int i=0; i < m; i++)
beta[i] = beta[i] + btilde[i];
}
//----------------------------
// res=y-X%*%beta
// s=t(res)%*%res
// sigmasq=(nu*ssq + s)/rchisq(1,nu+n)
float s;
float resid[OBSDIM];
mvprod(X, beta, resid, &nobs, &m);
for(int i=0; i < nobs; i++)
resid[i] = yblock[i] - resid[i];
vprod(resid, resid, &s, &nobs);
float rchi;
rchi = drng.d_rchisq();
// d_rchisq(&drng, 1, &rchi);
//----------------------------
// Results: write the new variance and the new coefficient draw back to
// global memory (d_betabar doubles as the prior mean for the next sweep).
tau[ti] = (nu*ssq[ti] + s)/rchi;
// __syncthreads();
int ix=ti*m;
for(int i=0; i < m; i++) {
d_betabar[ix+i] = beta[i];
}
}
}
| 4fe00c4bfff09f7cb9f4d73d29c2c959b0d03866.cu | //$Id: cudaMultiregKnr2.cu,v 1.3 2010/05/15 14:39:39 afs Exp $
//
// !!! Processing in mycudamath functions assumes column order for compatibility with R
//
#include "d_rngMT.cu"
namespace Kmt312 {
using namespace MT312_rng;
// Bayesian univariate-regression Gibbs step: one thread per regression (ti).
// For regression ti it draws a new coefficient vector
//   beta ~ N(btilde, (X'X/sigmasq + A)^-1)
// and a new error variance tau[ti] via a scaled inverse-chi-square draw,
// mirroring the quoted R expressions. Relies on device-global inputs
// d_X, d_XpX, d_Abeta, d_ssq set up by the host; assumes m fits MDIM/XDIM
// and nobs fits OBSDIM -- TODO confirm against the allocating host code.
__global__ void
cudaruniregNRK(float* d_betabar, float* tau, float* y, int nu, int nreg, int nobs, int m, int seed)
{
// One regression per thread; surplus threads exit immediately.
const int ti = blockIdx.x * blockDim.x + threadIdx.x;
if(ti >= nreg) return;
// Posterior degrees of freedom for the variance draw.
const float df = nu+nobs;
// Per-thread RNG seed derived from the host seed plus the thread index.
const ulint seedti = (ulint)((seed >> 1)+ti);
rngGamma drng(df / 2.0f, 0.5f, seedti);
//
// Shared (read-only) device-global inputs: design matrix, its cross
// product, prior precision matrix, and prior scale estimates.
float* X = d_X;
float* XpX = d_XpX;
float* A = d_Abeta;
float* ssq = d_ssq;
const int mxm = m*m;
//
// Current error variance for this regression.
const float sigmasq = tau[ti];
//----------------------------
// IR=backsolve(chol(XpX/sigmasq+A),diag(k))
float IR[MDIM];
{
float tmp0[MDIM];
for(int i=0; i < mxm; i++)
tmp0[i] = XpX[i] / sigmasq + A[i];
mdgbacksolve(tmp0, &m, IR);
}
//----------------------------
// Xpy
float Xpy[XDIM];
float* yblock = &y[ti*nobs];
mvtcrossp(X, yblock, Xpy, &nobs, &m);
//----------------------------
// btilde=crossprod(t(IR))%*%(Xpy/sigmasq+A%*%betabar)
float* betabar = &d_betabar[ti*m];
float btilde[XDIM];
{
float tmp1[XDIM];
mvprod(A,betabar, tmp1, &m, &m);
// (Xpy/sigmasq+A%*%betabar)
for (int i=0; i<m; i++)
tmp1[i] = Xpy[i] / sigmasq + tmp1[i];
// crossprod(t(IR))
float cIR[MDIM];
mtcrossp(IR, IR, cIR, &m);
mvprod(cIR, tmp1, btilde, &m, &m);
}
//----------------------------
// beta = btilde + IR%*%rnorm(k)
// Update betabar
float beta[XDIM];
{
float tmp1[XDIM];
for(int i=0; i < m; i++) tmp1[i]=drng.d_rnorm();
// d_rnorm(&drng, m, 0., 1., tmp1);
mvprod(IR, tmp1, beta, &m, &m);
for (int i=0; i < m; i++)
beta[i] = beta[i] + btilde[i];
}
//----------------------------
// res=y-X%*%beta
// s=t(res)%*%res
// sigmasq=(nu*ssq + s)/rchisq(1,nu+n)
float s;
float resid[OBSDIM];
mvprod(X, beta, resid, &nobs, &m);
for(int i=0; i < nobs; i++)
resid[i] = yblock[i] - resid[i];
vprod(resid, resid, &s, &nobs);
float rchi;
rchi = drng.d_rchisq();
// d_rchisq(&drng, 1, &rchi);
//----------------------------
// Results: write the new variance and the new coefficient draw back to
// global memory (d_betabar doubles as the prior mean for the next sweep).
tau[ti] = (nu*ssq[ti] + s)/rchi;
// __syncthreads();
int ix=ti*m;
for(int i=0; i < m; i++) {
d_betabar[ix+i] = beta[i];
}
}
}
|
fa749970d60c929d31ff54bcb923d01463e9180f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "redukcja.h"
// Final stage of the shared-memory reduction, run by the last warp only
// (callers guard with tid < 32). No __syncthreads() is used here; the
// volatile qualifier forces each partial sum to be re-read from shared
// memory between steps (classic pre-Volta warp-synchronous idiom).
__device__ void warpReduce(volatile real *aux, size_t tid) {
for (size_t step = 32; step > 0; step >>= 1) {
aux[tid] += aux[tid + step];
}
}
// <<Unroll last warp>>
// Dot product <x, y> over dim elements, single-result kernel: each thread
// serially sums its own contiguous sector of the index range, partials are
// combined by a shared-memory tree reduction, and the last 64 values are
// folded barrier-free by warpReduce. Assumes it is launched with
// blockDim.x == Block and Block >= 64 (warpReduce reads aux[tid+32]) --
// TODO confirm against the launch site.
template<size_t Block>
__global__ void device::scalar(real *x, real *y, int dim, real *res) {
size_t tid = threadIdx.x;
// Sum a sector for the thread.
size_t lo = (tid * dim) / blockDim.x,
hi = ((tid + 1) * dim) / blockDim.x;
real total = 0;
for (size_t i = lo; i < hi; ++i) {
total += x[i] * y[i];
}
__shared__ real aux[Block];
aux[tid] = total;
__syncthreads();
// Tree reduction down to 64 partials; a barrier is required per step
// because threads read slots written by other warps.
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) aux[tid] += aux[tid + s];
__syncthreads();
}
// Warp-synchronous tail; thread 0 is the single writer of the result.
if (tid < 32) warpReduce(aux, tid);
if (tid == 0) *res = aux[0];
}
| fa749970d60c929d31ff54bcb923d01463e9180f.cu | #include "redukcja.h"
// Final stage of the shared-memory reduction, run by the last warp only
// (callers guard with tid < 32). No __syncthreads() is used here; the
// volatile qualifier forces each partial sum to be re-read from shared
// memory between steps (classic pre-Volta warp-synchronous idiom).
__device__ void warpReduce(volatile real *aux, size_t tid) {
for (size_t step = 32; step > 0; step >>= 1) {
aux[tid] += aux[tid + step];
}
}
// <<Unroll last warp>>
// Dot product <x, y> over dim elements, single-result kernel: each thread
// serially sums its own contiguous sector of the index range, partials are
// combined by a shared-memory tree reduction, and the last 64 values are
// folded barrier-free by warpReduce. Assumes it is launched with
// blockDim.x == Block and Block >= 64 (warpReduce reads aux[tid+32]) --
// TODO confirm against the launch site.
template<size_t Block>
__global__ void device::scalar(real *x, real *y, int dim, real *res) {
size_t tid = threadIdx.x;
// Sum a sector for the thread.
size_t lo = (tid * dim) / blockDim.x,
hi = ((tid + 1) * dim) / blockDim.x;
real total = 0;
for (size_t i = lo; i < hi; ++i) {
total += x[i] * y[i];
}
__shared__ real aux[Block];
aux[tid] = total;
__syncthreads();
// Tree reduction down to 64 partials; a barrier is required per step
// because threads read slots written by other warps.
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) aux[tid] += aux[tid + s];
__syncthreads();
}
// Warp-synchronous tail; thread 0 is the single writer of the result.
if (tid < 32) warpReduce(aux, tid);
if (tid == 0) *res = aux[0];
}
|
a3705a8d2fe828797e16065476ddb1b475db9b7a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
//#include <chrono>
using namespace std;
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define BLOCK_SIZE 1024
#define CSC(call) { \
hipError_t err = call; \
if(err != hipSuccess) { \
fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, hipGetErrorString(err)); \
exit(1); \
} \
} while (0)
// Partial-pivot selection for column i, launched <<<1,1>>> (the scan is
// sequential, single thread). Walks rows [offset, height) through the row
// permutation table `index`, finds the entry of column i with the largest
// magnitude above the 1e-7 zero threshold, reports its slot in
// *index_of_max (-1 if the column is effectively zero below the pivot),
// and swaps that slot into position `offset`.
__global__ void kernel_main(double * matrix, unsigned int i, unsigned int height, unsigned int width, unsigned int * index, int *index_of_max, unsigned int offset)
{
// Entries with magnitude <= this threshold are treated as zero.
double max = 0.0000001;
*index_of_max = -1;
for (unsigned int j = offset; j < height; j++)
{
if (fabs(matrix[index[j] * width + i]) > max)
{
max = fabs(matrix[index[j] * width + i]);
*index_of_max = j;
}
}
// Bring the pivot row to the current elimination position by swapping
// permutation entries -- rows themselves never move in memory.
if (*index_of_max != -1)
{
unsigned int tmp = index[*index_of_max];
index[*index_of_max] = index[offset];
index[offset] = tmp;
}
}
//__device__ double ratio_ = 0;
/*__global__ void kernel_count_ratio(double * matrix, unsigned int i, unsigned int l, unsigned int height, unsigned int width, unsigned int * index, unsigned int offset)
{
ratio_ = matrix[index[l] * width + i] / matrix[index[(offset - 1)] * width + i];
}*/
// Computes elimination multipliers for column i: for every row l in
// [offset, height), ratios[l] = M[index[l]][i] / pivot, where the pivot is
// M[index[offset-1]][i] (the row swapped into place by kernel_main).
// Grid-stride loop over rows, so any launch configuration covers the range.
__global__ void kernel_count_ratios(double * matrix, unsigned int i, unsigned int height, unsigned int width, unsigned int * index, unsigned int offset, double * ratios)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// The pivot is the same for every thread; keep it in a register. The
// previous version staged it in an unsynchronized __shared__ double --
// a write-write race (every thread stored it, no __syncthreads() before
// the read) that compute-sanitizer's racecheck flags.
const double pivot = matrix[index[(offset - 1)] * width + i];
while (offset + tid < height)
{
ratios[offset + tid] = matrix[index[offset + tid] * width + i] / pivot;
tid += blockDim.x * gridDim.x;
}
}
/*__global__ void kernel_rows_substraction(double * matrix, unsigned int i, unsigned int l, unsigned int width, unsigned int * index, unsigned int offset)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (i + tid < width)
{
matrix[index[l] * width + i + tid] -= ratio_ * matrix[index[(offset - 1)] * width + i + tid];
tid += blockDim.x * gridDim.x;
}
}*/
// Elimination step for column i: subtracts ratios[l] * (pivot row) from
// every row l in [offset, height), across all columns >= i. Threads stride
// over columns (grid-stride loop); each thread handles one column slice
// for all remaining rows, reusing the pivot-row element it loaded once.
__global__ void kernel_rows_substraction(double * matrix, unsigned int i, unsigned int height, unsigned int width, unsigned int * index, unsigned int offset, double * ratios)
{
/*for (unsigned int l = offset; l < height; l++)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
double ratio_ = ratios[l];
while (i + tid < width)
{
matrix[index[l] * width + i + tid] -= ratio_ * matrix[index[(offset - 1)] * width + i + tid];
tid += blockDim.x * gridDim.x;
}
}*/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (i + tid < width)
{
// Pivot-row element for this column, loaded once per column slice.
double factor = matrix[index[(offset - 1)] * width + i + tid];
for (unsigned int l = offset; l < height; l++)
{
matrix[index[l] * width + i + tid] -= ratios[l] * factor;
}
tid += blockDim.x * gridDim.x;
}
}
// Rank count over the (already eliminated) permuted matrix, launched
// <<<1,1>>>. Walks the echelon staircase: a nonzero (>1e-7) entry advances
// both row and column and increments the rank; a zero entry advances only
// the column.
__global__ void kernel_rank_count(double * matrix, unsigned int height, unsigned int width, unsigned int * index, unsigned int *rank)
{
unsigned int i = 0, j = 0;
//*rank = 0;
// Accumulate in a register; the final value is written once at the end.
unsigned int rank_ = 0;
while (true)
{
if (fabs(matrix[index[i] * width + j]) > 0.0000001)
{
rank_++;
i++;
j++;
if (i >= height || j >= width)
break;
}
else
{
j++;
if (j >= width)
break;
}
}
(*rank) = rank_;
}
// Reads an unsigned height/width pair followed by a height x width matrix
// of doubles from stdin, computes the matrix rank on the GPU via Gaussian
// elimination with partial pivoting (rows are permuted through an index
// table, never physically swapped), and prints the rank.
int main()
{
unsigned int height, width;
// "%u" parses an unsigned int; the old "%ud" format additionally tried to
// match a literal 'd' after the number, which always failed. Also reject
// unreadable input instead of using indeterminate values.
if (scanf("%u", &height) != 1 || scanf("%u", &width) != 1 ||
height == 0 || width == 0)
{
cout << "ERROR: incorrect data\n";
return 0;
}
// 1x1 matrix: rank is 1 iff the single entry is numerically nonzero.
if (height == 1 && width == 1)
{
double tmp;
if (scanf("%lf", &tmp) != 1)
{
cout << "ERROR: incorrect data\n";
return 0;
}
cout << ((fabs(tmp) > 0.0000001) ? 1 : 0) << '\n';
return 0;
}
double * matrix = (double *)malloc(sizeof(double) * height * width);
for (unsigned int i = 0; i < height; i++)
{
for (unsigned int j = 0; j < width; j++)
{
double tmp;
scanf("%lf", &tmp);
matrix[i * width + j] = tmp;
}
}
double * dev_matrix;
CSC(hipMalloc(&dev_matrix, sizeof(double) * height * width));
CSC(hipMemcpy(dev_matrix, matrix, sizeof(double) * height * width, hipMemcpyHostToDevice));
// Row permutation table, initially the identity.
unsigned int * index = (unsigned int *)malloc(sizeof(unsigned int) * height);
for (unsigned int i = 0; i < height; i++)
{
index[i] = i;
}
unsigned int * dev_index;
CSC(hipMalloc(&dev_index, sizeof(unsigned int) * height));
CSC(hipMemcpy(dev_index, index, sizeof(unsigned int) * height, hipMemcpyHostToDevice));
int host_index_of_max;
int * device_index_of_max;
CSC(hipMalloc(&device_index_of_max, sizeof(int)));
unsigned int threads_count = BLOCK_SIZE;
unsigned int blocks_count = MAX(width, height) / threads_count + 1;
unsigned int offset = 0;
double * dev_ratios;
CSC(hipMalloc(&dev_ratios, sizeof(double) * height));
// One elimination step per column: pick a pivot, compute the multipliers,
// subtract the pivot row from everything below it. Columns with no usable
// pivot are skipped (offset does not advance).
for (unsigned int i = 0; i < width; i++)
{
kernel_main << < 1, 1 >> > (dev_matrix, i, height, width, dev_index, device_index_of_max, offset);
CSC(hipMemcpy(&host_index_of_max, device_index_of_max, sizeof(int), hipMemcpyDeviceToHost));
if (host_index_of_max != -1)
{
offset++;
kernel_count_ratios << < height / threads_count + 1, threads_count >> >(dev_matrix, i, height, width, dev_index, offset, dev_ratios);
kernel_rows_substraction << < blocks_count, threads_count >> > (dev_matrix, i, height, width, dev_index, offset, dev_ratios);
}
}
unsigned int * dev_rank;
CSC(hipMalloc(&dev_rank, sizeof(unsigned int)));
kernel_rank_count << < 1, 1 >> > (dev_matrix, height, width, dev_index, dev_rank);
unsigned int rank;
CSC(hipMemcpy(&rank, dev_rank, sizeof(unsigned int), hipMemcpyDeviceToHost));
cout << rank << '\n';
free(matrix);
free(index);
CSC(hipFree(dev_matrix));
CSC(hipFree(dev_index));
CSC(hipFree(dev_rank));
// These two allocations were previously leaked.
CSC(hipFree(dev_ratios));
CSC(hipFree(device_index_of_max));
hipProfilerStop();
return 0;
}
| a3705a8d2fe828797e16065476ddb1b475db9b7a.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <stdlib.h>
//#include <chrono>
using namespace std;
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define BLOCK_SIZE 1024
#define CSC(call) { \
cudaError err = call; \
if(err != cudaSuccess) { \
fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err)); \
exit(1); \
} \
} while (0)
// Partial-pivot selection for column i, launched <<<1,1>>> (the scan is
// sequential, single thread). Walks rows [offset, height) through the row
// permutation table `index`, finds the entry of column i with the largest
// magnitude above the 1e-7 zero threshold, reports its slot in
// *index_of_max (-1 if the column is effectively zero below the pivot),
// and swaps that slot into position `offset`.
__global__ void kernel_main(double * matrix, unsigned int i, unsigned int height, unsigned int width, unsigned int * index, int *index_of_max, unsigned int offset)
{
// Entries with magnitude <= this threshold are treated as zero.
double max = 0.0000001;
*index_of_max = -1;
for (unsigned int j = offset; j < height; j++)
{
if (fabs(matrix[index[j] * width + i]) > max)
{
max = fabs(matrix[index[j] * width + i]);
*index_of_max = j;
}
}
// Bring the pivot row to the current elimination position by swapping
// permutation entries -- rows themselves never move in memory.
if (*index_of_max != -1)
{
unsigned int tmp = index[*index_of_max];
index[*index_of_max] = index[offset];
index[offset] = tmp;
}
}
//__device__ double ratio_ = 0;
/*__global__ void kernel_count_ratio(double * matrix, unsigned int i, unsigned int l, unsigned int height, unsigned int width, unsigned int * index, unsigned int offset)
{
ratio_ = matrix[index[l] * width + i] / matrix[index[(offset - 1)] * width + i];
}*/
// Computes elimination multipliers for column i: for every row l in
// [offset, height), ratios[l] = M[index[l]][i] / pivot, where the pivot is
// M[index[offset-1]][i] (the row swapped into place by kernel_main).
// Grid-stride loop over rows, so any launch configuration covers the range.
__global__ void kernel_count_ratios(double * matrix, unsigned int i, unsigned int height, unsigned int width, unsigned int * index, unsigned int offset, double * ratios)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// The pivot is the same for every thread; keep it in a register. The
// previous version staged it in an unsynchronized __shared__ double --
// a write-write race (every thread stored it, no __syncthreads() before
// the read) that compute-sanitizer's racecheck flags.
const double pivot = matrix[index[(offset - 1)] * width + i];
while (offset + tid < height)
{
ratios[offset + tid] = matrix[index[offset + tid] * width + i] / pivot;
tid += blockDim.x * gridDim.x;
}
}
/*__global__ void kernel_rows_substraction(double * matrix, unsigned int i, unsigned int l, unsigned int width, unsigned int * index, unsigned int offset)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (i + tid < width)
{
matrix[index[l] * width + i + tid] -= ratio_ * matrix[index[(offset - 1)] * width + i + tid];
tid += blockDim.x * gridDim.x;
}
}*/
// Elimination step for column i: subtracts ratios[l] * (pivot row) from
// every row l in [offset, height), across all columns >= i. Threads stride
// over columns (grid-stride loop); each thread handles one column slice
// for all remaining rows, reusing the pivot-row element it loaded once.
__global__ void kernel_rows_substraction(double * matrix, unsigned int i, unsigned int height, unsigned int width, unsigned int * index, unsigned int offset, double * ratios)
{
/*for (unsigned int l = offset; l < height; l++)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
double ratio_ = ratios[l];
while (i + tid < width)
{
matrix[index[l] * width + i + tid] -= ratio_ * matrix[index[(offset - 1)] * width + i + tid];
tid += blockDim.x * gridDim.x;
}
}*/
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (i + tid < width)
{
// Pivot-row element for this column, loaded once per column slice.
double factor = matrix[index[(offset - 1)] * width + i + tid];
for (unsigned int l = offset; l < height; l++)
{
matrix[index[l] * width + i + tid] -= ratios[l] * factor;
}
tid += blockDim.x * gridDim.x;
}
}
// Rank count over the (already eliminated) permuted matrix, launched
// <<<1,1>>>. Walks the echelon staircase: a nonzero (>1e-7) entry advances
// both row and column and increments the rank; a zero entry advances only
// the column.
__global__ void kernel_rank_count(double * matrix, unsigned int height, unsigned int width, unsigned int * index, unsigned int *rank)
{
unsigned int i = 0, j = 0;
//*rank = 0;
// Accumulate in a register; the final value is written once at the end.
unsigned int rank_ = 0;
while (true)
{
if (fabs(matrix[index[i] * width + j]) > 0.0000001)
{
rank_++;
i++;
j++;
if (i >= height || j >= width)
break;
}
else
{
j++;
if (j >= width)
break;
}
}
(*rank) = rank_;
}
// Reads an unsigned height/width pair followed by a height x width matrix
// of doubles from stdin, computes the matrix rank on the GPU via Gaussian
// elimination with partial pivoting (rows are permuted through an index
// table, never physically swapped), and prints the rank.
int main()
{
unsigned int height, width;
// "%u" parses an unsigned int; the old "%ud" format additionally tried to
// match a literal 'd' after the number, which always failed. Also reject
// unreadable input instead of using indeterminate values.
if (scanf("%u", &height) != 1 || scanf("%u", &width) != 1 ||
height == 0 || width == 0)
{
cout << "ERROR: incorrect data\n";
return 0;
}
// 1x1 matrix: rank is 1 iff the single entry is numerically nonzero.
if (height == 1 && width == 1)
{
double tmp;
if (scanf("%lf", &tmp) != 1)
{
cout << "ERROR: incorrect data\n";
return 0;
}
cout << ((fabs(tmp) > 0.0000001) ? 1 : 0) << '\n';
return 0;
}
double * matrix = (double *)malloc(sizeof(double) * height * width);
for (unsigned int i = 0; i < height; i++)
{
for (unsigned int j = 0; j < width; j++)
{
double tmp;
scanf("%lf", &tmp);
matrix[i * width + j] = tmp;
}
}
double * dev_matrix;
CSC(cudaMalloc(&dev_matrix, sizeof(double) * height * width));
CSC(cudaMemcpy(dev_matrix, matrix, sizeof(double) * height * width, cudaMemcpyHostToDevice));
// Row permutation table, initially the identity.
unsigned int * index = (unsigned int *)malloc(sizeof(unsigned int) * height);
for (unsigned int i = 0; i < height; i++)
{
index[i] = i;
}
unsigned int * dev_index;
CSC(cudaMalloc(&dev_index, sizeof(unsigned int) * height));
CSC(cudaMemcpy(dev_index, index, sizeof(unsigned int) * height, cudaMemcpyHostToDevice));
int host_index_of_max;
int * device_index_of_max;
CSC(cudaMalloc(&device_index_of_max, sizeof(int)));
unsigned int threads_count = BLOCK_SIZE;
unsigned int blocks_count = MAX(width, height) / threads_count + 1;
unsigned int offset = 0;
double * dev_ratios;
CSC(cudaMalloc(&dev_ratios, sizeof(double) * height));
// One elimination step per column: pick a pivot, compute the multipliers,
// subtract the pivot row from everything below it. Columns with no usable
// pivot are skipped (offset does not advance).
for (unsigned int i = 0; i < width; i++)
{
kernel_main << < 1, 1 >> > (dev_matrix, i, height, width, dev_index, device_index_of_max, offset);
CSC(cudaMemcpy(&host_index_of_max, device_index_of_max, sizeof(int), cudaMemcpyDeviceToHost));
if (host_index_of_max != -1)
{
offset++;
kernel_count_ratios << < height / threads_count + 1, threads_count >> >(dev_matrix, i, height, width, dev_index, offset, dev_ratios);
kernel_rows_substraction << < blocks_count, threads_count >> > (dev_matrix, i, height, width, dev_index, offset, dev_ratios);
}
}
unsigned int * dev_rank;
CSC(cudaMalloc(&dev_rank, sizeof(unsigned int)));
kernel_rank_count << < 1, 1 >> > (dev_matrix, height, width, dev_index, dev_rank);
unsigned int rank;
CSC(cudaMemcpy(&rank, dev_rank, sizeof(unsigned int), cudaMemcpyDeviceToHost));
cout << rank << '\n';
free(matrix);
free(index);
CSC(cudaFree(dev_matrix));
CSC(cudaFree(dev_index));
CSC(cudaFree(dev_rank));
// These two allocations were previously leaked.
CSC(cudaFree(dev_ratios));
CSC(cudaFree(device_index_of_max));
cudaProfilerStop();
return 0;
}
|
1890c90306a74dd7b3bd455187317a5fc97268b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <bitset>
#include <assert.h>
#include "common.h"
#include "gpu_opt.h"
#define N_COLS 4
#define N_ROWS 4
#define SUBKEY_SIZE 16
#define BYTE0(x) (x & 0x000000FF)
#define BYTE1(x) (x & 0x0000FF00)
#define BYTE2(x) (x & 0x00FF0000)
#define BYTE3(x) (x & 0xFF000000)
// If someone else has an optimized flag, get rid of it.
#ifdef OPTIMIZED
#undef OPTIMIZED
#endif
#define OPTIMIZED 1
namespace PAES {
namespace GPU_OPT {
using PAES::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// Small helpers for fast mod 16 / mod 4, used a lot when indexing keys in
// shared memory. They work because 16 and 4 are powers of two, so the
// remainder is just the low bits (valid for non-negative values).
template<typename T>
__host__ __device__ __forceinline__ T mod16(T n) {
return n & 15;
}
// n mod 4 via the low two bits.
template<typename T>
__host__ __device__ __forceinline__ T mod4(T n) {
return n & 3;
}
// Length in bytes of the raw key for the given AES flavor (-1 if unknown).
__host__ int get_key_len(const AESType& a) {
if (a == AESType::AES128) { return 16; }
if (a == AESType::AES192) { return 24; }
if (a == AESType::AES256) { return 32; }
return -1;
}
// Length in bytes of the expanded round-key schedule for the given flavor
// (one 16-byte round key per round plus one; -1 if unknown).
__host__ int get_exp_key_len(const AESType& a) {
if (a == AESType::AES128) { return 176; }
if (a == AESType::AES192) { return 208; }
if (a == AESType::AES256) { return 240; }
return -1;
};
// Number of cipher rounds for the given AES flavor (-1 if unknown).
__host__ int get_num_rounds(const AESType& a) {
if (a == AESType::AES128) { return 10; }
if (a == AESType::AES192) { return 12; }
if (a == AESType::AES256) { return 14; }
return -1;
}
// ECB-encrypts `data` (datalen must be a multiple of 16) in place with the
// given key. Each 16-byte block is enciphered independently with the same
// schedule -- inherently insecure (no diffusion across blocks) but
// trivially parallel: one CUDA thread per AES block.
__host__ void encrypt_ecb(const AESType& flavor, uint8_t * key, uint8_t * data, uint32_t datalen)
{
assert(mod16(datalen) == 0);
// Expand the key on the host, then stage the schedule on the device.
uint8_t* expkey = (uint8_t*)malloc(get_exp_key_len(flavor));
expandKey(flavor, key, expkey, get_key_len(flavor), get_num_rounds(flavor));
uint8_t* d_expkey;
hipMalloc(&d_expkey, get_exp_key_len(flavor));
hipMemcpy(d_expkey, expkey, get_exp_key_len(flavor), hipMemcpyHostToDevice);
// The host copy of the schedule is no longer needed once it is on the
// device (it was previously leaked on every call).
free(expkey);
// Copy data to device memory
uint8_t* d_data;
hipMalloc(&d_data, sizeof(uint8_t) * datalen);
hipMemcpy(d_data, data, sizeof(uint8_t) * datalen, hipMemcpyHostToDevice);
// One thread per 16-byte AES block, 1024 threads per CUDA block.
int threadsPerBlock = 1024;
int aes_blocks = datalen / BLOCKSIZE;
int cudaBlocks = (aes_blocks + 1023) / 1024;
timer().startGpuTimer();
core_encrypt_ecb << <cudaBlocks, threadsPerBlock >> > (aes_blocks, d_data, d_expkey, get_num_rounds(flavor));
checkCUDAError("ECB Encrypt Failed!");
hipDeviceSynchronize();
timer().endGpuTimer();
// Retrieve the ciphertext (encryption was done in place on the device).
hipMemcpy(data, d_data, sizeof(uint8_t) * datalen, hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_expkey);
}
// ECB-decrypts `data` (datalen must be a multiple of 16) in place with the
// given key. Mirrors encrypt_ecb: one thread per 16-byte block, same
// expanded schedule, inverse cipher in the kernel.
__host__ void decrypt_ecb(const AESType& flavor, uint8_t * key, uint8_t * data, uint32_t datalen)
{
assert(mod16(datalen) == 0);
// Expand the key on the host, then stage the schedule on the device.
uint8_t* expkey = (uint8_t*)malloc(get_exp_key_len(flavor));
expandKey(flavor, key, expkey, get_key_len(flavor), get_num_rounds(flavor));
uint8_t* d_expkey;
hipMalloc(&d_expkey, get_exp_key_len(flavor));
hipMemcpy(d_expkey, expkey, get_exp_key_len(flavor), hipMemcpyHostToDevice);
// The host copy of the schedule is no longer needed once it is on the
// device (it was previously leaked on every call).
free(expkey);
// Copy data to device memory
uint8_t* d_data;
hipMalloc(&d_data, sizeof(uint8_t) * datalen);
hipMemcpy(d_data, data, sizeof(uint8_t) * datalen, hipMemcpyHostToDevice);
// One thread per 16-byte AES block, 1024 threads per CUDA block.
int threadsPerBlock = 1024;
int aes_blocks = datalen / BLOCKSIZE;
int cudaBlocks = (aes_blocks + 1023) / 1024;
timer().startGpuTimer();
core_decrypt_ecb << <cudaBlocks, threadsPerBlock >> > (aes_blocks, d_data, d_expkey, get_num_rounds(flavor));
checkCUDAError("ECB Decrypt Failed!");
hipDeviceSynchronize();
timer().endGpuTimer();
// Retrieve the plaintext (decryption was done in place on the device).
hipMemcpy(data, d_data, sizeof(uint8_t) * datalen, hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_expkey);
}
// CTR-mode encryption of `data` (datalen must be a multiple of 16).
// The data itself never passes through the cipher: each thread encrypts a
// counter value derived from the IV (`ctr`, one BLOCKSIZE-byte block) and
// its block index, then xors the result with its data block.
__host__ void encrypt_ctr(const AESType& flavor, uint8_t * key, uint8_t * ctr, uint8_t * data, uint32_t datalen)
{
assert(mod16(datalen) == 0);
// Expand the key on the host, then stage the schedule on the device.
uint8_t* expkey = (uint8_t*)malloc(get_exp_key_len(flavor));
expandKey(flavor, key, expkey, get_key_len(flavor), get_num_rounds(flavor));
uint8_t* d_expkey;
hipMalloc(&d_expkey, get_exp_key_len(flavor));
hipMemcpy(d_expkey, expkey, get_exp_key_len(flavor), hipMemcpyHostToDevice);
// The host copy of the schedule is no longer needed once it is on the
// device (it was previously leaked on every call).
free(expkey);
// Copy data to device memory
uint8_t* d_data;
hipMalloc(&d_data, sizeof(uint8_t) * datalen);
hipMemcpy(d_data, data, sizeof(uint8_t) * datalen, hipMemcpyHostToDevice);
// The base counter stays constant on the device; each kernel thread reads
// it into its own memory and applies its own increment.
uint8_t* d_ctr;
hipMalloc(&d_ctr, sizeof(uint8_t) * BLOCKSIZE);
hipMemcpy(d_ctr, ctr, sizeof(uint8_t) * BLOCKSIZE, hipMemcpyHostToDevice);
// One thread per 16-byte AES block, 1024 threads per CUDA block.
int threadsPerBlock = 1024;
int aes_blocks = datalen / BLOCKSIZE;
int cudaBlocks = (aes_blocks + 1023) / 1024;
timer().startGpuTimer();
core_xcrypt_ctr << <cudaBlocks, threadsPerBlock>> > (aes_blocks, d_data, d_expkey, get_num_rounds(flavor), d_ctr);
checkCUDAError("CTR Xcrypt Failed!");
hipDeviceSynchronize();
timer().endGpuTimer();
// Retrieve the result (xcrypt was done in place on the device).
hipMemcpy(data, d_data, sizeof(uint8_t) * datalen, hipMemcpyDeviceToHost);
hipFree(d_data);
hipFree(d_expkey);
hipFree(d_ctr);
}
__host__ void decrypt_ctr(const AESType& flavor, uint8_t * key, uint8_t * ctr, uint8_t * data, uint32_t datalen)
{
// A convenient feature of CTR mode: decryption is the SAME operation as
// encryption (xor with the identical keystream), so no inverse cipher is
// needed -- just delegate.
encrypt_ctr(flavor, key, ctr, data, datalen);
}
// Rotates the 4 bytes of a key-schedule word one position to the left:
// [a0,a1,a2,a3] becomes [a1,a2,a3,a0].
__host__ void rot_word(uint8_t* n) {
const uint8_t first = n[0];
for (int b = 0; b < 3; ++b) {
n[b] = n[b + 1];
}
n[3] = first;
}
// SubWord(): applies the AES S-box (host-side table h_sbox) to each of the
// four bytes of a key-schedule word, in place.
__host__ void sub_word(uint8_t* n) {
for (int b = 0; b < 4; ++b) {
n[b] = h_sbox[n[b]];
}
}
// Expands the raw AES key into the per-round key schedule (Rijndael key
// schedule). Writes get_exp_key_len(flavor) bytes into expkey; keysize is
// the raw key length in bytes and num_rounds the round count for `flavor`.
__host__ void expandKey(const AESType& flavor, uint8_t* ogkey, uint8_t* expkey, uint32_t keysize, uint32_t num_rounds) {
// The AES key will either be 128, 192, or 256 bits.
// The AES algorithm itself is not actually modified by the key size, but the number
// of rounds is. In expandKey() we take the provided key and stretch it out to create enough
// 128bit subkeys/roundkeys for each round.
// This is only done once, so we won't parallelize this.
// The logic below follows the Rijndael Key Schedule (https://en.wikipedia.org/wiki/Rijndael_key_schedule)
// Code adapted from tiny-aes since this is not parallelizable (each subkey depends on the value
// of the previous subkey) and there is nothing inherently unique about it.
// Changes were made to make keysize a runtime option.
unsigned i, j, k;
uint8_t tmp[4]; // Used for the column/row operations
uint32_t N = keysize / 4; // Length of key in 32bit words.
// The first round key is the key itself.
for (i = 0; i < N; ++i) {
expkey[(i * 4) + 0] = ogkey[(i * 4) + 0];
expkey[(i * 4) + 1] = ogkey[(i * 4) + 1];
expkey[(i * 4) + 2] = ogkey[(i * 4) + 2];
expkey[(i * 4) + 3] = ogkey[(i * 4) + 3];
}
// All other round keys are found from the previous round keys.
for (i = N; i < N_COLS * (num_rounds + 1); ++i) {
// Tuck away the previous word; it will be transformed and reused below.
k = (i - 1) * 4;
tmp[0] = expkey[k + 0];
tmp[1] = expkey[k + 1];
tmp[2] = expkey[k + 2];
tmp[3] = expkey[k + 3];
// If i % N is zero, xor the previous word with sbox(rotate(previousword)) and
// xor that with the Round Constant. The round constant index depends on
// how many key-lengths we have generated so far.
if (i % N == 0) {
rot_word(tmp); // Rotate...
sub_word(tmp); // Substitute...
tmp[0] = tmp[0] ^ roundcon[i / N]; // Apply round coefficient
}
// Extra SubWord step, applied only in AES256 mode (per the key schedule).
else if (flavor == AESType::AES256 && (i % N == 4)) {
sub_word(tmp); // Just substitute
}
j = i * 4;
k = (i - N) * 4;
expkey[j + 0] = expkey[k + 0] ^ tmp[0];
expkey[j + 1] = expkey[k + 1] ^ tmp[1];
expkey[j + 2] = expkey[k + 2] ^ tmp[2];
expkey[j + 3] = expkey[k + 3] ^ tmp[3];
}
}
// ECB-encrypts N independent 16-byte AES blocks in place; thread idx owns
// block idx. `key` is the expanded round-key schedule; the S-box and
// GF(2^8) multiply tables are staged from constant into shared memory.
__global__ void core_encrypt_ecb(int N, uint8_t* data, const uint8_t* key, const int num_rounds) {
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Stage the lookup tables in shared memory. Two fixes vs. the original:
//  * index with threadIdx.x, not the global idx -- shared memory is
//    per-block, and the old global-index test meant only block 0 ever
//    initialized its copy (all other blocks read uninitialized bytes);
//  * do the staging and the barrier BEFORE the idx >= N early-out, so
//    every thread of the block reaches __syncthreads().
__shared__ uint8_t s_sbox[256];
__shared__ uint8_t s_mul2[256];
__shared__ uint8_t s_mul3[256];
for (int t = threadIdx.x; t < 256; t += blockDim.x) {
s_sbox[t] = c_sbox[t];
s_mul2[t] = c_mul2[t];
s_mul3[t] = c_mul3[t];
}
__syncthreads();
if (idx >= N) {
return;
}
// Work on a local copy of this thread's block (OPTIMIZED path); one
// global write-back of the result at the end.
uint8_t myData[BLOCKSIZE];
((uint32_t*)myData)[0] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0];
((uint32_t*)myData)[1] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1];
((uint32_t*)myData)[2] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2];
((uint32_t*)myData)[3] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3];
// Initial round: xor the state with the first round key.
add_round_key(idx, 0, myData, key);
// Main rounds; the count depends on the AES flavor.
for (int r = 1; r < num_rounds; r++) {
sub_bytes(idx, myData, s_sbox);
shift_rows(idx, myData);
mix_columns(idx, myData, s_mul2, s_mul3);
add_round_key(idx, r, myData, key);
}
// Final round omits mix_columns, per the AES specification.
sub_bytes(idx, myData, s_sbox);
shift_rows(idx, myData);
add_round_key(idx, num_rounds, myData, key);
// Write the ciphertext back in place.
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0] = ((uint32_t*)myData)[0];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1] = ((uint32_t*)myData)[1];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2] = ((uint32_t*)myData)[2];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3] = ((uint32_t*)myData)[3];
}
// ECB-decrypts N independent 16-byte AES blocks in place (inverse cipher);
// thread idx owns block idx. Inverse S-box and GF(2^8) multiply tables are
// staged from constant into shared memory.
__global__ void core_decrypt_ecb(int N, uint8_t* data, const uint8_t* key, const int num_rounds) {
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Stage the inverse tables in shared memory. Same two fixes as the
// encrypt kernel: index with threadIdx.x (shared memory is per-block, so
// the old global-index test left every block except block 0
// uninitialized), and run the staging plus barrier BEFORE the idx >= N
// early-out so all threads reach __syncthreads().
__shared__ uint8_t s_rsbox[256];
__shared__ uint8_t s_mul9[256];
__shared__ uint8_t s_mulB[256];
__shared__ uint8_t s_mulD[256];
__shared__ uint8_t s_mulE[256];
for (int t = threadIdx.x; t < 256; t += blockDim.x) {
s_rsbox[t] = c_rsbox[t];
s_mul9[t] = c_mul9[t];
s_mulB[t] = c_mulB[t];
s_mulD[t] = c_mulD[t];
s_mulE[t] = c_mulE[t];
}
__syncthreads();
if (idx >= N) {
return;
}
// Work on a local copy of this thread's block (OPTIMIZED path); one
// global write-back of the result at the end.
uint8_t myData[BLOCKSIZE];
((uint32_t*)myData)[0] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0];
((uint32_t*)myData)[1] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1];
((uint32_t*)myData)[2] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2];
((uint32_t*)myData)[3] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3];
// Rounds run in reverse order relative to encryption.
add_round_key(idx, num_rounds, myData, key);
for (int r = num_rounds - 1; r > 0; r--)
{
inv_shift_rows(idx, myData);
inv_sub_bytes(idx, myData, s_rsbox);
add_round_key(idx, r, myData, key);
inv_mix_columns(idx, myData, s_mul9, s_mulB, s_mulD, s_mulE);
}
// Final round omits inv_mix_columns, per the AES specification.
inv_shift_rows(idx, myData);
inv_sub_bytes(idx, myData, s_rsbox);
add_round_key(idx, 0, myData, key);
// Write the plaintext back in place.
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0] = ((uint32_t*)myData)[0];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1] = ((uint32_t*)myData)[1];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2] = ((uint32_t*)myData)[2];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3] = ((uint32_t*)myData)[3];
}
// Kernel: AES-CTR encryption/decryption (the operation is symmetric).
// Each thread encrypts the counter value (IV + its block index) and XORs
// the keystream into its 16-byte block of `data` in place.
// N = number of AES blocks; `key` = expanded key; `ctr` = the initial
// 16-byte counter/IV, read-only and shared by all threads.
__global__ void core_xcrypt_ctr(int N, uint8_t* data, const uint8_t* key, const int num_rounds, const uint8_t * ctr) {
// Length of buffer is ALWAYS 128 bits == 16 bytes
// This is defined by AES Algorithm
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
// Stage the S-box and GF(2^8) x2/x3 tables in shared memory.
__shared__ uint8_t s_sbox[256];
__shared__ uint8_t s_mul2[256];
__shared__ uint8_t s_mul3[256];
// If we have enough threads, let each one do one copy.
if (N >= 256 && idx < 256) {
s_sbox[idx] = c_sbox[idx];
s_mul2[idx] = c_mul2[idx];
s_mul3[idx] = c_mul3[idx];
}
// If we don't have enough blocks, just let thread 0 do it all.
// If you only have 256 blocks you should be doing AES in CPU
// but thats none of my business *sips tea*.
else if (idx == 0) {
for (int i = 0; i < 256; i++) {
s_sbox[i] = c_sbox[i];
s_mul2[i] = c_mul2[i];
s_mul3[i] = c_mul3[i];
}
}
// Wait for shared memory to load
__syncthreads();
// Each thread running this function will act on ONE block in memory.
// Unlike ECB, the data itself is only touched by the final XOR, so we keep
// a plain global-memory pointer here.
uint8_t* myData = data + idx * BLOCKSIZE;
// Copy the counter into registers/local mem and advance it by our index.
uint8_t myCtr[BLOCKSIZE];
for (int i = 0; i < BLOCKSIZE; i++) {
myCtr[i] = ctr[i];
}
ctr_increment(myCtr, idx);
// Now run the standard AES encryption rounds on the COUNTER (not the data).
// Initial Round Key Addition
// Each byte of the state is combined with a block of the round key using bitwise xor
add_round_key(idx, 0, myCtr, key);
// We perform the next steps for a number of rounds
// dependent on the flavor of AES.
for (int r = 1; r < num_rounds; r++) {
sub_bytes(idx, myCtr, s_sbox);
shift_rows(idx, myCtr);
mix_columns(idx, myCtr, s_mul2, s_mul3);
add_round_key(idx, r, myCtr, key);
}
// For the last step we do NOT perform the mix_columns step.
sub_bytes(idx, myCtr, s_sbox);
shift_rows(idx, myCtr);
add_round_key(idx, num_rounds, myCtr, key);
// myCtr is now the keystream block; XOR it into the data in place.
for (int i = 0; i < BLOCKSIZE; i++) {
myData[i] ^= myCtr[i];
}
}
// Adds `val` to the big-endian 128-bit counter `ctr` in place.
// AES-CTR derives each thread's counter as IV + block index, so this must be
// true multi-precision addition: byte b carries weight 256^(BLOCKSIZE-1-b).
// Fix: the previous code added min(remaining, 255 - ctr[b]) to successive
// bytes — treating every byte with weight 1 — and silently dropped the carry
// whenever a byte was already 0xFF (e.g. ctr=0, val=300 yielded the value
// 45*256 + 255 = 11775 instead of 300). This version does a standard
// ripple-carry add from the least-significant (last) byte upward.
__device__ void ctr_increment(uint8_t * ctr, int val)
{
unsigned int carry = (unsigned int)val;
for (int b = BLOCKSIZE - 1; b >= 0 && carry > 0; b--)
{
unsigned int sum = (unsigned int)ctr[b] + (carry & 0xFF);
ctr[b] = (uint8_t)(sum & 0xFF);
// Remaining addend shifted down one byte, plus the carry out of this byte.
carry = (carry >> 8) + (sum >> 8);
}
// Overflow past byte 0 wraps around (counter is modulo 2^128).
}
// AddRoundKey: XOR one 16-byte subkey into the state. The subkey for
// `round` starts at key[round * SUBKEY_SIZE] in the expanded key.
// Fix: the scalar (OPTIMIZED == 0) path only XORed the FIRST 4 bytes of the
// state; AddRoundKey must cover all SUBKEY_SIZE (16) bytes, exactly as the
// unrolled 32-bit path below already does.
__device__ void add_round_key(int idx, uint8_t round, uint8_t * data, const uint8_t * key)
{
#if OPTIMIZED == 0 // loop on 8bits
for (uint8_t i = 0; i < SUBKEY_SIZE; i++)
{
data[i] ^= key[(round * SUBKEY_SIZE) + i];
}
#else // unrolled loop, 32bits
// Requires data/key to be 4-byte aligned; each line handles one column.
((uint32_t*)data)[0] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 0];
((uint32_t*)data)[1] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 1];
((uint32_t*)data)[2] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 2];
((uint32_t*)data)[3] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 3];
#endif
}
// SubBytes: substitute every byte of the 16-byte state through the
// (shared-memory) S-box, in place. One thread owns the whole block, so
// the loop is sequential and needs no synchronization.
__device__ void sub_bytes(int idx, uint8_t * data, uint8_t* sbox)
{
for (int pos = 0; pos < 16; pos++) {
data[pos] = sbox[data[pos]];
}
}
// InvSubBytes: substitute every byte of the 16-byte state through the
// inverse S-box, in place. Sequential per-thread; no sync needed.
__device__ void inv_sub_bytes(int idx, uint8_t * data, uint8_t* rsbox)
{
for (int pos = 0; pos < 16; pos++) {
data[pos] = rsbox[data[pos]];
}
}
// ShiftRows: rotate row r of the state left by r positions (row 0 fixed).
// The state is laid out column-major: byte (row, col) lives at
// data[row + N_COLS * col], so each 32-bit word of `data` is one column.
__device__ void shift_rows(int idx, uint8_t * data)
{
// This is not as simple as the previous steps. If we want to parallelize this,
// it will need to be a read, followed by a syncthreads, and then a write.
// Could the overhead of a syncthreads be more expnsive then just reducing the
// parallelism???
// row0 -- No Shift
// rows 1 to 3, shift left by row ammount
#if OPTIMIZED == 0
// 12 8bit reads / 12 8bit writes
uint8_t tmp[4];
for (int row = 1; row < N_ROWS; row++) {
tmp[0] = data[row + N_COLS * 0];
tmp[1] = data[row + N_COLS * 1];
tmp[2] = data[row + N_COLS * 2];
tmp[3] = data[row + N_COLS * 3];
data[row + N_COLS * 0] = tmp[mod4(0 + row)];
data[row + N_COLS * 1] = tmp[mod4(1 + row)];
data[row + N_COLS * 2] = tmp[mod4(2 + row)];
data[row + N_COLS * 3] = tmp[mod4(3 + row)];
}
#else // Unrolled 4 32bit reads 4 32bit writes + some math
// Read our whole block in... (c0..c3 are the four state columns; BYTEk
// masks byte k of a word, so each output word gathers one byte per column)
uint32_t c0 = ((uint32_t*)data)[0];
uint32_t c1 = ((uint32_t*)data)[1];
uint32_t c2 = ((uint32_t*)data)[2];
uint32_t c3 = ((uint32_t*)data)[3];
// This looks a bit cryptic, but trust me, this is the end configuration
// once the math is done.
/*
| 0 1 2 3 | | 0 5 A F |
| 4 5 6 7 | --> | 4 9 E 3 |
| 8 9 A B | --> | 8 D 2 7 |
| C D E F | | C 1 6 B |
*/
((uint32_t*)data)[0] = BYTE0(c0) | BYTE1(c1) | BYTE2(c2) | BYTE3(c3);
((uint32_t*)data)[1] = BYTE0(c1) | BYTE1(c2) | BYTE2(c3) | BYTE3(c0);
((uint32_t*)data)[2] = BYTE0(c2) | BYTE1(c3) | BYTE2(c0) | BYTE3(c1);
((uint32_t*)data)[3] = BYTE0(c3) | BYTE1(c0) | BYTE2(c1) | BYTE3(c2);
#endif
}
// InvShiftRows: rotate row r of the state RIGHT by r positions (row 0
// fixed) — the exact inverse of shift_rows. State is column-major:
// byte (row, col) lives at data[row + N_COLS * col].
__device__ void inv_shift_rows(int idx, uint8_t * data)
{
// This is not as simple as the previous steps. If we want to parallelize this,
// it will need to be a read, followed by a syncthreads, and then a write.
// Could the overhead of a syncthreads be more expnsive then just reducing the
// parallelism???
// row0 -- No Shift
// rows 1 to 3, shift right (since we inv) by row ammount
#if OPTIMIZED == 0 // 12 8bit reads / 12 8bit writes
uint8_t tmp[4];
for (int row = 1; row < N_ROWS; row++) {
tmp[0] = data[row + N_COLS * 0];
tmp[1] = data[row + N_COLS * 1];
tmp[2] = data[row + N_COLS * 2];
tmp[3] = data[row + N_COLS * 3];
data[row + N_COLS * 0] = tmp[mod4(0 + 4 - row)];
data[row + N_COLS * 1] = tmp[mod4(1 + 4 - row)];
data[row + N_COLS * 2] = tmp[mod4(2 + 4 - row)];
data[row + N_COLS * 3] = tmp[mod4(3 + 4 - row)];
}
#else // Unrolled 4 32bit reads 4 32bit writes + some math
// Read our whole block in... (c0..c3 are the four state columns)
uint32_t c0 = ((uint32_t*)data)[0];
uint32_t c1 = ((uint32_t*)data)[1];
uint32_t c2 = ((uint32_t*)data)[2];
uint32_t c3 = ((uint32_t*)data)[3];
// This looks a bit cryptic, but trust me, this is the end configuration
// once the math is done. (Mirror image of the shift_rows gather.)
((uint32_t*)data)[0] = BYTE0(c0) | BYTE1(c3) | BYTE2(c2) | BYTE3(c1);
((uint32_t*)data)[1] = BYTE0(c1) | BYTE1(c0) | BYTE2(c3) | BYTE3(c2);
((uint32_t*)data)[2] = BYTE0(c2) | BYTE1(c1) | BYTE2(c0) | BYTE3(c3);
((uint32_t*)data)[3] = BYTE0(c3) | BYTE1(c2) | BYTE2(c1) | BYTE3(c0);
#endif
}
// MixColumns: multiply each state column by the fixed MDS matrix over
// GF(2^8), using precomputed x2/x3 lookup tables (xor is GF addition).
__device__ void mix_columns(int idx, uint8_t * data, uint8_t* mul2, uint8_t* mul3)
{
// This is the most complicated step, but can be improved using our lookup tables.
// Problem is, this is going to cause all sorts of contention because we read and write across
// 4 different banks.
/* Matrix used for mixin'
| 2 3 1 1 |
| 1 2 3 1 |
| 1 1 2 3 |
| 3 1 1 2 |
*/
// logic adapted from https://www.youtube.com/watch?v=bERjYzLqAfw
for (int i = 0; i < N_COLS; i++) {
#if OPTIMIZED == 0 // 8bit RW
uint8_t idx0 = i * N_COLS + 0;
uint8_t idx1 = i * N_COLS + 1;
uint8_t idx2 = i * N_COLS + 2;
uint8_t idx3 = i * N_COLS + 3;
// Hmmm... will compiler vectorize this as one read32?
uint8_t d0 = data[idx0];
uint8_t d1 = data[idx1];
uint8_t d2 = data[idx2];
uint8_t d3 = data[idx3];
data[idx0] = mul2[d0] ^ mul3[d1] ^ d2 ^ d3;
data[idx1] = d0 ^ mul2[d1] ^ mul3[d2] ^ d3;
data[idx2] = d0 ^ d1 ^ mul2[d2] ^ mul3[d3];
data[idx3] = mul3[d0] ^ d1 ^ d2 ^ mul2[d3];
#else // 32bit RW
// One 32bit read from global; in0..in3 are the column's four bytes.
uint32_t quarterblock = ((uint32_t*)data)[i];
uint8_t in0 = ((quarterblock & 0x000000FF) >> 0);
uint8_t in1 = ((quarterblock & 0x0000FF00) >> 8);
uint8_t in2 = ((quarterblock & 0x00FF0000) >> 16);
uint8_t in3 = ((quarterblock & 0xFF000000) >> 24);
// Use lookup table, with shared mem this is p good
uint32_t out0 = mul2[in0] ^ mul3[in1] ^ in2 ^ in3;
uint32_t out1 = in0 ^ mul2[in1] ^ mul3[in2] ^ in3;
uint32_t out2 = in0 ^ in1 ^ mul2[in2] ^ mul3[in3];
uint32_t out3 = mul3[in0] ^ in1 ^ in2 ^ mul2[in3];
// One 32bit write to global
((uint32_t*)data)[i] = out0 | (out1 << 8) | (out2 << 16) | (out3 << 24);
#endif
// Compute at runtime - Ended up slower, higher instruction cost, still high LG Throttle
//data[idx0] = rt_mul2(in0) ^ rt_mul3(in1) ^ in2 ^ in3;
//data[idx1] = in0 ^ rt_mul2(in1) ^ rt_mul3(in2) ^ in3;
//data[idx2] = in0 ^ in1 ^ rt_mul2(in2) ^ rt_mul3(in3);
//data[idx3] = rt_mul3(in0) ^ in1 ^ in2 ^ rt_mul2(in3);
}
// This can be parallelized by block, but going deeper will incur
// contention penalties. Not worth the trouble.
}
// InvMixColumns: multiply each state column by the inverse MDS matrix
// | E B D 9 |
// | 9 E B D |
// | D 9 E B |
// | B D 9 E |
// over GF(2^8), using the precomputed x9/xB/xD/xE lookup tables.
// Fix: removed an unused local (`uint8_t base = i * N_COLS;`) that was
// dead code in the OPTIMIZED == 0 branch.
__device__ void inv_mix_columns(int idx, uint8_t * data,
uint8_t* mul9, uint8_t* mulB, uint8_t* mulD, uint8_t* mulE)
{
for (int i = 0; i < N_COLS; i++) {
#if OPTIMIZED == 0 // 8bit RW
uint8_t idx0 = i * N_COLS + 0;
uint8_t idx1 = i * N_COLS + 1;
uint8_t idx2 = i * N_COLS + 2;
uint8_t idx3 = i * N_COLS + 3;
uint8_t d0 = data[idx0];
uint8_t d1 = data[idx1];
uint8_t d2 = data[idx2];
uint8_t d3 = data[idx3];
data[idx0] = mulE[d0] ^ mulB[d1] ^ mulD[d2] ^ mul9[d3];
data[idx1] = mul9[d0] ^ mulE[d1] ^ mulB[d2] ^ mulD[d3];
data[idx2] = mulD[d0] ^ mul9[d1] ^ mulE[d2] ^ mulB[d3];
data[idx3] = mulB[d0] ^ mulD[d1] ^ mul9[d2] ^ mulE[d3];
#else // 32bit RW
// One 32bit read from global; in0..in3 are the column's four bytes.
uint32_t quarterblock = ((uint32_t*)data)[i];
uint8_t in0 = ((quarterblock & 0x000000FF) >> 0);
uint8_t in1 = ((quarterblock & 0x0000FF00) >> 8);
uint8_t in2 = ((quarterblock & 0x00FF0000) >> 16);
uint8_t in3 = ((quarterblock & 0xFF000000) >> 24);
// Table lookups hit shared memory, so this is cheap.
uint32_t out0 = mulE[in0] ^ mulB[in1] ^ mulD[in2] ^ mul9[in3];
uint32_t out1 = mul9[in0] ^ mulE[in1] ^ mulB[in2] ^ mulD[in3];
uint32_t out2 = mulD[in0] ^ mul9[in1] ^ mulE[in2] ^ mulB[in3];
uint32_t out3 = mulB[in0] ^ mulD[in1] ^ mul9[in2] ^ mulE[in3];
// One 32bit write to global
((uint32_t*)data)[i] = out0 | (out1 << 8) | (out2 << 16) | (out3 << 24);
#endif
}
}
}
}
| 1890c90306a74dd7b3bd455187317a5fc97268b8.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <bitset>
#include <assert.h>
#include "common.h"
#include "gpu_opt.h"
#define N_COLS 4
#define N_ROWS 4
#define SUBKEY_SIZE 16
#define BYTE0(x) (x & 0x000000FF)
#define BYTE1(x) (x & 0x0000FF00)
#define BYTE2(x) (x & 0x00FF0000)
#define BYTE3(x) (x & 0xFF000000)
// If someone else has an optimized flag, get rid of it.
#ifdef OPTIMIZED
#undef OPTIMIZED
#endif
#define OPTIMIZED 1
namespace PAES {
namespace GPU_OPT {
using PAES::Common::PerformanceTimer;
// Returns the module-wide performance timer. Function-local static:
// constructed on first use and shared by all callers in this namespace.
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// A small helper for quick mod16, since it'll be used alot when accessing keys
// in sharedmem. Works by abusing the fact that 16 is a power of 2.
// (n & 15 == n % 16 only for non-negative n.)
template<typename T>
__host__ __device__ __forceinline__ T mod16(T n) {
return n & 15;
}
// Fast n % 4 via bitmask (valid for non-negative n); used by shift_rows.
template<typename T>
__host__ __device__ __forceinline__ T mod4(T n) {
return n & 3;
}
// Returns the raw AES key length in BYTES for the given flavor,
// or -1 for an unrecognized flavor.
__host__ int get_key_len(const AESType& a) {
if (a == AESType::AES128) return 128 / 8;
if (a == AESType::AES192) return 192 / 8;
if (a == AESType::AES256) return 256 / 8;
return -1;
}
// Returns the expanded (round) key length in bytes for the given flavor,
// or -1 for an unrecognized flavor.
__host__ int get_exp_key_len(const AESType& a) {
if (a == AESType::AES128) return 176;
if (a == AESType::AES192) return 208;
if (a == AESType::AES256) return 240;
return -1;
}
// Returns the number of AES rounds for the given flavor,
// or -1 for an unrecognized flavor.
__host__ int get_num_rounds(const AESType& a) {
if (a == AESType::AES128) return 10;
if (a == AESType::AES192) return 12;
if (a == AESType::AES256) return 14;
return -1;
}
// Encrypts `data` (datalen bytes, must be a multiple of 16) in place with
// AES-ECB on the GPU. `key` is the raw key for `flavor`; the expanded key
// is derived on the host and uploaded. One CUDA thread per 16-byte block.
// Note: ECB does not diffuse data across blocks and is inherently insecure.
// Fix: the host-side expanded key buffer was malloc'd but never freed.
__host__ void encrypt_ecb(const AESType& flavor, uint8_t * key, uint8_t * data, uint32_t datalen)
{
assert(mod16(datalen) == 0);
// Expand the key on the host...
uint8_t* expkey = (uint8_t*)malloc(get_exp_key_len(flavor));
expandKey(flavor, key, expkey, get_key_len(flavor), get_num_rounds(flavor));
// ...and mirror it into device memory.
uint8_t* d_expkey;
cudaMalloc(&d_expkey, get_exp_key_len(flavor));
cudaMemcpy(d_expkey, expkey, get_exp_key_len(flavor), cudaMemcpyHostToDevice);
free(expkey); // host copy no longer needed (leak fix)
// Copy data to device memory
uint8_t* d_data;
cudaMalloc(&d_data, sizeof(uint8_t) * datalen);
cudaMemcpy(d_data, data, sizeof(uint8_t) * datalen, cudaMemcpyHostToDevice);
// One thread per AES block, 1024 threads per CUDA block, ceil-div grid.
int threadsPerBlock = 1024;
int aes_blocks = datalen / BLOCKSIZE;
int cudaBlocks = (aes_blocks + 1023) / 1024;
// Call the kernels to get to work!
timer().startGpuTimer();
core_encrypt_ecb << <cudaBlocks, threadsPerBlock >> > (aes_blocks, d_data, d_expkey, get_num_rounds(flavor));
checkCUDAError("ECB Encrypt Failed!");
cudaDeviceSynchronize();
timer().endGpuTimer();
// Retrieve the (now encrypted) data from the device
cudaMemcpy(data, d_data, sizeof(uint8_t) * datalen, cudaMemcpyDeviceToHost);
// Free CUDA memory
cudaFree(d_data);
cudaFree(d_expkey);
}
// Decrypts `data` (datalen bytes, must be a multiple of 16) in place with
// AES-ECB on the GPU. Mirror of encrypt_ecb but launches the inverse kernel.
// Fix: the host-side expanded key buffer was malloc'd but never freed.
__host__ void decrypt_ecb(const AESType& flavor, uint8_t * key, uint8_t * data, uint32_t datalen)
{
assert(mod16(datalen) == 0);
// Expand the key on the host...
uint8_t* expkey = (uint8_t*)malloc(get_exp_key_len(flavor));
expandKey(flavor, key, expkey, get_key_len(flavor), get_num_rounds(flavor));
// ...and mirror it into device memory.
uint8_t* d_expkey;
cudaMalloc(&d_expkey, get_exp_key_len(flavor));
cudaMemcpy(d_expkey, expkey, get_exp_key_len(flavor), cudaMemcpyHostToDevice);
free(expkey); // host copy no longer needed (leak fix)
// Copy data to device memory
uint8_t* d_data;
cudaMalloc(&d_data, sizeof(uint8_t) * datalen);
cudaMemcpy(d_data, data, sizeof(uint8_t) * datalen, cudaMemcpyHostToDevice);
// One thread per AES block, 1024 threads per CUDA block, ceil-div grid.
int threadsPerBlock = 1024;
int aes_blocks = datalen / BLOCKSIZE;
int cudaBlocks = (aes_blocks + 1023) / 1024;
// Call the kernels to get to work!
timer().startGpuTimer();
core_decrypt_ecb << <cudaBlocks, threadsPerBlock >> > (aes_blocks, d_data, d_expkey, get_num_rounds(flavor));
checkCUDAError("ECB Decrypt Failed!");
cudaDeviceSynchronize();
timer().endGpuTimer();
// Retrieve the (now decrypted) data from the device
cudaMemcpy(data, d_data, sizeof(uint8_t) * datalen, cudaMemcpyDeviceToHost);
// Free CUDA memory
cudaFree(d_data);
cudaFree(d_expkey);
}
// AES-CTR encryption of `data` (datalen bytes, multiple of 16) in place.
// CTR mode never encrypts the data directly: each thread encrypts
// (IV/counter + its block index) and XORs that keystream into its block.
// `ctr` is one BLOCKSIZE-byte IV; it is uploaded read-only and each kernel
// thread derives its own incremented copy.
// Fix: the host-side expanded key buffer was malloc'd but never freed.
__host__ void encrypt_ctr(const AESType& flavor, uint8_t * key, uint8_t * ctr, uint8_t * data, uint32_t datalen)
{
assert(mod16(datalen) == 0);
// Expand the key on the host...
uint8_t* expkey = (uint8_t*)malloc(get_exp_key_len(flavor));
expandKey(flavor, key, expkey, get_key_len(flavor), get_num_rounds(flavor));
// ...and mirror it into device memory.
uint8_t* d_expkey;
cudaMalloc(&d_expkey, get_exp_key_len(flavor));
cudaMemcpy(d_expkey, expkey, get_exp_key_len(flavor), cudaMemcpyHostToDevice);
free(expkey); // host copy no longer needed (leak fix)
// Copy data to device memory
uint8_t* d_data;
cudaMalloc(&d_data, sizeof(uint8_t) * datalen);
cudaMemcpy(d_data, data, sizeof(uint8_t) * datalen, cudaMemcpyHostToDevice);
// Copy counter to device memory. The original ctr stays constant; each
// kernel thread copies it locally and increments by its index.
uint8_t* d_ctr;
cudaMalloc(&d_ctr, sizeof(uint8_t) * BLOCKSIZE);
cudaMemcpy(d_ctr, ctr, sizeof(uint8_t) * BLOCKSIZE, cudaMemcpyHostToDevice);
// One thread per AES block, 1024 threads per CUDA block, ceil-div grid.
int threadsPerBlock = 1024;
int aes_blocks = datalen / BLOCKSIZE;
int cudaBlocks = (aes_blocks + 1023) / 1024;
// Start the kernels. Each kernel will increment the counter
// based on their index.
timer().startGpuTimer();
core_xcrypt_ctr << <cudaBlocks, threadsPerBlock>> > (aes_blocks, d_data, d_expkey, get_num_rounds(flavor), d_ctr);
checkCUDAError("CTR Xcrypt Failed!");
cudaDeviceSynchronize();
timer().endGpuTimer();
// Retrieve the data from the device
cudaMemcpy(data, d_data, sizeof(uint8_t) * datalen, cudaMemcpyDeviceToHost);
// Free CUDA memory
cudaFree(d_data);
cudaFree(d_expkey);
cudaFree(d_ctr);
}
// AES-CTR decryption. CTR mode is symmetric — both directions XOR the data
// with the encrypted counter stream — so this simply forwards to encrypt_ctr.
__host__ void decrypt_ctr(const AESType& flavor, uint8_t * key, uint8_t * ctr, uint8_t * data, uint32_t datalen)
{
// A convenient feature of CTR mode... decryption is the SAME operation! No inverse needed!
encrypt_ctr(flavor, key, ctr, data, datalen);
}
// Key-schedule helper: rotate the 4 bytes of a word one position left,
// so [a0,a1,a2,a3] becomes [a1,a2,a3,a0].
__host__ void rot_word(uint8_t* n) {
uint8_t first = n[0];
for (int i = 0; i < 3; i++) {
n[i] = n[i + 1];
}
n[3] = first;
}
// Key-schedule helper: run each of the word's four bytes through the
// host-side S-box (SubWord from the Rijndael key schedule).
__host__ void sub_word(uint8_t* n) {
for (int i = 0; i < 4; i++) {
n[i] = h_sbox[n[i]];
}
}
// Expands `ogkey` (keysize bytes) into `expkey`: one SUBKEY_SIZE-byte round
// key per round plus one, per the Rijndael key schedule. `expkey` must hold
// get_exp_key_len(flavor) bytes. Sequential by nature (each word depends on
// the previous), so it runs once on the host.
__host__ void expandKey(const AESType& flavor, uint8_t* ogkey, uint8_t* expkey, uint32_t keysize, uint32_t num_rounds) {
// The AES key will either be 128, 192, or 256 bits.
// The AES algorithm itself is not actually modified by the key size, but the number
// of rounds is. In expandKey() we take the provided key and stretch it out to create enough
// 128bit subkeys/roundkeys for each round.
// This is only done once, so we won't parallelize this.
// The logic below follows the Rijndael Key Schedule (https://en.wikipedia.org/wiki/Rijndael_key_schedule)
// Code adapted from tiny-aes since this is not parallelizable (each subkey depends on value of previous subkey)
// and there is nothing inherintly unique about this.
// Changes were made to make keysize a runtime option.
unsigned i, j, k;
uint8_t tmp[4]; // Used for the column/row operations
uint32_t N = keysize / 4; // Length of key in 32bit words.
// The first round key is the key itself.
for (i = 0; i < N; ++i) {
expkey[(i * 4) + 0] = ogkey[(i * 4) + 0];
expkey[(i * 4) + 1] = ogkey[(i * 4) + 1];
expkey[(i * 4) + 2] = ogkey[(i * 4) + 2];
expkey[(i * 4) + 3] = ogkey[(i * 4) + 3];
}
// All other round keys are found from the previous round keys.
for (i = N; i < N_COLS * (num_rounds + 1); ++i) {
// Tuck away the previous 32-bit word; it seeds this word.
k = (i - 1) * 4;
tmp[0] = expkey[k + 0];
tmp[1] = expkey[k + 1];
tmp[2] = expkey[k + 2];
tmp[3] = expkey[k + 3];
// Every N words: rotate, substitute, and fold in the round constant.
if (i % N == 0) {
rot_word(tmp); // Rotate...
sub_word(tmp); // Substitute...
tmp[0] = tmp[0] ^ roundcon[i / N]; // Apply round coefficient
}
// Extra SubWord every 4th word past the boundary — AES-256 only.
else if (flavor == AESType::AES256 && (i % N == 4)) {
sub_word(tmp); // Just substitute
}
// New word = word N positions back XOR the transformed previous word.
j = i * 4;
k = (i - N) * 4;
expkey[j + 0] = expkey[k + 0] ^ tmp[0];
expkey[j + 1] = expkey[k + 1] ^ tmp[1];
expkey[j + 2] = expkey[k + 2] ^ tmp[2];
expkey[j + 3] = expkey[k + 3] ^ tmp[3];
}
}
// Kernel: AES-ECB encryption. One thread encrypts ONE 16-byte AES block of
// `data` in place. N = number of AES blocks; `key` = expanded key with
// num_rounds + 1 subkeys; `data` must be 4-byte aligned (32-bit copies).
__global__ void core_encrypt_ecb(int N, uint8_t* data, const uint8_t* key, const int num_rounds) {
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
// Stage the S-box and GF(2^8) x2/x3 tables in shared memory.
__shared__ uint8_t s_sbox[256];
__shared__ uint8_t s_mul2[256];
__shared__ uint8_t s_mul3[256];
// If we have enough threads, let each of the first 256 copy one entry.
if (N >= 256 && idx < 256) {
s_sbox[idx] = c_sbox[idx];
s_mul2[idx] = c_mul2[idx];
s_mul3[idx] = c_mul3[idx];
}
// If we don't have enough blocks, just let thread 0 do it all.
// If you only have 256 blocks you should be doing AES in CPU
// but thats none of my business *sips tea*.
else if (idx == 0) {
for (int i = 0; i < 256; i++) {
s_sbox[i] = c_sbox[i];
s_mul2[i] = c_mul2[i];
s_mul3[i] = c_mul3[i];
}
}
// Wait for shared memory to load
__syncthreads();
// Each thread running this function will act on ONE block in memory.
// Fix: this kernel hard-coded "#if 0" where every sibling kernel keys the
// same choice off OPTIMIZED; use the macro so the paths stay in sync
// (OPTIMIZED == 1, so the selected branch is unchanged).
#if OPTIMIZED == 0 // Get a pointer to global mem
uint8_t* myData = data + idx * BLOCKSIZE;
#else // Copy into local mem
uint8_t myData[BLOCKSIZE];
((uint32_t*)myData)[0] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0];
((uint32_t*)myData)[1] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1];
((uint32_t*)myData)[2] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2];
((uint32_t*)myData)[3] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3];
#endif
// Initial Round Key Addition: XOR subkey 0 into the state.
add_round_key(idx, 0, myData, key);
// Main rounds; the count depends on the AES flavor.
for (int r = 1; r < num_rounds; r++) {
sub_bytes(idx, myData, s_sbox);
shift_rows(idx, myData);
mix_columns(idx, myData, s_mul2, s_mul3);
add_round_key(idx, r, myData, key);
}
// For the last round we do NOT perform the mix_columns step.
sub_bytes(idx, myData, s_sbox);
shift_rows(idx, myData);
add_round_key(idx, num_rounds, myData, key);
// Encryption on this block is done; write the result back inplace.
// (When OPTIMIZED == 0, myData aliases data and this is a self-copy.)
#if 1 // If copied into local mem above, need to write it back down
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0] = ((uint32_t*)myData)[0];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1] = ((uint32_t*)myData)[1];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2] = ((uint32_t*)myData)[2];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3] = ((uint32_t*)myData)[3];
#endif
}
// Kernel: AES-ECB decryption. One thread decrypts ONE 16-byte AES block of
// `data` in place. N = number of AES blocks; `key` = expanded key holding
// num_rounds + 1 subkeys of SUBKEY_SIZE bytes; `data` must be 4-byte aligned
// because of the 32-bit copies below.
// NOTE(review): threads with idx >= N return before __syncthreads(); exited
// threads are released from the barrier on current hardware, but hoisting the
// table loads above the early-out would be more clearly safe — confirm.
__global__ void core_decrypt_ecb(int N, uint8_t* data, const uint8_t* key, const int num_rounds) {
// This performs the same steps as the encryption, but uses inverted values
// to recover the plaintext.
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
// Stage the inverse S-box and the four GF(2^8) multiply tables used by
// InvMixColumns in shared memory.
__shared__ uint8_t s_rsbox[256];
__shared__ uint8_t s_mul9[256];
__shared__ uint8_t s_mulB[256];
__shared__ uint8_t s_mulD[256];
__shared__ uint8_t s_mulE[256];
// If we have enough threads, let each one do one copy.
if (N >= 256 && idx < 256) {
s_rsbox[idx] = c_rsbox[idx];
s_mul9[idx] = c_mul9[idx];
s_mulB[idx] = c_mulB[idx];
s_mulD[idx] = c_mulD[idx];
s_mulE[idx] = c_mulE[idx];
}
// If we don't have enough blocks, just let thread 0 do it all.
// If you only have 256 blocks you should be doing AES in CPU
// but thats none of my business *sips tea*.
else if (idx == 0) {
for (int i = 0; i < 256; i++) {
s_rsbox[i] = c_rsbox[i];
s_mul9[i] = c_mul9[i];
s_mulB[i] = c_mulB[i];
s_mulD[i] = c_mulD[i];
s_mulE[i] = c_mulE[i];
}
}
// Wait for shared memory to load
__syncthreads();
// Each thread running this function will act on ONE block in memory.
#if OPTIMIZED == 0 // Get a pointer to global mem
uint8_t* myData = data + idx * BLOCKSIZE;
#else // Copy into local mem
uint8_t myData[BLOCKSIZE];
((uint32_t*)myData)[0] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0];
((uint32_t*)myData)[1] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1];
((uint32_t*)myData)[2] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2];
((uint32_t*)myData)[3] = ((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3];
#endif
// Initial Round Key Addition with the LAST subkey (inverse of encryption).
add_round_key(idx, num_rounds, myData, key);
// Main rounds, walked in the reverse order of encryption.
for (int r = num_rounds - 1; r > 0; r--)
{
inv_shift_rows(idx, myData);
inv_sub_bytes(idx, myData, s_rsbox);
add_round_key(idx, r, myData, key);
inv_mix_columns(idx, myData, s_mul9, s_mulB, s_mulD, s_mulE);
}
// For the last step we do NOT perform the mix_columns step.
inv_shift_rows(idx, myData);
inv_sub_bytes(idx, myData, s_rsbox);
add_round_key(idx, 0, myData, key);
// Decryption on this block is done, decrypted data is stored inplace.
// (When OPTIMIZED == 0, myData aliases data, so this write-back is a
// harmless self-copy.)
#if 1 // If copied into local mem above, need to write it back down
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 0] = ((uint32_t*)myData)[0];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 1] = ((uint32_t*)myData)[1];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 2] = ((uint32_t*)myData)[2];
((uint32_t*)data)[idx*(BLOCKSIZE / 4) + 3] = ((uint32_t*)myData)[3];
#endif
}
// Kernel: AES-CTR encryption/decryption (the operation is symmetric).
// Each thread encrypts the counter value (IV + its block index) and XORs
// the keystream into its 16-byte block of `data` in place.
// N = number of AES blocks; `key` = expanded key; `ctr` = the initial
// 16-byte counter/IV, read-only and shared by all threads.
__global__ void core_xcrypt_ctr(int N, uint8_t* data, const uint8_t* key, const int num_rounds, const uint8_t * ctr) {
// Length of buffer is ALWAYS 128 bits == 16 bytes
// This is defined by AES Algorithm
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
// Stage the S-box and GF(2^8) x2/x3 tables in shared memory.
__shared__ uint8_t s_sbox[256];
__shared__ uint8_t s_mul2[256];
__shared__ uint8_t s_mul3[256];
// If we have enough threads, let each one do one copy.
if (N >= 256 && idx < 256) {
s_sbox[idx] = c_sbox[idx];
s_mul2[idx] = c_mul2[idx];
s_mul3[idx] = c_mul3[idx];
}
// If we don't have enough blocks, just let thread 0 do it all.
// If you only have 256 blocks you should be doing AES in CPU
// but thats none of my business *sips tea*.
else if (idx == 0) {
for (int i = 0; i < 256; i++) {
s_sbox[i] = c_sbox[i];
s_mul2[i] = c_mul2[i];
s_mul3[i] = c_mul3[i];
}
}
// Wait for shared memory to load
__syncthreads();
// Each thread running this function will act on ONE block in memory.
// Unlike ECB, the data itself is only touched by the final XOR, so we keep
// a plain global-memory pointer here.
uint8_t* myData = data + idx * BLOCKSIZE;
// Copy the counter into registers/local mem and advance it by our index.
uint8_t myCtr[BLOCKSIZE];
for (int i = 0; i < BLOCKSIZE; i++) {
myCtr[i] = ctr[i];
}
ctr_increment(myCtr, idx);
// Now run the standard AES encryption rounds on the COUNTER (not the data).
// Initial Round Key Addition
// Each byte of the state is combined with a block of the round key using bitwise xor
add_round_key(idx, 0, myCtr, key);
// We perform the next steps for a number of rounds
// dependent on the flavor of AES.
for (int r = 1; r < num_rounds; r++) {
sub_bytes(idx, myCtr, s_sbox);
shift_rows(idx, myCtr);
mix_columns(idx, myCtr, s_mul2, s_mul3);
add_round_key(idx, r, myCtr, key);
}
// For the last step we do NOT perform the mix_columns step.
sub_bytes(idx, myCtr, s_sbox);
shift_rows(idx, myCtr);
add_round_key(idx, num_rounds, myCtr, key);
// myCtr is now the keystream block; XOR it into the data in place.
for (int i = 0; i < BLOCKSIZE; i++) {
myData[i] ^= myCtr[i];
}
}
// Adds `val` to the big-endian 128-bit counter `ctr` in place.
// AES-CTR derives each thread's counter as IV + block index, so this must be
// true multi-precision addition: byte b carries weight 256^(BLOCKSIZE-1-b).
// Fix: the previous code added min(remaining, 255 - ctr[b]) to successive
// bytes — treating every byte with weight 1 — and silently dropped the carry
// whenever a byte was already 0xFF (e.g. ctr=0, val=300 yielded the value
// 45*256 + 255 = 11775 instead of 300). This version does a standard
// ripple-carry add from the least-significant (last) byte upward.
__device__ void ctr_increment(uint8_t * ctr, int val)
{
unsigned int carry = (unsigned int)val;
for (int b = BLOCKSIZE - 1; b >= 0 && carry > 0; b--)
{
unsigned int sum = (unsigned int)ctr[b] + (carry & 0xFF);
ctr[b] = (uint8_t)(sum & 0xFF);
// Remaining addend shifted down one byte, plus the carry out of this byte.
carry = (carry >> 8) + (sum >> 8);
}
// Overflow past byte 0 wraps around (counter is modulo 2^128).
}
// AddRoundKey: XOR one 16-byte subkey into the state. The subkey for
// `round` starts at key[round * SUBKEY_SIZE] in the expanded key.
// Fix: the scalar (OPTIMIZED == 0) path only XORed the FIRST 4 bytes of the
// state; AddRoundKey must cover all SUBKEY_SIZE (16) bytes, exactly as the
// unrolled 32-bit path below already does.
__device__ void add_round_key(int idx, uint8_t round, uint8_t * data, const uint8_t * key)
{
#if OPTIMIZED == 0 // loop on 8bits
for (uint8_t i = 0; i < SUBKEY_SIZE; i++)
{
data[i] ^= key[(round * SUBKEY_SIZE) + i];
}
#else // unrolled loop, 32bits
// Requires data/key to be 4-byte aligned; each line handles one column.
((uint32_t*)data)[0] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 0];
((uint32_t*)data)[1] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 1];
((uint32_t*)data)[2] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 2];
((uint32_t*)data)[3] ^= ((uint32_t*)key)[(round * (SUBKEY_SIZE / 4)) + 3];
#endif
}
// SubBytes: substitute every byte of the 16-byte state through the
// (shared-memory) S-box, in place. One thread owns the whole block, so
// the loop is sequential and needs no synchronization.
__device__ void sub_bytes(int idx, uint8_t * data, uint8_t* sbox)
{
for (int pos = 0; pos < 16; pos++) {
data[pos] = sbox[data[pos]];
}
}
// InvSubBytes: substitute every byte of the 16-byte state through the
// inverse S-box, in place. Sequential per-thread; no sync needed.
__device__ void inv_sub_bytes(int idx, uint8_t * data, uint8_t* rsbox)
{
for (int pos = 0; pos < 16; pos++) {
data[pos] = rsbox[data[pos]];
}
}
// ShiftRows: rotate row r of the state left by r positions (row 0 fixed).
// The state is laid out column-major: byte (row, col) lives at
// data[row + N_COLS * col], so each 32-bit word of `data` is one column.
__device__ void shift_rows(int idx, uint8_t * data)
{
// This is not as simple as the previous steps. If we want to parallelize this,
// it will need to be a read, followed by a syncthreads, and then a write.
// Could the overhead of a syncthreads be more expnsive then just reducing the
// parallelism???
// row0 -- No Shift
// rows 1 to 3, shift left by row ammount
#if OPTIMIZED == 0
// 12 8bit reads / 12 8bit writes
uint8_t tmp[4];
for (int row = 1; row < N_ROWS; row++) {
tmp[0] = data[row + N_COLS * 0];
tmp[1] = data[row + N_COLS * 1];
tmp[2] = data[row + N_COLS * 2];
tmp[3] = data[row + N_COLS * 3];
data[row + N_COLS * 0] = tmp[mod4(0 + row)];
data[row + N_COLS * 1] = tmp[mod4(1 + row)];
data[row + N_COLS * 2] = tmp[mod4(2 + row)];
data[row + N_COLS * 3] = tmp[mod4(3 + row)];
}
#else // Unrolled 4 32bit reads 4 32bit writes + some math
// Read our whole block in... (c0..c3 are the four state columns; BYTEk
// masks byte k of a word, so each output word gathers one byte per column)
uint32_t c0 = ((uint32_t*)data)[0];
uint32_t c1 = ((uint32_t*)data)[1];
uint32_t c2 = ((uint32_t*)data)[2];
uint32_t c3 = ((uint32_t*)data)[3];
// This looks a bit cryptic, but trust me, this is the end configuration
// once the math is done.
/*
| 0 1 2 3 | | 0 5 A F |
| 4 5 6 7 | --> | 4 9 E 3 |
| 8 9 A B | --> | 8 D 2 7 |
| C D E F | | C 1 6 B |
*/
((uint32_t*)data)[0] = BYTE0(c0) | BYTE1(c1) | BYTE2(c2) | BYTE3(c3);
((uint32_t*)data)[1] = BYTE0(c1) | BYTE1(c2) | BYTE2(c3) | BYTE3(c0);
((uint32_t*)data)[2] = BYTE0(c2) | BYTE1(c3) | BYTE2(c0) | BYTE3(c1);
((uint32_t*)data)[3] = BYTE0(c3) | BYTE1(c0) | BYTE2(c1) | BYTE3(c2);
#endif
}
// AES InvShiftRows: rotate row r of the 4x4 state RIGHT by r positions
// (row 0 untouched), undoing shift_rows. Same layout assumptions as
// shift_rows -- element (row, col) at data[row + N_COLS * col]; TODO confirm.
__device__ void inv_shift_rows(int idx, uint8_t * data)
{
    // This is not as simple as the previous steps. If we want to parallelize this,
    // it will need to be a read, followed by a syncthreads, and then a write.
    // Could the overhead of a syncthreads be more expensive than just reducing the
    // parallelism???
    // row0 -- No Shift
    // rows 1 to 3, shift right (since we inv) by row amount
#if OPTIMIZED == 0 // 12 8bit reads / 12 8bit writes
    uint8_t tmp[4];
    for (int row = 1; row < N_ROWS; row++) {
        // Right rotation by `row` == left rotation by (4 - row); mod4 wraps.
        tmp[0] = data[row + N_COLS * 0];
        tmp[1] = data[row + N_COLS * 1];
        tmp[2] = data[row + N_COLS * 2];
        tmp[3] = data[row + N_COLS * 3];
        data[row + N_COLS * 0] = tmp[mod4(0 + 4 - row)];
        data[row + N_COLS * 1] = tmp[mod4(1 + 4 - row)];
        data[row + N_COLS * 2] = tmp[mod4(2 + 4 - row)];
        data[row + N_COLS * 3] = tmp[mod4(3 + 4 - row)];
    }
#else // Unrolled 4 32bit reads 4 32bit writes + some math
    // Read our whole block in...
    uint32_t c0 = ((uint32_t*)data)[0];
    uint32_t c1 = ((uint32_t*)data)[1];
    uint32_t c2 = ((uint32_t*)data)[2];
    uint32_t c3 = ((uint32_t*)data)[3];
    // This looks a bit cryptic, but trust me, this is the end configuration
    // once the math is done. (Byte-lane rotation mirrored w.r.t. shift_rows.)
    ((uint32_t*)data)[0] = BYTE0(c0) | BYTE1(c3) | BYTE2(c2) | BYTE3(c1);
    ((uint32_t*)data)[1] = BYTE0(c1) | BYTE1(c0) | BYTE2(c3) | BYTE3(c2);
    ((uint32_t*)data)[2] = BYTE0(c2) | BYTE1(c1) | BYTE2(c0) | BYTE3(c3);
    ((uint32_t*)data)[3] = BYTE0(c3) | BYTE1(c2) | BYTE2(c1) | BYTE3(c0);
#endif
}
// AES MixColumns: multiply each state column by the fixed GF(2^8) matrix
// below, using precomputed x2 / x3 lookup tables (mul2 / mul3) instead of
// runtime field multiplication. `idx` is unused; one thread does all 4 columns.
__device__ void mix_columns(int idx, uint8_t * data, uint8_t* mul2, uint8_t* mul3)
{
    // This is the most complicated step, but can be improved using our lookup tables.
    // Problem is, this is going to cause all sorts of contention because we read and write across
    // 4 different banks.
    /* Matrix used for mixin'
    | 2 3 1 1 |
    | 1 2 3 1 |
    | 1 1 2 3 |
    | 3 1 1 2 |
    */
    // logic adapted from https://www.youtube.com/watch?v=bERjYzLqAfw
    for (int i = 0; i < N_COLS; i++) {
#if OPTIMIZED == 0 // 8bit RW
        uint8_t idx0 = i * N_COLS + 0;
        uint8_t idx1 = i * N_COLS + 1;
        uint8_t idx2 = i * N_COLS + 2;
        uint8_t idx3 = i * N_COLS + 3;
        // Hmmm... will compiler vectorize this as one read32?
        uint8_t d0 = data[idx0];
        uint8_t d1 = data[idx1];
        uint8_t d2 = data[idx2];
        uint8_t d3 = data[idx3];
        // GF(2^8) addition is XOR; mul2/mul3 give the field products.
        data[idx0] = mul2[d0] ^ mul3[d1] ^ d2 ^ d3;
        data[idx1] = d0 ^ mul2[d1] ^ mul3[d2] ^ d3;
        data[idx2] = d0 ^ d1 ^ mul2[d2] ^ mul3[d3];
        data[idx3] = mul3[d0] ^ d1 ^ d2 ^ mul2[d3];
#else // 32bit RW
        // One 32bit read from global
        uint32_t quarterblock = ((uint32_t*)data)[i];
        uint8_t in0 = ((quarterblock & 0x000000FF) >> 0);
        uint8_t in1 = ((quarterblock & 0x0000FF00) >> 8);
        uint8_t in2 = ((quarterblock & 0x00FF0000) >> 16);
        uint8_t in3 = ((quarterblock & 0xFF000000) >> 24);
        // Use lookup table, with shared mem this is p good
        uint32_t out0 = mul2[in0] ^ mul3[in1] ^ in2 ^ in3;
        uint32_t out1 = in0 ^ mul2[in1] ^ mul3[in2] ^ in3;
        uint32_t out2 = in0 ^ in1 ^ mul2[in2] ^ mul3[in3];
        uint32_t out3 = mul3[in0] ^ in1 ^ in2 ^ mul2[in3];
        // One 32bit write to global
        ((uint32_t*)data)[i] = out0 | (out1 << 8) | (out2 << 16) | (out3 << 24);
#endif
        // Compute at runtime - Ended up slower, higher instruction cost, still high LG Throttle
        //data[idx0] = rt_mul2(in0) ^ rt_mul3(in1) ^ in2 ^ in3;
        //data[idx1] = in0 ^ rt_mul2(in1) ^ rt_mul3(in2) ^ in3;
        //data[idx2] = in0 ^ in1 ^ rt_mul2(in2) ^ rt_mul3(in3);
        //data[idx3] = rt_mul3(in0) ^ in1 ^ in2 ^ rt_mul2(in3);
    }
    // This can be parallelized by block, but going deeper will incur
    // contention penalties. Not worth the trouble.
}
// AES InvMixColumns: multiply each state column by the inverse GF(2^8)
// matrix below, using precomputed x9 / xB / xD / xE lookup tables.
// `idx` is unused; one thread handles all 4 columns.
__device__ void inv_mix_columns(int idx, uint8_t * data,
    uint8_t* mul9, uint8_t* mulB, uint8_t* mulD, uint8_t* mulE)
{
    // Inverse mix columns -- This is the same procedure, but we use MORE lookup tables!
    // The inverse operation defines a different table, which will increase our total
    // operations. Interesting to see how this compares against the forward case...
    /* Matrix used for mixin'
    | E B D 9 |
    | 9 E B D |
    | D 9 E B |
    | B D 9 E |
    */
    for (int i = 0; i < N_COLS; i++) {
#if OPTIMIZED == 0 // 8bit RW
        uint8_t idx0 = i * N_COLS + 0;
        uint8_t idx1 = i * N_COLS + 1;
        uint8_t idx2 = i * N_COLS + 2;
        uint8_t idx3 = i * N_COLS + 3;
        // Hmmm... will compiler vectorize this as one read32?
        uint8_t d0 = data[idx0];
        uint8_t d1 = data[idx1];
        uint8_t d2 = data[idx2];
        uint8_t d3 = data[idx3];
        data[idx0] = mulE[d0] ^ mulB[d1] ^ mulD[d2] ^ mul9[d3];
        data[idx1] = mul9[d0] ^ mulE[d1] ^ mulB[d2] ^ mulD[d3];
        data[idx2] = mulD[d0] ^ mul9[d1] ^ mulE[d2] ^ mulB[d3];
        data[idx3] = mulB[d0] ^ mulD[d1] ^ mul9[d2] ^ mulE[d3];
        // (removed an unused local `uint8_t base = i * N_COLS;` -- dead code)
#else // 32bit RW
        // One 32bit read from global
        uint32_t quarterblock = ((uint32_t*)data)[i];
        uint8_t in0 = ((quarterblock & 0x000000FF) >> 0);
        uint8_t in1 = ((quarterblock & 0x0000FF00) >> 8);
        uint8_t in2 = ((quarterblock & 0x00FF0000) >> 16);
        uint8_t in3 = ((quarterblock & 0xFF000000) >> 24);
        // Use lookup table, with shared mem this is p good
        uint32_t out0 = mulE[in0] ^ mulB[in1] ^ mulD[in2] ^ mul9[in3];
        uint32_t out1 = mul9[in0] ^ mulE[in1] ^ mulB[in2] ^ mulD[in3];
        uint32_t out2 = mulD[in0] ^ mul9[in1] ^ mulE[in2] ^ mulB[in3];
        uint32_t out3 = mulB[in0] ^ mulD[in1] ^ mul9[in2] ^ mulE[in3];
        // One 32bit write to global
        ((uint32_t*)data)[i] = out0 | (out1 << 8) | (out2 << 16) | (out3 << 24);
#endif
    }
}
}
}
|
8113a5dab2c25f9462769015a5eb354107b1cc15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The Game of Life
*
* a cell is born, if it has exactly three neighbours
* a cell dies of loneliness, if it has less than two neighbours
* a cell dies of overcrowding, if it has more than three neighbours
* a cell survives to the next generation, if it does not die of loneliness
* or overcrowding
*
* In this version, a 2D array of ints is used. A 1 cell is on, a 0 cell is off.
* The game plays a number of steps (given by the input), printing to the screen each time. 'x' printed
* means on, space means off.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
typedef unsigned char bool_t;
typedef unsigned char cell_t;
#define TILE_SIZE 16
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
// Report a failed HIP API call with file/line context; optionally abort
// the process with the error code as exit status.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) {
    if (code == hipSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Allocate a flat host-side board of `flat_size` cells; caller owns and
// must free() the returned buffer. Contents are uninitialized.
cell_t *allocate_board_flat(int flat_size) {
    return (cell_t *) malloc(sizeof(cell_t) * flat_size);
}
// Count the live neighbours of cell (i, j) on a size x size board,
// clamping the 3x3 window at the edges; the centre cell is excluded.
__device__ __inline__ int adjacent_to(const cell_t *d_board, int size, int i, int j) {
    int k_lo = (i > 0) ? i - 1 : i;
    int k_hi = (i + 1 < size) ? i + 1 : i;
    int l_lo = (j > 0) ? j - 1 : j;
    int l_hi = (j + 1 < size) ? j + 1 : j;
    // Pre-subtract the centre so the full window sum needs no special case.
    int alive = -d_board[j * size + i];
    for (int k = k_lo; k <= k_hi; k++)
        for (int l = l_lo; l <= l_hi; l++)
            alive += d_board[l * size + k];
    return alive;
}
// One thread per cell: apply Conway's rules to d_board and write the next
// generation into d_newboard. Launched on a 2D grid of 2D blocks; threads
// mapping outside the size x size board do nothing.
__global__ void playKernel(cell_t *d_board, cell_t *d_newboard, int size) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;
    if (row < size && col < size) {
        int a = adjacent_to(d_board, size, col, row);
        // exactly 2 neighbours: the cell keeps its current state
        if (a == 2)
            d_newboard[row * size + col] = d_board[row * size + col];
        // exactly 3 neighbours: the cell is born (or survives)
        if (a == 3)
            d_newboard[row * size + col] = 1;
        // < 2 (loneliness) or > 3 (overcrowding): the cell dies
        if (a < 2)
            d_newboard[row * size + col] = 0;
        if (a > 3)
            d_newboard[row * size + col] = 0;
    }
}
/* print the life board */
/* print the life board to stdout: 'x' marks a live cell, ' ' a dead one,
 * one board row per output line */
void print_flat(cell_t *board, int size) {
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            putchar(board[row * size + col] ? 'x' : ' ');
        }
        putchar('\n');
    }
}
/* read a file into the life board */
/* read a life board from file f: 'x' marks a live cell; positions past the
 * end of a line (or rows past EOF) are set dead. */
void read_file_flat(FILE *f, cell_t *board, int size) {
    int i, j;
    size_t len;
    char *s = (char *) malloc(size + 10);
    if (s == NULL) {
        /* No line buffer available: leave a well-defined (all dead) board. */
        for (j = 0; j < size * size; j++)
            board[j] = 0;
        return;
    }
    for (j = 0; j < size; j++) {
        /* BUGFIX: check fgets -- on EOF the old code reused whatever the
         * previous iteration left in the buffer; treat it as an empty row. */
        len = (fgets(s, size + 10, f) != NULL) ? strlen(s) - 1 : 0;
        /* copy the string to the life board */
        for (i = 0; i < size; i++) {
            board[j * size + i] = i < len ? s[i] == 'x' : 0;
        }
    }
    free(s);  /* BUGFIX: buffer was previously leaked */
}
// Driver: read "<size> <steps>" plus the initial board from stdin, run
// `steps` Life generations on the GPU (ping-ponging between two device
// buffers, two kernel launches per loop iteration), print the final board.
int main(int argc, char *argv[]) {
    // Host variables
    int size, flat_size, steps, i, grid_size;
    FILE *f_in;
    cell_t *h_prev;
    bool_t writeOutput = 1, evenSteps;
    // Device variables
    cell_t *d_prev, *d_next;
    f_in = stdin;
    // Read the input file and write its content in the host array
    // NOTE(review): fscanf result is unchecked -- malformed input leaves
    // size/steps indeterminate; consider validating.
    fscanf(f_in, "%d %d", &size, &steps);
    flat_size = size * size;
    evenSteps = steps % 2 == 0;
    h_prev = allocate_board_flat(flat_size);
    read_file_flat(f_in, h_prev, size);
    fclose(f_in);
    // One TILE_SIZE x TILE_SIZE thread block per board tile (ceil division).
    grid_size = int(ceil((float) size / TILE_SIZE));
    dim3 dimGrid(grid_size, grid_size, 1);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
    // Allocate device arrays
    gpuErrchk(hipMalloc((void **) &d_prev, flat_size * sizeof(cell_t)));
    gpuErrchk(hipMalloc((void **) &d_next, flat_size * sizeof(cell_t)));
    // Copy the data from the host array to the device array
    gpuErrchk(hipMemcpy(d_prev, h_prev, flat_size * sizeof(cell_t), hipMemcpyHostToDevice));
    for (i = 0; i < int(ceil((float) steps / 2)); i++) {
        // printf("Step: %d\n", 2 * i);
        // Instead of using hipMemcpy and a buffer or swapping pointers,
        // run the same kernel with the variables inverted
        hipLaunchKernelGGL(( playKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_prev, d_next, size);
        // Second half-step only when there is an even step left to do.
        if (evenSteps || (2 * i + 1) < steps) {
            // printf("Step: %d\n", 2 * i + 1);
            hipLaunchKernelGGL(( playKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_next, d_prev, size);
        }
    }
    // Copy data back from the device array to the host array
    // (blocking memcpy also synchronizes with the queued kernels)
    gpuErrchk(hipMemcpy(h_prev, evenSteps ? d_prev : d_next, flat_size * sizeof(cell_t), hipMemcpyDeviceToHost))
    // Deallocate device arrays
    gpuErrchk(hipFree(d_next));
    gpuErrchk(hipFree(d_prev));
    if (writeOutput) {
        print_flat(h_prev, size);
    }
    free(h_prev);
    return EXIT_SUCCESS;
}
| 8113a5dab2c25f9462769015a5eb354107b1cc15.cu | /*
* The Game of Life
*
* a cell is born, if it has exactly three neighbours
* a cell dies of loneliness, if it has less than two neighbours
* a cell dies of overcrowding, if it has more than three neighbours
* a cell survives to the next generation, if it does not die of loneliness
* or overcrowding
*
* In this version, a 2D array of ints is used. A 1 cell is on, a 0 cell is off.
* The game plays a number of steps (given by the input), printing to the screen each time. 'x' printed
* means on, space means off.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
typedef unsigned char bool_t;
typedef unsigned char cell_t;
#define TILE_SIZE 16
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
// Report a failed CUDA API call with file/line context; optionally abort
// the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Allocate a flat host-side board of `flat_size` cells; caller owns and
// must free() the returned buffer. Contents are uninitialized.
cell_t *allocate_board_flat(int flat_size) {
    return (cell_t *) malloc(sizeof(cell_t) * flat_size);
}
// Count the live neighbours of cell (i, j) on a size x size board,
// clamping the 3x3 window at the edges; the centre cell is excluded.
__device__ __inline__ int adjacent_to(const cell_t *d_board, int size, int i, int j) {
    int k_lo = (i > 0) ? i - 1 : i;
    int k_hi = (i + 1 < size) ? i + 1 : i;
    int l_lo = (j > 0) ? j - 1 : j;
    int l_hi = (j + 1 < size) ? j + 1 : j;
    // Pre-subtract the centre so the full window sum needs no special case.
    int alive = -d_board[j * size + i];
    for (int k = k_lo; k <= k_hi; k++)
        for (int l = l_lo; l <= l_hi; l++)
            alive += d_board[l * size + k];
    return alive;
}
// One thread per cell: apply Conway's rules to d_board and write the next
// generation into d_newboard. Launched on a 2D grid of 2D blocks; threads
// mapping outside the size x size board do nothing.
__global__ void playKernel(cell_t *d_board, cell_t *d_newboard, int size) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;
    if (row < size && col < size) {
        int a = adjacent_to(d_board, size, col, row);
        // exactly 2 neighbours: the cell keeps its current state
        if (a == 2)
            d_newboard[row * size + col] = d_board[row * size + col];
        // exactly 3 neighbours: the cell is born (or survives)
        if (a == 3)
            d_newboard[row * size + col] = 1;
        // < 2 (loneliness) or > 3 (overcrowding): the cell dies
        if (a < 2)
            d_newboard[row * size + col] = 0;
        if (a > 3)
            d_newboard[row * size + col] = 0;
    }
}
/* print the life board */
/* print the life board to stdout: 'x' marks a live cell, ' ' a dead one,
 * one board row per output line */
void print_flat(cell_t *board, int size) {
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            putchar(board[row * size + col] ? 'x' : ' ');
        }
        putchar('\n');
    }
}
/* read a file into the life board */
/* read a life board from file f: 'x' marks a live cell; positions past the
 * end of a line (or rows past EOF) are set dead. */
void read_file_flat(FILE *f, cell_t *board, int size) {
    int i, j;
    size_t len;
    char *s = (char *) malloc(size + 10);
    if (s == NULL) {
        /* No line buffer available: leave a well-defined (all dead) board. */
        for (j = 0; j < size * size; j++)
            board[j] = 0;
        return;
    }
    for (j = 0; j < size; j++) {
        /* BUGFIX: check fgets -- on EOF the old code reused whatever the
         * previous iteration left in the buffer; treat it as an empty row. */
        len = (fgets(s, size + 10, f) != NULL) ? strlen(s) - 1 : 0;
        /* copy the string to the life board */
        for (i = 0; i < size; i++) {
            board[j * size + i] = i < len ? s[i] == 'x' : 0;
        }
    }
    free(s);  /* BUGFIX: buffer was previously leaked */
}
// Driver: read "<size> <steps>" plus the initial board from stdin, run
// `steps` Life generations on the GPU (ping-ponging between two device
// buffers, two kernel launches per loop iteration), print the final board.
int main(int argc, char *argv[]) {
    // Host variables
    int size, flat_size, steps, i, grid_size;
    FILE *f_in;
    cell_t *h_prev;
    bool_t writeOutput = 1, evenSteps;
    // Device variables
    cell_t *d_prev, *d_next;
    f_in = stdin;
    // Read the input file and write its content in the host array
    // NOTE(review): fscanf result is unchecked -- malformed input leaves
    // size/steps indeterminate; consider validating.
    fscanf(f_in, "%d %d", &size, &steps);
    flat_size = size * size;
    evenSteps = steps % 2 == 0;
    h_prev = allocate_board_flat(flat_size);
    read_file_flat(f_in, h_prev, size);
    fclose(f_in);
    // One TILE_SIZE x TILE_SIZE thread block per board tile (ceil division).
    grid_size = int(ceil((float) size / TILE_SIZE));
    dim3 dimGrid(grid_size, grid_size, 1);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
    // Allocate device arrays
    gpuErrchk(cudaMalloc((void **) &d_prev, flat_size * sizeof(cell_t)));
    gpuErrchk(cudaMalloc((void **) &d_next, flat_size * sizeof(cell_t)));
    // Copy the data from the host array to the device array
    gpuErrchk(cudaMemcpy(d_prev, h_prev, flat_size * sizeof(cell_t), cudaMemcpyHostToDevice));
    for (i = 0; i < int(ceil((float) steps / 2)); i++) {
        // printf("Step: %d\n", 2 * i);
        // Instead of using cudaMemcpy and a buffer or swapping pointers,
        // run the same kernel with the variables inverted
        playKernel<<<dimGrid, dimBlock>>>(d_prev, d_next, size);
        // Second half-step only when there is an even step left to do.
        if (evenSteps || (2 * i + 1) < steps) {
            // printf("Step: %d\n", 2 * i + 1);
            playKernel<<<dimGrid, dimBlock>>>(d_next, d_prev, size);
        }
    }
    // Copy data back from the device array to the host array
    // (blocking memcpy also synchronizes with the queued kernels)
    gpuErrchk(cudaMemcpy(h_prev, evenSteps ? d_prev : d_next, flat_size * sizeof(cell_t), cudaMemcpyDeviceToHost))
    // Deallocate device arrays
    gpuErrchk(cudaFree(d_next));
    gpuErrchk(cudaFree(d_prev));
    if (writeOutput) {
        print_flat(h_prev, size);
    }
    free(h_prev);
    return EXIT_SUCCESS;
}
|
8456bb58a8816e49f7a11074afc826ac969f0020.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "WCSPHSolver.cuh"
#include "utils/handler.h"
#include <helper_math.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
//#define DEBUG
//#define CONFINE_RANDOM
#define CUDA_MEMCPY_ASYNC
#define CUDA_MEMSET_ASYNC
//#define DYNAMIC_VELOCITY_MINMAX
const int kCudaSortArrayCount = 4;
// BUGFIX: the second test repeated CUDA_MEMCPY_ASYNC; the stream count is
// needed by both async memcpy AND async memset paths (see the guards around
// the stream array in InitDeviceSystem).
#if defined(CUDA_MEMCPY_ASYNC) || defined(CUDA_MEMSET_ASYNC)
const int kCudaMemcpyTime = 7;
#endif // defined(CUDA_MEMCPY_ASYNC) || defined(CUDA_MEMSET_ASYNC)
////////////////////////////////////////////////////////////////////////////////
// Device array declare
////////////////////////////////////////////////////////////////////////////////
WCSPHSystem* sph_device = NULL;
int* particle_zidx = NULL; // each particle belongs to which zone
int* zone_pidx = NULL; // first particle index in one zone
hiprandState_t* devStates = NULL;
float3* color = NULL; // color of particles
float3* cur_pos = NULL;
float3* next_pos = NULL;
float* density = NULL;
float* delta_density = NULL;
float* pressure = NULL;
float3* delta_pressure = NULL;
float3* delta_viscosity = NULL;
float* velo_min = NULL;
float* velo_max = NULL;
float* velocity_len = NULL;
float3* velocity = NULL;
float3* delta_velocity = NULL;
////////////////////////////////////////////////////////////////////////////////
//
// Init CUDA Device System
//
////////////////////////////////////////////////////////////////////////////////
// Allocate every device-side array of the SPH solver and upload the initial
// parameter struct, densities, positions and velocities. When the async
// macros are enabled, uploads/memsets are spread over kCudaMemcpyTime
// streams and joined (synchronized + destroyed) before returning.
// NOTE(review): streamnum advances 0,1,2,3 across the four memcpys while
// the memsets use fixed indices stream[3] and stream[5] -- so the velocity
// upload shares stream[3] with the next_pos memset. Same-stream work just
// serializes, so this is harmless, but presumably unintended; verify.
void InitDeviceSystem(WCSPHSystem* para, float* dens_init, float3* pos_init, float3* velo_init) {
#ifdef DEBUG
    std::cout << "Do InitDeviceSystem" << std::endl;
#endif // DEBUG
    int num = para->particle_num;
#if defined (CUDA_MEMCPY_ASYNC) || defined (CUDA_MEMSET_ASYNC)
    hipStream_t stream[kCudaMemcpyTime];
    int streamnum = 0;
    for (int i = 0; i < kCudaMemcpyTime; i++) {
        checkCudaErrors(hipStreamCreate(&stream[i]));
    }
#endif // CUDA_MEMCPY_ASYNC || CUDA_MEMSET_ASYNC
    checkCudaErrors(hipMalloc((void**)&sph_device, sizeof(WCSPHSystem)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMemcpyAsync(sph_device, para, sizeof(WCSPHSystem), hipMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(hipMemcpy(sph_device, para, sizeof(WCSPHSystem), hipMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    // Zone-sort scratch: kCudaSortArrayCount replicated key arrays (one copy
    // per payload array sorted in SortParticles), plus per-zone counts.
    checkCudaErrors(hipMalloc((void**)&particle_zidx, kCudaSortArrayCount * num * sizeof(int)));
    checkCudaErrors(hipMalloc((void**)&zone_pidx, (para->zone_size + 1) * sizeof(int)));
    checkCudaErrors(hipMalloc((void**)&devStates, num * sizeof(hiprandState_t)));
    checkCudaErrors(hipMalloc((void**)&color, num * sizeof(float3)));
    checkCudaErrors(hipMalloc((void**)&cur_pos, num * sizeof(float3)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMemcpyAsync(cur_pos, pos_init, num * sizeof(float3), hipMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(hipMemcpy(cur_pos, pos_init, num * sizeof(float3), hipMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMalloc((void**)&next_pos, num * sizeof(float3)));
#ifdef CUDA_MEMSET_ASYNC
    checkCudaErrors(hipMemsetAsync(next_pos, 0, num * sizeof(float3), stream[3]));
#else
    checkCudaErrors(hipMemset(next_pos, 0, num * sizeof(float3)));
#endif // CUDA_MEMSET_ASYNC
    checkCudaErrors(hipMalloc((void**)&density, num * sizeof(float)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMemcpyAsync(density, dens_init, num * sizeof(float), hipMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(hipMemcpy(density, dens_init, num * sizeof(float), hipMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMalloc((void**)&delta_density, num * sizeof(float)));
    checkCudaErrors(hipMalloc((void**)&pressure, num * sizeof(float)));
#ifdef CUDA_MEMSET_ASYNC
    checkCudaErrors(hipMemsetAsync(pressure, 0, num * sizeof(float), stream[5]));
#else
    checkCudaErrors(hipMemset(pressure, 0, num * sizeof(float)));
#endif // CUDA_MEMSET_ASYNC
    checkCudaErrors(hipMalloc((void**)&delta_pressure, num * sizeof(float3)));
    checkCudaErrors(hipMalloc((void**)&delta_viscosity, num * sizeof(float3)));
    checkCudaErrors(hipMalloc((void**)&velo_min, sizeof(float)));
    checkCudaErrors(hipMalloc((void**)&velo_max, sizeof(float)));
    checkCudaErrors(hipMalloc((void**)&velocity_len, num * sizeof(float)));
    checkCudaErrors(hipMalloc((void**)&velocity, num * sizeof(float3)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMemcpyAsync(velocity, velo_init, num * sizeof(float3), hipMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(hipMemcpy(velocity, velo_init, num * sizeof(float3), hipMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    checkCudaErrors(hipMalloc((void**)&delta_velocity, num * sizeof(float3)));
#if defined (CUDA_MEMCPY_ASYNC) || defined (CUDA_MEMSET_ASYNC)
    // Join all async uploads/memsets before the streams go out of scope.
    for (int i = 0; i < kCudaMemcpyTime; i++) {
        checkCudaErrors(hipStreamSynchronize(stream[i]));
        checkCudaErrors(hipStreamDestroy(stream[i]));
    }
#endif // CUDA_MEMCPY_ASYNC || CUDA_MEMSET_ASYNC
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
#ifdef DEBUG
    std::cout << "Finish InitDeviceSystem" << std::endl;
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Free CUDA Device System
//
////////////////////////////////////////////////////////////////////////////////
// Release the host-side parameter struct and every device array allocated
// by InitDeviceSystem. The module-level pointers are left dangling, so the
// solver must not be used again without re-initialization.
void FreeDeviceSystem(WCSPHSystem* para) {
#ifdef DEBUG
    std::cout << "Do FreeDeviceSystem" << std::endl;
#endif // DEBUG
    delete para;
    checkCudaErrors(hipFree(sph_device));
    checkCudaErrors(hipFree(particle_zidx));
    checkCudaErrors(hipFree(zone_pidx));
    checkCudaErrors(hipFree(devStates));
    checkCudaErrors(hipFree(color));
    checkCudaErrors(hipFree(cur_pos));
    checkCudaErrors(hipFree(next_pos));
    checkCudaErrors(hipFree(density));
    checkCudaErrors(hipFree(delta_density));
    checkCudaErrors(hipFree(pressure));
    checkCudaErrors(hipFree(delta_pressure));
    checkCudaErrors(hipFree(delta_viscosity));
    checkCudaErrors(hipFree(velo_min));
    checkCudaErrors(hipFree(velo_max));
    checkCudaErrors(hipFree(velocity_len));
    checkCudaErrors(hipFree(velocity));
    checkCudaErrors(hipFree(delta_velocity));
#ifdef DEBUG
    // BUGFIX: this debug trace previously printed "Finish InitDeviceSystem",
    // copy-pasted from the init routine.
    std::cout << "Finish FreeDeviceSystem" << std::endl;
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
// CUDA function are implemented here
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Compute which zone each particle belongs to
//
////////////////////////////////////////////////////////////////////////////////
// Kernel (grid-stride loop over particles): map every particle to its
// uniform-grid zone. particle_zidx[i] gets the flattened zone index of
// particle i; zone_pidx[z] accumulates the particle count of zone z via
// atomicAdd (callers presumably prefix-sum the counts into offsets -- the
// start-index use in ComputeDeltaValue suggests so; verify at call site).
__global__ void ComputeZoneIdx( WCSPHSystem* para,
    int* particle_zidx, int* zone_pidx,
    float3* cur_pos) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputeBid\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // compute zone_idx for each particle i
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        // compute particle position inside which zidx zone
        int3 zidx = make_int3(cur_pos[i] / para->zone_length);
        particle_zidx[i] = MapIndex3DTo1D(zidx, para->zone_dim);
        atomicAdd(&zone_pidx[particle_zidx[i]], 1);
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputeBid\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Use Radix sort to place particle in zone order
//
////////////////////////////////////////////////////////////////////////////////
// Kernel: reorder the per-particle arrays into zone order. Each of the
// first four threads stable-sorts ONE payload array with device-side
// thrust, keyed by its own private copy of particle_zidx (the keys were
// replicated kCudaSortArrayCount times so every sort can consume a copy).
// NOTE(review): relies on being launched with a single block of >= 4
// threads -- additional blocks would re-sort concurrently; verify launch.
__global__ void SortParticles( WCSPHSystem* para,
    int* particle_zidx, int* zone_pidx,
    float* density, float* pressure,
    float3* cur_pos, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do SortParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    int num = para->particle_num;
    if (threadIdx.x == 0) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 0, particle_zidx + num * 1, cur_pos);
    }
    else if (threadIdx.x == 1) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 1, particle_zidx + num * 2, density);
    }
    else if (threadIdx.x == 2) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 2, particle_zidx + num * 3, pressure);
    }
    else if (threadIdx.x == 3) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 3, particle_zidx + num * 4, velocity);
    }
    //else if (threadIdx.x == 4) {
    //	thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 4, particle_zidx + num * 5, next_pos);
    //}
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish SortParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Compute delta value of density, pressure and viscosity for each particle
//
////////////////////////////////////////////////////////////////////////////////
// Kernel (grid-stride loop over particles): for every particle, scan the
// 27 neighbouring zones and accumulate the WCSPH update terms --
// delta_density (density estimate), delta_pressure (pressure-gradient
// acceleration) and delta_viscosity (artificial viscosity, Monaghan-style
// switch on approaching pairs). Requires zone_pidx to hold zone start
// offsets (zone_pidx[z]..zone_pidx[z+1] spans zone z's particles).
// NOTE(review): the active density line is the SUMMATION form
// (mass * W), but UpdateParticles applies it as a DIFFERENTIAL update
// (density += dt * delta_density) -- the two schemes look mixed; confirm
// which is intended.
__global__ void ComputeDeltaValue( WCSPHSystem* para,
    int* zone_pidx,
    float* delta_density, float* density, float* pressure,
    float3* cur_pos, float3* delta_pressure, float3* delta_viscosity, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputeDeltaValue\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // for each particle[i]
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        int3 zidx = make_int3(cur_pos[i] / para->zone_length);
        // Initialize
        delta_density[i] = 0.0;
        delta_pressure[i] = make_float3(0, 0, 0);
        delta_viscosity[i] = make_float3(0, 0, 0);
        // for each neighbor zone (3x3x3 stencil flattened to 0..26)
        for (int ii = 0; ii < 27; ii++) {
            int3 zidx_neighb = zidx + make_int3(ii / 9 - 1, (ii % 9) / 3 - 1, ii % 3 - 1); // neighbor zone index
            int zidx_neighb1D = MapIndex3DTo1D(zidx_neighb, para->zone_dim);
            // index not valid -- assumes MapIndex3DTo1D signals ALL
            // out-of-range neighbours (including upper bound) with a
            // negative value; TODO confirm.
            if (zidx_neighb1D < 0) continue;
            // find neighbour particle[j]
#pragma unroll
            for (int j = zone_pidx[zidx_neighb1D]; j < zone_pidx[zidx_neighb1D + 1]; j++) {
                if (i == j) continue;
                float3 vec_ij = cur_pos[i] - cur_pos[j];
                float len_ij = Norm2(vec_ij);
                len_ij = fmaxf(len_ij, M_EPS); // guard divisions below
                //float pol_ker = Poly6Kernel(para->dim, len_ij, para->h, para->poly6_factor);
                //float spi_ker = SpikyGradientKernel(para->dim, len_ij, para->h, para->spiky_grad_factor);
                float cub_ker = CubicSplineKernel(para->dim, len_ij, para->h, para->cubic_factor3D);
                float cub_ker_deri = CubicSplineKernelDerivative(para->dim, len_ij, para->h, para->cubic_factor3D);
                // Density (Continuity equation, summation approach)
                delta_density[i] += para->mass * cub_ker;
                //// Density (Continuity equation, differential update)
                //delta_density[i] += para->mass * cub_ker_deri * dot((velocity[i] - velocity[j]), (vec_ij / len_ij));
                // Pressure (Momentum equation)
                delta_pressure[i] -= para->mass * cub_ker_deri * (vec_ij / len_ij) *
                    (pressure[i] / fmaxf(M_EPS, pow(density[i], 2)) + pressure[j] / fmaxf(M_EPS, pow(density[j], 2)));
                // Viscosity -- only applied to approaching pairs (v_ij < 0)
                float v_ij = dot(velocity[i] - velocity[j], vec_ij);
                if (v_ij < 0) {
                    float viscous = -2.0 * para->alpha * para->h * para->C_s / fmaxf(M_EPS, density[i] + density[j]);
                    delta_viscosity[i] -= para->mass * cub_ker_deri * (vec_ij / len_ij) *
                        viscous * v_ij / fmaxf(M_EPS, pow(len_ij, 2) + 0.01 * pow(para->h, 2));
                }
            }
        }
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputeDeltaValue\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Compute delta_velocity and velocity using delta_pressure and delta_viscosity for each particle
//
////////////////////////////////////////////////////////////////////////////////
// Kernel (grid-stride loop over particles): combine the pressure and
// viscosity accelerations with gravity into delta_velocity, then apply a
// semi-implicit Euler step to velocity (v += dt * a).
__global__ void ComputeVelocity( WCSPHSystem* para,
    float3* cur_pos, float3* delta_pressure, float3* delta_viscosity, float3* delta_velocity, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputeVelocity\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // for each particle[i]
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        // gravity acts along y
        float3 G = make_float3(0, para->gravity, 0);
        // velocity (Momentum equation)
        /*if (Norm2(delta_pressure[i]) > 2000)
            delta_pressure[i] = delta_pressure[i] / Norm2(delta_pressure[i]) * 2000;*/
        delta_velocity[i] = delta_pressure[i] + delta_viscosity[i] + G;
        velocity[i] += para->time_delta * delta_velocity[i];
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputeVelocity\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Compute new position using velocity for each particle
//
////////////////////////////////////////////////////////////////////////////////
// Kernel (grid-stride loop over particles): advance positions one explicit
// Euler step into next_pos (x' = x + dt * v); cur_pos is left untouched so
// ConfineToBoundary can still derive corrections from the old position.
__global__ void ComputePosition( WCSPHSystem* para,
    float3* cur_pos, float3* next_pos, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputePosition\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // for each particle[i]
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        next_pos[i] = cur_pos[i] + para->time_delta * velocity[i];
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputePosition\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// If particle exceed the boundary, confine it to the inside, change the velocity and position
//
////////////////////////////////////////////////////////////////////////////////
// Kernel (grid-stride loop over particles): push particles that left the
// simulation box back inside. Two strategies:
//   CONFINE_RANDOM -- re-seat just inside the wall with a tiny random
//   jitter, then recompute velocity from the positional correction;
//   default -- mirror the overshoot back into the box and reflect the
//   offending velocity component, both damped by para->eta.
__global__ void ConfineToBoundary( WCSPHSystem* para, hiprandState_t* devStates,
    float3* cur_pos, float3* next_pos, float3* velocity) {
    // for each particle[i]
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        // walls inset by one particle radius so particles never clip the box
        float3 bmin = make_float3(para->particle_radius);
        float3 bmax = para->box_length - para->particle_radius;
#ifdef CONFINE_RANDOM
        if (next_pos[i].x <= bmin.x) {
            next_pos[i].x = bmin.x + M_EPS * cudaRandomFloat(devStates, i);
        }
        else if (next_pos[i].x >= bmax.x) {
            next_pos[i].x = bmax.x - M_EPS * cudaRandomFloat(devStates, i);
        }
        if (next_pos[i].y <= bmin.y) {
            next_pos[i].y = bmin.y + M_EPS * cudaRandomFloat(devStates, i);
        }
        else if (next_pos[i].y >= bmax.y) {
            next_pos[i].y = bmax.y - M_EPS * cudaRandomFloat(devStates, i);
        }
        if (next_pos[i].z <= bmin.z) {
            next_pos[i].z = bmin.z + M_EPS * cudaRandomFloat(devStates, i);
        }
        else if (next_pos[i].z >= bmax.z) {
            next_pos[i].z = bmax.z - M_EPS * cudaRandomFloat(devStates, i);
        }
        // change velocity to match the corrected displacement
        velocity[i] = (next_pos[i] - cur_pos[i]) / para->time_delta;
#else
        float ETA = para->eta;
        if (next_pos[i].x <= bmin.x) {
            next_pos[i].x = min(bmax.x, bmin.x + (bmin.x - next_pos[i].x) * ETA);
            velocity[i].x = -velocity[i].x * ETA;
        }
        else if (next_pos[i].x >= bmax.x) {
            next_pos[i].x = max(bmin.x, bmax.x - (next_pos[i].x - bmax.x) * ETA);
            velocity[i].x = -velocity[i].x * ETA;
        }
        if (next_pos[i].y <= bmin.y) {
            next_pos[i].y = min(bmax.y, bmin.y + (bmin.y - next_pos[i].y) * ETA);
            velocity[i].y = -velocity[i].y * ETA;
        }
        else if (next_pos[i].y >= bmax.y) {
            // BUGFIX: the overshoot was not scaled by ETA here, unlike the
            // other five wall branches -- top-wall bounces kept the full
            // penetration depth.
            next_pos[i].y = max(bmin.y, bmax.y - (next_pos[i].y - bmax.y) * ETA);
            velocity[i].y = -velocity[i].y * ETA;
        }
        if (next_pos[i].z <= bmin.z) {
            next_pos[i].z = min(bmax.z, bmin.z + (bmin.z - next_pos[i].z) * ETA);
            velocity[i].z = -velocity[i].z * ETA;
        }
        else if (next_pos[i].z >= bmax.z) {
            next_pos[i].z = max(bmin.z, bmax.z - (next_pos[i].z - bmax.z) * ETA);
            velocity[i].z = -velocity[i].z * ETA;
        }
#endif // CONFINE_RANDOM
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
}
////////////////////////////////////////////////////////////////////////////////
//
// Update the new density, pressure, velocity and position for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void UpdateParticles( WCSPHSystem* para,
    float* delta_density, float* density, float* pressure, float* velocity_len,
    float3* cur_pos, float3* next_pos, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do UpdateParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Commit the step for each particle[i] (grid-stride loop): integrate the
    // density, recompute the pressure from it, damp the velocity, cache its
    // magnitude for rendering, and advance cur_pos to the confined next_pos.
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        //// Density (Continuity equation, summation approach)
        //density[i] = delta_density[i];
        // Density (Continuity equation, differential update)
        density[i] += para->time_delta * delta_density[i];
        // Pressure update function (Tait's equation)
        //pressure[i] = para->rho_0 * pow(para->C_s, 2) / para->gamma * (pow(density[i] / para->rho_0, para->gamma) - 1.0);
        pressure[i] = PressureUpdate(density[i], para->rho_0, para->C_s, para->gamma);
        // air resistance; 1.0f keeps the arithmetic in single precision
        // (the original 1.0 double literal forced a double round trip)
        velocity[i] *= (1.0f - para->f_air);
        velocity_len[i] = Norm2(velocity[i]);
        // next_pos becomes the current position for the next sub-step
        cur_pos[i] = next_pos[i];
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish UpdateParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Use for debug, output the variable value on gpu
//
////////////////////////////////////////////////////////////////////////////////
__global__ void DebugOutput( WCSPHSystem* para,
    int* particle_zidx, int* zone_pidx,
    float* delta_density, float* density, float* pressure,
    float3* cur_pos, float3* next_pos, float3* delta_pressure, float3* delta_viscocity, float3* delta_velocity, float3* velocity) {
    // Debugging helper: dump the full per-particle state of the simulation.
    // Intended for a single-thread launch; output is serialized by printf.
    const int total = para->particle_num;
    for (int p = 0; p < total; p++) {
        printf("Particle #%d:", p);
        printf("\n\t particle_bid: %d\n\t cur_pos (%f, %f, %f)\n\t next_pos (%f, %f, %f)\n", particle_zidx[p], cur_pos[p].x, cur_pos[p].y, cur_pos[p].z, next_pos[p].x, next_pos[p].y, next_pos[p].z);
        printf("\n\t delta_density (%f)\n\t delta_pressure (%f, %f, %f)\n\t delta_viscosity (%f, %f, %f)\n\t delta_velocity (%f, %f, %f)\n", delta_density[p], delta_pressure[p].x, delta_pressure[p].y, delta_pressure[p].z, delta_viscocity[p].x, delta_viscocity[p].y, delta_viscocity[p].z, delta_velocity[p].x, delta_velocity[p].y, delta_velocity[p].z);
        printf("\n\t density (%f)\n\t pressure (%f)\n\t velocity (%f, %f, %f)\n", density[p], pressure[p], velocity[p].x, velocity[p].y, velocity[p].z);
        printf("\n");
    }
}
////////////////////////////////////////////////////////////////////////////////
//
// Smartly choose the time step to calculate
//
////////////////////////////////////////////////////////////////////////////////
__global__ void AdaptiveStep( WCSPHSystem* para,
    float* density,
    float3* delta_velocity, float3* velocity) {
    // Serial pass over all particles (intended for a single-thread launch):
    // find the largest speed, acceleration magnitude and density, then derive
    // the admissible time step from the CFL, force and acoustic conditions.
    // NOTE(review): FLT_MIN is the smallest *positive* float, not -infinity;
    // it works here only because the tracked quantities are non-negative —
    // confirm that was the intent.
    float max_v = FLT_MIN;
    float max_a = FLT_MIN;
    float max_r = FLT_MIN;
    for (int i = 0; i < para->particle_num; i++) {
        // Norm2 is evaluated once per quantity (the original computed each
        // norm twice: once for the comparison and once for the assignment)
        max_v = fmaxf(max_v, Norm2(velocity[i]));
        max_a = fmaxf(max_a, Norm2(delta_velocity[i]));
        max_r = fmaxf(max_r, density[i]);
    }
    // Single-precision math functions replace the double-precision sqrt/pow
    // the original used inside this float-only kernel.
    float dt_cfl = para->CFL_v * para->h / max_v;                 // CFL (advection) condition
    float dt_f = para->CFL_a * sqrtf(para->h / max_a);            // force condition
    float dt_a = 0.2f * para->h / (para->C_s * powf(sqrtf(max_r / para->rho_0), para->gamma)); // acoustic condition
    para->time_delta = fminf(dt_cfl, fminf(dt_f, dt_a));
}
////////////////////////////////////////////////////////////////////////////////
//
// Find maximum and minimum value of velocity_len for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void FindVelocityLenMinMax(unsigned int blockSize, float* velocity_len, float* g_odata, unsigned int num, bool findmin) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do FindVelocityLenMinMax\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Shared scratch for the tree reduction: one float per thread; the caller
    // must pass blockSize * sizeof(float) as the dynamic shared-memory size.
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    // Each thread folds elements i and i + blockSize, then strides by the
    // whole (doubled) grid — halves the number of reduction passes needed.
    unsigned int i = blockIdx.x * (blockSize * 2) + tid;
    unsigned int gridSize = blockSize * 2 * gridDim.x;
    // Identity element: a huge value for min, 0 for max (speeds are >= 0).
    if (findmin)
        sdata[tid] = 1e20;
    else sdata[tid] = 0;
    // func selects the min-like or max-like combiner from the lookup table.
    pfunc func = find_minmax[findmin];
    while (i < num) {
        sdata[tid] = func(sdata[tid], velocity_len[i]);
        if (i + blockSize < num)
            sdata[tid] = func(sdata[tid], velocity_len[i + blockSize]);
        i += gridSize;
    }
    __syncthreads();
    // Classic shared-memory tree reduction. The blockSize comparisons are
    // uniform across the block, so the __syncthreads() inside them is safe.
    // NOTE(review): assumes blockDim.x == blockSize and blockSize is a power
    // of two >= 64 — confirm at the call sites.
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] = func(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] = func(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { sdata[tid] = func(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
    // Final 32 values are reduced warp-synchronously by the helper.
    if (tid < 32) { FindMinMaxWarpReduce(blockSize, sdata, tid, func); }
    // Thread 0 publishes this block's partial result.
    if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; }
#ifdef DEBUG
    if (tid == 0) { printf("velocity_%s: %f\n", findmin ? "min" : "max", g_odata[blockIdx.x]); }
#endif // DEBUG
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish FindVelocityLenMinMax\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Export particle information to VBO for drawing, blue(0, 0, 1) is slow, white(1, 1, 1) is fast
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ExportParticleInfo( WCSPHSystem* para,
    float* velocity_len, float* velo_min, float* velo_max,
    float3* cur_pos, float3* pos_info, float3* color_info) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ExportParticleInfo\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Fill the mapped VBO buffers for each particle (grid-stride loop):
    // position recentered on the box middle, and a color that fades from
    // blue (slow) toward white (fast) based on the normalized speed.
    const int stride = GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    for (int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
         i < para->particle_num; i += stride) {
        pos_info[i] = cur_pos[i] - para->box_length / 2;
#ifdef DYNAMIC_VELOCITY_MINMAX
        // use dynamic velocity min max, focus on relative velocity changing between particles
        float percent = NormalizeTo01(velocity_len[i], *velo_min, *velo_max);
#else
        // use set velocity min max, focus on overall velocity changing between different systems
        float percent = NormalizeTo01(velocity_len[i], para->velo_draw_min, para->velo_draw_max);
#endif
        color_info[i] = make_float3(percent, percent, 1.0);
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ExportParticleInfo\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Get next frame information
//
////////////////////////////////////////////////////////////////////////////////
void getNextFrame(WCSPHSystem* para, cudaGraphicsResource* position_resource, cudaGraphicsResource* color_resource) {
    // Advance the simulation by step_each_frame sub-steps, then write particle
    // positions and colors into the mapped OpenGL VBOs for rendering.
    dim3 blocks(para->grid_size);
    dim3 threads(para->block_size);
    unsigned int num = para->particle_num;
    for (int i = 0; i < para->step_each_frame; i++) {
        //DebugOutput <<<1, 1 >>> (sph_device, particle_bid, block_pidx, block_pnum, delta_density, density, pressure, cur_pos, next_pos, delta_pressure, delta_viscosity, delta_velocity, velocity);
        //hipDeviceSynchronize();
        //thrust::fill(thrust::device, zone_pidx, zone_pidx + para->zone_size + 1, 0);
        // reset the per-zone particle counters before re-binning
#ifdef CUDA_MEMSET_ASYNC
        checkCudaErrors(hipMemsetAsync(zone_pidx, 0, (para->zone_size + 1) * sizeof(int)));
#else
        checkCudaErrors(hipMemset(zone_pidx, 0, (para->zone_size + 1) * sizeof(int)));
#endif // CUDA_MEMSET_ASYNC
        // bin particles into spatial zones and count zone occupancy
        hipLaunchKernelGGL(( ComputeZoneIdx) , dim3(blocks), dim3(threads) , 0, 0, sph_device, particle_zidx, zone_pidx, cur_pos);
        hipDeviceSynchronize();
        // duplicate the zone-index key array so each of the parallel sorts in
        // SortParticles consumes (and destroys) its own copy of the keys
#ifdef CUDA_MEMCPY_ASYNC
        hipStream_t stream[kCudaSortArrayCount];
#endif // CUDA_MEMCPY_ASYNC
        for (int k = 1; k < kCudaSortArrayCount; k++) {
#ifdef CUDA_MEMCPY_ASYNC
            checkCudaErrors(hipStreamCreate(&stream[k]));
            checkCudaErrors(hipMemcpyAsync(particle_zidx + num * k, particle_zidx, num * sizeof(int), hipMemcpyDeviceToDevice, stream[k]));
#else
            checkCudaErrors(hipMemcpy(particle_zidx + num * k, particle_zidx, num * sizeof(int), hipMemcpyDeviceToDevice));
#endif // CUDA_MEMCPY_ASYNC
        }
#ifdef CUDA_MEMCPY_ASYNC
        for (int k = 1; k < kCudaSortArrayCount; k++) {
            checkCudaErrors(hipStreamSynchronize(stream[k]));
            checkCudaErrors(hipStreamDestroy(stream[k]));
        }
#endif // CUDA_MEMCPY_ASYNC
        // reorder the per-particle arrays into zone order (one thread per array)
        hipLaunchKernelGGL(( SortParticles) , dim3(1), dim3(kCudaSortArrayCount) , 0, 0, sph_device, particle_zidx, zone_pidx, density, pressure, cur_pos, velocity);
        hipDeviceSynchronize();
        // get prefix sum, then pidx is the first particle index of this zone
        thrust::exclusive_scan(thrust::device, zone_pidx, zone_pidx + para->zone_size + 1, zone_pidx);
        // SPH pipeline: neighbor sums -> velocity -> position -> boundary -> commit
        hipLaunchKernelGGL(( ComputeDeltaValue) , dim3(blocks), dim3(threads) , 0, 0, sph_device, zone_pidx, delta_density, density, pressure, cur_pos, delta_pressure, delta_viscosity, velocity);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( ComputeVelocity) , dim3(blocks), dim3(threads) , 0, 0, sph_device, cur_pos, delta_pressure, delta_viscosity, delta_velocity, velocity);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( ComputePosition) , dim3(blocks), dim3(threads) , 0, 0, sph_device, cur_pos, next_pos, velocity);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( ConfineToBoundary) , dim3(blocks), dim3(threads) , 0, 0, sph_device, devStates, cur_pos, next_pos, velocity);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( UpdateParticles) , dim3(blocks), dim3(threads) , 0, 0, sph_device, delta_density, density, pressure, velocity_len, cur_pos, next_pos, velocity);
        hipDeviceSynchronize();
        // NOTE(review): a device-wide sync after every launch serializes the
        // pipeline; the default stream already orders these kernels — confirm
        // whether the per-kernel syncs are needed outside of debugging.
    }
    // map the OpenGL VBOs and obtain device pointers to write into
    float3* pos_info;
    float3* color_info;
    checkCudaErrors(hipGraphicsMapResources(1, &position_resource));
    checkCudaErrors(hipGraphicsMapResources(1, &color_resource));
    hipDeviceSynchronize();
    size_t pbytes, cbytes;
    checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&pos_info, &pbytes, position_resource));
    checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&color_info, &cbytes, color_resource));
    hipDeviceSynchronize();
#ifdef DYNAMIC_VELOCITY_MINMAX
    // single-block reductions producing the current min/max particle speed
    unsigned int thread_num = para->block_size;
    hipLaunchKernelGGL(( FindVelocityLenMinMax) , dim3(1), dim3(threads), thread_num * sizeof(float) , 0, thread_num, velocity_len, velo_min, num, true); // find min
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( FindVelocityLenMinMax) , dim3(1), dim3(threads), thread_num * sizeof(float) , 0, thread_num, velocity_len, velo_max, num, false); // find max
    hipDeviceSynchronize();
#endif // DYNAMIC_VELOCITY_MINMAX
    // write positions/colors into the VBOs, then release them back to OpenGL
    hipLaunchKernelGGL(( ExportParticleInfo) , dim3(blocks), dim3(threads) , 0, 0, sph_device, velocity_len, velo_min, velo_max, cur_pos, pos_info, color_info);
    hipDeviceSynchronize();
    checkCudaErrors(hipGraphicsUnmapResources(1, &position_resource));
    checkCudaErrors(hipGraphicsUnmapResources(1, &color_resource));
    hipDeviceSynchronize();
}
| 8456bb58a8816e49f7a11074afc826ac969f0020.cu | #include "WCSPHSolver.cuh"
#include "utils/handler.h"
#include <helper_math.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
//#define DEBUG
//#define CONFINE_RANDOM
#define CUDA_MEMCPY_ASYNC
#define CUDA_MEMSET_ASYNC
//#define DYNAMIC_VELOCITY_MINMAX
const int kCudaSortArrayCount = 4;
// kCudaMemcpyTime sizes the stream array in InitDeviceSystem, which is
// compiled when either async macro is defined. The original guard tested
// CUDA_MEMCPY_ASYNC twice, so a memset-only build would miss this constant.
#if defined(CUDA_MEMCPY_ASYNC) || defined(CUDA_MEMSET_ASYNC)
const int kCudaMemcpyTime = 7;
#endif // defined(CUDA_MEMCPY_ASYNC) || defined(CUDA_MEMSET_ASYNC)
////////////////////////////////////////////////////////////////////////////////
// Device array declare
////////////////////////////////////////////////////////////////////////////////
WCSPHSystem* sph_device = NULL; // device-side copy of the simulation parameters
int* particle_zidx = NULL; // each particle belongs to which zone
int* zone_pidx = NULL; // first particle index in one zone
curandState* devStates = NULL; // per-particle RNG state (used by CONFINE_RANDOM)
float3* color = NULL; // color of particles
float3* cur_pos = NULL; // particle positions at the current step
float3* next_pos = NULL; // particle positions at the next step
float* density = NULL; // per-particle density
float* delta_density = NULL; // per-step density change
float* pressure = NULL; // per-particle pressure
float3* delta_pressure = NULL; // pressure-gradient acceleration term
float3* delta_viscosity = NULL; // viscosity acceleration term
float* velo_min = NULL; // reduction output: minimum particle speed
float* velo_max = NULL; // reduction output: maximum particle speed
float* velocity_len = NULL; // per-particle speed (|velocity|)
float3* velocity = NULL; // per-particle velocity
float3* delta_velocity = NULL; // per-particle acceleration
////////////////////////////////////////////////////////////////////////////////
//
// Init CUDA Device System
//
////////////////////////////////////////////////////////////////////////////////
void InitDeviceSystem(WCSPHSystem* para, float* dens_init, float3* pos_init, float3* velo_init) {
    // Allocate every device-side array and upload the initial state
    // (parameters, positions, densities, velocities). Uploads and zero-fills
    // are spread across kCudaMemcpyTime streams when the async macros are on.
#ifdef DEBUG
    std::cout << "Do InitDeviceSystem" << std::endl;
#endif // DEBUG
    int num = para->particle_num;
#if defined (CUDA_MEMCPY_ASYNC) || defined (CUDA_MEMSET_ASYNC)
    cudaStream_t stream[kCudaMemcpyTime];
    int streamnum = 0;
    for (int i = 0; i < kCudaMemcpyTime; i++) {
        checkCudaErrors(cudaStreamCreate(&stream[i]));
    }
#endif // CUDA_MEMCPY_ASYNC || CUDA_MEMSET_ASYNC
    // parameter struct
    checkCudaErrors(cudaMalloc((void**)&sph_device, sizeof(WCSPHSystem)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMemcpyAsync(sph_device, para, sizeof(WCSPHSystem), cudaMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(cudaMemcpy(sph_device, para, sizeof(WCSPHSystem), cudaMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    // kCudaSortArrayCount copies of the zone-index keys (one per sorted array)
    checkCudaErrors(cudaMalloc((void**)&particle_zidx, kCudaSortArrayCount * num * sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&zone_pidx, (para->zone_size + 1) * sizeof(int)));
    checkCudaErrors(cudaMalloc((void**)&devStates, num * sizeof(curandState)));
    checkCudaErrors(cudaMalloc((void**)&color, num * sizeof(float3)));
    // initial positions
    checkCudaErrors(cudaMalloc((void**)&cur_pos, num * sizeof(float3)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMemcpyAsync(cur_pos, pos_init, num * sizeof(float3), cudaMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(cudaMemcpy(cur_pos, pos_init, num * sizeof(float3), cudaMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMalloc((void**)&next_pos, num * sizeof(float3)));
#ifdef CUDA_MEMSET_ASYNC
    // NOTE(review): stream[3] is also handed out by streamnum++ for the
    // velocity upload below, so those two operations serialize — confirm the
    // hard-coded stream indices 3 and 5 are intentional.
    checkCudaErrors(cudaMemsetAsync(next_pos, 0, num * sizeof(float3), stream[3]));
#else
    checkCudaErrors(cudaMemset(next_pos, 0, num * sizeof(float3)));
#endif // CUDA_MEMSET_ASYNC
    // initial densities
    checkCudaErrors(cudaMalloc((void**)&density, num * sizeof(float)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMemcpyAsync(density, dens_init, num * sizeof(float), cudaMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(cudaMemcpy(density, dens_init, num * sizeof(float), cudaMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMalloc((void**)&delta_density, num * sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&pressure, num * sizeof(float)));
#ifdef CUDA_MEMSET_ASYNC
    checkCudaErrors(cudaMemsetAsync(pressure, 0, num * sizeof(float), stream[5]));
#else
    checkCudaErrors(cudaMemset(pressure, 0, num * sizeof(float)));
#endif // CUDA_MEMSET_ASYNC
    checkCudaErrors(cudaMalloc((void**)&delta_pressure, num * sizeof(float3)));
    checkCudaErrors(cudaMalloc((void**)&delta_viscosity, num * sizeof(float3)));
    // single-float outputs of the min/max speed reductions
    checkCudaErrors(cudaMalloc((void**)&velo_min, sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&velo_max, sizeof(float)));
    checkCudaErrors(cudaMalloc((void**)&velocity_len, num * sizeof(float)));
    // initial velocities
    checkCudaErrors(cudaMalloc((void**)&velocity, num * sizeof(float3)));
#ifdef CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMemcpyAsync(velocity, velo_init, num * sizeof(float3), cudaMemcpyHostToDevice, stream[streamnum++]));
#else
    checkCudaErrors(cudaMemcpy(velocity, velo_init, num * sizeof(float3), cudaMemcpyHostToDevice));
#endif // CUDA_MEMCPY_ASYNC
    checkCudaErrors(cudaMalloc((void**)&delta_velocity, num * sizeof(float3)));
#if defined (CUDA_MEMCPY_ASYNC) || defined (CUDA_MEMSET_ASYNC)
    // drain and destroy all upload streams before returning
    for (int i = 0; i < kCudaMemcpyTime; i++) {
        checkCudaErrors(cudaStreamSynchronize(stream[i]));
        checkCudaErrors(cudaStreamDestroy(stream[i]));
    }
#endif // CUDA_MEMCPY_ASYNC || CUDA_MEMSET_ASYNC
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
#ifdef DEBUG
    std::cout << "Finish InitDeviceSystem" << std::endl;
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Free CUDA Device System
//
////////////////////////////////////////////////////////////////////////////////
void FreeDeviceSystem(WCSPHSystem* para) {
#ifdef DEBUG
    std::cout << "Do FreeDeviceSystem" << std::endl;
#endif // DEBUG
    // Release the host-side parameter struct and every device allocation made
    // in InitDeviceSystem.
    delete para;
    checkCudaErrors(cudaFree(sph_device));
    checkCudaErrors(cudaFree(particle_zidx));
    checkCudaErrors(cudaFree(zone_pidx));
    checkCudaErrors(cudaFree(devStates));
    checkCudaErrors(cudaFree(color));
    checkCudaErrors(cudaFree(cur_pos));
    checkCudaErrors(cudaFree(next_pos));
    checkCudaErrors(cudaFree(density));
    checkCudaErrors(cudaFree(delta_density));
    checkCudaErrors(cudaFree(pressure));
    checkCudaErrors(cudaFree(delta_pressure));
    checkCudaErrors(cudaFree(delta_viscosity));
    checkCudaErrors(cudaFree(velo_min));
    checkCudaErrors(cudaFree(velo_max));
    checkCudaErrors(cudaFree(velocity_len));
    checkCudaErrors(cudaFree(velocity));
    checkCudaErrors(cudaFree(delta_velocity));
#ifdef DEBUG
    // bug fix: the original message said "Finish InitDeviceSystem" (copy-paste)
    std::cout << "Finish FreeDeviceSystem" << std::endl;
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
// CUDA function are implemented here
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Compute which zone each particle belongs to
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ComputeZoneIdx( WCSPHSystem* para,
    int* particle_zidx, int* zone_pidx,
    float3* cur_pos) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputeBid\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Assign each particle to its spatial zone and count how many particles
    // land in every zone (grid-stride loop; counts are accumulated atomically
    // and later turned into offsets by an exclusive scan in the caller).
    const int stride = GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    for (int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
         i < para->particle_num; i += stride) {
        // 3D zone cell containing this particle's position
        int3 cell = make_int3(cur_pos[i] / para->zone_length);
        particle_zidx[i] = MapIndex3DTo1D(cell, para->zone_dim);
        atomicAdd(&zone_pidx[particle_zidx[i]], 1);
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputeBid\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Use Radix sort to place particle in zone order
//
////////////////////////////////////////////////////////////////////////////////
__global__ void SortParticles( WCSPHSystem* para,
    int* particle_zidx, int* zone_pidx,
    float* density, float* pressure,
    float3* cur_pos, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do SortParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Reorder the per-particle arrays into zone order. The kernel is launched
    // with <<<1, kCudaSortArrayCount>>>; each thread runs its own
    // thrust::stable_sort_by_key against a private copy of the zone-index
    // keys (the caller duplicated particle_zidx into num-sized slices, since
    // sort_by_key reorders the keys it consumes).
    int num = para->particle_num;
    if (threadIdx.x == 0) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 0, particle_zidx + num * 1, cur_pos);
    }
    else if (threadIdx.x == 1) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 1, particle_zidx + num * 2, density);
    }
    else if (threadIdx.x == 2) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 2, particle_zidx + num * 3, pressure);
    }
    else if (threadIdx.x == 3) {
        thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 3, particle_zidx + num * 4, velocity);
    }
    //else if (threadIdx.x == 4) {
    //	thrust::stable_sort_by_key(thrust::device, particle_zidx + num * 4, particle_zidx + num * 5, next_pos);
    //}
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish SortParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Compute delta value of density, pressure and viscosity for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ComputeDeltaValue( WCSPHSystem* para,
    int* zone_pidx,
    float* delta_density, float* density, float* pressure,
    float3* cur_pos, float3* delta_pressure, float3* delta_viscosity, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputeDeltaValue\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // For each particle[i] (grid-stride loop): accumulate SPH neighbor sums —
    // the density contribution, the pressure-gradient term and the artificial
    // viscosity term — over particles in the 27 surrounding zones.
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        // zone cell containing particle i
        int3 zidx = make_int3(cur_pos[i] / para->zone_length);
        // Initialize accumulators
        delta_density[i] = 0.0;
        delta_pressure[i] = make_float3(0, 0, 0);
        delta_viscosity[i] = make_float3(0, 0, 0);
        // for each neighbor zone in the 3x3x3 block around zidx
        for (int ii = 0; ii < 27; ii++) {
            int3 zidx_neighb = zidx + make_int3(ii / 9 - 1, (ii % 9) / 3 - 1, ii % 3 - 1); // neighbor zone index
            int zidx_neighb1D = MapIndex3DTo1D(zidx_neighb, para->zone_dim);
            if (zidx_neighb1D < 0) continue; // index not valid
            // find neighbour particle[j]; zone_pidx holds prefix-summed zone
            // start offsets, so [pidx[z], pidx[z+1]) spans zone z's particles
#pragma unroll
            for (int j = zone_pidx[zidx_neighb1D]; j < zone_pidx[zidx_neighb1D + 1]; j++) {
                if (i == j) continue;
                float3 vec_ij = cur_pos[i] - cur_pos[j];
                float len_ij = Norm2(vec_ij);
                len_ij = fmaxf(len_ij, M_EPS); // guard against coincident particles
                //float pol_ker = Poly6Kernel(para->dim, len_ij, para->h, para->poly6_factor);
                //float spi_ker = SpikyGradientKernel(para->dim, len_ij, para->h, para->spiky_grad_factor);
                float cub_ker = CubicSplineKernel(para->dim, len_ij, para->h, para->cubic_factor3D);
                float cub_ker_deri = CubicSplineKernelDerivative(para->dim, len_ij, para->h, para->cubic_factor3D);
                // Density (Continuity equation, summation approach)
                delta_density[i] += para->mass * cub_ker;
                //// Density (Continuity equation, differential update)
                //delta_density[i] += para->mass * cub_ker_deri * dot((velocity[i] - velocity[j]), (vec_ij / len_ij));
                // Pressure (Momentum equation); denominators clamped by M_EPS
                delta_pressure[i] -= para->mass * cub_ker_deri * (vec_ij / len_ij) *
                    (pressure[i] / fmaxf(M_EPS, pow(density[i], 2)) + pressure[j] / fmaxf(M_EPS, pow(density[j], 2)));
                // Viscosity — applied only when the pair is approaching (v_ij < 0)
                float v_ij = dot(velocity[i] - velocity[j], vec_ij);
                if (v_ij < 0) {
                    float viscous = -2.0 * para->alpha * para->h * para->C_s / fmaxf(M_EPS, density[i] + density[j]);
                    delta_viscosity[i] -= para->mass * cub_ker_deri * (vec_ij / len_ij) *
                        viscous * v_ij / fmaxf(M_EPS, pow(len_ij, 2) + 0.01 * pow(para->h, 2));
                }
            }
        }
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputeDeltaValue\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Compute delta_velocity and velocity using delta_pressure and delta_viscosity for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ComputeVelocity( WCSPHSystem* para,
    float3* cur_pos, float3* delta_pressure, float3* delta_viscosity, float3* delta_velocity, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputeVelocity\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Integrate the momentum equation (grid-stride loop): the acceleration is
    // the pressure term plus the viscosity term plus gravity, applied over
    // one time step.
    const float3 gravity = make_float3(0, para->gravity, 0);
    const int stride = GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    for (int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
         i < para->particle_num; i += stride) {
        delta_velocity[i] = delta_pressure[i] + delta_viscosity[i] + gravity;
        velocity[i] += para->time_delta * delta_velocity[i];
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputeVelocity\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Compute new position using velocity for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ComputePosition( WCSPHSystem* para,
    float3* cur_pos, float3* next_pos, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do ComputePosition\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Explicit Euler position update (grid-stride loop): the tentative next
    // position is the current one advanced by velocity over one time step.
    const int stride = GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    for (int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
         i < para->particle_num; i += stride) {
        next_pos[i] = cur_pos[i] + para->time_delta * velocity[i];
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish ComputePosition\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// If particle exceed the boundary, confine it to the inside, change the velocity and position
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ConfineToBoundary( WCSPHSystem* para, curandState* devStates,
    float3* cur_pos, float3* next_pos, float3* velocity) {
    // Clamp particles that left the box back inside (grid-stride loop).
    // CONFINE_RANDOM nudges escaped particles just inside the wall with a
    // random epsilon and rebuilds velocity from the corrected displacement;
    // otherwise the overshoot is reflected and the velocity component flipped,
    // both damped by the restitution factor eta.
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        // valid interior: one particle radius away from every wall
        float3 bmin = make_float3(para->particle_radius);
        float3 bmax = para->box_length - para->particle_radius;
#ifdef CONFINE_RANDOM
        if (next_pos[i].x <= bmin.x) {
            next_pos[i].x = bmin.x + M_EPS * cudaRandomFloat(devStates, i);
        }
        else if (next_pos[i].x >= bmax.x) {
            next_pos[i].x = bmax.x - M_EPS * cudaRandomFloat(devStates, i);
        }
        if (next_pos[i].y <= bmin.y) {
            next_pos[i].y = bmin.y + M_EPS * cudaRandomFloat(devStates, i);
        }
        else if (next_pos[i].y >= bmax.y) {
            next_pos[i].y = bmax.y - M_EPS * cudaRandomFloat(devStates, i);
        }
        if (next_pos[i].z <= bmin.z) {
            next_pos[i].z = bmin.z + M_EPS * cudaRandomFloat(devStates, i);
        }
        else if (next_pos[i].z >= bmax.z) {
            next_pos[i].z = bmax.z - M_EPS * cudaRandomFloat(devStates, i);
        }
        // change velocity to match the corrected displacement
        velocity[i] = (next_pos[i] - cur_pos[i]) / para->time_delta;
#else
        float ETA = para->eta;
        if (next_pos[i].x <= bmin.x) {
            next_pos[i].x = min(bmax.x, bmin.x + (bmin.x - next_pos[i].x) * ETA);
            velocity[i].x = -velocity[i].x * ETA;
        }
        else if (next_pos[i].x >= bmax.x) {
            next_pos[i].x = max(bmin.x, bmax.x - (next_pos[i].x - bmax.x) * ETA);
            velocity[i].x = -velocity[i].x * ETA;
        }
        if (next_pos[i].y <= bmin.y) {
            next_pos[i].y = min(bmax.y, bmin.y + (bmin.y - next_pos[i].y) * ETA);
            velocity[i].y = -velocity[i].y * ETA;
        }
        else if (next_pos[i].y >= bmax.y) {
            // bug fix: the reflected overshoot on the +y wall was not scaled
            // by ETA, unlike every other wall — restitution now applies uniformly
            next_pos[i].y = max(bmin.y, bmax.y - (next_pos[i].y - bmax.y) * ETA);
            velocity[i].y = -velocity[i].y * ETA;
        }
        if (next_pos[i].z <= bmin.z) {
            next_pos[i].z = min(bmax.z, bmin.z + (bmin.z - next_pos[i].z) * ETA);
            velocity[i].z = -velocity[i].z * ETA;
        }
        else if (next_pos[i].z >= bmax.z) {
            next_pos[i].z = max(bmin.z, bmax.z - (next_pos[i].z - bmax.z) * ETA);
            velocity[i].z = -velocity[i].z * ETA;
        }
#endif // CONFINE_RANDOM
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
}
////////////////////////////////////////////////////////////////////////////////
//
// Update the new density, pressure, velocity and position for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void UpdateParticles( WCSPHSystem* para,
    float* delta_density, float* density, float* pressure, float* velocity_len,
    float3* cur_pos, float3* next_pos, float3* velocity) {
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Do UpdateParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
    // Commit the step for each particle[i] (grid-stride loop): integrate the
    // density, recompute the pressure, damp the velocity, cache its magnitude
    // for rendering, and advance cur_pos to the confined next_pos.
    int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
    while (i < para->particle_num) {
        //// Density (Continuity equation, summation approach)
        //density[i] = delta_density[i];
        // Density (Continuity equation, differential update)
        density[i] += para->time_delta * delta_density[i];
        // Pressure update function (Tait's equation)
        //pressure[i] = para->rho_0 * pow(para->C_s, 2) / para->gamma * (pow(density[i] / para->rho_0, para->gamma) - 1.0);
        pressure[i] = PressureUpdate(density[i], para->rho_0, para->C_s, para->gamma);
        velocity[i] *= (1.0 - para->f_air); // air resistance damping
        velocity_len[i] = Norm2(velocity[i]);
        cur_pos[i] = next_pos[i];
        i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
    }
#ifdef DEBUG
    if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
        printf("Block #(%d,%d,%d) Finish UpdateParticles\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Use for debug, output the variable value on gpu
//
////////////////////////////////////////////////////////////////////////////////
__global__ void DebugOutput( WCSPHSystem* para,
    int* particle_zidx, int* zone_pidx,
    float* delta_density, float* density, float* pressure,
    float3* cur_pos, float3* next_pos, float3* delta_pressure, float3* delta_viscocity, float3* delta_velocity, float3* velocity) {
    // Debugging helper: print the full per-particle simulation state.
    // Intended for a single-thread launch; output is serialized by printf.
    //for (int i = 0; i < para->zone_size; i++) {
    //	printf("Zone #%d:", i);
    //	printf(" \n\t zone pidx: %d\n", zone_pidx[i]);
    //	printf("\n");
    //}
    for (int i = 0; i < para->particle_num; i++) {
        printf("Particle #%d:", i);
        printf("\n\t particle_bid: %d\n\t cur_pos (%f, %f, %f)\n\t next_pos (%f, %f, %f)\n", particle_zidx[i], cur_pos[i].x, cur_pos[i].y, cur_pos[i].z, next_pos[i].x, next_pos[i].y, next_pos[i].z);
        printf("\n\t delta_density (%f)\n\t delta_pressure (%f, %f, %f)\n\t delta_viscosity (%f, %f, %f)\n\t delta_velocity (%f, %f, %f)\n", delta_density[i], delta_pressure[i].x, delta_pressure[i].y, delta_pressure[i].z, delta_viscocity[i].x, delta_viscocity[i].y, delta_viscocity[i].z, delta_velocity[i].x, delta_velocity[i].y, delta_velocity[i].z);
        printf("\n\t density (%f)\n\t pressure (%f)\n\t velocity (%f, %f, %f)\n", density[i], pressure[i], velocity[i].x, velocity[i].y, velocity[i].z);
        printf("\n");
    }
}
////////////////////////////////////////////////////////////////////////////////
//
// Smartly choose the time step to calculate
//
////////////////////////////////////////////////////////////////////////////////
__global__ void AdaptiveStep( WCSPHSystem* para,
    float* density,
    float3* delta_velocity, float3* velocity) {
    // Serial pass over all particles (intended for a single-thread launch):
    // find the largest speed, acceleration magnitude and density, then derive
    // the admissible time step from the CFL, force and acoustic conditions.
    // NOTE(review): FLT_MIN is the smallest *positive* float, not -infinity;
    // this works only because norms and densities are non-negative — confirm.
    float max_v = FLT_MIN;
    float max_a = FLT_MIN;
    float max_r = FLT_MIN;
    for (int i = 0; i < para->particle_num; i++) {
        if (Norm2(velocity[i]) > max_v) {
            max_v = Norm2(velocity[i]);
        }
        if (Norm2(delta_velocity[i]) > max_a) {
            max_a = Norm2(delta_velocity[i]);
        }
        if (density[i] > max_r) {
            max_r = density[i];
        }
    }
    float dt_cfl = para->CFL_v * para->h / max_v;      // CFL (advection) condition
    float dt_f = para->CFL_a * sqrt(para->h / max_a);  // force condition
    float dt_a = 0.2 * para->h / (para->C_s * pow(sqrt(max_r / para->rho_0), para->gamma)); // acoustic condition
    // the next step uses the most restrictive of the three limits
    para->time_delta = fminf(dt_cfl, fminf(dt_f, dt_a));
}
////////////////////////////////////////////////////////////////////////////////
//
// Find maximum and minimum value of velocity_len for each particle
//
////////////////////////////////////////////////////////////////////////////////
__global__ void FindVelocityLenMinMax(unsigned int blockSize, float* velocity_len, float* g_odata, unsigned int num, bool findmin) {
#ifdef DEBUG
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
printf("Block #(%d,%d,%d) Do FindVelocityLenMinMax\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (blockSize * 2) + tid;
unsigned int gridSize = blockSize * 2 * gridDim.x;
if (findmin)
sdata[tid] = 1e20;
else sdata[tid] = 0;
pfunc func = find_minmax[findmin];
while (i < num) {
sdata[tid] = func(sdata[tid], velocity_len[i]);
if (i + blockSize < num)
sdata[tid] = func(sdata[tid], velocity_len[i + blockSize]);
i += gridSize;
}
__syncthreads();
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = func(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = func(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = func(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
if (tid < 32) { FindMinMaxWarpReduce(blockSize, sdata, tid, func); }
if (tid == 0) { g_odata[blockIdx.x] = sdata[0]; }
#ifdef DEBUG
if (tid == 0) { printf("velocity_%s: %f\n", findmin ? "min" : "max", g_odata[blockIdx.x]); }
#endif // DEBUG
#ifdef DEBUG
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
printf("Block #(%d,%d,%d) Finish FindVelocityLenMinMax\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Export particle information to VBO for drawing, blue(0, 0, 1) is slow, white(1, 1, 1) is fast
//
////////////////////////////////////////////////////////////////////////////////
__global__ void ExportParticleInfo( WCSPHSystem* para,
float* velocity_len, float* velo_min, float* velo_max,
float3* cur_pos, float3* pos_info, float3* color_info) {
#ifdef DEBUG
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
printf("Block #(%d,%d,%d) Do ExportParticleInfo\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
// for each particle[i]
int i = MapIndex3DTo1D(blockIdx, gridDim) * GetDimTotalSize(blockDim) + threadIdx.x;
while (i < para->particle_num) {
pos_info[i] = cur_pos[i] - para->box_length / 2;
#ifdef DYNAMIC_VELOCITY_MINMAX
// use dynamic velocity min max, focus on relative velocity changing between particles
float percent = NormalizeTo01(velocity_len[i], *velo_min, *velo_max);
#else
// use set velocity min max, focus on overall velocity changing between different systems
float percent = NormalizeTo01(velocity_len[i], para->velo_draw_min, para->velo_draw_max);
#endif
color_info[i] = make_float3(percent, percent, 1.0);
i += GetDimTotalSize(gridDim) * GetDimTotalSize(blockDim); // gridSize * blockSize
}
#ifdef DEBUG
if (blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0 && threadIdx.x == 0)
printf("Block #(%d,%d,%d) Finish ExportParticleInfo\n", blockIdx.x, blockIdx.y, blockIdx.z);
#endif // DEBUG
}
////////////////////////////////////////////////////////////////////////////////
//
// Get next frame information
//
////////////////////////////////////////////////////////////////////////////////
void getNextFrame(WCSPHSystem* para, cudaGraphicsResource* position_resource, cudaGraphicsResource* color_resource) {
dim3 blocks(para->grid_size);
dim3 threads(para->block_size);
unsigned int num = para->particle_num;
for (int i = 0; i < para->step_each_frame; i++) {
//DebugOutput <<<1, 1 >>> (sph_device, particle_bid, block_pidx, block_pnum, delta_density, density, pressure, cur_pos, next_pos, delta_pressure, delta_viscosity, delta_velocity, velocity);
//cudaDeviceSynchronize();
//thrust::fill(thrust::device, zone_pidx, zone_pidx + para->zone_size + 1, 0);
#ifdef CUDA_MEMSET_ASYNC
checkCudaErrors(cudaMemsetAsync(zone_pidx, 0, (para->zone_size + 1) * sizeof(int)));
#else
checkCudaErrors(cudaMemset(zone_pidx, 0, (para->zone_size + 1) * sizeof(int)));
#endif // CUDA_MEMSET_ASYNC
ComputeZoneIdx <<<blocks, threads >>> (sph_device, particle_zidx, zone_pidx, cur_pos);
cudaDeviceSynchronize();
#ifdef CUDA_MEMCPY_ASYNC
cudaStream_t stream[kCudaSortArrayCount];
#endif // CUDA_MEMCPY_ASYNC
for (int k = 1; k < kCudaSortArrayCount; k++) {
#ifdef CUDA_MEMCPY_ASYNC
checkCudaErrors(cudaStreamCreate(&stream[k]));
checkCudaErrors(cudaMemcpyAsync(particle_zidx + num * k, particle_zidx, num * sizeof(int), cudaMemcpyDeviceToDevice, stream[k]));
#else
checkCudaErrors(cudaMemcpy(particle_zidx + num * k, particle_zidx, num * sizeof(int), cudaMemcpyDeviceToDevice));
#endif // CUDA_MEMCPY_ASYNC
}
#ifdef CUDA_MEMCPY_ASYNC
for (int k = 1; k < kCudaSortArrayCount; k++) {
checkCudaErrors(cudaStreamSynchronize(stream[k]));
checkCudaErrors(cudaStreamDestroy(stream[k]));
}
#endif // CUDA_MEMCPY_ASYNC
SortParticles <<<1, kCudaSortArrayCount >>> (sph_device, particle_zidx, zone_pidx, density, pressure, cur_pos, velocity);
cudaDeviceSynchronize();
// get prefix sum, then pidx is the first particle index of this zone
thrust::exclusive_scan(thrust::device, zone_pidx, zone_pidx + para->zone_size + 1, zone_pidx);
ComputeDeltaValue <<<blocks, threads >>> (sph_device, zone_pidx, delta_density, density, pressure, cur_pos, delta_pressure, delta_viscosity, velocity);
cudaDeviceSynchronize();
ComputeVelocity <<<blocks, threads >>> (sph_device, cur_pos, delta_pressure, delta_viscosity, delta_velocity, velocity);
cudaDeviceSynchronize();
ComputePosition <<<blocks, threads >>> (sph_device, cur_pos, next_pos, velocity);
cudaDeviceSynchronize();
ConfineToBoundary <<<blocks, threads >>> (sph_device, devStates, cur_pos, next_pos, velocity);
cudaDeviceSynchronize();
UpdateParticles <<<blocks, threads >>> (sph_device, delta_density, density, pressure, velocity_len, cur_pos, next_pos, velocity);
cudaDeviceSynchronize();
}
float3* pos_info;
float3* color_info;
checkCudaErrors(cudaGraphicsMapResources(1, &position_resource));
checkCudaErrors(cudaGraphicsMapResources(1, &color_resource));
cudaDeviceSynchronize();
size_t pbytes, cbytes;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&pos_info, &pbytes, position_resource));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&color_info, &cbytes, color_resource));
cudaDeviceSynchronize();
#ifdef DYNAMIC_VELOCITY_MINMAX
unsigned int thread_num = para->block_size;
FindVelocityLenMinMax <<<1, threads, thread_num * sizeof(float) >>> (thread_num, velocity_len, velo_min, num, true); // find min
cudaDeviceSynchronize();
FindVelocityLenMinMax <<<1, threads, thread_num * sizeof(float) >>> (thread_num, velocity_len, velo_max, num, false); // find max
cudaDeviceSynchronize();
#endif // DYNAMIC_VELOCITY_MINMAX
ExportParticleInfo <<<blocks, threads >>> (sph_device, velocity_len, velo_min, velo_max, cur_pos, pos_info, color_info);
cudaDeviceSynchronize();
checkCudaErrors(cudaGraphicsUnmapResources(1, &position_resource));
checkCudaErrors(cudaGraphicsUnmapResources(1, &color_resource));
cudaDeviceSynchronize();
}
|
f4a2abe160741967fc240a5a541fdb9b3b1226ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include<iostream>
#include"cuda.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<cmath>
#include<time.h>
#include <Windows.h>
#define N 106182300
using namespace std;
void add_with_cpu(double A[], int len) {
double ans = 0;
clock_t start, end;
start = clock();
for (int i = 0; i < len; i++) {
ans += A[i];
}
end = clock();
cout << "With cpu: " << "ans:" << ans << " " << "time:" << end - start << "ms" << endl;
}
__global__ static void add_with_all_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < len) {
atomicAdd(result, A[id]);
id += gridDim.x * blockDim.x;
}
}
__global__ static void add_with_few_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0.0;
while (id < len) {
temp += A[id];
id += gridDim.x * blockDim.x;
}
atomicAdd(result, temp);
}
__global__ static void add_without_atomic(double *A, double *B, int len) {
extern __shared__ double cache[];
int id = threadIdx.x + blockIdx.x * blockDim.x;
double x = 0;
if (id < len) {
x = A[id];
}
cache[threadIdx.x] = x;
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
if (threadIdx.x < offset)
cache[threadIdx.x] += cache[threadIdx.x + offset];
__syncthreads();
}
if (threadIdx.x == 0) {
B[blockIdx.x] == cache[0];
}
}
int main() {
double *A = new double[N];
double result = 0;
int len;
double *dev_A;
double *dev_result;
hipMalloc((void**)&dev_A, N * sizeof(double));
hipMalloc((void**)&dev_result, sizeof(double));
for (int i = 0; i < N; i++) {
A[i] = (double)(rand() % 101) / 101;
}
result = 0;
len = N;
hipMemcpy(dev_A, A, N * sizeof(double),
hipMemcpyHostToDevice);
hipEvent_t start, stop;
float elapsedTime;
// PART1 All atomic
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add_with_all_atomic << <64, 64 >> > (dev_A, len, dev_result);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(&result, dev_result, sizeof(double), hipMemcpyDeviceToHost);
cout << "With all atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//PART2 Few Atomic
double *dev_result1;
hipMalloc((void**)&dev_result1, sizeof(double));
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add_with_few_atomic << <64, 64 >> > (dev_A, len, dev_result1);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(&result, dev_result1, sizeof(double), hipMemcpyDeviceToHost);
cout << "With few atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//part3
double *dev_result2;
hipMalloc((void**)&dev_result2, sizeof(double));
const int block_size = 512;
const int num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
double *partial_sums = 0;
hipMalloc((void**)&partial_sums, sizeof(double));
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add_without_atomic << <num_blocks, block_size, block_size * sizeof(double) >> > (dev_A, partial_sums, len);
add_without_atomic << <1, num_blocks, num_blocks * sizeof(double) >> > (partial_sums, partial_sums + num_blocks, num_blocks);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(&dev_result2, partial_sums + num_blocks, sizeof(double), hipMemcpyDeviceToHost);
cout << "Without atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
add_with_cpu(A, len);
} | f4a2abe160741967fc240a5a541fdb9b3b1226ad.cu | #include<cstdio>
#include<iostream>
#include"cuda.h"
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<cmath>
#include<time.h>
#include <Windows.h>
#define N 106182300
using namespace std;
void add_with_cpu(double A[], int len) {
double ans = 0;
clock_t start, end;
start = clock();
for (int i = 0; i < len; i++) {
ans += A[i];
}
end = clock();
cout << "With cpu: " << "ans:" << ans << " " << "time:" << end - start << "ms" << endl;
}
__global__ static void add_with_all_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
while (id < len) {
atomicAdd(result, A[id]);
id += gridDim.x * blockDim.x;
}
}
__global__ static void add_with_few_atomic(double *A, int len, double *result) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0.0;
while (id < len) {
temp += A[id];
id += gridDim.x * blockDim.x;
}
atomicAdd(result, temp);
}
__global__ static void add_without_atomic(double *A, double *B, int len) {
extern __shared__ double cache[];
int id = threadIdx.x + blockIdx.x * blockDim.x;
double x = 0;
if (id < len) {
x = A[id];
}
cache[threadIdx.x] = x;
__syncthreads();
for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
if (threadIdx.x < offset)
cache[threadIdx.x] += cache[threadIdx.x + offset];
__syncthreads();
}
if (threadIdx.x == 0) {
B[blockIdx.x] == cache[0];
}
}
int main() {
double *A = new double[N];
double result = 0;
int len;
double *dev_A;
double *dev_result;
cudaMalloc((void**)&dev_A, N * sizeof(double));
cudaMalloc((void**)&dev_result, sizeof(double));
for (int i = 0; i < N; i++) {
A[i] = (double)(rand() % 101) / 101;
}
result = 0;
len = N;
cudaMemcpy(dev_A, A, N * sizeof(double),
cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float elapsedTime;
// PART1 All atomic
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add_with_all_atomic << <64, 64 >> > (dev_A, len, dev_result);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(&result, dev_result, sizeof(double), cudaMemcpyDeviceToHost);
cout << "With all atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//PART2 Few Atomic
double *dev_result1;
cudaMalloc((void**)&dev_result1, sizeof(double));
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add_with_few_atomic << <64, 64 >> > (dev_A, len, dev_result1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(&result, dev_result1, sizeof(double), cudaMemcpyDeviceToHost);
cout << "With few atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
//part3
double *dev_result2;
cudaMalloc((void**)&dev_result2, sizeof(double));
const int block_size = 512;
const int num_blocks = (len / block_size) + ((len % block_size) ? 1 : 0);
double *partial_sums = 0;
cudaMalloc((void**)&partial_sums, sizeof(double));
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
add_without_atomic << <num_blocks, block_size, block_size * sizeof(double) >> > (dev_A, partial_sums, len);
add_without_atomic << <1, num_blocks, num_blocks * sizeof(double) >> > (partial_sums, partial_sums + num_blocks, num_blocks);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(&dev_result2, partial_sums + num_blocks, sizeof(double), cudaMemcpyDeviceToHost);
cout << "Without atomic: " << "ans:" << result << " " << "time:" << elapsedTime << "ms" << endl;
add_with_cpu(A, len);
} |
a17c95368dec6b553ce0258f30f13ecc8a5df0b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <random>
#include <assert.h>
#include <chrono>
#include <iostream>
using real = float;
#define DEBUG
const real tau_v = 20.;
const real tau_exc = 5.;
const real tau_inh = 10.;
const real v_thresh = -50.;
const real v_reset = -60.;
const real v_rest = -49.;
const real wgt_exc = 60.*.27/5;
const real wgt_inh = -20*4.5/10;
const real ts = .1; // ms
// refractory period is 5ms
// gotta manually divide 5 / ts here because of FP error...
const unsigned char refractory_cycles = 50;
// Simulation parameters
const size_t N = 100000;
const size_t delay = 8;
const size_t max_conns_per_neuron = 1000;
const real seconds = 10.;
const size_t num_iterations = seconds / (ts * 1e-3) / delay;
const double resulting_sparsity = 1. * max_conns_per_neuron / N;
const size_t N_exc = N * 4 / 5;
// gpu optimization params
const size_t threads_per_block = 1000;
const size_t n_blocks = N / threads_per_block;
struct Injection {
real exc, inh;
Injection(real a, real b): exc(a), inh(b) {}
Injection() : exc(0.), inh(0.) {}
};
struct Connection {
unsigned int idx;
real wgt;
Connection(unsigned int d, real w) : idx(d), wgt(w) {}
Connection() : idx(0), wgt(0.) {}
};
const size_t bank_size = N * delay;
#define injection(polarity, delay_idx, neuron_idx) (bank_injections[polarity * bank_size + delay_idx * N + neuron_idx])
#define connection(neuron_idx, synapse_idx) (connections[neuron_idx * max_conns_per_neuron + synapse_idx])
unsigned char __ping_var = 0;
#include <stdio.h>
#define ping() fprintf(stderr, "ping %d\n", __ping_var++); fflush(stderr);
__global__
void iterate(real * v, real * ge, real * gi, unsigned char * refrac, Connection * connections, Injection * bank_injections, bool polarity
#ifdef DEBUG
, int *nspikes
#endif
) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ bool spikes[threads_per_block];
for (unsigned char delay_idx = 0; delay_idx < delay; ++delay_idx) {
real dv = (ge[idx] + gi[idx] - (v[idx] - v_rest)) / tau_v;
real dge = -ge[idx] / tau_exc;
real dgi = -gi[idx] / tau_inh;
if (refrac[idx]) {
--refrac[idx];
dv = 0.;
}
// read once to local register
real v_ = v[idx] + dv * ts;
Injection *inj = &injection(polarity, delay_idx, idx);
ge[idx] += inj->exc + dge * ts;
gi[idx] += inj->inh + dgi * ts;
inj->exc = 0.;
inj->inh = 0.;
bool spiked = v_ > v_thresh;
spikes[threadIdx.x] = spiked;
if (spiked) {
v_ = v_reset;
refrac[idx] = refractory_cycles;
}
v[idx] = v_;
__syncthreads();
for (unsigned int lidx = 0; lidx < blockDim.x; ++lidx) {
// this actually isn't that bad because either all threads take it or all don't
if (spikes[lidx]) {
#ifdef DEBUG
if (threadIdx.x == 0)
atomicAdd(nspikes, 1);
#endif
size_t tidx = blockDim.x * blockIdx.x + lidx;
Connection c = connection(tidx, threadIdx.x);
real wgt = c.wgt;
// We read from polarity, and write to ~polarity to avoid race conditions across blocks
if (c.wgt > 0)
atomicAdd(&injection(!polarity, delay_idx, c.idx).exc, wgt);
else
atomicAdd(&injection(!polarity, delay_idx, c.idx).inh, wgt);
}
}
}
}
int main() {
std::default_random_engine gen;
std::uniform_real_distribution<> voltage_dist(v_reset, v_thresh);
std::poisson_distribution<> connection_dist(N / max_conns_per_neuron);
// std::uniform_real_distribution<> unit_dist(0., 1.);
real * neuron_v;
real * cuda_neuron_v;
real * neuron_ge;
real * cuda_neuron_ge;
real * neuron_gi;
real * cuda_neuron_gi;
unsigned char * neuron_ref_cycles_rem;
unsigned char * cuda_neuron_ref_cycles_rem;
Connection * connections;
Connection * cuda_connections;
Injection * bank_injections;
Injection * cuda_bank_injections;
// allocate
neuron_v = new real[N];
assert(hipSuccess == hipMalloc(&cuda_neuron_v, sizeof(real) * N));
neuron_ge = new real[N];
assert(hipSuccess == hipMalloc(&cuda_neuron_ge, sizeof(real) * N));
neuron_gi = new real[N];
assert(hipSuccess == hipMalloc(&cuda_neuron_gi, sizeof(real) * N));
neuron_ref_cycles_rem = new unsigned char[N];
assert(hipSuccess == hipMalloc(&cuda_neuron_ref_cycles_rem, sizeof(unsigned char) * N));
connections = new Connection[max_conns_per_neuron * N];
assert(hipSuccess == hipMalloc(&cuda_connections, sizeof(Connection) * max_conns_per_neuron * N));
bank_injections = new Injection[2 * N * delay];
assert(hipSuccess == hipMalloc(&cuda_bank_injections, sizeof(Injection) * 2 * N * delay));
ping();
// initialize
for (size_t i = 0; i < N; ++i) {
neuron_v[i] = voltage_dist(gen);
neuron_ge[i] = neuron_gi[i] = 0.;
neuron_ref_cycles_rem[i] = 0;
size_t synapse_idx = connection_dist(gen) - 1;
for (unsigned conn_idx = 0; conn_idx < 1000 && synapse_idx < N; ++conn_idx) {
real wgt = (i < N_exc) ? wgt_exc : wgt_inh;
connection(i, conn_idx) = Connection(synapse_idx, wgt);
synapse_idx += connection_dist(gen);
}
}
ping();
// copy to GPU
assert(hipSuccess == hipMemcpy(cuda_neuron_v, neuron_v, sizeof(real) * N, hipMemcpyHostToDevice));
assert(hipSuccess == hipMemcpy(cuda_neuron_ge, neuron_ge, sizeof(real) * N, hipMemcpyHostToDevice));
assert(hipSuccess == hipMemcpy(cuda_neuron_gi, neuron_gi, sizeof(real) * N, hipMemcpyHostToDevice));
assert(hipSuccess == hipMemcpy(cuda_neuron_ref_cycles_rem, neuron_ref_cycles_rem, sizeof(unsigned char) * N, hipMemcpyHostToDevice));
assert(hipSuccess == hipMemcpy(cuda_connections, connections, sizeof(Connection) * N * max_conns_per_neuron, hipMemcpyHostToDevice));
assert(hipSuccess == hipMemcpy(cuda_bank_injections, bank_injections, sizeof(Injection) * 2 * N * delay, hipMemcpyHostToDevice));
ping();
#ifdef DEBUG
int nspikes = 0;
int * cuda_nspikes;
assert(hipSuccess == hipMalloc(&cuda_nspikes, sizeof(int)));
assert(hipSuccess == hipMemcpy(cuda_nspikes, &nspikes, sizeof(int), hipMemcpyHostToDevice));
#endif
// run
bool polarity = false;
std::cout << "begin!" << std::endl;
auto t1 = std::chrono::high_resolution_clock::now();
for (size_t it = 0; it < num_iterations; ++it) {
hipLaunchKernelGGL(( iterate), dim3(n_blocks), dim3(threads_per_block), 0, 0, cuda_neuron_v, cuda_neuron_ge, cuda_neuron_gi, cuda_neuron_ref_cycles_rem, cuda_connections, cuda_bank_injections, polarity
#ifdef DEBUG
, cuda_nspikes
#endif
);
polarity = !polarity;
}
auto t2 = std::chrono::high_resolution_clock::now();
auto diff = (t2 - t1);
std::cout << "Time Elapsed: " << (diff.count() / 1e9) << std::endl;
#ifdef DEBUG
hipMemcpy(&nspikes, cuda_nspikes, sizeof(int), hipMemcpyDeviceToHost);
std::cout << "Firing Rate: " << (1. * nspikes / N / seconds) << "Hz" << std::endl;
#endif
}
| a17c95368dec6b553ce0258f30f13ecc8a5df0b7.cu | #include <random>
#include <assert.h>
#include <chrono>
#include <iostream>
using real = float;
#define DEBUG
const real tau_v = 20.;
const real tau_exc = 5.;
const real tau_inh = 10.;
const real v_thresh = -50.;
const real v_reset = -60.;
const real v_rest = -49.;
const real wgt_exc = 60.*.27/5;
const real wgt_inh = -20*4.5/10;
const real ts = .1; // ms
// refractory period is 5ms
// gotta manually divide 5 / ts here because of FP error...
const unsigned char refractory_cycles = 50;
// Simulation parameters
const size_t N = 100000;
const size_t delay = 8;
const size_t max_conns_per_neuron = 1000;
const real seconds = 10.;
const size_t num_iterations = seconds / (ts * 1e-3) / delay;
const double resulting_sparsity = 1. * max_conns_per_neuron / N;
const size_t N_exc = N * 4 / 5;
// gpu optimization params
const size_t threads_per_block = 1000;
const size_t n_blocks = N / threads_per_block;
struct Injection {
real exc, inh;
Injection(real a, real b): exc(a), inh(b) {}
Injection() : exc(0.), inh(0.) {}
};
struct Connection {
unsigned int idx;
real wgt;
Connection(unsigned int d, real w) : idx(d), wgt(w) {}
Connection() : idx(0), wgt(0.) {}
};
const size_t bank_size = N * delay;
#define injection(polarity, delay_idx, neuron_idx) (bank_injections[polarity * bank_size + delay_idx * N + neuron_idx])
#define connection(neuron_idx, synapse_idx) (connections[neuron_idx * max_conns_per_neuron + synapse_idx])
unsigned char __ping_var = 0;
#include <stdio.h>
#define ping() fprintf(stderr, "ping %d\n", __ping_var++); fflush(stderr);
__global__
void iterate(real * v, real * ge, real * gi, unsigned char * refrac, Connection * connections, Injection * bank_injections, bool polarity
#ifdef DEBUG
, int *nspikes
#endif
) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ bool spikes[threads_per_block];
for (unsigned char delay_idx = 0; delay_idx < delay; ++delay_idx) {
real dv = (ge[idx] + gi[idx] - (v[idx] - v_rest)) / tau_v;
real dge = -ge[idx] / tau_exc;
real dgi = -gi[idx] / tau_inh;
if (refrac[idx]) {
--refrac[idx];
dv = 0.;
}
// read once to local register
real v_ = v[idx] + dv * ts;
Injection *inj = &injection(polarity, delay_idx, idx);
ge[idx] += inj->exc + dge * ts;
gi[idx] += inj->inh + dgi * ts;
inj->exc = 0.;
inj->inh = 0.;
bool spiked = v_ > v_thresh;
spikes[threadIdx.x] = spiked;
if (spiked) {
v_ = v_reset;
refrac[idx] = refractory_cycles;
}
v[idx] = v_;
__syncthreads();
for (unsigned int lidx = 0; lidx < blockDim.x; ++lidx) {
// this actually isn't that bad because either all threads take it or all don't
if (spikes[lidx]) {
#ifdef DEBUG
if (threadIdx.x == 0)
atomicAdd(nspikes, 1);
#endif
size_t tidx = blockDim.x * blockIdx.x + lidx;
Connection c = connection(tidx, threadIdx.x);
real wgt = c.wgt;
// We read from polarity, and write to ~polarity to avoid race conditions across blocks
if (c.wgt > 0)
atomicAdd(&injection(!polarity, delay_idx, c.idx).exc, wgt);
else
atomicAdd(&injection(!polarity, delay_idx, c.idx).inh, wgt);
}
}
}
}
int main() {
std::default_random_engine gen;
std::uniform_real_distribution<> voltage_dist(v_reset, v_thresh);
std::poisson_distribution<> connection_dist(N / max_conns_per_neuron);
// std::uniform_real_distribution<> unit_dist(0., 1.);
real * neuron_v;
real * cuda_neuron_v;
real * neuron_ge;
real * cuda_neuron_ge;
real * neuron_gi;
real * cuda_neuron_gi;
unsigned char * neuron_ref_cycles_rem;
unsigned char * cuda_neuron_ref_cycles_rem;
Connection * connections;
Connection * cuda_connections;
Injection * bank_injections;
Injection * cuda_bank_injections;
// allocate
neuron_v = new real[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_v, sizeof(real) * N));
neuron_ge = new real[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_ge, sizeof(real) * N));
neuron_gi = new real[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_gi, sizeof(real) * N));
neuron_ref_cycles_rem = new unsigned char[N];
assert(cudaSuccess == cudaMalloc(&cuda_neuron_ref_cycles_rem, sizeof(unsigned char) * N));
connections = new Connection[max_conns_per_neuron * N];
assert(cudaSuccess == cudaMalloc(&cuda_connections, sizeof(Connection) * max_conns_per_neuron * N));
bank_injections = new Injection[2 * N * delay];
assert(cudaSuccess == cudaMalloc(&cuda_bank_injections, sizeof(Injection) * 2 * N * delay));
ping();
// initialize
for (size_t i = 0; i < N; ++i) {
neuron_v[i] = voltage_dist(gen);
neuron_ge[i] = neuron_gi[i] = 0.;
neuron_ref_cycles_rem[i] = 0;
size_t synapse_idx = connection_dist(gen) - 1;
for (unsigned conn_idx = 0; conn_idx < 1000 && synapse_idx < N; ++conn_idx) {
real wgt = (i < N_exc) ? wgt_exc : wgt_inh;
connection(i, conn_idx) = Connection(synapse_idx, wgt);
synapse_idx += connection_dist(gen);
}
}
ping();
// copy to GPU
assert(cudaSuccess == cudaMemcpy(cuda_neuron_v, neuron_v, sizeof(real) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_neuron_ge, neuron_ge, sizeof(real) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_neuron_gi, neuron_gi, sizeof(real) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_neuron_ref_cycles_rem, neuron_ref_cycles_rem, sizeof(unsigned char) * N, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_connections, connections, sizeof(Connection) * N * max_conns_per_neuron, cudaMemcpyHostToDevice));
assert(cudaSuccess == cudaMemcpy(cuda_bank_injections, bank_injections, sizeof(Injection) * 2 * N * delay, cudaMemcpyHostToDevice));
ping();
#ifdef DEBUG
int nspikes = 0;
int * cuda_nspikes;
assert(cudaSuccess == cudaMalloc(&cuda_nspikes, sizeof(int)));
assert(cudaSuccess == cudaMemcpy(cuda_nspikes, &nspikes, sizeof(int), cudaMemcpyHostToDevice));
#endif
// run
bool polarity = false;
std::cout << "begin!" << std::endl;
auto t1 = std::chrono::high_resolution_clock::now();
for (size_t it = 0; it < num_iterations; ++it) {
iterate<<<n_blocks, threads_per_block>>>(cuda_neuron_v, cuda_neuron_ge, cuda_neuron_gi, cuda_neuron_ref_cycles_rem, cuda_connections, cuda_bank_injections, polarity
#ifdef DEBUG
, cuda_nspikes
#endif
);
polarity = !polarity;
}
auto t2 = std::chrono::high_resolution_clock::now();
auto diff = (t2 - t1);
std::cout << "Time Elapsed: " << (diff.count() / 1e9) << std::endl;
#ifdef DEBUG
cudaMemcpy(&nspikes, cuda_nspikes, sizeof(int), cudaMemcpyDeviceToHost);
std::cout << "Firing Rate: " << (1. * nspikes / N / seconds) << "Hz" << std::endl;
#endif
}
|
f2ceb56cf376ccac7ddb06f60f57fa34b9243120.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <chrono>
#include <iostream>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
void print(thrust::host_vector<int> h_vec) {
std::cout << h_vec.size() << "\n";
for (int i = 0; i < h_vec.size(); i++) {
std::cout << h_vec[i] << " ";
}
std::cout << "\n";
}
int main(void) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
thrust::host_vector<int> h_seg_aux(num_of_segments+1);
for (i = 0; i < num_of_segments+1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
thrust::host_vector<int> h_vec(num_of_elements);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
thrust::host_vector<int> h_seg(num_of_elements);
for (i = 0; i < num_of_segments; ++i){
for(int j = h_seg_aux[i]; j < h_seg_aux[i+1]; ++j) {
h_seg[j] = h_seg_aux[i];
}
}
//print(h_seg); print(h_vec);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
thrust::device_vector<int> d_vec = h_vec;
thrust::device_vector<int> d_seg = h_seg;
hipEventRecord(start);
thrust::sort_by_key(d_vec.begin(), d_vec.end(), d_seg.begin());
thrust::sort_by_key(d_seg.begin(), d_seg.end(), d_vec.begin());
hipEventRecord(stop);
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
thrust::copy(d_seg.begin(), d_seg.end(), h_seg.begin());
if (ELAPSED_TIME == 1) {
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
else
print(h_vec);
return 0;
}
| f2ceb56cf376ccac7ddb06f60f57fa34b9243120.cu | /*
============================================================================
Name : sorting_segments.cu
Author : Rafael Schmid
Version :
Copyright : Your copyright notice
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <chrono>
#include <iostream>
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
void print(thrust::host_vector<int> h_vec) {
std::cout << h_vec.size() << "\n";
for (int i = 0; i < h_vec.size(); i++) {
std::cout << h_vec[i] << " ";
}
std::cout << "\n";
}
int main(void) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
thrust::host_vector<int> h_seg_aux(num_of_segments+1);
for (i = 0; i < num_of_segments+1; i++)
scanf("%d", &h_seg_aux[i]);
scanf("%d", &num_of_elements);
thrust::host_vector<int> h_vec(num_of_elements);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
thrust::host_vector<int> h_seg(num_of_elements);
for (i = 0; i < num_of_segments; ++i){
for(int j = h_seg_aux[i]; j < h_seg_aux[i+1]; ++j) {
h_seg[j] = h_seg_aux[i];
}
}
//print(h_seg); print(h_vec);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
thrust::device_vector<int> d_vec = h_vec;
thrust::device_vector<int> d_seg = h_seg;
cudaEventRecord(start);
thrust::sort_by_key(d_vec.begin(), d_vec.end(), d_seg.begin());
thrust::sort_by_key(d_seg.begin(), d_seg.end(), d_vec.begin());
cudaEventRecord(stop);
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
thrust::copy(d_seg.begin(), d_seg.end(), h_seg.begin());
if (ELAPSED_TIME == 1) {
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n";
}
else
print(h_vec);
return 0;
}
|
9d7a0a59c4be6efd24f85088e98545ecb17a48e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_data.cuh"
#include <algorithm>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// Allocates every per-chunk GPU buffer used by the fit, sized from the
// caller-supplied Info. Members constructed with size 0 are intentionally
// empty: when the caller's data already lives on the DEVICE (or a feature
// is unused) those members are later re-pointed at the caller's memory via
// assign() in init().
GPUData::GPUData(Info const & info) :
chunk_size_(0),
info_(info),
// Staging copies of caller data; only needed when inputs live on the HOST.
data_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_*info_.n_points_ : 0),
weights_(
(info_.use_weights_ && info_.data_location_ == HOST)
? info_.n_points_ * info_.max_chunk_size_ : 0 ),
parameters_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_*info_.n_parameters_ : 0 ),
user_info_(
(info_.data_location_ == HOST)
? info_.user_info_size_ : 0),
// Iteration scratch; always device-resident.
prev_parameters_( info_.max_chunk_size_*info_.n_parameters_ ),
parameters_to_fit_indices_( info_.n_parameters_to_fit_ ),
chi_squares_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_ : 0),
prev_chi_squares_( info_.max_chunk_size_ ),
gradients_( info_.max_chunk_size_ * info_.n_parameters_to_fit_),
hessians_( info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_ ),
deltas_(info_.max_chunk_size_ * info_.n_parameters_to_fit_),
scaling_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_),
// Per-block partial sums; only needed when one fit spans several blocks.
subtotals_(
(info_.n_blocks_per_fit_ > 1)
? info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_blocks_per_fit_ : 0),
values_( info_.max_chunk_size_ * info_.n_points_ ),
derivatives_( info_.max_chunk_size_ * info_.n_points_ * info_.n_parameters_ ),
lambdas_( info_.max_chunk_size_ ),
states_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_ : 0),
finished_( info_.max_chunk_size_ ),
iteration_failed_(info_.max_chunk_size_),
all_finished_( 1 ),
n_iterations_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_ : 0),
solution_info_(info_.max_chunk_size_)
#ifdef USE_CUBLAS
,
// Scratch for the batched cuBLAS solver path.
decomposed_hessians_(info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_),
pointer_decomposed_hessians_(info_.max_chunk_size_),
pointer_deltas_(info_.max_chunk_size_),
pivot_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_)
#endif // USE_CUBLAS
{
#ifdef USE_CUBLAS
// NOTE(review): the hipblasCreate status is ignored; a failed handle would
// only surface at the first solver call.
hipblasCreate(&cublas_handle_);
point_to_data_sets();
#endif // USE_CUBLAS
}
// Releases the cuBLAS handle; device buffers are freed by their own member
// destructors.
GPUData::~GPUData()
{
#ifdef USE_CUBLAS
hipblasDestroy(cublas_handle_);
#endif // USE_CUBLAS
}
// Loads one chunk of fits into GPU memory and resets the per-fit iteration
// state (previous chi-squares, finished flags, fit states, scaling vectors,
// and the initial LM damping factor lambda = 0.001).
// chunk_size/chunk_index locate the chunk inside the caller's arrays.
// HOST data is copied into the staging buffers; DEVICE data is merely
// re-pointed at the caller's memory via assign() (no copy).
void GPUData::init
(
int const chunk_size,
int const chunk_index,
float const * const data,
float const * const weights,
float const * const initial_parameters,
std::vector<int> const & parameters_to_fit_indices,
int * states,
float * chi_squares,
int * n_iterations)
{
chunk_size_ = chunk_size;
chunk_index_ = chunk_index;
if (info_.data_location_ == HOST)
{
// Copy this chunk's slice of each host array to the device.
write(
data_,
data + chunk_index_*info_.max_chunk_size_*info_.n_points_,
chunk_size_ * info_.n_points_);
write(
parameters_,
initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_,
chunk_size_ * info_.n_parameters_);
if (info_.use_weights_)
write(
weights_,
weights + chunk_index_*info_.max_chunk_size_*info_.n_points_,
chunk_size_ * info_.n_points_);
}
else if (info_.data_location_ == DEVICE)
{
// Caller's buffers are already on the GPU: point at the chunk in place.
data_.assign(
data + chunk_index_*info_.max_chunk_size_*info_.n_points_);
parameters_.assign(
initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_);
if (info_.use_weights_)
weights_.assign(
weights + chunk_index_*info_.max_chunk_size_*info_.n_points_);
states_.assign(
states + chunk_index_ * info_.max_chunk_size_);
chi_squares_.assign(
chi_squares + chunk_index_ * info_.max_chunk_size_);
n_iterations_.assign(
n_iterations + chunk_index_ * info_.max_chunk_size_);
}
write(parameters_to_fit_indices_, parameters_to_fit_indices);
// Reset the per-fit iteration state for this chunk.
set(prev_chi_squares_, 0.f, chunk_size_);
set(finished_, 0, chunk_size_);
set(scaling_vectors_, 0.f, chunk_size_ * info_.n_parameters_to_fit_);
set(states_, 0, chunk_size_);
set(lambdas_, 0.001f, chunk_size_);
}
// Copies the optional opaque user_info blob to the GPU (HOST data) or
// re-points the member at the caller's device buffer (DEVICE data).
// No-op when user_info_size_ is not positive.
void GPUData::init_user_info(char const * const user_info)
{
    if (!(info_.user_info_size_ > 0))
        return;
    if (info_.data_location_ == HOST)
        write(user_info_, user_info, info_.user_info_size_);
    else if (info_.data_location_ == DEVICE)
        user_info_.assign(user_info);
}
// Fetches one device-side int flag and reports it as a bool
// (true exactly when the stored value is 1).
void GPUData::read(bool * dst, int const * src)
{
    int flag = 0;
    CUDA_CHECK_STATUS(hipMemcpy(&flag, src, sizeof(int), hipMemcpyDeviceToHost));
    *dst = (flag == 1);
}
// Copies `count` floats from host memory `src` into device buffer `dst`.
void GPUData::write(float* dst, float const * src, int const count)
{
CUDA_CHECK_STATUS(hipMemcpy(dst, src, count * sizeof(float), hipMemcpyHostToDevice));
}
// Copies the whole host vector `src` into device buffer `dst`.
void GPUData::write(int* dst, std::vector<int> const & src)
{
std::size_t const size = src.size() * sizeof(int);
CUDA_CHECK_STATUS(hipMemcpy(dst, src.data(), size, hipMemcpyHostToDevice));
}
// Copies `count` bytes of opaque user data from host `src` to device `dst`.
void GPUData::write(char* dst, char const * src, std::size_t const count)
{
CUDA_CHECK_STATUS(hipMemcpy(dst, src, count * sizeof(char), hipMemcpyHostToDevice));
}
// Device-to-device copy of `count` floats.
void GPUData::copy(float * dst, float const * src, std::size_t const count)
{
CUDA_CHECK_STATUS(hipMemcpy(dst, src, count * sizeof(float), hipMemcpyDeviceToDevice));
}
// Fills dst[0..count) with `value`; one thread per element, surplus
// threads in the last block simply do nothing.
__global__ void set_kernel(int* dst, int const value, int const count)
{
    int const i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
        dst[i] = value;
}
// Fills the first `count` ints of device array `arr` with `value`.
// Grid size now uses a ceiling division; the previous (count / tx) + 1
// always launched one extra, entirely idle block whenever count was an
// exact multiple of the block size.
void GPUData::set(int* arr, int const value, int const count)
{
    if (count <= 0)
        return; // nothing to fill; also avoids an invalid zero-block launch
    int const tx = 256;
    int const bx = (count + tx - 1) / tx;
    dim3 threads(tx, 1, 1);
    dim3 blocks(bx, 1, 1);
    hipLaunchKernelGGL(( set_kernel), dim3(blocks), dim3(threads) , 0, 0, arr, value, count);
    CUDA_CHECK_STATUS(hipGetLastError());
}
// Writes `value` into the single int pointed to by `arr`
// (one-block, one-thread launch).
void GPUData::set(int* arr, int const value)
{
    dim3 threads(1, 1, 1);
    dim3 blocks(1, 1, 1);
    hipLaunchKernelGGL(( set_kernel), dim3(blocks), dim3(threads) , 0, 0, arr, value, 1);
    CUDA_CHECK_STATUS(hipGetLastError());
}
// Fills dst[0..count) with `value`; one thread per element, surplus
// threads in the last block simply do nothing.
__global__ void set_kernel(float* dst, float const value, std::size_t const count)
{
    std::size_t const i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
        dst[i] = value;
}
// Fills the first `count` floats of device array `arr` with `value`.
// Grid size now uses a ceiling division; the previous (count / tx) + 1
// always launched one extra, entirely idle block whenever count was an
// exact multiple of the block size.
void GPUData::set(float* arr, float const value, int const count)
{
    if (count <= 0)
        return; // nothing to fill; also avoids an invalid zero-block launch
    int const tx = 256;
    int const bx = (count + tx - 1) / tx;
    dim3 threads(tx, 1, 1);
    dim3 blocks(bx, 1, 1);
    hipLaunchKernelGGL(( set_kernel), dim3(blocks), dim3(threads) , 0, 0, arr, value, count);
    CUDA_CHECK_STATUS(hipGetLastError());
}
// Builds an array of per-fit base pointers: pointer_to_pointers[i] points
// at the i-th stride of `size` elements inside the contiguous buffer
// `pointer`. Used to feed cuBLAS batched solvers.
__global__ void cuda_point_to_data_sets(
    float ** pointer_to_pointers,
    float * pointer,
    std::size_t const n_pointers,
    std::size_t const size)
{
    std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n_pointers)
        return;
    // Keep the offset in size_t: the previous `int begin = index * size`
    // truncated the size_t product and could overflow for large
    // chunk / parameter counts.
    std::size_t const begin = index * size;
    pointer_to_pointers[index] = pointer + begin;
}
#ifdef USE_CUBLAS
// Initializes the batched-solver pointer arrays: one base pointer per fit
// into the decomposed-Hessian and delta buffers. Launched once from the
// constructor (cuBLAS path only).
void GPUData::point_to_data_sets()
{
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
std::size_t max_threads = 256;
// NOTE(review): ::min requires max_chunk_size_ and max_threads to be the
// same type — presumably std::size_t; confirm against Info's declaration.
threads.x
= static_cast<unsigned int>
(::min(info_.max_chunk_size_, max_threads));
blocks.x
= static_cast<unsigned int>
(::ceil(float(info_.max_chunk_size_) / float(threads.x)));
// One pointer per fit, stride = n_parameters_to_fit^2 (Hessian matrices).
hipLaunchKernelGGL(( cuda_point_to_data_sets) , dim3(blocks), dim3(threads) , 0, 0,
pointer_decomposed_hessians_,
decomposed_hessians_,
info_.max_chunk_size_,
info_.n_parameters_to_fit_*info_.n_parameters_to_fit_);
// One pointer per fit, stride = n_parameters_to_fit (delta vectors).
hipLaunchKernelGGL(( cuda_point_to_data_sets) , dim3(blocks), dim3(threads) , 0, 0,
pointer_deltas_,
deltas_,
info_.max_chunk_size_,
info_.n_parameters_to_fit_);
}
#endif // USE_CUBLAS | 9d7a0a59c4be6efd24f85088e98545ecb17a48e3.cu | #include "gpu_data.cuh"
#include <algorithm>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Allocates every per-chunk GPU buffer used by the fit, sized from the
// caller-supplied Info. Members constructed with size 0 are intentionally
// empty: when the caller's data already lives on the DEVICE (or a feature
// is unused) those members are later re-pointed at the caller's memory via
// assign() in init().
GPUData::GPUData(Info const & info) :
chunk_size_(0),
info_(info),
// Staging copies of caller data; only needed when inputs live on the HOST.
data_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_*info_.n_points_ : 0),
weights_(
(info_.use_weights_ && info_.data_location_ == HOST)
? info_.n_points_ * info_.max_chunk_size_ : 0 ),
parameters_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_*info_.n_parameters_ : 0 ),
user_info_(
(info_.data_location_ == HOST)
? info_.user_info_size_ : 0),
// Iteration scratch; always device-resident.
prev_parameters_( info_.max_chunk_size_*info_.n_parameters_ ),
parameters_to_fit_indices_( info_.n_parameters_to_fit_ ),
chi_squares_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_ : 0),
prev_chi_squares_( info_.max_chunk_size_ ),
gradients_( info_.max_chunk_size_ * info_.n_parameters_to_fit_),
hessians_( info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_ ),
deltas_(info_.max_chunk_size_ * info_.n_parameters_to_fit_),
scaling_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_),
// Per-block partial sums; only needed when one fit spans several blocks.
subtotals_(
(info_.n_blocks_per_fit_ > 1)
? info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_blocks_per_fit_ : 0),
values_( info_.max_chunk_size_ * info_.n_points_ ),
derivatives_( info_.max_chunk_size_ * info_.n_points_ * info_.n_parameters_ ),
lambdas_( info_.max_chunk_size_ ),
states_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_ : 0),
finished_( info_.max_chunk_size_ ),
iteration_failed_(info_.max_chunk_size_),
all_finished_( 1 ),
n_iterations_(
(info_.data_location_ == HOST)
? info_.max_chunk_size_ : 0),
solution_info_(info_.max_chunk_size_)
#ifdef USE_CUBLAS
,
// Scratch for the batched cuBLAS solver path.
decomposed_hessians_(info_.max_chunk_size_ * info_.n_parameters_to_fit_ * info_.n_parameters_to_fit_),
pointer_decomposed_hessians_(info_.max_chunk_size_),
pointer_deltas_(info_.max_chunk_size_),
pivot_vectors_(info_.max_chunk_size_ * info_.n_parameters_to_fit_)
#endif // USE_CUBLAS
{
#ifdef USE_CUBLAS
// NOTE(review): the cublasCreate status is ignored; a failed handle would
// only surface at the first solver call.
cublasCreate(&cublas_handle_);
point_to_data_sets();
#endif // USE_CUBLAS
}
// Releases the cuBLAS handle; device buffers are freed by their own member
// destructors.
GPUData::~GPUData()
{
#ifdef USE_CUBLAS
cublasDestroy(cublas_handle_);
#endif // USE_CUBLAS
}
// Loads one chunk of fits into GPU memory and resets the per-fit iteration
// state (previous chi-squares, finished flags, fit states, scaling vectors,
// and the initial LM damping factor lambda = 0.001).
// chunk_size/chunk_index locate the chunk inside the caller's arrays.
// HOST data is copied into the staging buffers; DEVICE data is merely
// re-pointed at the caller's memory via assign() (no copy).
void GPUData::init
(
int const chunk_size,
int const chunk_index,
float const * const data,
float const * const weights,
float const * const initial_parameters,
std::vector<int> const & parameters_to_fit_indices,
int * states,
float * chi_squares,
int * n_iterations)
{
chunk_size_ = chunk_size;
chunk_index_ = chunk_index;
if (info_.data_location_ == HOST)
{
// Copy this chunk's slice of each host array to the device.
write(
data_,
data + chunk_index_*info_.max_chunk_size_*info_.n_points_,
chunk_size_ * info_.n_points_);
write(
parameters_,
initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_,
chunk_size_ * info_.n_parameters_);
if (info_.use_weights_)
write(
weights_,
weights + chunk_index_*info_.max_chunk_size_*info_.n_points_,
chunk_size_ * info_.n_points_);
}
else if (info_.data_location_ == DEVICE)
{
// Caller's buffers are already on the GPU: point at the chunk in place.
data_.assign(
data + chunk_index_*info_.max_chunk_size_*info_.n_points_);
parameters_.assign(
initial_parameters + chunk_index_*info_.max_chunk_size_*info_.n_parameters_);
if (info_.use_weights_)
weights_.assign(
weights + chunk_index_*info_.max_chunk_size_*info_.n_points_);
states_.assign(
states + chunk_index_ * info_.max_chunk_size_);
chi_squares_.assign(
chi_squares + chunk_index_ * info_.max_chunk_size_);
n_iterations_.assign(
n_iterations + chunk_index_ * info_.max_chunk_size_);
}
write(parameters_to_fit_indices_, parameters_to_fit_indices);
// Reset the per-fit iteration state for this chunk.
set(prev_chi_squares_, 0.f, chunk_size_);
set(finished_, 0, chunk_size_);
set(scaling_vectors_, 0.f, chunk_size_ * info_.n_parameters_to_fit_);
set(states_, 0, chunk_size_);
set(lambdas_, 0.001f, chunk_size_);
}
// Copies the optional opaque user_info blob to the GPU (HOST data) or
// re-points the member at the caller's device buffer (DEVICE data).
// No-op when user_info_size_ is not positive.
void GPUData::init_user_info(char const * const user_info)
{
    if (!(info_.user_info_size_ > 0))
        return;
    if (info_.data_location_ == HOST)
        write(user_info_, user_info, info_.user_info_size_);
    else if (info_.data_location_ == DEVICE)
        user_info_.assign(user_info);
}
// Fetches one device-side int flag and reports it as a bool
// (true exactly when the stored value is 1).
void GPUData::read(bool * dst, int const * src)
{
    int flag = 0;
    CUDA_CHECK_STATUS(cudaMemcpy(&flag, src, sizeof(int), cudaMemcpyDeviceToHost));
    *dst = (flag == 1);
}
// Copies `count` floats from host memory `src` into device buffer `dst`.
void GPUData::write(float* dst, float const * src, int const count)
{
CUDA_CHECK_STATUS(cudaMemcpy(dst, src, count * sizeof(float), cudaMemcpyHostToDevice));
}
// Copies the whole host vector `src` into device buffer `dst`.
void GPUData::write(int* dst, std::vector<int> const & src)
{
std::size_t const size = src.size() * sizeof(int);
CUDA_CHECK_STATUS(cudaMemcpy(dst, src.data(), size, cudaMemcpyHostToDevice));
}
// Copies `count` bytes of opaque user data from host `src` to device `dst`.
void GPUData::write(char* dst, char const * src, std::size_t const count)
{
CUDA_CHECK_STATUS(cudaMemcpy(dst, src, count * sizeof(char), cudaMemcpyHostToDevice));
}
// Device-to-device copy of `count` floats.
void GPUData::copy(float * dst, float const * src, std::size_t const count)
{
CUDA_CHECK_STATUS(cudaMemcpy(dst, src, count * sizeof(float), cudaMemcpyDeviceToDevice));
}
// Fills dst[0..count) with `value`; one thread per element, surplus
// threads in the last block simply do nothing.
__global__ void set_kernel(int* dst, int const value, int const count)
{
    int const i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
        dst[i] = value;
}
// Fills the first `count` ints of device array `arr` with `value`.
// Grid size now uses a ceiling division; the previous (count / tx) + 1
// always launched one extra, entirely idle block whenever count was an
// exact multiple of the block size.
void GPUData::set(int* arr, int const value, int const count)
{
    if (count <= 0)
        return; // nothing to fill; also avoids an invalid zero-block launch
    int const tx = 256;
    int const bx = (count + tx - 1) / tx;
    dim3 threads(tx, 1, 1);
    dim3 blocks(bx, 1, 1);
    set_kernel<<< blocks, threads >>>(arr, value, count);
    CUDA_CHECK_STATUS(cudaGetLastError());
}
// Writes `value` into the single int pointed to by `arr`
// (one-block, one-thread launch).
void GPUData::set(int* arr, int const value)
{
    dim3 threads(1, 1, 1);
    dim3 blocks(1, 1, 1);
    set_kernel<<< blocks, threads >>>(arr, value, 1);
    CUDA_CHECK_STATUS(cudaGetLastError());
}
// Fills dst[0..count) with `value`; one thread per element, surplus
// threads in the last block simply do nothing.
__global__ void set_kernel(float* dst, float const value, std::size_t const count)
{
    std::size_t const i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
        dst[i] = value;
}
// Fills the first `count` floats of device array `arr` with `value`.
// Grid size now uses a ceiling division; the previous (count / tx) + 1
// always launched one extra, entirely idle block whenever count was an
// exact multiple of the block size.
void GPUData::set(float* arr, float const value, int const count)
{
    if (count <= 0)
        return; // nothing to fill; also avoids an invalid zero-block launch
    int const tx = 256;
    int const bx = (count + tx - 1) / tx;
    dim3 threads(tx, 1, 1);
    dim3 blocks(bx, 1, 1);
    set_kernel<<< blocks, threads >>>(arr, value, count);
    CUDA_CHECK_STATUS(cudaGetLastError());
}
// Builds an array of per-fit base pointers: pointer_to_pointers[i] points
// at the i-th stride of `size` elements inside the contiguous buffer
// `pointer`. Used to feed cuBLAS batched solvers.
__global__ void cuda_point_to_data_sets(
    float ** pointer_to_pointers,
    float * pointer,
    std::size_t const n_pointers,
    std::size_t const size)
{
    std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n_pointers)
        return;
    // Keep the offset in size_t: the previous `int begin = index * size`
    // truncated the size_t product and could overflow for large
    // chunk / parameter counts.
    std::size_t const begin = index * size;
    pointer_to_pointers[index] = pointer + begin;
}
#ifdef USE_CUBLAS
// Initializes the batched-solver pointer arrays: one base pointer per fit
// into the decomposed-Hessian and delta buffers. Launched once from the
// constructor (cuBLAS path only).
void GPUData::point_to_data_sets()
{
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
std::size_t max_threads = 256;
// NOTE(review): std::min requires max_chunk_size_ and max_threads to be
// the same type — presumably std::size_t; confirm against Info.
threads.x
= static_cast<unsigned int>
(std::min(info_.max_chunk_size_, max_threads));
blocks.x
= static_cast<unsigned int>
(std::ceil(float(info_.max_chunk_size_) / float(threads.x)));
// One pointer per fit, stride = n_parameters_to_fit^2 (Hessian matrices).
cuda_point_to_data_sets <<< blocks, threads >>>(
pointer_decomposed_hessians_,
decomposed_hessians_,
info_.max_chunk_size_,
info_.n_parameters_to_fit_*info_.n_parameters_to_fit_);
// One pointer per fit, stride = n_parameters_to_fit (delta vectors).
cuda_point_to_data_sets <<< blocks, threads >>> (
pointer_deltas_,
deltas_,
info_.max_chunk_size_,
info_.n_parameters_to_fit_);
}
#endif // USE_CUBLAS |
7906a00e5c3c57ad80cd2c960360fbf8d615a8f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/multi_class_accuracy_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
// For each of the N rows of the N x D score matrix Xdata, finds the argmax
// over the D classes; increments accuracies[label] when the argmax matches
// the row's label, and amounts[label] unconditionally. Atomics are needed
// because many threads may hit the same class concurrently.
__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracies, int* amounts) {
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
// Scan the remaining classes; ties keep the earliest index.
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
int labelid = labeldata[i];
if (maxid == labelid) {
atomicAdd(accuracies + labelid, static_cast<float>(1));
}
atomicAdd(amounts + labelid, static_cast<int>(1));
}
}
// Converts per-class correct-counts into ratios in place:
// accuracies[i] /= amounts[i]. Classes with no samples stay at 0.
__global__ void MultiClassAccuracyDivideKernel(
const int D, float* accuracies, const int* amounts) {
CUDA_1D_KERNEL_LOOP(i, D) {
const int n = amounts[i];
if (n != 0) {
accuracies[i] /= n;
}
}
}
} // namespace
// Computes per-class accuracy on the GPU.
// Input PREDICTION: N x D scores; Input LABEL: N int class labels.
// Output 0: D per-class accuracies; Output 1: D per-class sample counts.
template <>
bool MultiClassAccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
DCHECK_EQ(X.dim(), 2);
// amount, number of instances
int N = X.dim32(0);
// dimension, number of classes
int D = X.dim32(1);
DCHECK_EQ(label.dim(), 1);
DCHECK_EQ(label.dim32(0), N);
auto* Y0 = Output(0, {D}, at::dtype<float>());
auto* Y1 = Output(1, {D}, at::dtype<int>());
const float* Xdata = X.data<float>();
const int* labeldata = label.data<int>();
float* accuracies = Y0->template mutable_data<float>();
int* amounts = Y1->template mutable_data<int>();
// Zero the accumulators, then count correct predictions / totals per class.
math::Set<float, CUDAContext>(D, 0.0, accuracies, &context_);
math::Set<int, CUDAContext>(D, 0, amounts, &context_);
hipLaunchKernelGGL(( MultiClassAccuracyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, D, Xdata, labeldata, accuracies, amounts);
C10_HIP_KERNEL_LAUNCH_CHECK();
// Turn correct-counts into ratios (classes with no samples remain 0).
hipLaunchKernelGGL(( MultiClassAccuracyDivideKernel), dim3(CAFFE_GET_BLOCKS(D)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
D, accuracies, amounts);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
MultiClassAccuracy, MultiClassAccuracyOp<float, CUDAContext>);
} // namespace caffe2
| 7906a00e5c3c57ad80cd2c960360fbf8d615a8f7.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/multi_class_accuracy_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
// For each of the N rows of the N x D score matrix Xdata, finds the argmax
// over the D classes; increments accuracies[label] when the argmax matches
// the row's label, and amounts[label] unconditionally. Atomics are needed
// because many threads may hit the same class concurrently.
__global__ void MultiClassAccuracyKernel(const int N, const int D, const float* Xdata,
const int* labeldata, float* accuracies, int* amounts) {
CUDA_1D_KERNEL_LOOP(i, N) {
float maxval = Xdata[i * D];
int maxid = 0;
// Scan the remaining classes; ties keep the earliest index.
for (int j = 1; j < D; ++j) {
if (Xdata[i * D + j] > maxval) {
maxval = Xdata[i * D + j];
maxid = j;
}
}
int labelid = labeldata[i];
if (maxid == labelid) {
atomicAdd(accuracies + labelid, static_cast<float>(1));
}
atomicAdd(amounts + labelid, static_cast<int>(1));
}
}
// Converts per-class correct-counts into ratios in place:
// accuracies[i] /= amounts[i]. Classes with no samples stay at 0.
__global__ void MultiClassAccuracyDivideKernel(
const int D, float* accuracies, const int* amounts) {
CUDA_1D_KERNEL_LOOP(i, D) {
const int n = amounts[i];
if (n != 0) {
accuracies[i] /= n;
}
}
}
// Computes per-class accuracy on the GPU.
// Input PREDICTION: N x D scores; Input LABEL: N int class labels.
// Output 0: D per-class accuracies; Output 1: D per-class sample counts.
template <>
bool MultiClassAccuracyOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(PREDICTION);
auto& label = Input(LABEL);
DCHECK_EQ(X.dim(), 2);
// amount, number of instances
int N = X.dim32(0);
// dimension, number of classes
int D = X.dim32(1);
DCHECK_EQ(label.dim(), 1);
DCHECK_EQ(label.dim32(0), N);
auto* Y0 = Output(0, {D}, at::dtype<float>());
auto* Y1 = Output(1, {D}, at::dtype<int>());
const float* Xdata = X.data<float>();
const int* labeldata = label.data<int>();
float* accuracies = Y0->template mutable_data<float>();
int* amounts = Y1->template mutable_data<int>();
// Zero the accumulators, then count correct predictions / totals per class.
math::Set<float, CUDAContext>(D, 0.0, accuracies, &context_);
math::Set<int, CUDAContext>(D, 0, amounts, &context_);
MultiClassAccuracyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, D, Xdata, labeldata, accuracies, amounts);
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Turn correct-counts into ratios (classes with no samples remain 0).
MultiClassAccuracyDivideKernel<<<CAFFE_GET_BLOCKS(D), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
D, accuracies, amounts);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(
MultiClassAccuracy, MultiClassAccuracyOp<float, CUDAContext>);
} // namespace caffe2
|
d3eb6d2e6146b89530585e1127d98d04631c83b0.hip | // !!! This is a file automatically generated by hipify!!!
/*
deconv.cu
Author: Bob Pepin - (originally obtained from https://github.com/bobpepin/YacuDecu)
Author: Brian Northan - very minor changes to dimension order of FFT plan in deconv_device function in order for this function to work on arrays from imglib2.
License: LGPL
*/
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hipfft.h>
#include <rocblas.h>
#include <hip/hip_complex.h>
#include "deconv.h"
// Element-wise complex product C[i] = A[i] * B[i].
// The flat index is derived from the full 3D grid with no bounds check;
// callers in this file pad every allocation up to a whole number of blocks
// so the kernel never writes out of bounds.
__global__ void ComplexMul(hipComplex *A, hipComplex *B, hipComplex *C)
{
unsigned int i = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
C[i] = cuCmulf(A[i], B[i]);
}
// BN 2018: complex multiply with conjugation of the SECOND operand:
// returns x * conj(y) = (Re x Re y + Im x Im y) + i (Im x Re y - Re x Im y).
// This is the correlation form used by the Richardson-Lucy update
// (multiplying a spectrum by the conjugated OTF).
__host__ __device__ static __inline__ cuFloatComplex cuCconjmulf(cuFloatComplex x,
cuFloatComplex y)
{
cuFloatComplex prod;
prod = make_cuFloatComplex((cuCrealf(x) * cuCrealf(y)) +
(cuCimagf(x) * cuCimagf(y)),
-(cuCrealf(x) * cuCimagf(y)) +
(cuCimagf(x) * cuCrealf(y)));
return prod;
}
// BN 2018: element-wise C[i] = A[i] * conj(B[i]).
// No bounds check: relies on block-padded allocations, like ComplexMul.
__global__ void ComplexConjugateMul(hipComplex *A, hipComplex *B, hipComplex *C)
{
unsigned int i = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
C[i] = cuCconjmulf(A[i], B[i]);
}
// Element-wise safe division: C[i] = A[i] / B[i], or 0 where B[i] == 0.
// No bounds check; callers pad buffers to a whole number of blocks.
__global__ void FloatDiv(float *A, float *B, float *C)
{
    unsigned int i = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
    const float denom = B[i];
    C[i] = (denom != 0) ? (A[i] / denom) : 0;
}
// Element-wise product: C[i] = A[i] * B[i].
// No bounds check; callers pad buffers to a whole number of blocks.
__global__ void FloatMul(float *A, float *B, float *C)
{
    const unsigned int idx = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
    C[idx] = A[idx] * B[idx];
}
// In-place scaling: A[i] /= constant. Used to normalize the inverse FFT,
// which cuFFT/hipFFT leaves unnormalized.
__global__ void FloatDivByConstant(float *A, float constant)
{
    const unsigned int idx = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x;
    A[idx] /= constant;
}
static hipfftResult createPlans(size_t, size_t, size_t, hipfftHandle *planR2C, hipfftHandle *planC2R, void **workArea, size_t *workSize);
static hipError_t numBlocksThreads(unsigned int N, dim3 *numBlocks, dim3 *threadsPerBlock);
// Arithmetic mean of the first N floats in a[]; caller must pass N > 0.
static float floatMean(float *a, int N) {
    float sum = 0;
    for (int i = 0; i < N; ++i) {
        sum += a[i];
    }
    return sum / (float)N;
}
static float devFloatMean(float *a_dev, int N) {
float *a = (float*)malloc(N*sizeof(float));
hipMemcpy(a, a_dev, N*sizeof(float), hipMemcpyDeviceToHost);
float m = floatMean(a, N);
free(a);
return m;
}
/*
Richardson-Lucy deconvolution, device-memory variant.

Runs `iter` RL iterations on an N1 x N2 x N3 volume:
  h_image  - observed (convolved) image, read-only input
  h_psf    - point spread function, same element count as the image
  h_object - initial estimate on input; deconvolved result on output
  h_normal - optional normalization volume (may be NULL); when present the
             estimate is divided by it after every iteration
Returns 0 on success, otherwise the CUDA/cuFFT error code.

Fixes relative to the previous revision:
  - the "input size" printf had its arguments inside the format string,
    leaving three %d conversions with no arguments (undefined behavior);
  - fflush(stdin) (undefined behavior) replaced by fflush(stdout);
  - size_t values are printed with %zu instead of %ld;
  - cleanup now frees the psf and normal device buffers.
*/
int deconv_device(unsigned int iter, size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_object, float *h_normal) {
    int retval = 0;
    hipfftResult r;
    hipError_t err;
    hipfftHandle planR2C, planC2R;

    std::cout<<"Arrived in Cuda deconvolution\n";
    printf("input size: %zu %zu %zu\n", N1, N2, N3);

    float *image = 0;    // convolved image (constant)
    float *object = 0;   // estimated object
    float *psf = 0;      // PSF; reused as scratch once the OTF is built
    float *temp = 0;     // alias of psf after the forward FFT of the PSF
    float *normal = 0;   // optional normalization volume
    hipComplex *otf = 0; // Fourier transform of PSF (constant)
    void *buf = 0;       // intermediate frequency-domain results
    void *workArea = 0;  // cuFFT work area

    size_t nSpatial = N1*N2*N3;     // number of values in spatial domain
    size_t nFreq = N1*N2*(N3/2+1);  // number of values in frequency domain (R2C layout)
    size_t mSpatial, mFreq;         // allocation sizes, padded to whole blocks
    dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
    size_t workSize; // size of cuFFT work area in bytes

    err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
    if(err) goto cudaErr;
    err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
    if(err) goto cudaErr;

    // Pad allocations to a whole number of blocks so the element-wise
    // kernels (which carry no bounds check) never write out of bounds.
    mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
    mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(hipComplex);
    printf("N: %zu, M: %zu\n", nSpatial, mSpatial);
    printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
    fflush(stdout);
    std::cout<<"N: "<<nSpatial<<" M: "<<mSpatial<<"\n"<<std::flush;
    std::cout<<"Blocks: "<<spatialBlocks.x<<" x "<<spatialBlocks.y<<" x "<<spatialBlocks.z<<", Threads: "<<spatialThreadsPerBlock.x<<" x "<<spatialThreadsPerBlock.y<<" x "<<spatialThreadsPerBlock.z<<"\n";

    hipDeviceReset();
    hipProfilerStart();
    err = hipMalloc(&image, mSpatial);
    if(err) goto cudaErr;
    err = hipMalloc(&object, mSpatial);
    if(err) goto cudaErr;
    err = hipMalloc(&psf, mSpatial);
    if(err) goto cudaErr;
    if (h_normal!=NULL) {
        err = hipMalloc(&normal, mSpatial);
        if (err) goto cudaErr;
    }
    else {
        normal = NULL;
    }
    err = hipMalloc(&otf, mFreq);
    if(err) goto cudaErr;
    err = hipMalloc(&buf, mFreq); // mFreq > mSpatial
    if(err) goto cudaErr;
    err = hipMemset(image, 0, mSpatial);
    if(err) goto cudaErr;
    err = hipMemset(object, 0, mSpatial);
    if(err) goto cudaErr;
    if (h_normal != NULL) {
        err = hipMemset(normal, 0, mSpatial);
        if (err) goto cudaErr;
    }
    printf("Memory allocated.\n");

    err = hipMemcpy(image, h_image, nSpatial*sizeof(float), hipMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Image transferred.\n");
    err = hipMemcpy(object, h_object, nSpatial*sizeof(float), hipMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Object transferred.\n");
    err = hipMemcpy(psf, h_psf, nSpatial*sizeof(float), hipMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("PSF transferred.\n");
    if (h_normal != NULL) {
        err = hipMemcpy(normal, h_normal, nSpatial * sizeof(float), hipMemcpyHostToDevice);
        if (err) goto cudaErr;
        printf("Normal transferred.\n");
    }

    // BN: dimension order of the FFT plan was changed so this works on
    // arrays coming from imglib2 (Java) rather than matlab ordering.
    r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
    if(r) goto cufftError;
    printf("Plans created.\n");
    r = hipfftExecR2C(planR2C, psf, otf);
    if(r) goto cufftError;
    // The PSF buffer is free once the OTF is built; reuse it as scratch.
    temp = psf;

    for(unsigned int i=0; i < iter; i++) {
        printf("Iteration %d!!!\n", i);
        fflush(stdout);
        std::cout<<"Iteration "<<i<<"\n"<<std::flush;
        // temp = object convolved with psf (multiplication in frequency space)
        r = hipfftExecR2C(planR2C, object, (hipfftComplex*)buf);
        if(r) goto cufftError;
        hipLaunchKernelGGL(( ComplexMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, 0, (hipComplex*)buf, otf, (hipComplex*)buf);
        r = hipfftExecC2R(planC2R, (hipfftComplex*)buf, (float*)temp);
        if(r) goto cufftError;
        // The inverse transform is unnormalized: divide by element count.
        hipLaunchKernelGGL(( FloatDivByConstant), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, (float*)temp,(float)nSpatial);
        // temp = image / temp (Richardson-Lucy ratio)
        hipLaunchKernelGGL(( FloatDiv), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, image, (float*)temp, (float*)temp);
        // Correlate the ratio with the PSF: multiply by conj(OTF).
        r = hipfftExecR2C(planR2C, (float*)temp, (hipfftComplex*)buf);
        if(r) goto cufftError;
        hipLaunchKernelGGL(( ComplexConjugateMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, 0, (hipComplex*)buf, otf, (hipComplex*)buf);
        r = hipfftExecC2R(planC2R, (hipfftComplex*)buf, (float*)temp);
        if(r) goto cufftError;
        hipLaunchKernelGGL(( FloatDivByConstant), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, (float*)temp,(float)nSpatial);
        // Multiplicative RL update: object *= temp.
        hipLaunchKernelGGL(( FloatMul), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, (float*)temp, object, object);
        if (normal != NULL) {
            std::cout << "Divide by normal " << i << "\n" << std::flush;
            hipLaunchKernelGGL(( FloatDiv), dim3(spatialBlocks), dim3(spatialThreadsPerBlock) , 0, 0, (float*)object, normal, object);
        }
    }

    err = hipMemcpy(h_object, object, nSpatial*sizeof(float), hipMemcpyDeviceToHost);
    if(err) goto cudaErr;
    retval = 0;
    goto cleanup;
cudaErr:
    fprintf(stderr, "CUDA error: %d\n", err);
    std::cout << "CUDA error: " << err << std::endl;
    retval = err;
    goto cleanup;
cufftError:
    fprintf(stderr, "CuFFT error IS: %d\n", r);
    std::cout << "CuFFT error is: " << r << std::endl;
    retval = r;
    goto cleanup;
cleanup:
    if(image) hipFree(image);
    if(object) hipFree(object);
    if(psf) hipFree(psf);       // also covers temp, which aliases psf
    if(normal) hipFree(normal); // previously leaked along with psf
    if(otf) hipFree(otf);
    if(buf) hipFree(buf);
    if(workArea) hipFree(workArea);
    hipProfilerStop();
    hipDeviceReset();
    return retval;
}
// Richardson-Lucy deconvolution using zero-copy (mapped, pinned) host
// memory instead of device allocations: the GPU accesses host RAM directly
// through device pointers from hipHostGetDevicePointer. Same inputs and
// outputs as deconv_device; returns 0 on success or an error code.
//
// NOTE(review): unlike deconv_device, no FloatDivByConstant rescaling is
// applied after the inverse FFTs, and the correlation step uses ComplexMul
// rather than ComplexConjugateMul — confirm this variant is intentionally
// different / still maintained.
extern "C" int deconv_host(unsigned int iter, size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_object, float *h_normal) {
int retval = 0;
hipfftResult r;
hipError_t err;
hipfftHandle planR2C, planC2R;
float *image = 0; // convolved image (constant)
float *object = 0; // estimated object
hipComplex *otf = 0; // Fourier transform of PSF (constant)
void *buf = 0; // intermediate results
void *workArea = 0; // cuFFT work area
hipComplex *h_otf = 0;
void *h_buf = 0;
float *h_image_pad = 0;
float *h_object_pad = 0;
size_t nSpatial = N1*N2*N3; // number of values in spatial domain
size_t nFreq = N1*N2*(N3/2+1); // number of values in frequency domain
//size_t nFreq = N1*(N2/2+1); // number of values in frequency domain
size_t mSpatial, mFreq;
dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
size_t workSize; // size of cuFFT work area in bytes
err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
if(err) goto cudaErr;
err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
if(err) goto cudaErr;
// Allocation sizes padded to whole blocks (kernels have no bounds check).
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(hipComplex);
printf("N: %ld, M: %ld\n", nSpatial, mSpatial);
printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
hipDeviceReset();
// Mapped host memory must be enabled before any allocation.
err = hipSetDeviceFlags(hipDeviceMapHost);
printf("Set Device Flags: %d\n", err);
hipProfilerStart();
err = hipHostMalloc(&h_otf, mFreq, hipHostMallocMapped | hipHostMallocWriteCombined);
if(err) goto cudaErr;
err = hipHostMalloc(&h_buf, mFreq, hipHostMallocMapped | hipHostMallocWriteCombined);
if(err) goto cudaErr;
printf("Host memory allocated.\n");
// If the padded size exceeds the caller's arrays, stage into fresh padded
// pinned buffers; otherwise pin (register) the caller's memory in place.
if(mSpatial > nSpatial*sizeof(float)) {
err = hipHostMalloc(&h_image_pad, mSpatial, hipHostMallocMapped | hipHostMallocWriteCombined);
if(err) goto cudaErr;
err = hipHostMalloc(&h_object_pad, mSpatial, hipHostMallocMapped | hipHostMallocWriteCombined);
if(err) goto cudaErr;
err = hipHostGetDevicePointer(&image, h_image_pad, 0);
if(err) goto cudaErr;
err = hipHostGetDevicePointer(&object, h_object_pad, 0);
if(err) goto cudaErr;
err = hipMemcpy(image, h_image, nSpatial*sizeof(float), hipMemcpyHostToDevice);
if(err) goto cudaErr;
err = hipMemcpy(object, h_object, nSpatial*sizeof(float), hipMemcpyHostToDevice);
if(err) goto cudaErr;
} else {
// NOTE(review): registers mSpatial bytes of the caller's buffers, which
// only holds nSpatial*sizeof(float) — confirm callers over-allocate.
err = hipHostRegister(h_image, mSpatial, hipHostRegisterMapped);
if(err) goto cudaErr;
err = hipHostRegister(h_object, mSpatial, hipHostRegisterMapped);
if(err) goto cudaErr;
err = hipHostGetDevicePointer(&image, h_image, 0);
if(err) goto cudaErr;
err = hipHostGetDevicePointer(&object, h_object, 0);
if(err) goto cudaErr;
}
err = hipHostGetDevicePointer(&otf, h_otf, 0);
if(err) goto cudaErr;
err = hipHostGetDevicePointer(&buf, h_buf, 0);
if(err) goto cudaErr;
printf("Host pointers registered.\n");
// The PSF is staged into the (larger) otf buffer and transformed in place.
err = hipMemcpy(otf, h_psf, nSpatial*sizeof(float), hipMemcpyHostToDevice);
if(err) goto cudaErr;
printf("PSF transferred.\n");
r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
if(r) goto cufftError;
printf("Plans created.\n");
r = hipfftExecR2C(planR2C, (float*)otf, otf);
if(r) goto cufftError;
for(unsigned int i=0; i < iter; i++) {
printf("Iteration %d\n", i);
// buf = object convolved with psf (frequency-space multiply, in place).
r = hipfftExecR2C(planR2C, object, (hipfftComplex*)buf);
if(r) goto cufftError;
hipLaunchKernelGGL(( ComplexMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, 0, (hipComplex*)buf, otf, (hipComplex*)buf);
r = hipfftExecC2R(planC2R, (hipfftComplex*)buf, (float*)buf);
if(r) goto cufftError;
printf("a: m = %f\n", devFloatMean((float*)buf, nSpatial));
// buf = image / buf (RL ratio), then correlate and apply the update.
hipLaunchKernelGGL(( FloatDiv), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, image, (float*)buf, (float*)buf);
r = hipfftExecR2C(planR2C, (float*)buf, (hipfftComplex*)buf);
if(r) goto cufftError;
hipLaunchKernelGGL(( ComplexMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, 0, (hipComplex*)buf, otf, (hipComplex*)buf);
r = hipfftExecC2R(planC2R, (hipfftComplex*)buf, (float*)buf);
if(r) goto cufftError;
hipLaunchKernelGGL(( FloatMul), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, (float*)buf, object, object);
}
printf("object: m = %f\n", devFloatMean((float*)object, nSpatial));
err = hipMemcpy(h_object, object, nSpatial*sizeof(float), hipMemcpyDeviceToHost);
if(err) goto cudaErr;
retval = 0;
goto cleanup;
cudaErr:
fprintf(stderr, "CUDA error: %d\n", err);
retval = err;
goto cleanup;
cufftError:
fprintf(stderr, "CuFFT error: %d\n", r);
retval = r;
goto cleanup;
cleanup:
printf("h_image: %p, h_object: %p, h_psf: %p, h_buf: %p, h_otf: %p\n", h_image, h_object, h_psf, h_buf, h_otf);
// NOTE(review): h_image_pad/h_object_pad/h_otf/h_buf were allocated with
// hipHostMalloc, not hipHostRegister — calling hipHostUnregister on them
// is a mismatched pairing (unregister applies to hipHostRegister'ed
// memory only); confirm and drop the spurious unregister calls.
if(image) {
if(h_image_pad) {
hipHostUnregister(h_image_pad);
hipHostFree(h_image_pad);
} else {
hipHostUnregister(h_image);
}
}
if(object) {
if(h_object_pad) {
hipHostUnregister(h_object_pad);
hipHostFree(h_object_pad);
} else {
hipHostUnregister(h_object);
}
}
if(otf) {
hipHostUnregister(h_otf);
hipHostFree(h_otf);
}
if(buf) {
hipHostUnregister(h_buf);
hipHostFree(h_buf);
}
if(workArea) hipFree(workArea);
hipProfilerStop();
hipDeviceReset();
return retval;
}
/*
 * deconv_stream: Richardson-Lucy deconvolution using two HIP streams:
 * fftStream carries the FFT / kernel work, memStream prefetches the next
 * operand (OTF, image or object) from pinned host memory.  The device
 * holds only two buffers ("result" and "buf"); the constant inputs are
 * re-uploaded each time they are needed.
 *
 * Note: h_normal is accepted for signature parity with deconv_device but
 * is not used by this variant.
 */
int deconv_stream(unsigned int iter, size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_object, float *h_normal) {
int retval = 0;
hipfftResult r;
hipError_t err;
hipfftHandle planR2C, planC2R;
hipStream_t fftStream = 0, memStream = 0;
void *result = 0; // estimated object
void *buf = 0; // intermediate results
void *workArea = 0; // cuFFT work area
hipComplex *h_otf = 0;
size_t nSpatial = N1*N2*N3; // number of values in spatial domain
size_t nFreq = N1*N2*(N3/2+1); // number of values in frequency domain
//size_t nFreq = N1*(N2/2+1); // number of values in frequency domain
size_t mSpatial, mFreq;
size_t workSize; // size of cuFFT work area in bytes
dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
// Launch geometry; m* are the padded buffer sizes covering every thread.
err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
if(err) goto cudaErr;
err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
if(err) goto cudaErr;
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(hipComplex);
printf("N: %ld, M: %ld\n", nSpatial, mSpatial);
printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
hipDeviceReset();
hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
err = hipStreamCreate(&fftStream);
if(err) goto cudaErr;
err = hipStreamCreate(&memStream);
if(err) goto cudaErr;
#if 0
err = hipEventCreateWithFlags(&memSync, hipEventDisableTiming);
if(err) goto cudaErr;
#endif
hipProfilerStart();
err = hipMalloc(&result, mFreq);
if(err) goto cudaErr;
err = hipMalloc(&buf, mFreq); // mFreq > mSpatial
if(err) goto cudaErr;
// NOTE(review): malloc result is not checked; a NULL h_otf would make the
// following hipHostRegister fail, so the failure is only caught indirectly.
h_otf = (hipComplex*)malloc(nFreq*sizeof(hipComplex));
printf("Memory allocated.\n");
// Pin the host buffers so the async copies below can run truly async.
err = hipHostRegister(h_image, nSpatial*sizeof(float), 0);
if(err) goto cudaErr;
err = hipHostRegister(h_object, nSpatial*sizeof(float), 0);
if(err) goto cudaErr;
err = hipHostRegister(h_otf, nFreq*sizeof(hipComplex), 0);
if(err) goto cudaErr;
r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
if(r) goto cufftError;
r = hipfftSetStream(planR2C, fftStream);
if(r) goto cufftError;
r = hipfftSetStream(planC2R, fftStream);
if(r) goto cufftError;
printf("Plans created.\n");
// Compute the OTF once on the device and stage it on the host; it is
// re-uploaded on demand inside the iteration loop.
err = hipMemcpyAsync(buf, h_psf, nSpatial*sizeof(float), hipMemcpyHostToDevice, fftStream);
if(err) goto cudaErr;
r = hipfftExecR2C(planR2C, (float*)buf, (hipComplex*)buf);
if(r) goto cufftError;
// NOTE(review): this err result is overwritten by the next call before it
// is checked — a failed copy here would go unnoticed.
err = hipMemcpyAsync(h_otf, buf, nFreq*sizeof(hipComplex), hipMemcpyDeviceToHost, fftStream);
err = hipStreamSynchronize(fftStream);
if(err) goto cudaErr;
printf("OTF generated.\n");
err = hipMemcpyAsync(result, h_object, nSpatial*sizeof(float), hipMemcpyHostToDevice, fftStream);
if(err) goto cudaErr;
for(unsigned int i=0; i < iter; i++) {
printf("Iteration %d\n", i);
// Prefetch OTF on memStream while the FFT of the estimate runs in place.
err = hipMemcpyAsync(buf, h_otf, nFreq*sizeof(hipComplex), hipMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = hipfftExecR2C(planR2C, (float*)result, (hipComplex*)result);
if(r) goto cufftError;
hipDeviceSynchronize();
hipLaunchKernelGGL(( ComplexMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, fftStream, (hipComplex*)result, (hipComplex*)buf, (hipComplex*)result);
hipDeviceSynchronize();
err = hipMemcpyAsync(buf, h_image, nSpatial*sizeof(float), hipMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = hipfftExecC2R(planC2R, (hipComplex*)result, (float*)result);
if(r) goto cufftError;
hipDeviceSynchronize();
// Ratio image / blurred estimate.
hipLaunchKernelGGL(( FloatDiv), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, fftStream, (float*)buf, (float*)result, (float*)result);
hipDeviceSynchronize();
err = hipMemcpyAsync(buf, h_otf, nFreq*sizeof(hipComplex), hipMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = hipfftExecR2C(planR2C, (float*)result, (hipComplex*)result);
if(r) goto cufftError;
hipDeviceSynchronize();
hipLaunchKernelGGL(( ComplexMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, fftStream, (hipComplex*)result, (hipComplex*)buf, (hipComplex*)result);
hipDeviceSynchronize();
err = hipMemcpyAsync(buf, h_object, nSpatial*sizeof(float), hipMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = hipfftExecC2R(planC2R, (hipComplex*)result, (float*)result);
if(r) goto cufftError;
hipDeviceSynchronize();
// Multiplicative update of the estimate; the result is streamed back to
// h_object so the next iteration re-uploads the fresh estimate.
hipLaunchKernelGGL(( FloatMul), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, fftStream, (float*)buf, (float*)result, (float*)result);
hipDeviceSynchronize();
err = hipMemcpyAsync(h_object, result, nSpatial*sizeof(float), hipMemcpyDeviceToHost, fftStream);
if(err) goto cudaErr;
}
hipDeviceSynchronize();
retval = 0;
goto cleanup;
cudaErr:
fprintf(stderr, "CUDA error: %d\n", err);
retval = err;
goto cleanup;
cufftError:
fprintf(stderr, "CuFFT error: %d\n", r);
retval = r;
goto cleanup;
cleanup:
if(fftStream) hipStreamDestroy(fftStream);
if(memStream) hipStreamDestroy(memStream);
if(result) hipFree(result);
if(buf) hipFree(buf);
if(workArea) hipFree(workArea);
if(h_otf) {
hipHostUnregister(h_otf);
free(h_otf);
}
// Unconditional unregister: harmless (returns an ignored error) if the
// registration above never happened.
hipHostUnregister(h_image);
hipHostUnregister(h_object);
hipProfilerStop();
hipDeviceReset();
return retval;
}
/*
 * createPlans: create matching 3D R2C and C2R hipFFT plans that share one
 * caller-owned work area sized to the larger of the two requirements.
 * On success *workArea/*workSize describe the allocation; the caller is
 * responsible for freeing *workArea after destroying the plans.
 */
hipfftResult createPlans(size_t N1, size_t N2, size_t N3, hipfftHandle *planR2C, hipfftHandle *planC2R, void **workArea, size_t *workSize) {
    hipfftResult r;
    r = hipfftCreate(planR2C);
    if(r) return r;
    // r = cufftSetCompatibilityMode(*planR2C, CUFFT_COMPATIBILITY_FFT_PADDING);
    // if(r) return r;
    // Disable auto-allocation: both plans share the single work area below.
    r = hipfftSetAutoAllocation(*planR2C, 0);
    if(r) return r;
    r = hipfftCreate(planC2R);
    if(r) return r;
    // r = cufftSetCompatibilityMode(*planC2R, CUFFT_COMPATIBILITY_FFT_PADDING);
    // if(r) return r;
    r = hipfftSetAutoAllocation(*planC2R, 0);
    if(r) return r;
    size_t tmp;
    r = hipfftGetSize3d(*planR2C, N1, N2, N3, HIPFFT_R2C, workSize);
    //r = hipfftGetSize2d(*planR2C, N1, N2, HIPFFT_R2C, workSize);
    if(r) return r;
    // Fixed: the C2R plan's work size was queried with HIPFFT_R2C; it must
    // be queried with the transform type the plan will actually be made with.
    r = hipfftGetSize3d(*planC2R, N1, N2, N3, HIPFFT_C2R, &tmp);
    //r = hipfftGetSize2d(*planC2R, N1, N2, HIPFFT_C2R, &tmp);
    if(r) return r;
    if(tmp > *workSize)
        *workSize = tmp;
    hipError_t err = hipMalloc(workArea, *workSize);
    if(err) return HIPFFT_ALLOC_FAILED;
    r = hipfftSetWorkArea(*planR2C, *workArea);
    if(r) goto error;
    r = hipfftMakePlan3d(*planR2C, N1, N2, N3, HIPFFT_R2C, &tmp);
    //r = hipfftMakePlan2d(*planR2C, N1, N2, HIPFFT_R2C, &tmp);
    if(r) goto error;
    r = hipfftSetWorkArea(*planC2R, *workArea);
    if(r) goto error;
    r = hipfftMakePlan3d(*planC2R, N1, N2, N3, HIPFFT_C2R, &tmp);
    //r = hipfftMakePlan2d(*planC2R, N1, N2, HIPFFT_C2R, &tmp);
    if(r) goto error;
    return HIPFFT_SUCCESS;
error:
    // Plan creation failed after the allocation: release the work area so
    // the caller does not free an invalid pointer path.
    hipFree(*workArea);
    return r;
}
/*
 * numBlocksThreads: choose a 3D grid of 1D blocks (BLOCKSIZE threads each)
 * whose total thread count is >= N.  The kernels in this file flatten
 * (blockIdx, threadIdx) to one linear index, so only the product of the
 * dimensions matters, and callers size their buffers from the product.
 *
 * Fixed: x/y/z are *grid* (block-count) dimensions, so they must be bounded
 * by the device's maximum grid dimensions — the previous code queried the
 * maximum *block* (threads-per-block) dimensions instead, which both
 * over-restricted the grid and mis-shaped the factorization.
 */
static hipError_t numBlocksThreads(unsigned int N, dim3 *numBlocks, dim3 *threadsPerBlock) {
    unsigned int BLOCKSIZE = 128;   // threads per (1D) block
    int Nx, Ny, Nz;                 // device limits on grid dimensions
    int device;
    hipError_t err;
    // Small problems fit in a single block.
    if(N < BLOCKSIZE) {
        numBlocks->x = 1;
        numBlocks->y = 1;
        numBlocks->z = 1;
        threadsPerBlock->x = N;
        threadsPerBlock->y = 1;
        threadsPerBlock->z = 1;
        return hipSuccess;
    }
    threadsPerBlock->x = BLOCKSIZE;
    threadsPerBlock->y = 1;
    threadsPerBlock->z = 1;
    err = hipGetDevice(&device);
    if(err) return err;
    err = hipDeviceGetAttribute(&Nx, hipDeviceAttributeMaxGridDimX, device);
    if(err) return err;
    err = hipDeviceGetAttribute(&Ny, hipDeviceAttributeMaxGridDimY, device);
    if(err) return err;
    err = hipDeviceGetAttribute(&Nz, hipDeviceAttributeMaxGridDimZ, device);
    if(err) return err;
    printf("Nx: %d, Ny: %d, Nz: %d\n", Nx, Ny, Nz);
    // Work in unsigned arithmetic: gy*gz can exceed INT_MAX.
    unsigned int gx = (unsigned int)Nx;
    unsigned int gy = (unsigned int)Ny;
    unsigned int gz = (unsigned int)Nz;
    unsigned int n = (N-1) / BLOCKSIZE + 1;   // blocks needed (ceil)
    // Factor n into x*y*z with each factor within its limit; by
    // construction x*y*z >= n, so every element gets a thread.
    unsigned int x = (n-1) / (gy*gz) + 1;
    unsigned int y = (n-1) / (x*gz) + 1;
    unsigned int z = (n-1) / (x*y) + 1;
    if(x > gx || y > gy || z > gz) {
        return hipErrorInvalidConfiguration;
    }
    numBlocks->x = x;
    numBlocks->y = y;
    numBlocks->z = z;
    return hipSuccess;
}
/*
 * conv_device: convolve (or, when correlate==1, correlate) h_image with
 * h_psf on the GPU via the FFT convolution theorem and write the spatial
 * result to h_out.  The C2R output is divided by nSpatial because cuFFT
 * transforms are unnormalized.  Returns 0 on success, a CUDA/cuFFT error
 * code otherwise.
 */
int conv_device(size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_out, unsigned int correlate) {
    int retval = 0;
    hipfftResult r;
    hipError_t err;
    hipfftHandle planR2C, planC2R;
    std::cout<<"Arrived in Cuda convolution\n";
    // Fixed: the arguments were accidentally part of the format string,
    // leaving three %d conversions with no arguments (undefined behavior).
    printf("input size: %zu %zu %zu\n", N1, N2, N3);
    float *image = 0; // convolved image (constant)
    float *psf=0;
    float *out = 0; // estimated object
    hipComplex *otf = 0; // Fourier transform of PSF (constant)
    void *buf = 0; // intermediate results
    void *workArea = 0; // cuFFT work area
    size_t nSpatial = N1*N2*N3; // number of values in spatial domain
    size_t nFreq = N1*N2*(N3/2+1); // number of values in frequency domain
    //size_t nFreq = N1*(N2/2+1); // number of values in frequency domain
    size_t mSpatial, mFreq;
    dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
    size_t workSize; // size of cuFFT work area in bytes
    err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
    if(err) goto cudaErr;
    err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
    if(err) goto cudaErr;
    // Padded buffer sizes so every launched thread has an element to touch.
    mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
    mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(hipComplex);
    printf("N: %ld, M: %ld\n", nSpatial, mSpatial);
    printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
    // Fixed: fflush(stdin) is undefined behavior; the intent (per the
    // comments below) is to make output visible when called from Java.
    fflush(stdout);
    std::cout<<"N: "<<nSpatial<<" M: "<<mSpatial<<"\n"<<std::flush;
    std::cout<<"Blocks: "<<spatialBlocks.x<<" x "<<spatialBlocks.y<<" x "<<spatialBlocks.z<<", Threads: "<<spatialThreadsPerBlock.x<<" x "<<spatialThreadsPerBlock.y<<" x "<<spatialThreadsPerBlock.z<<"\n";
    hipDeviceReset();
    hipProfilerStart();
    err = hipMalloc(&image, mSpatial);
    if(err) goto cudaErr;
    err = hipMalloc(&out, mSpatial);
    if(err) goto cudaErr;
    err = hipMalloc(&psf, mSpatial);
    if(err) goto cudaErr;
    err = hipMalloc(&buf, mFreq); // mFreq > mSpatial
    if(err) goto cudaErr;
    err = hipMalloc(&otf, mFreq); // mFreq > mSpatial
    if(err) goto cudaErr;
    // Zero the padded tails so the extra threads operate on defined data.
    err = hipMemset(image, 0, mSpatial);
    if(err) goto cudaErr;
    err = hipMemset(out, 0, mSpatial);
    if(err) goto cudaErr;
    printf("Memory allocated.\n");
    err = hipMemcpy(image, h_image, nSpatial*sizeof(float), hipMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Image transferred.\n");
    err = hipMemcpy(out, h_out, nSpatial*sizeof(float), hipMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Object transferred.\n");
    err = hipMemcpy(psf, h_psf, nSpatial*sizeof(float), hipMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("PSF transferred.\n");
    // BN it looks like this function was originally written for the array
    // organization used in matlab. I changed the order of the dimensions to
    // be compatible with imglib2 (java). TODO - add param for array organization
    r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
    if(r) goto cufftError;
    printf("Plans created.\n");
    r = hipfftExecR2C(planR2C, psf, otf);   // OTF = FFT(PSF)
    if(r) goto cufftError;
    printf("Convolving!!\n");
    // BN flush the buffer for debugging in Java.
    fflush(stdout);
    r = hipfftExecR2C(planR2C, image, (hipfftComplex*)buf);
    if(r) goto cufftError;
    if (correlate==1) {
        // Correlation: multiply by the conjugate OTF.
        hipLaunchKernelGGL(( ComplexConjugateMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, 0, (hipComplex*)buf, otf, (hipComplex*)buf);
    }
    else {
        hipLaunchKernelGGL(( ComplexMul), dim3(freqBlocks), dim3(freqThreadsPerBlock), 0, 0, (hipComplex*)buf, otf, (hipComplex*)buf);
    }
    r = hipfftExecC2R(planC2R, (hipfftComplex*)buf, (float*)out);
    if(r) goto cufftError;
    // Normalize the unnormalized inverse transform.
    hipLaunchKernelGGL(( FloatDivByConstant), dim3(spatialBlocks), dim3(spatialThreadsPerBlock), 0, 0, (float*)out,(float)nSpatial);
    err = hipMemcpy(h_out, out, nSpatial*sizeof(float), hipMemcpyDeviceToHost);
    // Fixed: a failed device-to-host copy previously went unreported.
    if(err) goto cudaErr;
    retval = 0;
    goto cleanup;
cudaErr:
    fprintf(stderr, "CUDA error: %d\n", err);
    retval = err;
    goto cleanup;
cufftError:
    fprintf(stderr, "CuFFT error: %d\n", r);
    retval = r;
    goto cleanup;
cleanup:
    if(image) hipFree(image);
    if(out) hipFree(out);
    if(psf) hipFree(psf);   // Fixed: psf was previously leaked
    if(otf) hipFree(otf);
    if(buf) hipFree(buf);
    if(workArea) hipFree(workArea);
    hipProfilerStop();
    hipDeviceReset();
    return retval;
}
| d3eb6d2e6146b89530585e1127d98d04631c83b0.cu | /*
deconv.cu
Author: Bob Pepin - (originally obtained from https://github.com/bobpepin/YacuDecu)
Author: Brian Northan - very minor changes to dimension order of FFT plan in deconv_device function in order for this function to work on arrays from imglib2.
License: LGPL
*/
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <cufft.h>
#include <cublas.h>
#include <cuComplex.h>
#include "deconv.h"
// Element-wise complex product: C[i] = A[i] * B[i].
// The linear index is flattened from a 3D grid of 1D blocks; launches cover
// the whole padded buffer, so no bounds check is needed here.
__global__ void ComplexMul(cuComplex *A, cuComplex *B, cuComplex *C)
{
    unsigned int idx = threadIdx.x
        + blockDim.x * (blockIdx.z + gridDim.z * (blockIdx.y + gridDim.y * blockIdx.x));
    C[idx] = cuCmulf(A[idx], B[idx]);
}
// BN 2018 add complex conjugate multiply
// Computes x * conj(y): real = Re(x)Re(y) + Im(x)Im(y),
// imag = Im(x)Re(y) - Re(x)Im(y).  Note the sign of the imaginary part:
// this is x*conj(y), not conj(x)*y — callers pass the OTF as y to
// correlate against the conjugate OTF.  Do not "fix" the sign.
__host__ __device__ static __inline__ cuFloatComplex cuCconjmulf(cuFloatComplex x,
cuFloatComplex y)
{
cuFloatComplex prod;
prod = make_cuFloatComplex((cuCrealf(x) * cuCrealf(y)) +
(cuCimagf(x) * cuCimagf(y)),
-(cuCrealf(x) * cuCimagf(y)) +
(cuCimagf(x) * cuCrealf(y)));
return prod;
}
// BN 2018: element-wise conjugate product, C[i] = A[i] * conj(B[i])
// (see cuCconjmulf for the sign convention).  Same flattened indexing as
// ComplexMul; launches cover the whole padded buffer, so no bounds check.
__global__ void ComplexConjugateMul(cuComplex *A, cuComplex *B, cuComplex *C)
{
    unsigned int idx = threadIdx.x
        + blockDim.x * (blockIdx.z + gridDim.z * (blockIdx.y + gridDim.y * blockIdx.x));
    C[idx] = cuCconjmulf(A[idx], B[idx]);
}
// Element-wise guarded division: C[i] = A[i] / B[i], or 0 where B[i] == 0.
// The zero guard keeps Richardson-Lucy ratios finite on empty pixels.
__global__ void FloatDiv(float *A, float *B, float *C)
{
    unsigned int idx = threadIdx.x
        + blockDim.x * (blockIdx.z + gridDim.z * (blockIdx.y + gridDim.y * blockIdx.x));
    C[idx] = (B[idx] != 0) ? A[idx] / B[idx] : 0;
}
// Element-wise product: C[i] = A[i] * B[i].
__global__ void FloatMul(float *A, float *B, float *C)
{
    unsigned int idx = threadIdx.x
        + blockDim.x * (blockIdx.z + gridDim.z * (blockIdx.y + gridDim.y * blockIdx.x));
    C[idx] = A[idx] * B[idx];
}
// In-place scalar division, A[i] /= constant — used to normalize cuFFT's
// unnormalized inverse transforms by the element count.
__global__ void FloatDivByConstant(float *A, float constant)
{
    unsigned int idx = threadIdx.x
        + blockDim.x * (blockIdx.z + gridDim.z * (blockIdx.y + gridDim.y * blockIdx.x));
    A[idx] /= constant;
}
static cufftResult createPlans(size_t, size_t, size_t, cufftHandle *planR2C, cufftHandle *planC2R, void **workArea, size_t *workSize);
static cudaError_t numBlocksThreads(unsigned int N, dim3 *numBlocks, dim3 *threadsPerBlock);
// Arithmetic mean of the first N floats at host pointer a.
// Accumulates in the same left-to-right order as a plain loop would.
static float floatMean(float *a, int N) {
    float total = 0;
    for (int k = 0; k < N; ++k) {
        total += a[k];
    }
    return total / (float)N;
}
// Mean of N floats living in device memory: stage them into a temporary
// host buffer, average there, then release the staging buffer.
// Debug helper only — performs a blocking device-to-host copy.
static float devFloatMean(float *a_dev, int N) {
    float *host = (float*)malloc(N * sizeof(float));
    cudaMemcpy(host, a_dev, N * sizeof(float), cudaMemcpyDeviceToHost);
    float mean = floatMean(host, N);
    free(host);
    return mean;
}
/*
 * deconv_device: Richardson-Lucy deconvolution run entirely in device
 * memory.  Per iteration: blur the estimate with the OTF, divide the
 * observed image by the blurred estimate, correlate the ratio with the
 * conjugate OTF, and apply the multiplicative update (optionally divided
 * by the "normal" image when h_normal is supplied, for non-circulant
 * normalization).  Writes the final estimate back to h_object.
 * Returns 0 on success, a CUDA/cuFFT error code otherwise.
 */
int deconv_device(unsigned int iter, size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_object, float *h_normal) {
    int retval = 0;
    cufftResult r;
    cudaError_t err;
    cufftHandle planR2C, planC2R;
    std::cout<<"Arrived in Cuda deconvolution\n";
    // Fixed: the size arguments were inside the format string, leaving
    // three %d conversions with no arguments (undefined behavior).
    printf("input size: %zu %zu %zu\n", N1, N2, N3);
    float *image = 0; // convolved image (constant)
    float *object = 0; // estimated object
    float *psf=0;
    float*temp=0;
    float*normal = 0;
    cuComplex *otf = 0; // Fourier transform of PSF (constant)
    void *buf = 0; // intermediate results
    void *workArea = 0; // cuFFT work area
    size_t nSpatial = N1*N2*N3; // number of values in spatial domain
    size_t nFreq = N1*N2*(N3/2+1); // number of values in frequency domain
    //size_t nFreq = N1*(N2/2+1); // number of values in frequency domain
    size_t mSpatial, mFreq;
    dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
    size_t workSize; // size of cuFFT work area in bytes
    err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
    if(err) goto cudaErr;
    err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
    if(err) goto cudaErr;
    // Padded buffer sizes so every launched thread has an element to touch.
    mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
    mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(cuComplex);
    printf("N: %ld, M: %ld\n", nSpatial, mSpatial);
    printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
    // Fixed: fflush(stdin) is undefined behavior; flushing stdout matches
    // the stated intent (make output visible when called from Java).
    fflush(stdout);
    std::cout<<"N: "<<nSpatial<<" M: "<<mSpatial<<"\n"<<std::flush;
    std::cout<<"Blocks: "<<spatialBlocks.x<<" x "<<spatialBlocks.y<<" x "<<spatialBlocks.z<<", Threads: "<<spatialThreadsPerBlock.x<<" x "<<spatialThreadsPerBlock.y<<" x "<<spatialThreadsPerBlock.z<<"\n";
    cudaDeviceReset();
    cudaProfilerStart();
    err = cudaMalloc(&image, mSpatial);
    if(err) goto cudaErr;
    err = cudaMalloc(&object, mSpatial);
    if(err) goto cudaErr;
    err = cudaMalloc(&psf, mSpatial);
    if(err) goto cudaErr;
    //err = cudaMalloc(&temp, mSpatial);
    //if(err) goto cudaErr;
    if (h_normal!=NULL) {
        err = cudaMalloc(&normal, mSpatial);
        if (err) goto cudaErr;
    }
    else {
        normal = NULL;
    }
    err = cudaMalloc(&otf, mFreq);
    if(err) goto cudaErr;
    err = cudaMalloc(&buf, mFreq); // mFreq > mSpatial
    if(err) goto cudaErr;
    // Zero the padded tails so the extra threads operate on defined data.
    err = cudaMemset(image, 0, mSpatial);
    if(err) goto cudaErr;
    err = cudaMemset(object, 0, mSpatial);
    if(err) goto cudaErr;
    if (h_normal != NULL) {
        err = cudaMemset(normal, 0, mSpatial);
        if (err) goto cudaErr;
    }
    printf("Memory allocated.\n");
    err = cudaMemcpy(image, h_image, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Image transferred.\n");
    err = cudaMemcpy(object, h_object, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Object transferred.\n");
    err = cudaMemcpy(psf, h_psf, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("PSF transferred.\n");
    if (h_normal != NULL) {
        err = cudaMemcpy(normal, h_normal, nSpatial * sizeof(float), cudaMemcpyHostToDevice);
        if (err) goto cudaErr;
        printf("Normal transferred.\n");
    }
    // BN it looks like this function was originally written for the array
    // organization used in matlab. I changed the order of the dimensions to
    // be compatible with imglib2 (java). TODO - add param for array organization
    r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
    if(r) goto cufftError;
    printf("Plans created.\n");
    r = cufftExecR2C(planR2C, psf, otf);   // OTF = FFT(PSF)
    if(r) goto cufftError;
    // since we don't need the psf anymore (we just used it to get the OTF)
    // use the psf buffer as the temp buffer
    temp = psf;
    for(unsigned int i=0; i < iter; i++) {
        printf("Iteration %d!!!\n", i);
        // BN flush the buffer for debugging in Java.
        fflush(stdout);
        std::cout<<"Iteration "<<i<<"\n"<<std::flush;
        // Forward model: temp = estimate convolved with the PSF.
        r = cufftExecR2C(planR2C, object, (cufftComplex*)buf);
        if(r) goto cufftError;
        ComplexMul<<<freqBlocks, freqThreadsPerBlock>>>((cuComplex*)buf, otf, (cuComplex*)buf);
        r = cufftExecC2R(planC2R, (cufftComplex*)buf, (float*)temp);
        if(r) goto cufftError;
        // cuFFT transforms are unnormalized: divide by the element count.
        FloatDivByConstant<<<spatialBlocks, spatialThreadsPerBlock>>>((float*)temp,(float)nSpatial);
        // Ratio of observed image to the blurred estimate.
        FloatDiv<<<spatialBlocks, spatialThreadsPerBlock>>>(image, (float*)temp, (float*)temp);
        r = cufftExecR2C(planR2C, (float*)temp, (cufftComplex*)buf);
        if(r) goto cufftError;
        // BN 2018 Changed to complex conjugate multiply
        ComplexConjugateMul<<<freqBlocks, freqThreadsPerBlock>>>((cuComplex*)buf, otf, (cuComplex*)buf);
        r = cufftExecC2R(planC2R, (cufftComplex*)buf, (float*)temp);
        if(r) goto cufftError;
        FloatDivByConstant<<<spatialBlocks, spatialThreadsPerBlock>>>((float*)temp,(float)nSpatial);
        // Multiplicative Richardson-Lucy update of the estimate.
        FloatMul<<<spatialBlocks, spatialThreadsPerBlock>>>((float*)temp, object, object);
        if (normal != NULL) {
            std::cout << "Divide by normal " << i << "\n" << std::flush;
            FloatDiv<<<spatialBlocks, spatialThreadsPerBlock >>>((float*)object, normal, object);
        }
    }
    err = cudaMemcpy(h_object, object, nSpatial*sizeof(float), cudaMemcpyDeviceToHost);
    if(err) goto cudaErr;
    retval = 0;
    goto cleanup;
cudaErr:
    fprintf(stderr, "CUDA error: %d\n", err);
    std::cout << "CUDA error: " << err << std::endl;
    retval = err;
    goto cleanup;
cufftError:
    fprintf(stderr, "CuFFT error IS: %d\n", r);
    std::cout << "CuFFT error is: " << r << std::endl;
    retval = r;
    goto cleanup;
cleanup:
    if(image) cudaFree(image);
    if(object) cudaFree(object);
    if(psf) cudaFree(psf);       // Fixed: psf (aliased by temp) was leaked
    if(normal) cudaFree(normal); // Fixed: normal was leaked
    if(otf) cudaFree(otf);
    if(buf) cudaFree(buf);
    if(workArea) cudaFree(workArea);
    cudaProfilerStop();
    cudaDeviceReset();
    return retval;
}
/*
 * deconv_host: Richardson-Lucy deconvolution using zero-copy (mapped)
 * host memory — the kernels and cuFFT operate on device pointers obtained
 * from cudaHostGetDevicePointer, so no large device allocations are made.
 * When the padded size exceeds the caller's buffers, padded mapped copies
 * are allocated; otherwise the caller's buffers are registered directly.
 * Writes the final estimate back to h_object.  h_normal is unused here.
 */
extern "C" int deconv_host(unsigned int iter, size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_object, float *h_normal) {
int retval = 0;
cufftResult r;
cudaError_t err;
cufftHandle planR2C, planC2R;
float *image = 0; // convolved image (constant)
float *object = 0; // estimated object
cuComplex *otf = 0; // Fourier transform of PSF (constant)
void *buf = 0; // intermediate results
void *workArea = 0; // cuFFT work area
cuComplex *h_otf = 0;
void *h_buf = 0;
float *h_image_pad = 0;
float *h_object_pad = 0;
size_t nSpatial = N1*N2*N3; // number of values in spatial domain
size_t nFreq = N1*N2*(N3/2+1); // number of values in frequency domain
//size_t nFreq = N1*(N2/2+1); // number of values in frequency domain
size_t mSpatial, mFreq;
dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
size_t workSize; // size of cuFFT work area in bytes
err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
if(err) goto cudaErr;
err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
if(err) goto cudaErr;
// Padded buffer sizes covering every launched thread.
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(cuComplex);
printf("N: %ld, M: %ld\n", nSpatial, mSpatial);
printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
cudaDeviceReset();
// Mapped (zero-copy) host memory requires this flag before any allocation.
err = cudaSetDeviceFlags(cudaDeviceMapHost);
printf("Set Device Flags: %d\n", err);
cudaProfilerStart();
err = cudaHostAlloc(&h_otf, mFreq, cudaHostAllocMapped | cudaHostAllocWriteCombined);
if(err) goto cudaErr;
err = cudaHostAlloc(&h_buf, mFreq, cudaHostAllocMapped | cudaHostAllocWriteCombined);
if(err) goto cudaErr;
printf("Host memory allocated.\n");
if(mSpatial > nSpatial*sizeof(float)) {
// Caller buffers are too small for the padded launch: allocate padded
// mapped copies and copy the data in.
err = cudaHostAlloc(&h_image_pad, mSpatial, cudaHostAllocMapped | cudaHostAllocWriteCombined);
if(err) goto cudaErr;
err = cudaHostAlloc(&h_object_pad, mSpatial, cudaHostAllocMapped | cudaHostAllocWriteCombined);
if(err) goto cudaErr;
err = cudaHostGetDevicePointer(&image, h_image_pad, 0);
if(err) goto cudaErr;
err = cudaHostGetDevicePointer(&object, h_object_pad, 0);
if(err) goto cudaErr;
err = cudaMemcpy(image, h_image, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
if(err) goto cudaErr;
err = cudaMemcpy(object, h_object, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
if(err) goto cudaErr;
} else {
// Caller buffers are large enough: pin and map them in place.
err = cudaHostRegister(h_image, mSpatial, cudaHostRegisterMapped);
if(err) goto cudaErr;
err = cudaHostRegister(h_object, mSpatial, cudaHostRegisterMapped);
if(err) goto cudaErr;
err = cudaHostGetDevicePointer(&image, h_image, 0);
if(err) goto cudaErr;
err = cudaHostGetDevicePointer(&object, h_object, 0);
if(err) goto cudaErr;
}
err = cudaHostGetDevicePointer(&otf, h_otf, 0);
if(err) goto cudaErr;
err = cudaHostGetDevicePointer(&buf, h_buf, 0);
if(err) goto cudaErr;
printf("Host pointers registered.\n");
// Load the PSF into the otf buffer and transform it in place below.
err = cudaMemcpy(otf, h_psf, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
if(err) goto cudaErr;
printf("PSF transferred.\n");
r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
if(r) goto cufftError;
printf("Plans created.\n");
r = cufftExecR2C(planR2C, (float*)otf, otf);
if(r) goto cufftError;
for(unsigned int i=0; i < iter; i++) {
printf("Iteration %d\n", i);
// NOTE(review): unlike deconv_device, no FloatDivByConstant normalization
// is applied after the C2R transforms here — confirm whether the ratio
// cancels the scale factors or this variant drifts by nSpatial^2.
r = cufftExecR2C(planR2C, object, (cufftComplex*)buf);
if(r) goto cufftError;
ComplexMul<<<freqBlocks, freqThreadsPerBlock>>>((cuComplex*)buf, otf, (cuComplex*)buf);
r = cufftExecC2R(planC2R, (cufftComplex*)buf, (float*)buf);
if(r) goto cufftError;
printf("a: m = %f\n", devFloatMean((float*)buf, nSpatial));
FloatDiv<<<spatialBlocks, spatialThreadsPerBlock>>>(image, (float*)buf, (float*)buf);
r = cufftExecR2C(planR2C, (float*)buf, (cufftComplex*)buf);
if(r) goto cufftError;
ComplexMul<<<freqBlocks, freqThreadsPerBlock>>>((cuComplex*)buf, otf, (cuComplex*)buf);
r = cufftExecC2R(planC2R, (cufftComplex*)buf, (float*)buf);
if(r) goto cufftError;
FloatMul<<<spatialBlocks, spatialThreadsPerBlock>>>((float*)buf, object, object);
}
printf("object: m = %f\n", devFloatMean((float*)object, nSpatial));
err = cudaMemcpy(h_object, object, nSpatial*sizeof(float), cudaMemcpyDeviceToHost);
if(err) goto cudaErr;
retval = 0;
goto cleanup;
cudaErr:
fprintf(stderr, "CUDA error: %d\n", err);
retval = err;
goto cleanup;
cufftError:
fprintf(stderr, "CuFFT error: %d\n", r);
retval = r;
goto cleanup;
cleanup:
printf("h_image: %p, h_object: %p, h_psf: %p, h_buf: %p, h_otf: %p\n", h_image, h_object, h_psf, h_buf, h_otf);
// NOTE(review): cudaHostUnregister is called on buffers that came from
// cudaHostAlloc (h_image_pad/h_object_pad/h_otf/h_buf) — those were never
// host-registered, so the call returns an (ignored) error; only the
// cudaHostRegister'ed h_image/h_object actually need unregistering.
if(image) {
if(h_image_pad) {
cudaHostUnregister(h_image_pad);
cudaFreeHost(h_image_pad);
} else {
cudaHostUnregister(h_image);
}
}
if(object) {
if(h_object_pad) {
cudaHostUnregister(h_object_pad);
cudaFreeHost(h_object_pad);
} else {
cudaHostUnregister(h_object);
}
}
if(otf) {
cudaHostUnregister(h_otf);
cudaFreeHost(h_otf);
}
if(buf) {
cudaHostUnregister(h_buf);
cudaFreeHost(h_buf);
}
if(workArea) cudaFree(workArea);
cudaProfilerStop();
cudaDeviceReset();
return retval;
}
/*
 * deconv_stream: Richardson-Lucy deconvolution using two CUDA streams:
 * fftStream carries the FFT / kernel work, memStream prefetches the next
 * operand (OTF, image or object) from pinned host memory.  The device
 * holds only two buffers ("result" and "buf"); the constant inputs are
 * re-uploaded each time they are needed.
 *
 * Note: h_normal is accepted for signature parity with deconv_device but
 * is not used by this variant.
 */
int deconv_stream(unsigned int iter, size_t N1, size_t N2, size_t N3,
float *h_image, float *h_psf, float *h_object, float *h_normal) {
int retval = 0;
cufftResult r;
cudaError_t err;
cufftHandle planR2C, planC2R;
cudaStream_t fftStream = 0, memStream = 0;
void *result = 0; // estimated object
void *buf = 0; // intermediate results
void *workArea = 0; // cuFFT work area
cuComplex *h_otf = 0;
size_t nSpatial = N1*N2*N3; // number of values in spatial domain
size_t nFreq = N1*N2*(N3/2+1); // number of values in frequency domain
//size_t nFreq = N1*(N2/2+1); // number of values in frequency domain
size_t mSpatial, mFreq;
size_t workSize; // size of cuFFT work area in bytes
dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
// Launch geometry; m* are the padded buffer sizes covering every thread.
err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
if(err) goto cudaErr;
err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
if(err) goto cudaErr;
mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(cuComplex);
printf("N: %ld, M: %ld\n", nSpatial, mSpatial);
printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
cudaDeviceReset();
cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
err = cudaStreamCreate(&fftStream);
if(err) goto cudaErr;
err = cudaStreamCreate(&memStream);
if(err) goto cudaErr;
#if 0
err = cudaEventCreateWithFlags(&memSync, cudaEventDisableTiming);
if(err) goto cudaErr;
#endif
cudaProfilerStart();
err = cudaMalloc(&result, mFreq);
if(err) goto cudaErr;
err = cudaMalloc(&buf, mFreq); // mFreq > mSpatial
if(err) goto cudaErr;
// NOTE(review): malloc result is not checked; a NULL h_otf would only be
// caught indirectly by the cudaHostRegister call below.
h_otf = (cuComplex*)malloc(nFreq*sizeof(cuComplex));
printf("Memory allocated.\n");
// Pin the host buffers so the async copies below can run truly async.
err = cudaHostRegister(h_image, nSpatial*sizeof(float), 0);
if(err) goto cudaErr;
err = cudaHostRegister(h_object, nSpatial*sizeof(float), 0);
if(err) goto cudaErr;
err = cudaHostRegister(h_otf, nFreq*sizeof(cuComplex), 0);
if(err) goto cudaErr;
r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
if(r) goto cufftError;
r = cufftSetStream(planR2C, fftStream);
if(r) goto cufftError;
r = cufftSetStream(planC2R, fftStream);
if(r) goto cufftError;
printf("Plans created.\n");
// Compute the OTF once on the device and stage it on the host; it is
// re-uploaded on demand inside the iteration loop.
err = cudaMemcpyAsync(buf, h_psf, nSpatial*sizeof(float), cudaMemcpyHostToDevice, fftStream);
if(err) goto cudaErr;
r = cufftExecR2C(planR2C, (float*)buf, (cuComplex*)buf);
if(r) goto cufftError;
// NOTE(review): this err result is overwritten by the next call before it
// is checked — a failed copy here would go unnoticed.
err = cudaMemcpyAsync(h_otf, buf, nFreq*sizeof(cuComplex), cudaMemcpyDeviceToHost, fftStream);
err = cudaStreamSynchronize(fftStream);
if(err) goto cudaErr;
printf("OTF generated.\n");
err = cudaMemcpyAsync(result, h_object, nSpatial*sizeof(float), cudaMemcpyHostToDevice, fftStream);
if(err) goto cudaErr;
for(unsigned int i=0; i < iter; i++) {
printf("Iteration %d\n", i);
// Prefetch OTF on memStream while the FFT of the estimate runs in place.
err = cudaMemcpyAsync(buf, h_otf, nFreq*sizeof(cuComplex), cudaMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = cufftExecR2C(planR2C, (float*)result, (cuComplex*)result);
if(r) goto cufftError;
cudaDeviceSynchronize();
ComplexMul<<<freqBlocks, freqThreadsPerBlock, 0, fftStream>>>((cuComplex*)result, (cuComplex*)buf, (cuComplex*)result);
cudaDeviceSynchronize();
err = cudaMemcpyAsync(buf, h_image, nSpatial*sizeof(float), cudaMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = cufftExecC2R(planC2R, (cuComplex*)result, (float*)result);
if(r) goto cufftError;
cudaDeviceSynchronize();
// Ratio image / blurred estimate.
FloatDiv<<<spatialBlocks, spatialThreadsPerBlock, 0, fftStream>>>((float*)buf, (float*)result, (float*)result);
cudaDeviceSynchronize();
err = cudaMemcpyAsync(buf, h_otf, nFreq*sizeof(cuComplex), cudaMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = cufftExecR2C(planR2C, (float*)result, (cuComplex*)result);
if(r) goto cufftError;
cudaDeviceSynchronize();
ComplexMul<<<freqBlocks, freqThreadsPerBlock, 0, fftStream>>>((cuComplex*)result, (cuComplex*)buf, (cuComplex*)result);
cudaDeviceSynchronize();
err = cudaMemcpyAsync(buf, h_object, nSpatial*sizeof(float), cudaMemcpyHostToDevice, memStream);
if(err) goto cudaErr;
r = cufftExecC2R(planC2R, (cuComplex*)result, (float*)result);
if(r) goto cufftError;
cudaDeviceSynchronize();
// Multiplicative update; the result is streamed back to h_object so the
// next iteration re-uploads the fresh estimate.
FloatMul<<<spatialBlocks, spatialThreadsPerBlock, 0, fftStream>>>((float*)buf, (float*)result, (float*)result);
cudaDeviceSynchronize();
err = cudaMemcpyAsync(h_object, result, nSpatial*sizeof(float), cudaMemcpyDeviceToHost, fftStream);
if(err) goto cudaErr;
}
cudaDeviceSynchronize();
retval = 0;
goto cleanup;
cudaErr:
fprintf(stderr, "CUDA error: %d\n", err);
retval = err;
goto cleanup;
cufftError:
fprintf(stderr, "CuFFT error: %d\n", r);
retval = r;
goto cleanup;
cleanup:
if(fftStream) cudaStreamDestroy(fftStream);
if(memStream) cudaStreamDestroy(memStream);
if(result) cudaFree(result);
if(buf) cudaFree(buf);
if(workArea) cudaFree(workArea);
if(h_otf) {
cudaHostUnregister(h_otf);
free(h_otf);
}
// Unconditional unregister: harmless (returns an ignored error) if the
// registration above never happened.
cudaHostUnregister(h_image);
cudaHostUnregister(h_object);
cudaProfilerStop();
cudaDeviceReset();
return retval;
}
/*
 * createPlans: create matching 3D R2C and C2R cuFFT plans that share one
 * caller-owned work area sized to the larger of the two requirements.
 * On success *workArea/*workSize describe the allocation; the caller is
 * responsible for freeing *workArea after destroying the plans.
 */
cufftResult createPlans(size_t N1, size_t N2, size_t N3, cufftHandle *planR2C, cufftHandle *planC2R, void **workArea, size_t *workSize) {
    cufftResult r;
    r = cufftCreate(planR2C);
    if(r) return r;
    // r = cufftSetCompatibilityMode(*planR2C, CUFFT_COMPATIBILITY_FFT_PADDING);
    // if(r) return r;
    // Disable auto-allocation: both plans share the single work area below.
    r = cufftSetAutoAllocation(*planR2C, 0);
    if(r) return r;
    r = cufftCreate(planC2R);
    if(r) return r;
    // r = cufftSetCompatibilityMode(*planC2R, CUFFT_COMPATIBILITY_FFT_PADDING);
    // if(r) return r;
    r = cufftSetAutoAllocation(*planC2R, 0);
    if(r) return r;
    size_t tmp;
    r = cufftGetSize3d(*planR2C, N1, N2, N3, CUFFT_R2C, workSize);
    //r = cufftGetSize2d(*planR2C, N1, N2, CUFFT_R2C, workSize);
    if(r) return r;
    // Fixed: the C2R plan's work size was queried with CUFFT_R2C; it must
    // be queried with the transform type the plan will actually be made with.
    r = cufftGetSize3d(*planC2R, N1, N2, N3, CUFFT_C2R, &tmp);
    //r = cufftGetSize2d(*planC2R, N1, N2, CUFFT_C2R, &tmp);
    if(r) return r;
    if(tmp > *workSize)
        *workSize = tmp;
    cudaError_t err = cudaMalloc(workArea, *workSize);
    if(err) return CUFFT_ALLOC_FAILED;
    r = cufftSetWorkArea(*planR2C, *workArea);
    if(r) goto error;
    r = cufftMakePlan3d(*planR2C, N1, N2, N3, CUFFT_R2C, &tmp);
    //r = cufftMakePlan2d(*planR2C, N1, N2, CUFFT_R2C, &tmp);
    if(r) goto error;
    r = cufftSetWorkArea(*planC2R, *workArea);
    if(r) goto error;
    r = cufftMakePlan3d(*planC2R, N1, N2, N3, CUFFT_C2R, &tmp);
    //r = cufftMakePlan2d(*planC2R, N1, N2, CUFFT_C2R, &tmp);
    if(r) goto error;
    return CUFFT_SUCCESS;
error:
    // Plan creation failed after the allocation: release the work area.
    cudaFree(*workArea);
    return r;
}
/*
 * numBlocksThreads: choose a 3D grid of 1D blocks (BLOCKSIZE threads each)
 * whose total thread count is >= N.  The kernels in this file flatten
 * (blockIdx, threadIdx) to one linear index, so only the product of the
 * dimensions matters, and callers size their buffers from the product.
 *
 * Fixed: x/y/z are *grid* (block-count) dimensions, so they must be bounded
 * by the device's maximum grid dimensions — the previous code queried the
 * maximum *block* (threads-per-block) dimensions instead, which both
 * over-restricted the grid and mis-shaped the factorization.
 */
static cudaError_t numBlocksThreads(unsigned int N, dim3 *numBlocks, dim3 *threadsPerBlock) {
    unsigned int BLOCKSIZE = 128;   // threads per (1D) block
    int Nx, Ny, Nz;                 // device limits on grid dimensions
    int device;
    cudaError_t err;
    // Small problems fit in a single block.
    if(N < BLOCKSIZE) {
        numBlocks->x = 1;
        numBlocks->y = 1;
        numBlocks->z = 1;
        threadsPerBlock->x = N;
        threadsPerBlock->y = 1;
        threadsPerBlock->z = 1;
        return cudaSuccess;
    }
    threadsPerBlock->x = BLOCKSIZE;
    threadsPerBlock->y = 1;
    threadsPerBlock->z = 1;
    err = cudaGetDevice(&device);
    if(err) return err;
    err = cudaDeviceGetAttribute(&Nx, cudaDevAttrMaxGridDimX, device);
    if(err) return err;
    err = cudaDeviceGetAttribute(&Ny, cudaDevAttrMaxGridDimY, device);
    if(err) return err;
    err = cudaDeviceGetAttribute(&Nz, cudaDevAttrMaxGridDimZ, device);
    if(err) return err;
    printf("Nx: %d, Ny: %d, Nz: %d\n", Nx, Ny, Nz);
    // Work in unsigned arithmetic: gy*gz can exceed INT_MAX.
    unsigned int gx = (unsigned int)Nx;
    unsigned int gy = (unsigned int)Ny;
    unsigned int gz = (unsigned int)Nz;
    unsigned int n = (N-1) / BLOCKSIZE + 1;   // blocks needed (ceil)
    // Factor n into x*y*z with each factor within its limit; by
    // construction x*y*z >= n, so every element gets a thread.
    unsigned int x = (n-1) / (gy*gz) + 1;
    unsigned int y = (n-1) / (x*gz) + 1;
    unsigned int z = (n-1) / (x*y) + 1;
    if(x > gx || y > gy || z > gz) {
        return cudaErrorInvalidConfiguration;
    }
    numBlocks->x = x;
    numBlocks->y = y;
    numBlocks->z = z;
    return cudaSuccess;
}
/*
 * FFT-based 3D convolution (or correlation, when correlate == 1) on the GPU.
 *
 * h_image is transformed, multiplied in the frequency domain by the OTF
 * (the forward transform of h_psf) — or by its complex conjugate for
 * correlation — then transformed back and normalized by the element count
 * (cuFFT transforms are unnormalized). All three host arrays hold
 * N1*N2*N3 floats; the result is written to h_out.
 *
 * Returns 0 on success, otherwise the nonzero cudaError_t or cufftResult
 * code of the first failing call. All device resources are released on
 * every path (and cudaDeviceReset is called, preserving the original
 * behavior).
 */
int conv_device(size_t N1, size_t N2, size_t N3,
    float *h_image, float *h_psf, float *h_out, unsigned int correlate) {
    int retval = 0;
    cufftResult r;
    cudaError_t err;
    cufftHandle planR2C, planC2R;
    int plansCreated = 0;       // set once createPlans succeeds, for cleanup
    std::cout<<"Arrived in Cuda convolution\n";
    // Fix: the original printf had no arguments for its %d specifiers (the
    // variables were swallowed into the string literal) — undefined
    // behavior. %zu is the correct conversion for size_t.
    printf("input size: %zu %zu %zu\n", N1, N2, N3);
    float *image = 0;           // device copy of the input image
    float *psf = 0;             // device copy of the point-spread function
    float *out = 0;             // device result buffer
    cuComplex *otf = 0;         // Fourier transform of the PSF
    void *buf = 0;              // frequency-domain scratch buffer
    void *workArea = 0;         // cuFFT work area (allocated by createPlans)
    size_t nSpatial = N1*N2*N3;        // number of values in spatial domain
    size_t nFreq = N1*N2*(N3/2+1);     // number of values in frequency domain (R2C layout)
    size_t mSpatial, mFreq;            // byte sizes rounded up to full launch grids
    dim3 freqThreadsPerBlock, spatialThreadsPerBlock, freqBlocks, spatialBlocks;
    size_t workSize;                   // size of cuFFT work area in bytes
    err = numBlocksThreads(nSpatial, &spatialBlocks, &spatialThreadsPerBlock);
    if(err) goto cudaErr;
    err = numBlocksThreads(nFreq, &freqBlocks, &freqThreadsPerBlock);
    if(err) goto cudaErr;
    // Buffers are sized to the whole launch grid so the kernels below may
    // touch the rounded-up tail safely.
    mSpatial = spatialBlocks.x * spatialBlocks.y * spatialBlocks.z * spatialThreadsPerBlock.x * sizeof(float);
    mFreq = freqBlocks.x * freqBlocks.y * freqBlocks.z * freqThreadsPerBlock.x * sizeof(cuComplex);
    printf("N: %zu, M: %zu\n", nSpatial, mSpatial); // fix: %zu for size_t (was %ld)
    printf("Blocks: %d x %d x %d, Threads: %d x %d x %d\n", spatialBlocks.x, spatialBlocks.y, spatialBlocks.z, spatialThreadsPerBlock.x, spatialThreadsPerBlock.y, spatialThreadsPerBlock.z);
    fflush(stdout); // fix: was fflush(stdin), which is undefined behavior
    std::cout<<"N: "<<nSpatial<<" M: "<<mSpatial<<"\n"<<std::flush;
    std::cout<<"Blocks: "<<spatialBlocks.x<<" x "<<spatialBlocks.y<<" x "<<spatialBlocks.z<<", Threads: "<<spatialThreadsPerBlock.x<<" x "<<spatialThreadsPerBlock.y<<" x "<<spatialThreadsPerBlock.z<<"\n";
    cudaDeviceReset();
    cudaProfilerStart();
    err = cudaMalloc(&image, mSpatial);
    if(err) goto cudaErr;
    err = cudaMalloc(&out, mSpatial);
    if(err) goto cudaErr;
    err = cudaMalloc(&psf, mSpatial);
    if(err) goto cudaErr;
    err = cudaMalloc(&buf, mFreq); // mFreq > mSpatial
    if(err) goto cudaErr;
    err = cudaMalloc(&otf, mFreq); // mFreq > mSpatial
    if(err) goto cudaErr;
    // Zero the grid-padding tail beyond nSpatial so it is well-defined.
    err = cudaMemset(image, 0, mSpatial);
    if(err) goto cudaErr;
    err = cudaMemset(out, 0, mSpatial);
    if(err) goto cudaErr;
    printf("Memory allocated.\n");
    err = cudaMemcpy(image, h_image, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Image transferred.\n");
    err = cudaMemcpy(out, h_out, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("Object transferred.\n");
    err = cudaMemcpy(psf, h_psf, nSpatial*sizeof(float), cudaMemcpyHostToDevice);
    if(err) goto cudaErr;
    printf("PSF transferred.\n");
    // BN it looks like this function was originally written for the array
    // organization used in matlab. The dimension order was changed to be
    // compatible with imglib2 (java). TODO - add param for array organization
    r = createPlans(N1, N2, N3, &planR2C, &planC2R, &workArea, &workSize);
    if(r) goto cufftError;
    plansCreated = 1;
    printf("Plans created.\n");
    r = cufftExecR2C(planR2C, psf, otf);
    if(r) goto cufftError;
    printf("Convolving!!\n");
    // BN flush the buffer for debugging in Java.
    fflush(stdout);
    r = cufftExecR2C(planR2C, image, (cufftComplex*)buf);
    if(r) goto cufftError;
    if (correlate==1) {
        // Correlation multiplies by the conjugate of the OTF.
        ComplexConjugateMul<<<freqBlocks, freqThreadsPerBlock>>>((cuComplex*)buf, otf, (cuComplex*)buf);
    }
    else {
        ComplexMul<<<freqBlocks, freqThreadsPerBlock>>>((cuComplex*)buf, otf, (cuComplex*)buf);
    }
    r = cufftExecC2R(planC2R, (cufftComplex*)buf, (float*)out);
    if(r) goto cufftError;
    // cuFFT transforms are unnormalized: divide by the element count.
    FloatDivByConstant<<<spatialBlocks, spatialThreadsPerBlock>>>((float*)out,(float)nSpatial);
    err = cudaMemcpy(h_out, out, nSpatial*sizeof(float), cudaMemcpyDeviceToHost);
    if(err) goto cudaErr; // fix: the result copy was previously unchecked
    retval = 0;
    goto cleanup;
cudaErr:
    fprintf(stderr, "CUDA error: %d\n", err);
    retval = err;
    goto cleanup;
cufftError:
    fprintf(stderr, "CuFFT error: %d\n", r);
    retval = r;
    goto cleanup;
cleanup:
    if(plansCreated) {
        cufftDestroy(planR2C);  // fix: plans were previously never destroyed
        cufftDestroy(planC2R);
    }
    if(image) cudaFree(image);
    if(psf) cudaFree(psf);      // fix: psf was previously leaked in cleanup
    if(out) cudaFree(out);
    if(otf) cudaFree(otf);
    if(buf) cudaFree(buf);
    if(workArea) cudaFree(workArea);
    cudaProfilerStop();
    cudaDeviceReset();
    return retval;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.