| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
#ifndef _CUDA_PIPELINE_H_
# define _CUDA_PIPELINE_H_

# include "cuda_pipeline_primitives.h"

// The pipeline API relies on C++11 features (trailing return types, deleted
// special member functions), so fail fast with a clear diagnostic otherwise.
# if !defined(_CUDA_PIPELINE_CPLUSPLUS_11_OR_LATER)
# error This file requires compiler support for the ISO C++ 2011 standard. This support must be enabled with the \
-std=c++11 compiler option.
# endif

// Barrier interop (pipeline::arrive_on) is only compiled for sm_70+.
# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
# include "cuda_awbarrier.h"
# endif

# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
// Forward-declare cuda::__block_scope_barrier_base inside the same inline
// ABI namespace libcudacxx uses, so arrive_on can accept a block-scope
// cuda::barrier without pulling in the full libcudacxx barrier header.
// NOTE(review): the fallback ABI version (4) must match libcudacxx's default
// when _LIBCUDACXX_CUDA_ABI_VERSION is not defined -- confirm on toolkit bumps.
# if defined(_LIBCUDACXX_CUDA_ABI_VERSION)
# define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION _LIBCUDACXX_CUDA_ABI_VERSION
# else
# define _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION 4
# endif

// Token-pasting helpers: _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE expands to
// e.g. `__4`, the inline namespace name used by libcudacxx for this ABI.
# define _LIBCUDACXX_PIPELINE_CONCAT(X, Y) X ## Y
# define _LIBCUDACXX_PIPELINE_CONCAT2(X, Y) _LIBCUDACXX_PIPELINE_CONCAT(X, Y)
# define _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE _LIBCUDACXX_PIPELINE_CONCAT2(__, _LIBCUDACXX_PIPELINE_ASSUMED_ABI_VERSION)

namespace cuda { inline namespace _LIBCUDACXX_PIPELINE_INLINE_NAMESPACE {
struct __block_scope_barrier_base;
}}

# endif

_CUDA_PIPELINE_BEGIN_NAMESPACE
|
|
|
// Views a flat pointer as a pointer-to-array-of-N, so a plain T* can be
// passed to the array overload of memcpy_async (defined below).
template<size_t N, typename T>
_CUDA_PIPELINE_QUALIFIER
auto segment(T* ptr) -> T(*)[N];
|
|
|
// pipeline: thin wrapper over the per-thread asynchronous-copy pipeline
// primitives. Copies are staged with memcpy_async, grouped into batches with
// commit(), and completion is observed with wait()/wait_prior().
// Non-copyable and non-movable: the object tracks in-flight state that is
// tied to the issuing thread.
class pipeline {
public:
    pipeline(const pipeline&) = delete;
    pipeline(pipeline&&) = delete;
    pipeline& operator=(const pipeline&) = delete;
    pipeline& operator=(pipeline&&) = delete;

    // Constructs an empty pipeline; the first commit() returns batch 0.
    _CUDA_PIPELINE_QUALIFIER pipeline();
    // Ends the current batch of staged copies; returns its zero-based number.
    _CUDA_PIPELINE_QUALIFIER size_t commit();
    // commit(), then block until every committed batch has completed.
    _CUDA_PIPELINE_QUALIFIER void commit_and_wait();
    // Blocks until the batch whose number commit() returned has completed.
    _CUDA_PIPELINE_QUALIFIER void wait(size_t batch);
    // Blocks until all but (at most) the N most recent batches have completed.
    template<unsigned N>
    _CUDA_PIPELINE_QUALIFIER void wait_prior();

# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
    // Associates the pipeline's outstanding copies with an asynchronous
    // barrier arrival (sm_70+ only).
    _CUDA_PIPELINE_QUALIFIER void arrive_on(awbarrier& barrier);
    _CUDA_PIPELINE_QUALIFIER void arrive_on(cuda::__block_scope_barrier_base& barrier);
# endif

private:
    // Number of batches committed so far; equivalently, the index the next
    // commit() will return.
    size_t current_batch;
};
|
|
|
// Stages an asynchronous element copy from src to dst, associated with pipe.
template<class T>
_CUDA_PIPELINE_QUALIFIER
void memcpy_async(T& dst, const T& src, pipeline& pipe);

// Array form: copies SrcN elements and zero-fills the remaining DstN - SrcN
// destination elements (SrcN must not exceed DstN; see the definition).
template<class T, size_t DstN, size_t SrcN>
_CUDA_PIPELINE_QUALIFIER
void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe);
|
|
|
| template<size_t N, typename T>
|
| _CUDA_PIPELINE_QUALIFIER
|
| auto segment(T* ptr) -> T(*)[N]
|
| {
|
| return (T(*)[N])ptr;
|
| }
|
|
|
| _CUDA_PIPELINE_QUALIFIER
|
| pipeline::pipeline()
|
| : current_batch(0)
|
| {
|
| }
|
|
|
| _CUDA_PIPELINE_QUALIFIER
|
| size_t pipeline::commit()
|
| {
|
| _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_commit();
|
| return this->current_batch++;
|
| }
|
|
|
| _CUDA_PIPELINE_QUALIFIER
|
| void pipeline::commit_and_wait()
|
| {
|
| (void)pipeline::commit();
|
| pipeline::wait_prior<0>();
|
| }
|
|
|
| _CUDA_PIPELINE_QUALIFIER
|
| void pipeline::wait(size_t batch)
|
| {
|
| const size_t prior = this->current_batch > batch ? this->current_batch - batch : 0;
|
|
|
| switch (prior) {
|
| case 0 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<0>(); break;
|
| case 1 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<1>(); break;
|
| case 2 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<2>(); break;
|
| case 3 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<3>(); break;
|
| case 4 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<4>(); break;
|
| case 5 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<5>(); break;
|
| case 6 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<6>(); break;
|
| case 7 : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<7>(); break;
|
| default : _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<8>(); break;
|
| }
|
| }
|
|
|
// Blocks until all but (at most) the N most recently committed batches have
// completed; forwards directly to the underlying pipeline primitive.
template<unsigned N>
_CUDA_PIPELINE_QUALIFIER
void pipeline::wait_prior()
{
    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_wait_prior<N>();
}
|
|
|
# if defined(_CUDA_PIPELINE_ARCH_700_OR_LATER)
// Associates the pipeline's outstanding copies with an arrival on the given
// awbarrier, via the raw 64-bit barrier word it wraps.
_CUDA_PIPELINE_QUALIFIER
void pipeline::arrive_on(awbarrier& barrier)
{
    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(&barrier.barrier);
}

// Overload for libcudacxx block-scope barriers (cuda::barrier).
_CUDA_PIPELINE_QUALIFIER
void pipeline::arrive_on(cuda::__block_scope_barrier_base & barrier)
{
    // NOTE(review): assumes the barrier object's storage begins with the
    // 64-bit hardware barrier word, matching the inline ABI namespace
    // forward-declared above -- verify against the libcudacxx layout when
    // the assumed ABI version changes.
    _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_arrive_on(reinterpret_cast<uint64_t *>(&barrier));
}
# endif
|
|
|
| template<class T>
|
| _CUDA_PIPELINE_QUALIFIER
|
| void memcpy_async(T& dst, const T& src, pipeline& pipe)
|
| {
|
| _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&src) & (alignof(T) - 1)));
|
| _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(&dst) & (alignof(T) - 1)));
|
|
|
| if (__is_trivially_copyable(T)) {
|
| _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_relaxed<sizeof(T), alignof(T)>(
|
| reinterpret_cast<void*>(&dst), reinterpret_cast<const void*>(&src));
|
| } else {
|
| dst = src;
|
| }
|
| }
|
|
|
| template<class T, size_t DstN, size_t SrcN>
|
| _CUDA_PIPELINE_QUALIFIER
|
| void memcpy_async(T(*dst)[DstN], const T(*src)[SrcN], pipeline& pipe)
|
| {
|
| constexpr size_t dst_size = sizeof(*dst);
|
| constexpr size_t src_size = sizeof(*src);
|
| static_assert(dst_size == 4 || dst_size == 8 || dst_size == 16, "Unsupported copy size.");
|
| static_assert(src_size <= dst_size, "Source size must be less than or equal to destination size.");
|
| _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(src) & (dst_size - 1)));
|
| _CUDA_PIPELINE_ASSERT(!(reinterpret_cast<uintptr_t>(dst) & (dst_size - 1)));
|
|
|
| if (__is_trivially_copyable(T)) {
|
| _CUDA_PIPELINE_INTERNAL_NAMESPACE::pipeline_copy_strict<sizeof(*dst), sizeof(*src)>(
|
| reinterpret_cast<void*>(*dst), reinterpret_cast<const void*>(*src));
|
| } else {
|
| for (size_t i = 0; i < DstN; ++i) {
|
| (*dst)[i] = (i < SrcN) ? (*src)[i] : T();
|
| }
|
| }
|
| }
|
|
|
| _CUDA_PIPELINE_END_NAMESPACE
|
|
|
| #endif
|
|
|