diff --git a/.gitattributes b/.gitattributes index 00c301bd890c67e869caaa4c00cb2237a1034765..8df93e4cca7d7f533fcd7581258311ca666b739d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -95,3 +95,4 @@ miniCUDA124/bin/nvprof.exe filter=lfs diff=lfs merge=lfs -text miniCUDA124/bin/npps64_12.dll filter=lfs diff=lfs merge=lfs -text miniCUDA124/bin/nvrtc64_120_0.dll filter=lfs diff=lfs merge=lfs -text miniCUDA124/bin/nvrtc-builtins64_124.dll filter=lfs diff=lfs merge=lfs -text +miniCUDA124/bin/nvjpeg64_12.dll filter=lfs diff=lfs merge=lfs -text diff --git a/miniCUDA124/bin/nvjpeg64_12.dll b/miniCUDA124/bin/nvjpeg64_12.dll new file mode 100644 index 0000000000000000000000000000000000000000..e226f84878a76876bff23ae0b2c9c67603410544 --- /dev/null +++ b/miniCUDA124/bin/nvjpeg64_12.dll @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae60b58ff90f87e2b5002c6ddc3b7eef0538e0537a9011042303e646175ce7af +size 4913152 diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/copy.h b/miniCUDA124/include/thrust/system/cuda/detail/async/copy.h new file mode 100644 index 0000000000000000000000000000000000000000..6c0abd12f0cffb00676a957d0e0b200550d2b395 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/copy.h @@ -0,0 +1,545 @@ +/****************************************************************************** + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +// TODO: Move into system::cuda + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace system { namespace cuda { namespace detail +{ + +// ContiguousIterator input and output iterators +// TriviallyCopyable elements +// Host to device, device to host, device to device +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt, typename Size +> +auto async_copy_n( + FromPolicy& from_exec +, ToPolicy& to_exec +, ForwardIt first +, Size n +, OutputIt output +) -> + typename std::enable_if< + is_indirectly_trivially_relocatable_to::value + , unique_eager_event + >::type +{ + using T = typename iterator_traits::value_type; + + auto const device_alloc = get_async_device_allocator( + select_device_system(from_exec, to_exec) + ); + + using pointer + = typename thrust::detail::allocator_traits:: + template rebind_traits::pointer; + + unique_eager_event e; + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream( + select_device_system(from_exec, to_exec) + ); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(from_exec)) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(to_exec)) + ) + ) + ); + } + else + { + e = make_dependent_event( + std::tuple_cat( + extract_dependencies( + std::move(thrust::detail::derived_cast(from_exec)) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(to_exec)) + ) + ) + ); + } + + // Run copy. 
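+  // Both iterators are contiguous and the value type is trivially
+  // relocatable, so the whole copy lowers to one cudaMemcpyAsync below;
+  // `direction_of_copy` maps the (from, to) policy pair to the matching
+  // cudaMemcpyKind, and the event `e` keeps the stream and the extracted
+  // dependencies alive until the transfer completes.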
+ + thrust::cuda_cub::throw_on_error( + cudaMemcpyAsync( + thrust::raw_pointer_cast(&*output) + , thrust::raw_pointer_cast(&*first) + , sizeof(T) * n + , direction_of_copy(from_exec, to_exec) + , e.stream().native_handle() + ) + , "after copy launch" + ); + + return e; +} + +// Non-ContiguousIterator input or output, or non-TriviallyRelocatable value type +// Device to device +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt, typename Size +> +auto async_copy_n( + thrust::cuda::execution_policy& from_exec +, thrust::cuda::execution_policy& to_exec +, ForwardIt first +, Size n +, OutputIt output +) -> + typename std::enable_if< + conjunction< + negation< + is_indirectly_trivially_relocatable_to + > + , decltype(is_device_to_device_copy(from_exec, to_exec)) + >::value + , unique_eager_event + >::type +{ + using T = typename iterator_traits::value_type; + + return async_transform_n( + select_device_system(from_exec, to_exec) + , first, n, output, thrust::identity() + ); +} + +template +void async_copy_n_compile_failure_no_cuda_to_non_contiguous_output() +{ + THRUST_STATIC_ASSERT_MSG( + (negation>::value) + , "copying to non-ContiguousIterators in another system from the CUDA system " + "is not supported; use `THRUST_PROCLAIM_CONTIGUOUS_ITERATOR(Iterator)` to " + "indicate that an iterator points to elements that are contiguous in memory." + ); +} + +// Non-ContiguousIterator output iterator +// TriviallyRelocatable value type +// Device to host, host to device +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt, typename Size +> +auto async_copy_n( + FromPolicy& from_exec +, ToPolicy& to_exec +, ForwardIt first +, Size n +, OutputIt output +) -> + typename std::enable_if< + conjunction< + negation> + , is_trivially_relocatable_to< + typename iterator_traits::value_type + , typename iterator_traits::value_type + > + , disjunction< + decltype(is_host_to_device_copy(from_exec, to_exec)) + , decltype(is_device_to_host_copy(from_exec, to_exec)) + > + >::value + , unique_eager_event + >::type +{ + async_copy_n_compile_failure_no_cuda_to_non_contiguous_output(); + + return {}; +} + +// Workaround for MSVC's lack of expression SFINAE and also for an NVCC bug. +// In NVCC, when two SFINAE-enabled overloads are only distinguishable by a +// part of a SFINAE condition that is in a `decltype`, NVCC thinks they are the +// same overload and emits an error. 
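+// The trait below is the workaround: the offending `decltype` is hoisted into
+// a defaulted template parameter (`IsH2DCopy`), so the overload is gated on a
+// distinct named trait class rather than on a `decltype` inside its own
+// SFINAE condition.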
+template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt + // MSVC2015 WAR: doesn't like decltype(...)::value in superclass definition +, typename IsH2DCopy = decltype(is_host_to_device_copy( + std::declval() + , std::declval())) +> +struct is_buffered_trivially_relocatable_host_to_device_copy + : thrust::integral_constant< + bool + , !is_contiguous_iterator::value + && is_contiguous_iterator::value + && is_trivially_relocatable_to< + typename iterator_traits::value_type + , typename iterator_traits::value_type + >::value + && IsH2DCopy::value + > +{}; + +// Non-ContiguousIterator input iterator, ContiguousIterator output iterator +// TriviallyRelocatable value type +// Host to device +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt, typename Size +> +auto async_copy_n( + FromPolicy& from_exec +, thrust::cuda::execution_policy& to_exec +, ForwardIt first +, Size n +, OutputIt output +) -> + typename std::enable_if< + is_buffered_trivially_relocatable_host_to_device_copy< + FromPolicy + , thrust::cuda::execution_policy + , ForwardIt, OutputIt + >::value + , unique_eager_event + >::type +{ + using T = typename iterator_traits::value_type; + + auto const host_alloc = get_async_host_allocator( + from_exec + ); + + // Create host-side buffer. + + auto buffer = uninitialized_allocate_unique_n(host_alloc, n); + + auto const buffer_ptr = buffer.get(); + + // Copy into host-side buffer. + + // TODO: Switch to an async call once we have async interfaces for host + // systems and support for cross system dependencies. + uninitialized_copy_n(from_exec, first, n, buffer_ptr); + + // Run device-side copy. + + auto new_to_exec = thrust::detail::derived_cast(to_exec).rebind_after( + std::tuple_cat( + std::make_tuple( + std::move(buffer) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(from_exec)) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(to_exec)) + ) + ) + ); + + THRUST_STATIC_ASSERT(( + std::tuple_size::value + 1 + <= + std::tuple_size::value + )); + + return async_copy_n( + from_exec + // TODO: We have to cast back to the right execution_policy class. Ideally, + // we should be moving here. + , new_to_exec + , buffer_ptr + , n + , output + ); +} + +// Workaround for MSVC's lack of expression SFINAE and also for an NVCC bug. +// In NVCC, when two SFINAE-enabled overloads are only distinguishable by a +// part of a SFINAE condition that is in a `decltype`, NVCC thinks they are the +// same overload and emits an error. 
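+// Same workaround as above, this time for the device-to-host direction.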
+template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt + // MSVC2015 WAR: doesn't like decltype(...)::value in superclass definition +, typename IsD2HCopy = decltype(is_device_to_host_copy( + std::declval() + , std::declval())) +> +struct is_buffered_trivially_relocatable_device_to_host_copy + : thrust::integral_constant< + bool + , !is_contiguous_iterator::value + && is_contiguous_iterator::value + && is_trivially_relocatable_to< + typename iterator_traits::value_type + , typename iterator_traits::value_type + >::value + && IsD2HCopy::value + > +{}; + +// Non-ContiguousIterator input iterator, ContiguousIterator output iterator +// TriviallyRelocatable value type +// Device to host +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt, typename Size +> +auto async_copy_n( + thrust::cuda::execution_policy& from_exec +, ToPolicy& to_exec +, ForwardIt first +, Size n +, OutputIt output +) -> + typename std::enable_if< + is_buffered_trivially_relocatable_device_to_host_copy< + thrust::cuda::execution_policy + , ToPolicy + , ForwardIt, OutputIt + >::value + , unique_eager_event + >::type +{ + using T = typename iterator_traits::value_type; + + auto const device_alloc = get_async_device_allocator( + from_exec + ); + + // Create device-side buffer. + + auto buffer = uninitialized_allocate_unique_n(device_alloc, n); + + auto const buffer_ptr = buffer.get(); + + // Run device-side copy. + + auto f0 = async_copy_n( + from_exec + , from_exec + , first + , n + , buffer_ptr + ); + + // Run copy back to host. + + auto new_from_exec = thrust::detail::derived_cast(from_exec).rebind_after( + std::move(buffer) + , std::move(f0) + ); + + THRUST_STATIC_ASSERT(( + std::tuple_size::value + 1 + <= + std::tuple_size::value + )); + + return async_copy_n( + new_from_exec + , to_exec + , buffer_ptr + , n + , output + ); +} + +template +void async_copy_n_compile_failure_non_trivially_relocatable_elements() +{ + THRUST_STATIC_ASSERT_MSG( + (is_trivially_relocatable_to::value) + , "only sequences of TriviallyRelocatable elements can be copied to and from " + "the CUDA system; use `THRUST_PROCLAIM_TRIVIALLY_RELOCATABLE(T)` to " + "indicate that a type can be copied by bitwise (e.g. by `memcpy`)" + ); +} + +// Non-TriviallyRelocatable value type +// Host to device, device to host +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename OutputIt, typename Size +> +auto async_copy_n( + FromPolicy& from_exec +, ToPolicy& to_exec +, ForwardIt first +, Size n +, OutputIt output +) -> + typename std::enable_if< + conjunction< + negation< + is_trivially_relocatable_to< + typename iterator_traits::value_type + , typename iterator_traits::value_type + > + > + , disjunction< + decltype(is_host_to_device_copy(from_exec, to_exec)) + , decltype(is_device_to_host_copy(from_exec, to_exec)) + > + >::value + , unique_eager_event + >::type +{ + // TODO: We could do more here with cudaHostRegister. + + async_copy_n_compile_failure_non_trivially_relocatable_elements< + typename thrust::iterator_traits::value_type + , typename std::add_lvalue_reference< + typename thrust::iterator_traits::value_type + >::type + >(); + + return {}; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. 
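+// These overloads are selected when the `thrust::async::copy` front end
+// dispatches into the CUDA backend. An illustrative call site (vector names
+// are examples, not part of this header; requires <thrust/async/copy.h>):
+//
+//   thrust::device_vector<int> d(1024, 1);
+//   thrust::host_vector<int>   h(1024);
+//   auto ev = thrust::async::copy(thrust::device, thrust::host,
+//                                 d.begin(), d.end(), h.begin());
+//   ev.wait();  // block until the device-to-host copy finishes
+//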
+template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename Sentinel, typename OutputIt +> +auto async_copy( + thrust::cuda::execution_policy& from_exec +, thrust::cpp::execution_policy& to_exec +, ForwardIt first +, Sentinel last +, OutputIt output +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_copy_n( + from_exec, to_exec, first, distance(first, last), output + ) +) + +// ADL entry point. +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename Sentinel, typename OutputIt +> +auto async_copy( + thrust::cpp::execution_policy& from_exec +, thrust::cuda::execution_policy& to_exec +, ForwardIt first +, Sentinel last +, OutputIt output +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_copy_n( + from_exec, to_exec, first, distance(first, last), output + ) +) + +// ADL entry point. +template < + typename FromPolicy, typename ToPolicy +, typename ForwardIt, typename Sentinel, typename OutputIt +> +auto async_copy( + thrust::cuda::execution_policy& from_exec +, thrust::cuda::execution_policy& to_exec +, ForwardIt first +, Sentinel last +, OutputIt output +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_copy_n( + from_exec, to_exec, first, distance(first, last), output + ) +) + +} // cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/customization.h b/miniCUDA124/include/thrust/system/cuda/detail/async/customization.h new file mode 100644 index 0000000000000000000000000000000000000000..73ad89131a44bc86d38cb4e57bf04538ac629fc3 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/customization.h @@ -0,0 +1,135 @@ +/****************************************************************************** + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +// TODO: Move into system::cuda + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +THRUST_NAMESPACE_BEGIN + +namespace system { namespace cuda { namespace detail +{ + +using default_async_host_resource = + thrust::mr::synchronized_pool_resource< + thrust::host_memory_resource + >; + +template +auto get_async_host_allocator( + thrust::detail::execution_policy_base& +) +THRUST_RETURNS( + thrust::mr::stateless_resource_allocator< + thrust::detail::uint8_t, default_async_host_resource + >{} +) + +/////////////////////////////////////////////////////////////////////////////// + +using default_async_device_resource = + thrust::mr::disjoint_synchronized_pool_resource< + thrust::system::cuda::memory_resource + , thrust::mr::new_delete_resource + >; + +template +auto get_async_device_allocator( + thrust::detail::execution_policy_base& +) +THRUST_RETURNS( + thrust::per_device_allocator< + thrust::detail::uint8_t, default_async_device_resource, par_t + >{} +) + +template class BaseSystem> +auto get_async_device_allocator( + thrust::detail::execute_with_allocator& exec +) +THRUST_RETURNS(exec.get_allocator()) + +template class BaseSystem> +auto get_async_device_allocator( + thrust::detail::execute_with_allocator_and_dependencies< + Allocator, BaseSystem + >& exec +) +THRUST_RETURNS(exec.get_allocator()) + +/////////////////////////////////////////////////////////////////////////////// + +using default_async_universal_host_pinned_resource = + thrust::mr::synchronized_pool_resource< + thrust::system::cuda::universal_host_pinned_memory_resource + >; + +template +auto get_async_universal_host_pinned_allocator( + thrust::detail::execution_policy_base& +) +THRUST_RETURNS( + thrust::mr::stateless_resource_allocator< + thrust::detail::uint8_t, default_async_universal_host_pinned_resource + >{} +) + +}}} // namespace system::cuda::detail + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/exclusive_scan.h b/miniCUDA124/include/thrust/system/cuda/detail/async/exclusive_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..e127e1d6c16878bb8759130388696a49cf81767d --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/exclusive_scan.h @@ -0,0 +1,209 @@ +/****************************************************************************** + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include + +#include + +#include + +#include + +// TODO specialize for thrust::plus to use e.g. ExclusiveSum instead of ExcScan +// - Note that thrust::plus<> is transparent, cub::Sum is not. This should be +// fixed in CUB first). +// - Need to check if CUB actually optimizes for sums before putting in effort + +THRUST_NAMESPACE_BEGIN +namespace system +{ +namespace cuda +{ +namespace detail +{ + +template +unique_eager_event +async_exclusive_scan_n(execution_policy& policy, + ForwardIt first, + Size n, + OutputIt out, + InitialValueType init, + BinaryOp op) +{ + using InputValueT = cub::detail::InputValue; + using Dispatch32 = cub::DispatchScan; + using Dispatch64 = cub::DispatchScan; + + InputValueT init_value(init); + + auto const device_alloc = get_async_device_allocator(policy); + unique_eager_event ev; + + // Determine temporary device storage requirements. + cudaError_t status; + size_t tmp_size = 0; + { + THRUST_INDEX_TYPE_DISPATCH2(status, + Dispatch32::Dispatch, + Dispatch64::Dispatch, + n, + (nullptr, + tmp_size, + first, + out, + op, + init_value, + n_fixed, + nullptr)); + thrust::cuda_cub::throw_on_error(status, + "after determining tmp storage " + "requirements for exclusive_scan"); + } + + // Allocate temporary storage. + auto content = uninitialized_allocate_unique_n( + device_alloc, tmp_size + ); + void* const tmp_ptr = raw_pointer_cast(content.get()); + + // Set up stream with dependencies. + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + ev = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content), + unique_stream(nonowning, user_raw_stream) + ), + extract_dependencies(std::move(thrust::detail::derived_cast(policy))))); + } + else + { + ev = make_dependent_event( + std::tuple_cat( + std::make_tuple(std::move(content)), + extract_dependencies(std::move(thrust::detail::derived_cast(policy))))); + } + + // Run scan. 
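+  // This is the second half of CUB's two-phase protocol: the first Dispatch
+  // call (with a null device pointer) only computed `tmp_size`; this one does
+  // the actual scan. THRUST_INDEX_TYPE_DISPATCH2 selects the 32- or 64-bit
+  // dispatcher depending on whether `n` fits in 32 bits, exposing the
+  // narrowed count as `n_fixed`.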
+ { + THRUST_INDEX_TYPE_DISPATCH2(status, + Dispatch32::Dispatch, + Dispatch64::Dispatch, + n, + (tmp_ptr, + tmp_size, + first, + out, + op, + init_value, + n_fixed, + user_raw_stream)); + thrust::cuda_cub::throw_on_error(status, + "after dispatching exclusive_scan kernel"); + } + + return ev; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. +template +auto async_exclusive_scan(execution_policy& policy, + ForwardIt first, + Sentinel&& last, + OutputIt&& out, + InitialValueType &&init, + BinaryOp&& op) +THRUST_RETURNS( + thrust::system::cuda::detail::async_exclusive_scan_n( + policy, + first, + distance(first, THRUST_FWD(last)), + THRUST_FWD(out), + THRUST_FWD(init), + THRUST_FWD(op) + ) +) + +} // namespace cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif // C++14 + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/for_each.h b/miniCUDA124/include/thrust/system/cuda/detail/async/for_each.h new file mode 100644 index 0000000000000000000000000000000000000000..748604b7bd18796366feb2884c9cd65656db4be8 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/for_each.h @@ -0,0 +1,165 @@ + +/****************************************************************************** + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +// TODO: Move into system::cuda + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace system { namespace cuda { namespace detail +{ + +template +struct async_for_each_fn +{ + ForwardIt first; + UnaryFunction f; + + __host__ __device__ + async_for_each_fn(ForwardIt&& first_, UnaryFunction&& f_) + : first(std::move(first_)), f(std::move(f_)) + {} + + template + __host__ __device__ + void operator()(Index idx) + { + f(thrust::raw_reference_cast(first[idx])); + } +}; + +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename UnaryFunction +> +unique_eager_event async_for_each_n( + execution_policy& policy, + ForwardIt first, + Size n, + UnaryFunction func +) { + unique_eager_event e; + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + else + { + e = make_dependent_event( + extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ); + } + + // Run for_each. + + async_for_each_fn wrapped( + std::move(first), std::move(func) + ); + + thrust::cuda_cub::throw_on_error( + thrust::cuda_cub::__parallel_for::parallel_for( + n, std::move(wrapped), e.stream().native_handle() + ) + , "after for_each launch" + ); + + return e; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. +template < + typename DerivedPolicy +, typename ForwardIt, typename Sentinel, typename UnaryFunction +> +auto async_for_each( + execution_policy& policy, + ForwardIt first, + Sentinel last, + UnaryFunction&& func +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_for_each_n( + policy, first, distance(first, last), THRUST_FWD(func) + ) +); + +} // cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/inclusive_scan.h b/miniCUDA124/include/thrust/system/cuda/detail/async/inclusive_scan.h new file mode 100644 index 0000000000000000000000000000000000000000..995037988be03b43219a5f0fd2557231dbf620a9 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/inclusive_scan.h @@ -0,0 +1,202 @@ +/****************************************************************************** + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include + +#include + +#include + +#include + +// TODO specialize for thrust::plus to use e.g. InclusiveSum instead of IncScan +// - Note that thrust::plus<> is transparent, cub::Sum is not. This should be +// fixed in CUB first). +// - Need to check if CUB actually optimizes for sums before putting in effort + +THRUST_NAMESPACE_BEGIN +namespace system +{ +namespace cuda +{ +namespace detail +{ + +template +unique_eager_event +async_inclusive_scan_n(execution_policy& policy, + ForwardIt first, + Size n, + OutputIt out, + BinaryOp op) +{ + using AccumT = typename thrust::iterator_traits::value_type; + using Dispatch32 = cub::DispatchScan; + using Dispatch64 = cub::DispatchScan; + + auto const device_alloc = get_async_device_allocator(policy); + unique_eager_event ev; + + // Determine temporary device storage requirements. + cudaError_t status; + size_t tmp_size = 0; + { + THRUST_INDEX_TYPE_DISPATCH2(status, + Dispatch32::Dispatch, + Dispatch64::Dispatch, + n, + (nullptr, + tmp_size, + first, + out, + op, + cub::NullType{}, + n_fixed, + nullptr)); + thrust::cuda_cub::throw_on_error(status, + "after determining tmp storage " + "requirements for inclusive_scan"); + } + + // Allocate temporary storage. + auto content = uninitialized_allocate_unique_n( + device_alloc, tmp_size + ); + void* const tmp_ptr = raw_pointer_cast(content.get()); + + // Set up stream with dependencies. 
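+  // Moving `content` into the dependent event transfers ownership of the
+  // temporary storage to the event, so the allocation stays alive until the
+  // scan kernels that use it have finished on the chosen stream.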
+ cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + ev = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content), + unique_stream(nonowning, user_raw_stream) + ), + extract_dependencies(std::move(thrust::detail::derived_cast(policy))))); + } + else + { + ev = make_dependent_event( + std::tuple_cat( + std::make_tuple(std::move(content)), + extract_dependencies(std::move(thrust::detail::derived_cast(policy))))); + } + + // Run scan. + { + THRUST_INDEX_TYPE_DISPATCH2(status, + Dispatch32::Dispatch, + Dispatch64::Dispatch, + n, + (tmp_ptr, + tmp_size, + first, + out, + op, + cub::NullType{}, + n_fixed, + user_raw_stream)); + thrust::cuda_cub::throw_on_error(status, + "after dispatching inclusive_scan kernel"); + } + + return ev; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. +template +auto async_inclusive_scan(execution_policy& policy, + ForwardIt first, + Sentinel&& last, + OutputIt&& out, + BinaryOp&& op) +THRUST_RETURNS( + thrust::system::cuda::detail::async_inclusive_scan_n( + policy, + first, + distance(first, THRUST_FWD(last)), + THRUST_FWD(out), + THRUST_FWD(op) + ) +) + +} // namespace cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif // C++14 + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/reduce.h b/miniCUDA124/include/thrust/system/cuda/detail/async/reduce.h new file mode 100644 index 0000000000000000000000000000000000000000..2d83e7e5f91d0d28aafde0af7b69c8aea0377ec5 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/reduce.h @@ -0,0 +1,351 @@ +/****************************************************************************** + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +// TODO: Optimize for thrust::plus + +// TODO: Move into system::cuda + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace system { namespace cuda { namespace detail +{ + +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename T, typename BinaryOp +> +unique_eager_future> async_reduce_n( + execution_policy& policy +, ForwardIt first +, Size n +, T init +, BinaryOp op +) { + using U = remove_cvref_t; + + auto const device_alloc = get_async_device_allocator(policy); + + using pointer + = typename thrust::detail::allocator_traits:: + template rebind_traits::pointer; + + unique_eager_future_promise_pair fp; + + // Determine temporary device storage requirements. + + size_t tmp_size = 0; + thrust::cuda_cub::throw_on_error( + cub::DeviceReduce::Reduce( + nullptr + , tmp_size + , first + , static_cast(nullptr) + , n + , op + , init + , nullptr // Null stream, just for sizing. + ) + , "after reduction sizing" + ); + + // Allocate temporary storage. + + auto content = uninitialized_allocate_unique_n( + device_alloc, sizeof(U) + tmp_size + ); + + // The array was dynamically allocated, so we assume that it's suitably + // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator` + // make this guarantee. + auto const content_ptr = content.get(); + U* const ret_ptr = thrust::detail::aligned_reinterpret_cast( + raw_pointer_cast(content_ptr) + ); + void* const tmp_ptr = static_cast( + raw_pointer_cast(content_ptr + sizeof(U)) + ); + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + fp = make_dependent_future( + [] (decltype(content) const& c) + { + return pointer( + thrust::detail::aligned_reinterpret_cast( + raw_pointer_cast(c.get()) + ) + ); + } + , std::tuple_cat( + std::make_tuple( + std::move(content) + , unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + else + { + fp = make_dependent_future( + [] (decltype(content) const& c) + { + return pointer( + thrust::detail::aligned_reinterpret_cast( + raw_pointer_cast(c.get()) + ) + ); + } + , std::tuple_cat( + std::make_tuple( + std::move(content) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + + // Run reduction. + + thrust::cuda_cub::throw_on_error( + cub::DeviceReduce::Reduce( + tmp_ptr + , tmp_size + , first + , ret_ptr + , n + , op + , init + , fp.future.stream().native_handle() + ) + , "after reduction launch" + ); + + return std::move(fp.future); +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. 
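+// An illustrative call through the public front end (names are examples, not
+// part of this header; requires <thrust/async/reduce.h>):
+//
+//   thrust::device_vector<float> v(1024, 1.0f);
+//   auto fut = thrust::async::reduce(thrust::device, v.begin(), v.end(),
+//                                    0.0f, thrust::plus<float>());
+//   float sum = fut.get();  // blocks until the reduction completes
+//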
+template < + typename DerivedPolicy +, typename ForwardIt, typename Sentinel, typename T, typename BinaryOp +> +auto async_reduce( + execution_policy& policy +, ForwardIt first +, Sentinel last +, T init +, BinaryOp op +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_reduce_n( + policy, first, distance(first, last), init, op + ) +) + +} // cuda_cub + +/////////////////////////////////////////////////////////////////////////////// + +namespace system { namespace cuda { namespace detail +{ + +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename OutputIt +, typename T, typename BinaryOp +> +unique_eager_event async_reduce_into_n( + execution_policy& policy +, ForwardIt first +, Size n +, OutputIt output +, T init +, BinaryOp op +) { + using U = remove_cvref_t; + + auto const device_alloc = get_async_device_allocator(policy); + + unique_eager_event e; + + // Determine temporary device storage requirements. + + size_t tmp_size = 0; + thrust::cuda_cub::throw_on_error( + cub::DeviceReduce::Reduce( + nullptr + , tmp_size + , first + , static_cast(nullptr) + , n + , op + , init + , nullptr // Null stream, just for sizing. + ) + , "after reduction sizing" + ); + + // Allocate temporary storage. + + auto content = uninitialized_allocate_unique_n( + device_alloc, tmp_size + ); + + // The array was dynamically allocated, so we assume that it's suitably + // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator` + // make this guarantee. + auto const content_ptr = content.get(); + + void* const tmp_ptr = static_cast( + raw_pointer_cast(content_ptr) + ); + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content) + , unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + else + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + + // Run reduction. + + thrust::cuda_cub::throw_on_error( + cub::DeviceReduce::Reduce( + tmp_ptr + , tmp_size + , first + , output + , n + , op + , init + , e.stream().native_handle() + ) + , "after reduction launch" + ); + + return e; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. +template < + typename DerivedPolicy +, typename ForwardIt, typename Sentinel, typename OutputIt +, typename T, typename BinaryOp +> +auto async_reduce_into( + execution_policy& policy +, ForwardIt first +, Sentinel last +, OutputIt output +, T init +, BinaryOp op +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_reduce_into_n( + policy, first, distance(first, last), output, init, op + ) +) + +} // cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/scan.h b/miniCUDA124/include/thrust/system/cuda/detail/async/scan.h new file mode 100644 index 0000000000000000000000000000000000000000..804ccd0231a4af8c261d47fb4c5cc1e0baae58c9 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/scan.h @@ -0,0 +1,43 @@ +/****************************************************************************** + * Copyright (c) 2020, NVIDIA CORPORATION. 
All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+
+#pragma once
+
+#include <thrust/detail/config.h>
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+# pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+# pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+# pragma system_header
+#endif // no system header
+
+#include <thrust/detail/cpp14_required.h>
+
+#include <thrust/system/cuda/detail/async/exclusive_scan.h>
+#include <thrust/system/cuda/detail/async/inclusive_scan.h>
diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/sort.h b/miniCUDA124/include/thrust/system/cuda/detail/async/sort.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ff75212dcf00eeea3cc888f3b22fbd96be2c6ab
--- /dev/null
+++ b/miniCUDA124/include/thrust/system/cuda/detail/async/sort.h
@@ -0,0 +1,525 @@
+/******************************************************************************
+ * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +// TODO: Move into system::cuda + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace system { namespace cuda { namespace detail +{ + +// Non-ContiguousIterator input and output iterators +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename StrictWeakOrdering +> +auto async_stable_sort_n( + execution_policy& policy, + ForwardIt first, + Size n, + StrictWeakOrdering comp +) -> + typename std::enable_if< + negation>::value + , unique_eager_event + >::type +{ + using T = typename iterator_traits::value_type; + + auto const device_alloc = get_async_device_allocator(policy); + + // Create device-side buffer. + + // FIXME: Combine this temporary allocation with the main one for CUB. + auto device_buffer = uninitialized_allocate_unique_n(device_alloc, n); + + auto const device_buffer_ptr = device_buffer.get(); + + // Synthesize a suitable new execution policy, because we don't want to + // try and extract twice from the one we were passed. + typename remove_cvref_t::tag_type tag_policy{}; + + // Copy from the input into the buffer. + + auto new_policy0 = thrust::detail::derived_cast(policy).rebind_after( + std::move(device_buffer) + ); + + THRUST_STATIC_ASSERT(( + std::tuple_size::value + 1 + <= + std::tuple_size::value + )); + + auto f0 = async_copy_n( + new_policy0 + , tag_policy + , first + , n + , device_buffer_ptr + ); + + // Sort the buffer. + + auto new_policy1 = thrust::detail::derived_cast(policy).rebind_after( + std::move(f0) + ); + + THRUST_STATIC_ASSERT(( + std::tuple_size::value + 1 + <= + std::tuple_size::value + )); + + auto f1 = async_sort_n( + new_policy1 + , tag_policy + , device_buffer_ptr + , n + , comp + ); + + // Copy from the buffer into the input. + // FIXME: Combine this with the potential memcpy at the end of the main sort + // routine. 
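+  // Rebinding the policy onto `f1` chains the copy-back after the sort, so
+  // the event returned below transitively depends on the copy-in, the sort,
+  // and the copy-out.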
+ + auto new_policy2 = thrust::detail::derived_cast(policy).rebind_after( + std::move(f1) + ); + + THRUST_STATIC_ASSERT(( + std::tuple_size::value + 1 + <= + std::tuple_size::value + )); + + return async_copy_n( + new_policy2 + , tag_policy + , device_buffer_ptr + , n + , first + ); +} + +// ContiguousIterator iterators +// Non-Scalar value type or user-defined StrictWeakOrdering +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename StrictWeakOrdering +> +auto async_stable_sort_n( + execution_policy& policy, + ForwardIt first, + Size n, + StrictWeakOrdering comp +) -> + typename std::enable_if< + conjunction< + is_contiguous_iterator + , disjunction< + negation< + std::is_scalar< + typename iterator_traits::value_type + > + > + , negation< + is_operator_less_or_greater_function_object + > + > + >::value + , unique_eager_event + >::type +{ + auto const device_alloc = get_async_device_allocator(policy); + + unique_eager_event e; + + // Determine temporary device storage requirements. + + size_t tmp_size = 0; + thrust::cuda_cub::throw_on_error( + thrust::cuda_cub::__merge_sort::doit_step< + /* Sort items? */ std::false_type, /* Stable? */ std::true_type + >( + nullptr + , tmp_size + , first + , static_cast(nullptr) // Items. + , n + , comp + , nullptr // Null stream, just for sizing. + ) + , "after merge sort sizing" + ); + + // Allocate temporary storage. + + auto content = uninitialized_allocate_unique_n( + device_alloc, tmp_size + ); + + // The array was dynamically allocated, so we assume that it's suitably + // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator` + // make this guarantee. + auto const content_ptr = content.get(); + + void* const tmp_ptr = static_cast( + raw_pointer_cast(content_ptr) + ); + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content) + , unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + else + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + + // Run merge sort. + + thrust::cuda_cub::throw_on_error( + thrust::cuda_cub::__merge_sort::doit_step< + /* Sort items? */ std::false_type, /* Stable? */ std::true_type + >( + tmp_ptr + , tmp_size + , first + , static_cast(nullptr) // Items. 
+ , n + , comp + , e.stream().native_handle() + ) + , "after merge sort sizing" + ); + + return e; +} + +template +typename std::enable_if< + is_operator_less_function_object::value +, cudaError_t +>::type +invoke_radix_sort( + cudaStream_t stream +, void* tmp_ptr +, std::size_t& tmp_size +, cub::DoubleBuffer& keys +, Size& n +, StrictWeakOrdering +) +{ + return cub::DeviceRadixSort::SortKeys( + tmp_ptr + , tmp_size + , keys + , n + , 0 + , sizeof(T) * 8 + , stream + ); +} + +template +typename std::enable_if< + is_operator_greater_function_object::value +, cudaError_t +>::type +invoke_radix_sort( + cudaStream_t stream +, void* tmp_ptr +, std::size_t& tmp_size +, cub::DoubleBuffer& keys +, Size& n +, StrictWeakOrdering +) +{ + return cub::DeviceRadixSort::SortKeysDescending( + tmp_ptr + , tmp_size + , keys + , n + , 0 + , sizeof(T) * 8 + , stream + ); +} + +// ContiguousIterator iterators +// Scalar value type +// operator< or operator> +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename StrictWeakOrdering +> +auto async_stable_sort_n( + execution_policy& policy +, ForwardIt first +, Size n +, StrictWeakOrdering comp +) -> + typename std::enable_if< + conjunction< + is_contiguous_iterator + , std::is_scalar< + typename iterator_traits::value_type + > + , is_operator_less_or_greater_function_object + >::value + , unique_eager_event + >::type +{ + using T = typename iterator_traits::value_type; + + auto const device_alloc = get_async_device_allocator(policy); + + unique_eager_event e; + + cub::DoubleBuffer keys( + raw_pointer_cast(&*first), nullptr + ); + + // Determine temporary device storage requirements. + + size_t tmp_size = 0; + thrust::cuda_cub::throw_on_error( + invoke_radix_sort( + nullptr // Null stream, just for sizing. + , nullptr + , tmp_size + , keys + , n + , comp + ) + , "after radix sort sizing" + ); + + // Allocate temporary storage. + + size_t keys_temp_storage = thrust::detail::aligned_storage_size( + sizeof(T) * n, 128 + ); + + auto content = uninitialized_allocate_unique_n( + device_alloc, keys_temp_storage + tmp_size + ); + + // The array was dynamically allocated, so we assume that it's suitably + // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator` + // make this guarantee. + auto const content_ptr = content.get(); + + keys.d_buffers[1] = thrust::detail::aligned_reinterpret_cast( + raw_pointer_cast(content_ptr) + ); + + void* const tmp_ptr = static_cast( + raw_pointer_cast(content_ptr + keys_temp_storage) + ); + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content) + , unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + else + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + std::move(content) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + + // Run radix sort. 
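+  // cub::DeviceRadixSort ping-pongs between the two buffers in `keys`; after
+  // the run, `keys.selector` names the buffer that holds the sorted data. If
+  // it landed in the temporary buffer (selector != 0), the code below copies
+  // it back into the user's range.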
+ + thrust::cuda_cub::throw_on_error( + invoke_radix_sort( + e.stream().native_handle() + , tmp_ptr + , tmp_size + , keys + , n + , comp + ) + , "after radix sort launch" + ); + + if (0 != keys.selector) + { + auto new_policy0 = thrust::detail::derived_cast(policy).rebind_after( + std::move(e) + ); + + THRUST_STATIC_ASSERT(( + std::tuple_size::value + 1 + <= + std::tuple_size::value + )); + + // Synthesize a suitable new execution policy, because we don't want to + // try and extract twice from the one we were passed. + typename remove_cvref_t::tag_type tag_policy{}; + + using return_future = decltype(e); + return return_future(async_copy_n( + new_policy0 + , tag_policy + , keys.d_buffers[1] + , n + , keys.d_buffers[0] + )); + } + else + return e; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. +template < + typename DerivedPolicy +, typename ForwardIt, typename Sentinel, typename StrictWeakOrdering +> +auto async_stable_sort( + execution_policy& policy, + ForwardIt first, + Sentinel last, + StrictWeakOrdering comp +) +// A GCC 5 bug requires an explicit trailing return type here, so stick with +// THRUST_DECLTYPE_RETURNS for now. +THRUST_DECLTYPE_RETURNS( + thrust::system::cuda::detail::async_stable_sort_n( + policy, first, distance(first, last), comp + ) +) + +} // cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/async/transform.h b/miniCUDA124/include/thrust/system/cuda/detail/async/transform.h new file mode 100644 index 0000000000000000000000000000000000000000..a969531f9294c1e657f13db0d1b998478e1b0c16 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/async/transform.h @@ -0,0 +1,169 @@ +/****************************************************************************** + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +// TODO: Move into system::cuda + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include + +#if THRUST_CPP_DIALECT >= 2014 + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#include + +#include +#include +#include +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace system { namespace cuda { namespace detail +{ + +template +struct async_transform_fn +{ + ForwardIt first_; + OutputIt output_; + UnaryOperation op_; + + __host__ __device__ + async_transform_fn(ForwardIt&& first, OutputIt&& output, UnaryOperation&& op) + : first_(std::move(first)), output_(std::move(output)), op_(std::move(op)) + {} + + template + __host__ __device__ + void operator()(Index idx) + { + output_[idx] = op_(thrust::raw_reference_cast(first_[idx])); + } +}; + +template < + typename DerivedPolicy +, typename ForwardIt, typename Size, typename OutputIt, typename UnaryOperation +> +unique_eager_event async_transform_n( + execution_policy& policy, + ForwardIt first, + Size n, + OutputIt output, + UnaryOperation op +) { + unique_eager_event e; + + // Set up stream with dependencies. + + cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy); + + if (thrust::cuda_cub::default_stream() != user_raw_stream) + { + e = make_dependent_event( + std::tuple_cat( + std::make_tuple( + unique_stream(nonowning, user_raw_stream) + ) + , extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ) + ); + } + else + { + e = make_dependent_event( + extract_dependencies( + std::move(thrust::detail::derived_cast(policy)) + ) + ); + } + + // Run transform. + + async_transform_fn wrapped( + std::move(first), std::move(output), std::move(op) + ); + + thrust::cuda_cub::throw_on_error( + thrust::cuda_cub::__parallel_for::parallel_for( + n, std::move(wrapped), e.stream().native_handle() + ) + , "after transform launch" + ); + + return e; +} + +}}} // namespace system::cuda::detail + +namespace cuda_cub +{ + +// ADL entry point. +template < + typename DerivedPolicy +, typename ForwardIt, typename Sentinel, typename OutputIt +, typename UnaryOperation +> +auto async_transform( + execution_policy& policy, + ForwardIt first, + Sentinel last, + OutputIt output, + UnaryOperation&& op +) +THRUST_RETURNS( + thrust::system::cuda::detail::async_transform_n( + policy, first, distance(first, last), output, THRUST_FWD(op) + ) +); + +} // cuda_cub + +THRUST_NAMESPACE_END + +#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC + +#endif + diff --git a/miniCUDA124/include/thrust/system/cuda/detail/core/agent_launcher.h b/miniCUDA124/include/thrust/system/cuda/detail/core/agent_launcher.h new file mode 100644 index 0000000000000000000000000000000000000000..5e43782ba6f42ec62583dabb50b5df79dbb512e8 --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/core/agent_launcher.h @@ -0,0 +1,1172 @@ +/****************************************************************************** + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC +#include +#include +#include + +#include + +#include + +/** + * @def THRUST_DISABLE_KERNEL_VISIBILITY_WARNING_SUPPRESSION + * If defined, the default suppression of kernel visibility attribute warning is disabled. + */ +#if !defined(THRUST_DISABLE_KERNEL_VISIBILITY_WARNING_SUPPRESSION) +_CCCL_DIAG_SUPPRESS_GCC("-Wattributes") +_CCCL_DIAG_SUPPRESS_CLANG("-Wattributes") +#if !defined(_CCCL_CUDA_COMPILER_NVHPC) +_CCCL_DIAG_SUPPRESS_NVHPC(attribute_requires_external_linkage) +#endif // !_LIBCUDACXX_COMPILER_NVHPC_CUDA +#endif // !THRUST_DISABLE_KERNEL_VISIBILITY_WARNING_SUPPRESSION + +THRUST_NAMESPACE_BEGIN + +namespace cuda_cub { +namespace core { + +#ifndef THRUST_DETAIL_KERNEL_ATTRIBUTES +#define THRUST_DETAIL_KERNEL_ATTRIBUTES CCCL_DETAIL_KERNEL_ATTRIBUTES +#endif + +#if defined(__CUDA_ARCH__) || defined(_NVHPC_CUDA) +#if 0 + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void + __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(Args... 
args) + { + extern __shared__ char shmem[]; + Agent::entry(args..., shmem); + } +#else + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0) + { + extern __shared__ char shmem[]; + Agent::entry(x0, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + 
_kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, shmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) + { + extern __shared__ char shmem[]; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, shmem); + } +#endif + + //////////////////////////////////////////////////////////// + + +#if 0 + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void + __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, Args... args) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(args..., vshmem); + } +#else + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? 
shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, vshmem); + } + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void __launch_bounds__(Agent::ptx_plan::BLOCK_THREADS) + _kernel_agent_vshmem(char* vshmem, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) + { + extern __shared__ char shmem[]; + vshmem = vshmem == NULL ? shmem : vshmem + blockIdx.x * temp_storage_size::value; + Agent::entry(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE, vshmem); + } +#endif +#else +#if 0 + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(Args... 
args) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*, Args... args) {} +#else + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5, _6) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5, _6, _7) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0,_1,_2,_3, _4, _5, _6, _7, _8) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC, _xD) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB,_xC, _xD, _xE) {} + //////////////////////////////////////////////////////////// + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6, _7) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0,_1,_2,_3, _4, _5, _6, _7, _8) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD) {} + template + THRUST_DETAIL_KERNEL_ATTRIBUTES void _kernel_agent_vshmem(char*,_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE) {} +#endif +#endif + + + template + struct AgentLauncher : Agent + { + core::AgentPlan plan; + size_t count; + cudaStream_t stream; + char const* name; + unsigned int grid; + char* vshmem; + bool has_shmem; + size_t shmem_size; + + 
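+
+    // plan holds the tuning parameters selected for the target PTX version;
+    // grid is derived from count and plan.items_per_tile (or taken from
+    // plan.grid_size when no item count is supplied); vshmem points to
+    // global-memory-backed "virtual" shared memory, indexed per block by
+    // _kernel_agent_vshmem, for the case where the device cannot provide
+    // plan.shared_memory_size bytes of real shared memory; in that case
+    // has_shmem is false and shmem_size is forced to 0.
+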
enum + { + MAX_SHMEM_PER_BLOCK = 48 * 1024, + }; + typedef + typename has_enough_shmem::type has_enough_shmem_t; + typedef + has_enough_shmem shm1; + + template + THRUST_RUNTIME_FUNCTION + AgentLauncher(AgentPlan plan_, + Size count_, + cudaStream_t stream_, + char const* name_) + : plan(plan_), + count((size_t)count_), + stream(stream_), + name(name_), + grid(static_cast((count + plan.items_per_tile - 1) / plan.items_per_tile)), + vshmem(NULL), + has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), + shmem_size(has_shmem ? plan.shared_memory_size : 0) + { + assert(count > 0); + } + + template + THRUST_RUNTIME_FUNCTION + AgentLauncher(AgentPlan plan_, + Size count_, + cudaStream_t stream_, + char* vshmem, + char const* name_) + : plan(plan_), + count((size_t)count_), + stream(stream_), + name(name_), + grid(static_cast((count + plan.items_per_tile - 1) / plan.items_per_tile)), + vshmem(vshmem), + has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), + shmem_size(has_shmem ? plan.shared_memory_size : 0) + { + assert(count > 0); + } + + THRUST_RUNTIME_FUNCTION + AgentLauncher(AgentPlan plan_, + cudaStream_t stream_, + char const* name_) + : plan(plan_), + count(0), + stream(stream_), + name(name_), + grid(plan.grid_size), + vshmem(NULL), + has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), + shmem_size(has_shmem ? plan.shared_memory_size : 0) + { + assert(plan.grid_size > 0); + } + + THRUST_RUNTIME_FUNCTION + AgentLauncher(AgentPlan plan_, + cudaStream_t stream_, + char* vshmem, + char const* name_) + : plan(plan_), + count(0), + stream(stream_), + name(name_), + grid(plan.grid_size), + vshmem(vshmem), + has_shmem((size_t)core::get_max_shared_memory_per_block() >= (size_t)plan.shared_memory_size), + shmem_size(has_shmem ? plan.shared_memory_size : 0) + { + assert(plan.grid_size > 0); + } + +#if 0 + THRUST_RUNTIME_FUNCTION + AgentPlan static get_plan(cudaStream_t s, void* d_ptr = 0) + { + // in separable compilation mode, we have no choice + // but to call kernel to get agent_plan + // otherwise the risk is something may fail + // if user mix & match ptx versions in a separably compiled function + // http://nvbugs/1772071 + // XXX may be it is too string of a requirements, consider relaxing it in + // the future +#ifdef __CUDACC_RDC__ + return core::get_agent_plan(s, d_ptr); +#else + return get_agent_plan(core::get_ptx_version()); +#endif + } + THRUST_RUNTIME_FUNCTION + AgentPlan static get_plan_default() + { + return get_agent_plan(sm_arch<0>::type::ver); + } +#endif + + THRUST_RUNTIME_FUNCTION + typename core::get_plan::type static get_plan(cudaStream_t , void* d_ptr = 0) + { + THRUST_UNUSED_VAR(d_ptr); + return get_agent_plan(core::get_ptx_version()); + } + + THRUST_RUNTIME_FUNCTION + typename core::get_plan::type static get_plan() + { + return get_agent_plan(lowest_supported_sm_arch::ver); + } + + THRUST_RUNTIME_FUNCTION void sync() const + { + CubDebug(cub::detail::DebugSyncStream(stream)); + } + + template + static cuda_optional THRUST_RUNTIME_FUNCTION + max_blocks_per_sm_impl(K k, int block_threads) + { + int occ; + cudaError_t status = cub::MaxSmOccupancy(occ, k, block_threads); + return cuda_optional(status == cudaSuccess ? 
occ : -1, status); + } + + template + cuda_optional THRUST_RUNTIME_FUNCTION + max_sm_occupancy(K k) const + { + return max_blocks_per_sm_impl(k, plan.block_threads); + } + + template + THRUST_RUNTIME_FUNCTION + void print_info(K k) const + { + #if THRUST_DEBUG_SYNC_FLAG + cuda_optional occ = max_sm_occupancy(k); + const int ptx_version = core::get_ptx_version(); + if (count > 0) + { + _CubLog("Invoking %s<<<%u, %d, %d, %lld>>>(), %llu items total, %d items per thread, %d SM occupancy, %d vshmem size, %d ptx_version \n", + name, + grid, + plan.block_threads, + (has_shmem ? (int)plan.shared_memory_size : 0), + (long long)stream, + (long long)count, + plan.items_per_thread, + (int)occ, + (!has_shmem ? (int)plan.shared_memory_size : 0), + (int)ptx_version); + } + else + { + _CubLog("Invoking %s<<<%u, %d, %d, %lld>>>(), %d items per thread, %d SM occupancy, %d vshmem size, %d ptx_version\n", + name, + grid, + plan.block_threads, + (has_shmem ? (int)plan.shared_memory_size : 0), + (long long)stream, + plan.items_per_thread, + (int)occ, + (!has_shmem ? (int)plan.shared_memory_size : 0), + (int)ptx_version); + } + #else + (void)k; + #endif + } + + //////////////////// + // Variadic code + //////////////////// + +#if 0 + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + return max_blocks_per_sm_impl(_kernel_agent, plan.block_threads); + } +#else + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0, _1) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + 
static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } + template + static cuda_optional THRUST_RUNTIME_FUNCTION + get_max_blocks_per_sm(AgentPlan plan) + { + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE) = _kernel_agent; + return max_blocks_per_sm_impl(ptr, plan.block_threads); + } +#endif + + + +#if 0 + + // If we are guaranteed to have enough shared memory + // don't compile other kernel which accepts pointer + // and save on compilations + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, Args... args) const + { + assert(has_shmem && vshmem == NULL); + print_info(_kernel_agent); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(_kernel_agent, args...); + } + + // If there is a risk of not having enough shared memory + // we compile generic kernel instead. + // This kernel is likely to be somewhat slower, but it can accomodate + // both shared and virtualized shared memories. + // Alternative option is to compile two kernels, one using shared and one + // using virtualized shared memory. While this can be slightly faster if we + // do actually have enough shared memory, the compilation time will double. + // + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, Args... args) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + print_info(_kernel_agent_vshmem); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(_kernel_agent_vshmem, vshmem, args...); + } + + template + void THRUST_RUNTIME_FUNCTION + launch(Args... 
args) const + { + launch_impl(has_enough_shmem_t(),args...); + sync(); + } +#else + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 
x5, _6 x6, _7 x7, _8 x8) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8) = _kernel_agent_vshmem; + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC,_xD xD) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::false_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9,_xA xA,_xB xB,_xC xC,_xD xD,_xE xE) const + { + assert((has_shmem && vshmem == NULL) || (!has_shmem && vshmem != NULL && shmem_size == 0)); + void (*ptr)(char*, _0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _xA, _xB, _xC, _xD, _xE) = _kernel_agent_vshmem; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, shmem_size, stream) + .doit(ptr, vshmem, x0, x1, x2, x3, x4, x5, x6, 
x7, x8, x9, xA, xB, xC, xD, xE); + } + + //////////////////////////////////////////////////////// + //////////////////////////////////////////////////////// + //////////////////////////////////////////////////////// + + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0, _1) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 
x5, _6 x6, _7 x7, _8 x8, _9 x9) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD); + } + template + void THRUST_RUNTIME_FUNCTION + launch_impl(thrust::detail::true_type, _0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const + { + assert(has_shmem && vshmem == NULL); + void (*ptr)(_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_xA,_xB,_xC,_xD,_xE) = _kernel_agent; + print_info(ptr); + launcher::triple_chevron(grid, plan.block_threads, plan.shared_memory_size, stream) + .doit(ptr,x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE); + } + + //////////////////////////////////////////////////////// + //////////////////////////////////////////////////////// + //////////////////////////////////////////////////////// + + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0) const + { + launch_impl(has_enough_shmem_t(), x0); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1) const + { + launch_impl(has_enough_shmem_t(), x0, x1); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4) const + { + launch_impl(has_enough_shmem_t(), x0, 
x1, x2, x3, x4); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD); + sync(); + } + template + void THRUST_RUNTIME_FUNCTION + launch(_0 x0, _1 x1, _2 x2, _3 x3, _4 x4, _5 x5, _6 x6, _7 x7, _8 x8, _9 x9, _xA xA, _xB xB, _xC xC, _xD xD, _xE xE) const + { + launch_impl(has_enough_shmem_t(), x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, xA, xB, xC, xD, xE); + sync(); + } +#endif + + + }; + +} // namespace core +} // namespace cuda_cub + +THRUST_NAMESPACE_END + +#endif diff --git a/miniCUDA124/include/thrust/system/cuda/detail/core/alignment.h b/miniCUDA124/include/thrust/system/cuda/detail/core/alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..20d297099f4a75fc771382e9f1a22eb2e68ebfba --- /dev/null +++ b/miniCUDA124/include/thrust/system/cuda/detail/core/alignment.h @@ -0,0 +1,258 @@ +/* + * Copyright 2008-2013 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// TODO: This can probably be removed. 
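+// Usage sketch (illustrative only, not from the upstream header): these
+// traits predate <type_traits> and mirror std::alignment_of /
+// std::aligned_storage, e.g.
+//
+//   static_assert(thrust::cuda_cub::alignment_of<double>::value
+//                     == alignof(double), "");
+//   typedef thrust::cuda_cub::aligned_storage<
+//       sizeof(double),
+//       thrust::cuda_cub::alignment_of<double>::value>::type double_storage_t;
+//
+// which is presumably what the TODO above alludes to: the standard traits
+// now cover these uses.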
+
+#pragma once
+
+#include
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+
+#include
+
+THRUST_NAMESPACE_BEGIN
+namespace cuda_cub {
+namespace alignment_of_detail {
+
+
+  template <typename T>
+  class alignment_of_impl;
+
+  template <typename T, std::size_t size_diff>
+  struct helper
+  {
+    static const std::size_t value = size_diff;
+  };
+
+  template <typename T>
+  class helper<T, 0>
+  {
+  public:
+    static const std::size_t value = alignment_of_impl<T>::value;
+  };
+
+  template <typename T>
+  class alignment_of_impl
+  {
+  private:
+    struct big
+    {
+      T x;
+      char c;
+    };
+
+  public:
+    static const std::size_t value = helper<big, sizeof(big) - sizeof(T)>::value;
+  };
+
+
+} // end alignment_of_detail
+
+
+template <typename T>
+struct alignment_of
+    : alignment_of_detail::alignment_of_impl<T>
+{
+};
+
+
+template <std::size_t Align>
+struct aligned_type;
+
+// __align__ is CUDA-specific, so guard it
+#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
+
+// implementing aligned_type portably is tricky:
+
+#if THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC
+// implement aligned_type with specialization because MSVC
+// requires literals as arguments to declspec(align(n))
+template <>
+struct aligned_type<1>
+{
+  struct __align__(1) type{};
+};
+
+template <>
+struct aligned_type<2>
+{
+  struct __align__(2) type{};
+};
+
+template <>
+struct aligned_type<4>
+{
+  struct __align__(4) type{};
+};
+
+template <>
+struct aligned_type<8>
+{
+  struct __align__(8) type{};
+};
+
+template <>
+struct aligned_type<16>
+{
+  struct __align__(16) type{};
+};
+
+template <>
+struct aligned_type<32>
+{
+  struct __align__(32) type{};
+};
+
+template <>
+struct aligned_type<64>
+{
+  struct __align__(64) type{};
+};
+
+template <>
+struct aligned_type<128>
+{
+  struct __align__(128) type{};
+};
+
+template <>
+struct aligned_type<256>
+{
+  struct __align__(256) type{};
+};
+
+template <>
+struct aligned_type<512>
+{
+  struct __align__(512) type{};
+};
+
+template <>
+struct aligned_type<1024>
+{
+  struct __align__(1024) type{};
+};
+
+template <>
+struct aligned_type<2048>
+{
+  struct __align__(2048) type{};
+};
+
+template <>
+struct aligned_type<4096>
+{
+  struct __align__(4096) type{};
+};
+
+template <>
+struct aligned_type<8192>
+{
+  struct __align__(8192) type{};
+};
+#elif (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) && (THRUST_GCC_VERSION < 40300)
+// implement aligned_type with specialization because gcc 4.2
+// requires literals as arguments to __attribute__(aligned(n))
+template <>
+struct aligned_type<1>
+{
+  struct __align__(1) type{};
+};
+
+template <>
+struct aligned_type<2>
+{
+  struct __align__(2) type{};
+};
+
+template <>
+struct aligned_type<4>
+{
+  struct __align__(4) type{};
+};
+
+template <>
+struct aligned_type<8>
+{
+  struct __align__(8) type{};
+};
+
+template <>
+struct aligned_type<16>
+{
+  struct __align__(16) type{};
+};
+
+template <>
+struct aligned_type<32>
+{
+  struct __align__(32) type{};
+};
+
+template <>
+struct aligned_type<64>
+{
+  struct __align__(64) type{};
+};
+
+template <>
+struct aligned_type<128>
+{
+  struct __align__(128) type{};
+};
+
+#else
+// assume the compiler allows template parameters as
+// arguments to __align__
+template <std::size_t Align>
+struct aligned_type
+{
+  struct __align__(Align) type{};
+};
+#endif // THRUST_HOST_COMPILER
+#else
+template <std::size_t Align>
+struct aligned_type
+{
+  struct type
+  {
+  };
+};
+#endif // THRUST_DEVICE_COMPILER
+
+
+template <std::size_t Len, std::size_t Align>
+struct aligned_storage
+{
+  union type
+  {
+    unsigned char data[Len];
+
+    typename aligned_type<Align>::type align;
+  };
+};
+
+
+} // end cuda_
+
+THRUST_NAMESPACE_END
diff --git a/miniCUDA124/include/thrust/system/cuda/detail/core/triple_chevron_launch.h b/miniCUDA124/include/thrust/system/cuda/detail/core/triple_chevron_launch.h
new file mode 100644
index 0000000000000000000000000000000000000000..97eeabab09fbb9b281336988923ddb3341e24499
--- /dev/null
+++ b/miniCUDA124/include/thrust/system/cuda/detail/core/triple_chevron_launch.h
@@ -0,0 +1,160 @@
+/******************************************************************************
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ******************************************************************************/
+#pragma once
+
+#include
+
+#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC)
+#  pragma GCC system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG)
+#  pragma clang system_header
+#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC)
+#  pragma system_header
+#endif // no system header
+#include
+#include
+
+#include
+
+THRUST_NAMESPACE_BEGIN
+
+namespace cuda_cub {
+namespace launcher {
+
+  struct _CCCL_ATTRIBUTE_HIDDEN triple_chevron
+  {
+    typedef size_t Size;
+    dim3 const grid;
+    dim3 const block;
+    Size const shared_mem;
+    cudaStream_t const stream;
+
+    THRUST_RUNTIME_FUNCTION
+    triple_chevron(dim3 grid_,
+                   dim3 block_,
+                   Size shared_mem_ = 0,
+                   cudaStream_t stream_ = 0)
+        : grid(grid_),
+          block(block_),
+          shared_mem(shared_mem_),
+          stream(stream_) {}
+
+    template <class K, class... Args>
+    cudaError_t __host__
+    doit_host(K k, Args const&... args) const
+    {
+      k<<<grid, block, shared_mem, stream>>>(args...);
+      return cudaPeekAtLastError();
+    }
+
+    template <class Arg>
+    size_t __device__
+    align_up(size_t offset) const
+    {
+      size_t alignment = alignment_of<Arg>::value;
+      return alignment * ((offset + (alignment - 1))/ alignment);
+    }
+
+    size_t __device__ argument_pack_size(size_t size) const { return size; }
+    template <class Arg, class... Args>
+    size_t __device__
+    argument_pack_size(size_t size, Arg const& arg, Args const&... args) const
+    {
+      size = align_up<Arg>(size);
+      return argument_pack_size(size + sizeof(Arg), args...);
+    }
+
+    template <class Arg>
+    size_t __device__ copy_arg(char* buffer, size_t offset, Arg arg) const
+    {
+      offset = align_up<Arg>(offset);
+      for (int i = 0; i != sizeof(Arg); ++i)
+        buffer[offset+i] = *((char*)&arg + i);
+      return offset + sizeof(Arg);
+    }
+
+    __device__
+    void fill_arguments(char*, size_t) const
+    {}
+
+    template <class Arg, class... Args>
+    __device__
+    void fill_arguments(char* buffer,
+                        size_t offset,
+                        Arg const& arg,
+                        Args const& ... args) const
+    {
+      fill_arguments(buffer, copy_arg(buffer, offset, arg), args...);
+    }
+
+#ifdef THRUST_RDC_ENABLED
+    template <class K, class... Args>
+    cudaError_t __device__
+    doit_device(K k, Args const&... args) const
+    {
+      const size_t size = argument_pack_size(0,args...);
+      void *param_buffer = cudaGetParameterBuffer(64,size);
+      fill_arguments((char*)param_buffer, 0, args...);
+      return launch_device(k, param_buffer);
+    }
+
+    template <class K>
+    cudaError_t __device__
+    launch_device(K k, void* buffer) const
+    {
+      return cudaLaunchDevice((void*)k,
+                              buffer,
+                              dim3(grid),
+                              dim3(block),
+                              shared_mem,
+                              stream);
+    }
+#else
+    template <class K, class... Args>
+    cudaError_t __device__
+    doit_device(K, Args const&... ) const
+    {
+      return cudaErrorNotSupported;
+    }
+#endif
+
+    __thrust_exec_check_disable__
+    template <class K, class... Args>
+    THRUST_FUNCTION
+    cudaError_t doit(K k, Args const&... args) const
+    {
+      NV_IF_TARGET(NV_IS_HOST,
+                   (return doit_host(k, args...);),
+                   (return doit_device(k, args...);));
+    }
+
+  }; // struct triple_chevron
+
+} // namespace launcher
+} // namespace cuda_
+
+THRUST_NAMESPACE_END
diff --git a/miniCUDA124/include/thrust/system/cuda/detail/core/util.h b/miniCUDA124/include/thrust/system/cuda/detail/core/util.h
new file mode 100644
index 0000000000000000000000000000000000000000..9857777f6c06c03c0c95802f1d48cff5e3b5521d
--- /dev/null
+++ b/miniCUDA124/include/thrust/system/cuda/detail/core/util.h
@@ -0,0 +1,811 @@
+/******************************************************************************
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ * + ******************************************************************************/ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +THRUST_NAMESPACE_BEGIN + +namespace cuda_cub { +namespace core { + +#ifdef _NVHPC_CUDA +# if (__NVCOMPILER_CUDA_ARCH__ >= 600) +# define THRUST_TUNING_ARCH sm60 +# elif (__NVCOMPILER_CUDA_ARCH__ >= 520) +# define THRUST_TUNING_ARCH sm52 +# elif (__NVCOMPILER_CUDA_ARCH__ >= 350) +# define THRUST_TUNING_ARCH sm35 +# else +# define THRUST_TUNING_ARCH sm30 +# endif +#else +# if (__CUDA_ARCH__ >= 600) +# define THRUST_TUNING_ARCH sm60 +# elif (__CUDA_ARCH__ >= 520) +# define THRUST_TUNING_ARCH sm52 +# elif (__CUDA_ARCH__ >= 350) +# define THRUST_TUNING_ARCH sm35 +# elif (__CUDA_ARCH__ >= 300) +# define THRUST_TUNING_ARCH sm30 +# elif !defined (__CUDA_ARCH__) +# define THRUST_TUNING_ARCH sm30 +# endif +#endif + + // Typelist - a container of types, supports up to 10 types + // -------------------------------------------------------------------------- + + class _; + template + struct typelist; + + // ------------------------------------- + + // supported SM arch + // --------------------- + struct sm30 { enum { ver = 300, warpSize = 32 }; }; + struct sm35 { enum { ver = 350, warpSize = 32 }; }; + struct sm52 { enum { ver = 520, warpSize = 32 }; }; + struct sm60 { enum { ver = 600, warpSize = 32 }; }; + + // list of sm, checked from left to right order + // the rightmost is the lowest sm arch supported + // -------------------------------------------- + typedef typelist sm_list; + + // lowest supported SM arch + // -------------------------------------------------------------------------- + + template + struct lowest_supported_sm_arch_impl; + + template + struct lowest_supported_sm_arch_impl > + : lowest_supported_sm_arch_impl<_0, typelist< _1, _2, _3, _4, _5, _6, _7, _8, _9> > {}; + template + struct lowest_supported_sm_arch_impl > + { + typedef SM type; + }; + + typedef typename lowest_supported_sm_arch_impl<_,sm_list>::type lowest_supported_sm_arch; + + // metafunction to match next viable PtxPlan specialization + // -------------------------------------------------------------------------- + + __THRUST_DEFINE_HAS_NESTED_TYPE(has_tuning_t, tuning) + __THRUST_DEFINE_HAS_NESTED_TYPE(has_type_t, type) + + template