diff --git a/.gitattributes b/.gitattributes index 5b6a09ce85e5abe3676af5d40de4bbe8cd975be4..ac838963a42e315415d8c1d506bbf06947ac1c17 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1708,3 +1708,12 @@ valley/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff vllm/lib/python3.10/site-packages/cupy/_util.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/cupy/cuda/common.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/cupy/cuda/pinned_memory.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/function.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/texture.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/device.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/fft/_cache.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/memory_hook.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h new file mode 100644 index 
0000000000000000000000000000000000000000..ac718acd4fa3188c56c6896126b070ea9db7a174 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/data.h @@ -0,0 +1,14 @@ +#pragma once + +#include +#include +#include +#include + +// Some "exports". +namespace torch { +namespace data { +using datasets::BatchDataset; +using datasets::Dataset; +} // namespace data +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h new file mode 100644 index 0000000000000000000000000000000000000000..debfc6c785856059bf07eccec212ef97833a555f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/enum.h @@ -0,0 +1,212 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#define TORCH_ENUM_DECLARE(name) \ + namespace torch { \ + namespace enumtype { \ + /* \ + NOTE: We need to provide the default constructor for each struct, \ + otherwise Clang 3.8 would complain: \ + ``` \ + error: default initialization of an object of const type 'const \ + enumtype::Enum1' without a user-provided default constructor \ + ``` \ + */ \ + struct k##name { \ + k##name() {} \ + }; \ + } \ + TORCH_API extern const enumtype::k##name k##name; \ + } + +#define TORCH_ENUM_DEFINE(name) \ + namespace torch { \ + const enumtype::k##name k##name; \ + } + +#define TORCH_ENUM_PRETTY_PRINT(name) \ + std::string operator()(const enumtype::k##name& v) const { \ + std::string k("k"); \ + return k + #name; \ + } + +// NOTE: Backstory on why we need the following two macros: +// +// Consider the following options class: +// +// ``` +// struct TORCH_API SomeOptions { +// typedef std::variant +// reduction_t; SomeOptions(reduction_t reduction = torch::kMean) : +// reduction_(reduction) {} +// +// TORCH_ARG(reduction_t, reduction); +// }; +// ``` +// +// 
and the functional that uses it: +// +// ``` +// Tensor some_functional( +// const Tensor& input, +// SomeOptions options = {}) { +// ... +// } +// ``` +// +// Normally, we would expect this to work: +// +// `F::some_functional(input, torch::kNone)` +// +// However, it throws the following error instead: +// +// ``` +// error: could not convert `torch::kNone` from `const torch::enumtype::kNone` +// to `torch::nn::SomeOptions` +// ``` +// +// To get around this problem, we explicitly provide the following constructors +// for `SomeOptions`: +// +// ``` +// SomeOptions(torch::enumtype::kNone reduction) : reduction_(torch::kNone) {} +// SomeOptions(torch::enumtype::kMean reduction) : reduction_(torch::kMean) {} +// SomeOptions(torch::enumtype::kSum reduction) : reduction_(torch::kSum) {} +// ``` +// +// so that the conversion from `torch::kNone` to `SomeOptions` would work. +// +// Note that we also provide the default constructor `SomeOptions() {}`, so that +// `SomeOptions options = {}` can work. 
+#define TORCH_OPTIONS_CTOR_VARIANT_ARG3( \ + OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3) \ + OPTIONS_NAME() = default; \ + OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \ + OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \ + OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} + +#define TORCH_OPTIONS_CTOR_VARIANT_ARG4( \ + OPTIONS_NAME, ARG_NAME, TYPE1, TYPE2, TYPE3, TYPE4) \ + OPTIONS_NAME() = default; \ + OPTIONS_NAME(torch::enumtype::TYPE1 ARG_NAME) : ARG_NAME##_(torch::TYPE1) {} \ + OPTIONS_NAME(torch::enumtype::TYPE2 ARG_NAME) : ARG_NAME##_(torch::TYPE2) {} \ + OPTIONS_NAME(torch::enumtype::TYPE3 ARG_NAME) : ARG_NAME##_(torch::TYPE3) {} \ + OPTIONS_NAME(torch::enumtype::TYPE4 ARG_NAME) : ARG_NAME##_(torch::TYPE4) {} + +TORCH_ENUM_DECLARE(Linear) +TORCH_ENUM_DECLARE(Conv1D) +TORCH_ENUM_DECLARE(Conv2D) +TORCH_ENUM_DECLARE(Conv3D) +TORCH_ENUM_DECLARE(ConvTranspose1D) +TORCH_ENUM_DECLARE(ConvTranspose2D) +TORCH_ENUM_DECLARE(ConvTranspose3D) +TORCH_ENUM_DECLARE(Sigmoid) +TORCH_ENUM_DECLARE(Tanh) +TORCH_ENUM_DECLARE(ReLU) +TORCH_ENUM_DECLARE(GELU) +TORCH_ENUM_DECLARE(SiLU) +TORCH_ENUM_DECLARE(Mish) +TORCH_ENUM_DECLARE(LeakyReLU) +TORCH_ENUM_DECLARE(FanIn) +TORCH_ENUM_DECLARE(FanOut) +TORCH_ENUM_DECLARE(Constant) +TORCH_ENUM_DECLARE(Reflect) +TORCH_ENUM_DECLARE(Replicate) +TORCH_ENUM_DECLARE(Circular) +TORCH_ENUM_DECLARE(Nearest) +TORCH_ENUM_DECLARE(Bilinear) +TORCH_ENUM_DECLARE(Bicubic) +TORCH_ENUM_DECLARE(Trilinear) +TORCH_ENUM_DECLARE(Area) +TORCH_ENUM_DECLARE(NearestExact) +TORCH_ENUM_DECLARE(Sum) +TORCH_ENUM_DECLARE(Mean) +TORCH_ENUM_DECLARE(Max) +TORCH_ENUM_DECLARE(None) +TORCH_ENUM_DECLARE(BatchMean) +TORCH_ENUM_DECLARE(Zeros) +TORCH_ENUM_DECLARE(Border) +TORCH_ENUM_DECLARE(Reflection) +TORCH_ENUM_DECLARE(RNN_TANH) +TORCH_ENUM_DECLARE(RNN_RELU) +TORCH_ENUM_DECLARE(LSTM) +TORCH_ENUM_DECLARE(GRU) +TORCH_ENUM_DECLARE(Valid) +TORCH_ENUM_DECLARE(Same) + +namespace torch { 
+namespace enumtype { + +struct _compute_enum_name { + TORCH_ENUM_PRETTY_PRINT(Linear) + TORCH_ENUM_PRETTY_PRINT(Conv1D) + TORCH_ENUM_PRETTY_PRINT(Conv2D) + TORCH_ENUM_PRETTY_PRINT(Conv3D) + TORCH_ENUM_PRETTY_PRINT(ConvTranspose1D) + TORCH_ENUM_PRETTY_PRINT(ConvTranspose2D) + TORCH_ENUM_PRETTY_PRINT(ConvTranspose3D) + TORCH_ENUM_PRETTY_PRINT(Sigmoid) + TORCH_ENUM_PRETTY_PRINT(Tanh) + TORCH_ENUM_PRETTY_PRINT(ReLU) + TORCH_ENUM_PRETTY_PRINT(GELU) + TORCH_ENUM_PRETTY_PRINT(SiLU) + TORCH_ENUM_PRETTY_PRINT(Mish) + TORCH_ENUM_PRETTY_PRINT(LeakyReLU) + TORCH_ENUM_PRETTY_PRINT(FanIn) + TORCH_ENUM_PRETTY_PRINT(FanOut) + TORCH_ENUM_PRETTY_PRINT(Constant) + TORCH_ENUM_PRETTY_PRINT(Reflect) + TORCH_ENUM_PRETTY_PRINT(Replicate) + TORCH_ENUM_PRETTY_PRINT(Circular) + TORCH_ENUM_PRETTY_PRINT(Nearest) + TORCH_ENUM_PRETTY_PRINT(Bilinear) + TORCH_ENUM_PRETTY_PRINT(Bicubic) + TORCH_ENUM_PRETTY_PRINT(Trilinear) + TORCH_ENUM_PRETTY_PRINT(Area) + TORCH_ENUM_PRETTY_PRINT(NearestExact) + TORCH_ENUM_PRETTY_PRINT(Sum) + TORCH_ENUM_PRETTY_PRINT(Mean) + TORCH_ENUM_PRETTY_PRINT(Max) + TORCH_ENUM_PRETTY_PRINT(None) + TORCH_ENUM_PRETTY_PRINT(BatchMean) + TORCH_ENUM_PRETTY_PRINT(Zeros) + TORCH_ENUM_PRETTY_PRINT(Border) + TORCH_ENUM_PRETTY_PRINT(Reflection) + TORCH_ENUM_PRETTY_PRINT(RNN_TANH) + TORCH_ENUM_PRETTY_PRINT(RNN_RELU) + TORCH_ENUM_PRETTY_PRINT(LSTM) + TORCH_ENUM_PRETTY_PRINT(GRU) + TORCH_ENUM_PRETTY_PRINT(Valid) + TORCH_ENUM_PRETTY_PRINT(Same) +}; + +template +std::string get_enum_name(V variant_enum) { + return std::visit(enumtype::_compute_enum_name{}, variant_enum); +} + +template +at::Reduction::Reduction reduction_get_enum(V variant_enum) { + if (std::holds_alternative(variant_enum)) { + return at::Reduction::None; + } else if (std::holds_alternative(variant_enum)) { + return at::Reduction::Mean; + } else if (std::holds_alternative(variant_enum)) { + return at::Reduction::Sum; + } else { + TORCH_CHECK( + false, + get_enum_name(variant_enum), + " is not a valid value for reduction"); 
+ return at::Reduction::END; + } +} + +} // namespace enumtype +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h new file mode 100644 index 0000000000000000000000000000000000000000..aa4fecf4ff37c35c62d93563c440d10786368abb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/expanding_array.h @@ -0,0 +1,182 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch { + +/// A utility class that accepts either a container of `D`-many values, or a +/// single value, which is internally repeated `D` times. This is useful to +/// represent parameters that are multidimensional, but often equally sized in +/// all dimensions. For example, the kernel size of a 2D convolution has an `x` +/// and `y` length, but `x` and `y` are often equal. In such a case you could +/// just pass `3` to an `ExpandingArray<2>` and it would "expand" to `{3, 3}`. +template +class ExpandingArray { + public: + /// Constructs an `ExpandingArray` from an `initializer_list`. The extent of + /// the length is checked against the `ExpandingArray`'s extent parameter `D` + /// at runtime. + /*implicit*/ ExpandingArray(std::initializer_list list) + : ExpandingArray(at::ArrayRef(list)) {} + + /// Constructs an `ExpandingArray` from an `std::vector`. The extent of + /// the length is checked against the `ExpandingArray`'s extent parameter `D` + /// at runtime. + /*implicit*/ ExpandingArray(std::vector vec) + : ExpandingArray(at::ArrayRef(vec)) {} + + /// Constructs an `ExpandingArray` from an `at::ArrayRef`. The extent of + /// the length is checked against the `ExpandingArray`'s extent parameter `D` + /// at runtime. 
+ /*implicit*/ ExpandingArray(at::ArrayRef values) { + // clang-format off + TORCH_CHECK( + values.size() == D, + "Expected ", D, " values, but instead got ", values.size()); + // clang-format on + std::copy(values.begin(), values.end(), values_.begin()); + } + + /// Constructs an `ExpandingArray` from a single value, which is repeated `D` + /// times (where `D` is the extent parameter of the `ExpandingArray`). + /*implicit*/ ExpandingArray(T single_size) { + values_.fill(single_size); + } + + /// Constructs an `ExpandingArray` from a correctly sized `std::array`. + /*implicit*/ ExpandingArray(const std::array& values) + : values_(values) {} + + /// Accesses the underlying `std::array`. + std::array& operator*() { + return values_; + } + + /// Accesses the underlying `std::array`. + const std::array& operator*() const { + return values_; + } + + /// Accesses the underlying `std::array`. + std::array* operator->() { + return &values_; + } + + /// Accesses the underlying `std::array`. + const std::array* operator->() const { + return &values_; + } + + /// Returns an `ArrayRef` to the underlying `std::array`. + operator at::ArrayRef() const { + return values_; + } + + /// Returns the extent of the `ExpandingArray`. + size_t size() const noexcept { + return D; + } + + protected: + /// The backing array. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::array values_; +}; + +template +std::ostream& operator<<( + std::ostream& stream, + const ExpandingArray& expanding_array) { + if (expanding_array.size() == 1) { + return stream << expanding_array->at(0); + } + return stream << static_cast>(expanding_array); +} + +/// A utility class that accepts either a container of `D`-many +/// `c10::optional` values, or a single `c10::optional` value, which is +/// internally repeated `D` times. It has the additional ability to accept +/// containers of the underlying type `T` and convert them to a container of +/// `c10::optional`. 
+template +class ExpandingArrayWithOptionalElem + : public ExpandingArray> { + public: + using ExpandingArray>::ExpandingArray; + + /// Constructs an `ExpandingArrayWithOptionalElem` from an `initializer_list` + /// of the underlying type `T`. The extent of the length is checked against + /// the `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. + /*implicit*/ ExpandingArrayWithOptionalElem(std::initializer_list list) + : ExpandingArrayWithOptionalElem(at::ArrayRef(list)) {} + + /// Constructs an `ExpandingArrayWithOptionalElem` from an `std::vector` of + /// the underlying type `T`. The extent of the length is checked against the + /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. + /*implicit*/ ExpandingArrayWithOptionalElem(std::vector vec) + : ExpandingArrayWithOptionalElem(at::ArrayRef(vec)) {} + + /// Constructs an `ExpandingArrayWithOptionalElem` from an `at::ArrayRef` of + /// the underlying type `T`. The extent of the length is checked against the + /// `ExpandingArrayWithOptionalElem`'s extent parameter `D` at runtime. + /*implicit*/ ExpandingArrayWithOptionalElem(at::ArrayRef values) + : ExpandingArray>(0) { + // clang-format off + TORCH_CHECK( + values.size() == D, + "Expected ", D, " values, but instead got ", values.size()); + // clang-format on + for (const auto i : c10::irange(this->values_.size())) { + this->values_[i] = values[i]; + } + } + + /// Constructs an `ExpandingArrayWithOptionalElem` from a single value of the + /// underlying type `T`, which is repeated `D` times (where `D` is the extent + /// parameter of the `ExpandingArrayWithOptionalElem`). + /*implicit*/ ExpandingArrayWithOptionalElem(T single_size) + : ExpandingArray>(0) { + for (const auto i : c10::irange(this->values_.size())) { + this->values_[i] = single_size; + } + } + + /// Constructs an `ExpandingArrayWithOptionalElem` from a correctly sized + /// `std::array` of the underlying type `T`. 
+ /*implicit*/ ExpandingArrayWithOptionalElem(const std::array& values) + : ExpandingArray>(0) { + for (const auto i : c10::irange(this->values_.size())) { + this->values_[i] = values[i]; + } + } +}; + +template +std::ostream& operator<<( + std::ostream& stream, + const ExpandingArrayWithOptionalElem& expanding_array_with_opt_elem) { + if (expanding_array_with_opt_elem.size() == 1) { + const auto& elem = expanding_array_with_opt_elem->at(0); + stream << (elem.has_value() ? c10::str(elem.value()) : "None"); + } else { + std::vector str_array; + for (const auto& elem : *expanding_array_with_opt_elem) { + str_array.emplace_back( + elem.has_value() ? c10::str(elem.value()) : "None"); + } + stream << at::ArrayRef(str_array); + } + return stream; +} + +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h new file mode 100644 index 0000000000000000000000000000000000000000..86ab5050a5f7df459660512d4be5bd50a9bb68a1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/fft.h @@ -0,0 +1,389 @@ +#pragma once + +#include + +namespace torch { +namespace fft { + +/// Computes the 1 dimensional fast Fourier transform over a given dimension. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fft. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kComplexDouble); +/// torch::fft::fft(t); +/// ``` +inline Tensor fft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_fft_symint(self, n, dim, norm); +} + +/// Computes the 1 dimensional inverse Fourier transform over a given dimension. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn(128, dtype=kComplexDouble); +/// torch::fft::ifft(t); +/// ``` +inline Tensor ifft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_ifft_symint(self, n, dim, norm); +} + +/// Computes the 2-dimensional fast Fourier transform over the given dimensions. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::fft2(t); +/// ``` +inline Tensor fft2( + const Tensor& self, + OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_fft2(self, s, dim, norm); +} + +/// Computes the inverse of torch.fft.fft2 +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::ifft2(t); +/// ``` +inline Tensor ifft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_ifft2(self, s, dim, norm); +} + +/// Computes the N dimensional fast Fourier transform over given dimensions. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftn. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::fftn(t); +/// ``` +inline Tensor fftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_fftn(self, s, dim, norm); +} + +/// Computes the N dimensional fast Fourier transform over given dimensions. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftn. 
+/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::ifftn(t); +/// ``` +inline Tensor ifftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_ifftn(self, s, dim, norm); +} + +/// Computes the 1 dimensional FFT of real input with onesided Hermitian output. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfft. +/// +/// Example: +/// ``` +/// auto t = torch::randn(128); +/// auto T = torch::fft::rfft(t); +/// assert(T.is_complex() && T.numel() == 128 / 2 + 1); +/// ``` +inline Tensor rfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_rfft_symint(self, n, dim, norm); +} + +/// Computes the inverse of torch.fft.rfft +/// +/// The input is a onesided Hermitian Fourier domain signal, with real-valued +/// output. See https://pytorch.org/docs/master/fft.html#torch.fft.irfft +/// +/// Example: +/// ``` +/// auto T = torch::randn(128 / 2 + 1, torch::kComplexDouble); +/// auto t = torch::fft::irfft(t, /*n=*/128); +/// assert(t.is_floating_point() && T.numel() == 128); +/// ``` +inline Tensor irfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_irfft_symint(self, n, dim, norm); +} + +/// Computes the 2-dimensional FFT of real input. Returns a onesided Hermitian +/// output. See https://pytorch.org/docs/master/fft.html#torch.fft.rfft2 +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kDouble); +/// torch::fft::rfft2(t); +/// ``` +inline Tensor rfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_rfft2(self, s, dim, norm); +} + +/// Computes the inverse of torch.fft.rfft2. 
+/// See https://pytorch.org/docs/master/fft.html#torch.fft.irfft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::irfft2(t); +/// ``` +inline Tensor irfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_irfft2(self, s, dim, norm); +} + +/// Computes the N dimensional FFT of real input with onesided Hermitian output. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftn +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kDouble); +/// torch::fft::rfftn(t); +/// ``` +inline Tensor rfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_rfftn(self, s, dim, norm); +} + +/// Computes the inverse of torch.fft.rfftn. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.irfftn. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 128}, dtype=kComplexDouble); +/// torch::fft::irfftn(t); +/// ``` +inline Tensor irfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + at::OptionalIntArrayRef dim = c10::nullopt, + c10::optional norm = c10::nullopt) { + return torch::fft_irfftn(self, s, dim, norm); +} + +/// Computes the 1 dimensional FFT of a onesided Hermitian signal +/// +/// The input represents a Hermitian symmetric time domain signal. The returned +/// Fourier domain representation of such a signal is a real-valued. 
See +/// https://pytorch.org/docs/master/fft.html#torch.fft.hfft +/// +/// Example: +/// ``` +/// auto t = torch::randn(128 / 2 + 1, torch::kComplexDouble); +/// auto T = torch::fft::hfft(t, /*n=*/128); +/// assert(T.is_floating_point() && T.numel() == 128); +/// ``` +inline Tensor hfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_hfft_symint(self, n, dim, norm); +} + +/// Computes the inverse FFT of a real-valued Fourier domain signal. +/// +/// The output is a onesided representation of the Hermitian symmetric time +/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.ihfft. +/// +/// Example: +/// ``` +/// auto T = torch::randn(128, torch::kDouble); +/// auto t = torch::fft::ihfft(T); +/// assert(t.is_complex() && T.numel() == 128 / 2 + 1); +/// ``` +inline Tensor ihfft( + const Tensor& self, + c10::optional n = c10::nullopt, + int64_t dim = -1, + c10::optional norm = c10::nullopt) { + return torch::fft_ihfft_symint(self, n, dim, norm); +} + +/// Computes the 2-dimensional FFT of a Hermitian symmetric input signal. +/// +/// The input is a onesided representation of the Hermitian symmetric time +/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfft2. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 65}, torch::kComplexDouble); +/// auto T = torch::fft::hfft2(t, /*s=*/{128, 128}); +/// assert(T.is_floating_point() && T.numel() == 128 * 128); +/// ``` +inline Tensor hfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_hfft2(self, s, dim, norm); +} + +/// Computes the 2-dimensional IFFT of a real input signal. +/// +/// The output is a onesided representation of the Hermitian symmetric time +/// domain signal. See +/// https://pytorch.org/docs/master/fft.html#torch.fft.ihfft2. 
+/// +/// Example: +/// ``` +/// auto T = torch::randn({128, 128}, torch::kDouble); +/// auto t = torch::fft::hfft2(T); +/// assert(t.is_complex() && t.size(1) == 65); +/// ``` +inline Tensor ihfft2( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_ihfft2(self, s, dim, norm); +} + +/// Computes the N-dimensional FFT of a Hermitian symmetric input signal. +/// +/// The input is a onesided representation of the Hermitian symmetric time +/// domain signal. See https://pytorch.org/docs/master/fft.html#torch.fft.hfftn. +/// +/// Example: +/// ``` +/// auto t = torch::randn({128, 65}, torch::kComplexDouble); +/// auto T = torch::fft::hfftn(t, /*s=*/{128, 128}); +/// assert(T.is_floating_point() && T.numel() == 128 * 128); +/// ``` +inline Tensor hfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_hfftn(self, s, dim, norm); +} + +/// Computes the N-dimensional IFFT of a real input signal. +/// +/// The output is a onesided representation of the Hermitian symmetric time +/// domain signal. See +/// https://pytorch.org/docs/master/fft.html#torch.fft.ihfftn. +/// +/// Example: +/// ``` +/// auto T = torch::randn({128, 128}, torch::kDouble); +/// auto t = torch::fft::hfft2(T); +/// assert(t.is_complex() && t.size(1) == 65); +/// ``` +inline Tensor ihfftn( + const Tensor& self, + at::OptionalIntArrayRef s = c10::nullopt, + IntArrayRef dim = {-2, -1}, + c10::optional norm = c10::nullopt) { + return torch::fft_ihfftn(self, s, dim, norm); +} + +/// Computes the discrete Fourier Transform sample frequencies for a signal of +/// size n. 
+/// +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftfreq +/// +/// Example: +/// ``` +/// auto frequencies = torch::fft::fftfreq(128, torch::kDouble); +/// ``` +inline Tensor fftfreq(int64_t n, double d, const TensorOptions& options = {}) { + return torch::fft_fftfreq(n, d, options); +} + +inline Tensor fftfreq(int64_t n, const TensorOptions& options = {}) { + return torch::fft_fftfreq(n, /*d=*/1.0, options); +} + +/// Computes the sample frequencies for torch.fft.rfft with a signal of size n. +/// +/// Like torch.fft.rfft, only the positive frequencies are included. +/// See https://pytorch.org/docs/master/fft.html#torch.fft.rfftfreq +/// +/// Example: +/// ``` +/// auto frequencies = torch::fft::rfftfreq(128, torch::kDouble); +/// ``` +inline Tensor rfftfreq(int64_t n, double d, const TensorOptions& options) { + return torch::fft_rfftfreq(n, d, options); +} + +inline Tensor rfftfreq(int64_t n, const TensorOptions& options) { + return torch::fft_rfftfreq(n, /*d=*/1.0, options); +} + +/// Reorders n-dimensional FFT output to have negative frequency terms first, by +/// a torch.roll operation. 
+/// +/// See https://pytorch.org/docs/master/fft.html#torch.fft.fftshift +/// +/// Example: +/// ``` +/// auto x = torch::randn({127, 4}); +/// auto centred_fft = torch::fft::fftshift(torch::fft::fftn(x)); +/// ``` +inline Tensor fftshift( + const Tensor& x, + at::OptionalIntArrayRef dim = c10::nullopt) { + return torch::fft_fftshift(x, dim); +} + +/// Inverse of torch.fft.fftshift +/// +/// See https://pytorch.org/docs/master/fft.html#torch.fft.ifftshift +/// +/// Example: +/// ``` +/// auto x = torch::randn({127, 4}); +/// auto shift = torch::fft::fftshift(x) +/// auto unshift = torch::fft::ifftshift(shift); +/// assert(torch::allclose(x, unshift)); +/// ``` +inline Tensor ifftshift( + const Tensor& x, + at::OptionalIntArrayRef dim = c10::nullopt) { + return torch::fft_ifftshift(x, dim); +} + +} // namespace fft +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h new file mode 100644 index 0000000000000000000000000000000000000000..3dd59c9f12f87794faf5f03c1aaf95a516e2aa99 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/linalg.h @@ -0,0 +1,1065 @@ +#pragma once + +#include + +namespace torch { +namespace linalg { + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +namespace detail { + +inline Tensor cholesky(const Tensor& self) { + return torch::linalg_cholesky(self); +} + +inline Tensor cholesky_out(Tensor& result, const Tensor& self) { + return torch::linalg_cholesky_out(result, self); +} + +inline Tensor det(const Tensor& self) { + return torch::linalg_det(self); +} + +inline std::tuple slogdet(const Tensor& input) { + return torch::linalg_slogdet(input); +} + +inline std::tuple slogdet_out( + Tensor& sign, + Tensor& logabsdet, + const Tensor& input) { + return torch::linalg_slogdet_out(sign, logabsdet, input); +} + +inline std::tuple eig(const Tensor& 
self) { + return torch::linalg_eig(self); +} + +inline std::tuple eig_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self) { + return torch::linalg_eig_out(eigvals, eigvecs, self); +} + +inline Tensor eigvals(const Tensor& self) { + return torch::linalg_eigvals(self); +} + +inline Tensor& eigvals_out(Tensor& result, const Tensor& self) { + return torch::linalg_eigvals_out(result, self); +} + +inline std::tuple eigh( + const Tensor& self, + c10::string_view uplo) { + return torch::linalg_eigh(self, uplo); +} + +inline std::tuple eigh_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self, + c10::string_view uplo) { + return torch::linalg_eigh_out(eigvals, eigvecs, self, uplo); +} + +inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) { + return torch::linalg_eigvalsh(self, uplo); +} + +inline Tensor& eigvalsh_out( + Tensor& result, + const Tensor& self, + c10::string_view uplo) { + return torch::linalg_eigvalsh_out(result, self, uplo); +} + +inline Tensor householder_product(const Tensor& input, const Tensor& tau) { + return torch::linalg_householder_product(input, tau); +} + +inline Tensor& householder_product_out( + Tensor& result, + const Tensor& input, + const Tensor& tau) { + return torch::linalg_householder_product_out(result, input, tau); +} + +inline std::tuple lu_factor( + const Tensor& self, + const bool pivot) { + return torch::linalg_lu_factor(self, pivot); +} + +inline std::tuple lu_factor_out( + Tensor& LU, + Tensor& pivots, + const Tensor& self, + const bool pivot) { + return torch::linalg_lu_factor_out(LU, pivots, self, pivot); +} + +inline std::tuple lu( + const Tensor& self, + const bool pivot) { + return torch::linalg_lu(self, pivot); +} + +inline std::tuple lu_out( + Tensor& P, + Tensor& L, + Tensor& U, + const Tensor& self, + const bool pivot) { + return torch::linalg_lu_out(P, L, U, self, pivot); +} + +inline std::tuple lstsq( + const Tensor& self, + const Tensor& b, + c10::optional cond, + c10::optional 
driver) { + return torch::linalg_lstsq(self, b, cond, driver); +} + +inline Tensor matrix_exp(const Tensor& self) { + return torch::linalg_matrix_exp(self); +} + +inline Tensor norm( + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm(self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor norm( + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm_out( + result, self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_norm_out(result, self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor vector_norm( + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_vector_norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& vector_norm_out( + Tensor& result, + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return torch::linalg_vector_norm_out( + result, self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor matrix_norm( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype); +} + 
+inline Tensor matrix_norm( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return torch::linalg_matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return torch::linalg_matrix_norm_out(result, self, ord, dim, keepdim, dtype); +} + +inline Tensor matrix_power(const Tensor& self, int64_t n) { + return torch::linalg_matrix_power(self, n); +} + +inline Tensor& matrix_power_out(const Tensor& self, int64_t n, Tensor& result) { + return torch::linalg_matrix_power_out(result, self, n); +} + +inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) { + return torch::linalg_matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return torch::linalg_matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return torch::linalg_matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return torch::linalg_matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + double tol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( 
+ Tensor& result, + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return torch::linalg_matrix_rank_out(result, input, atol, rtol, hermitian); +} + +inline Tensor multi_dot(TensorList tensors) { + return torch::linalg_multi_dot(tensors); +} + +inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) { + return torch::linalg_multi_dot_out(result, tensors); +} + +inline Tensor pinv(const Tensor& input, double rcond, bool hermitian) { + return torch::linalg_pinv(input, rcond, hermitian); +} + +inline Tensor& pinv_out( + Tensor& result, + const Tensor& input, + double rcond, + bool hermitian) { + return torch::linalg_pinv_out(result, input, rcond, hermitian); +} + +inline std::tuple qr( + const Tensor& input, + c10::string_view mode) { + return torch::linalg_qr(input, mode); +} + +inline std::tuple qr_out( + Tensor& Q, + Tensor& R, + const Tensor& input, + c10::string_view mode) { + return torch::linalg_qr_out(Q, R, input, mode); +} + +inline std::tuple solve_ex( + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return torch::linalg_solve_ex(input, other, left, check_errors); +} + +inline std::tuple solve_ex_out( + Tensor& result, + Tensor& info, + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return torch::linalg_solve_ex_out( + result, info, input, other, left, check_errors); +} + +inline Tensor solve(const Tensor& input, const Tensor& other, bool left) { + return torch::linalg_solve(input, other, left); +} + +inline Tensor& solve_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool left) { + return torch::linalg_solve_out(result, input, other, left); +} + +inline Tensor solve_triangular( + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return torch::linalg_solve_triangular( + input, other, upper, left, unitriangular); +} + +inline Tensor& solve_triangular_out( + 
Tensor& result, + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return torch::linalg_solve_triangular_out( + result, input, other, upper, left, unitriangular); +} + +inline std::tuple svd( + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return torch::linalg_svd(input, full_matrices, driver); +} + +inline std::tuple svd_out( + Tensor& U, + Tensor& S, + Tensor& Vh, + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return torch::linalg_svd_out(U, S, Vh, input, full_matrices, driver); +} + +inline Tensor svdvals( + const Tensor& input, + c10::optional driver) { + return torch::linalg_svdvals(input, driver); +} + +inline Tensor& svdvals_out( + Tensor& result, + const Tensor& input, + c10::optional driver) { + return torch::linalg_svdvals_out(result, input, driver); +} + +inline Tensor tensorinv(const Tensor& self, int64_t ind) { + return torch::linalg_tensorinv(self, ind); +} + +inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) { + return torch::linalg_tensorinv_out(result, self, ind); +} + +inline Tensor tensorsolve( + const Tensor& self, + const Tensor& other, + OptionalIntArrayRef dims) { + return torch::linalg_tensorsolve(self, other, dims); +} + +inline Tensor& tensorsolve_out( + Tensor& result, + const Tensor& self, + const Tensor& other, + OptionalIntArrayRef dims) { + return torch::linalg_tensorsolve_out(result, self, other, dims); +} + +inline Tensor inv(const Tensor& input) { + return torch::linalg_inv(input); +} + +inline Tensor& inv_out(Tensor& result, const Tensor& input) { + return torch::linalg_inv_out(result, input); +} + +} // namespace detail +#endif /* DOXYGEN_SHOULD_SKIP_THIS */ + +/// Cholesky decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.cholesky +/// +/// Example: +/// ``` +/// auto A = torch::randn({4, 4}); +/// auto A = torch::matmul(A, A.t()); +/// auto L = 
torch::linalg::cholesky(A); +/// assert(torch::allclose(torch::matmul(L, L.t()), A)); +/// ``` +inline Tensor cholesky(const Tensor& self) { + return detail::cholesky(self); +} + +inline Tensor cholesky_out(Tensor& result, const Tensor& self) { + return detail::cholesky_out(result, self); +} + +// C10_DEPRECATED_MESSAGE("linalg_det is deprecated, use det instead.") +inline Tensor linalg_det(const Tensor& self) { + return detail::det(self); +} + +/// See the documentation of torch.linalg.det +inline Tensor det(const Tensor& self) { + return detail::det(self); +} + +/// Computes the sign and (natural) logarithm of the determinant +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.slogdet +inline std::tuple slogdet(const Tensor& input) { + return detail::slogdet(input); +} + +inline std::tuple slogdet_out( + Tensor& sign, + Tensor& logabsdet, + const Tensor& input) { + return detail::slogdet_out(sign, logabsdet, input); +} + +/// Computes eigenvalues and eigenvectors of non-symmetric/non-hermitian +/// matrices +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eig +inline std::tuple eig(const Tensor& self) { + return detail::eig(self); +} + +inline std::tuple eig_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& self) { + return detail::eig_out(eigvals, eigvecs, self); +} + +/// Computes eigenvalues of non-symmetric/non-hermitian matrices +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvals +inline Tensor eigvals(const Tensor& self) { + return detail::eigvals(self); +} + +inline Tensor& eigvals_out(Tensor& result, const Tensor& self) { + return detail::eigvals_out(result, self); +} + +/// Computes eigenvalues and eigenvectors +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigh +inline std::tuple eigh( + const Tensor& self, + c10::string_view uplo) { + return detail::eigh(self, uplo); +} + +inline std::tuple eigh_out( + Tensor& eigvals, + Tensor& eigvecs, + const Tensor& 
self, + c10::string_view uplo) { + return detail::eigh_out(eigvals, eigvecs, self, uplo); +} + +/// Computes eigenvalues +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.eigvalsh +inline Tensor eigvalsh(const Tensor& self, c10::string_view uplo) { + return detail::eigvalsh(self, uplo); +} + +inline Tensor& eigvalsh_out( + Tensor& result, + const Tensor& self, + c10::string_view uplo) { + return detail::eigvalsh_out(result, self, uplo); +} + +/// Computes the product of Householder matrices +/// +/// See +/// https://pytorch.org/docs/master/linalg.html#torch.linalg.householder_product +inline Tensor householder_product(const Tensor& input, const Tensor& tau) { + return detail::householder_product(input, tau); +} + +inline Tensor& householder_product_out( + Tensor& result, + const Tensor& input, + const Tensor& tau) { + return detail::householder_product_out(result, input, tau); +} + +inline std::tuple lstsq( + const Tensor& self, + const Tensor& b, + c10::optional cond, + c10::optional driver) { + return detail::lstsq(self, b, cond, driver); +} + +/// Computes the matrix exponential +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_exp +inline Tensor matrix_exp(const Tensor& input) { + return detail::matrix_exp(input); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.") +inline Tensor linalg_norm( + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm is deprecated, use norm instead.") +inline Tensor linalg_norm( + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out +// instead.") +inline Tensor& linalg_norm_out( + 
Tensor& result, + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +// C10_DEPRECATED_MESSAGE("linalg_norm_out is deprecated, use norm_out +// instead.") +inline Tensor& linalg_norm_out( + Tensor& result, + const Tensor& self, + c10::string_view ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype); +} + +/// Computes the LU factorization with partial pivoting +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu_factor +inline std::tuple lu_factor( + const Tensor& input, + const bool pivot = true) { + return detail::lu_factor(input, pivot); +} + +inline std::tuple lu_factor_out( + Tensor& LU, + Tensor& pivots, + const Tensor& self, + const bool pivot = true) { + return detail::lu_factor_out(LU, pivots, self, pivot); +} + +/// Computes the LU factorization with partial pivoting +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.lu +inline std::tuple lu( + const Tensor& input, + const bool pivot = true) { + return detail::lu(input, pivot); +} + +inline std::tuple lu_out( + Tensor& P, + Tensor& L, + Tensor& U, + const Tensor& self, + const bool pivot = true) { + return detail::lu_out(P, L, U, self, pivot); +} + +inline Tensor norm( + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor norm( + const Tensor& self, + std::string ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + const optional& opt_ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + 
return detail::norm_out(result, self, opt_ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& norm_out( + Tensor& result, + const Tensor& self, + std::string ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::norm_out(result, self, ord, opt_dim, keepdim, opt_dtype); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.vector_norm +inline Tensor vector_norm( + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::vector_norm(self, ord, opt_dim, keepdim, opt_dtype); +} + +inline Tensor& vector_norm_out( + Tensor& result, + const Tensor& self, + Scalar ord, + OptionalIntArrayRef opt_dim, + bool keepdim, + optional opt_dtype) { + return detail::vector_norm_out( + result, self, ord, opt_dim, keepdim, opt_dtype); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_norm +inline Tensor matrix_norm( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return detail::matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + const Scalar& ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result); +} + +inline Tensor matrix_norm( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype) { + return detail::matrix_norm(self, ord, dim, keepdim, dtype); +} + +inline Tensor& matrix_norm_out( + const Tensor& self, + std::string ord, + IntArrayRef dim, + bool keepdim, + optional dtype, + Tensor& result) { + return detail::matrix_norm_out(self, ord, dim, keepdim, dtype, result); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_power +inline Tensor matrix_power(const Tensor& self, int64_t n) { + return detail::matrix_power(self, n); +} + +inline Tensor& matrix_power_out(const 
Tensor& self, int64_t n, Tensor& result) { + return detail::matrix_power_out(self, n, result); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.matrix_rank +inline Tensor matrix_rank(const Tensor& input, double tol, bool hermitian) { + return detail::matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return detail::matrix_rank(input, tol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return detail::matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor matrix_rank( + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return detail::matrix_rank(input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + double tol, + bool hermitian) { + return detail::matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const Tensor& tol, + bool hermitian) { + return detail::matrix_rank_out(result, input, tol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + c10::optional atol, + c10::optional rtol, + bool hermitian) { + return detail::matrix_rank_out(result, input, atol, rtol, hermitian); +} + +inline Tensor& matrix_rank_out( + Tensor& result, + const Tensor& input, + const c10::optional& atol, + const c10::optional& rtol, + bool hermitian) { + return detail::matrix_rank_out(result, input, atol, rtol, hermitian); +} + +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.multi_dot +inline Tensor multi_dot(TensorList tensors) { + return detail::multi_dot(tensors); +} + +inline Tensor& multi_dot_out(TensorList tensors, Tensor& result) { + return detail::multi_dot_out(tensors, result); +} + +/// Computes the pseudo-inverse +/// +/// See 
https://pytorch.org/docs/master/linalg.html#torch.linalg.pinv +inline Tensor pinv( + const Tensor& input, + double rcond = 1e-15, + bool hermitian = false) { + return detail::pinv(input, rcond, hermitian); +} + +inline Tensor& pinv_out( + Tensor& result, + const Tensor& input, + double rcond = 1e-15, + bool hermitian = false) { + return detail::pinv_out(result, input, rcond, hermitian); +} + +/// Computes the QR decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.qr +inline std::tuple qr( + const Tensor& input, + c10::string_view mode = "reduced") { + // C++17 Change the initialisation to "reduced"sv + // Same for qr_out + return detail::qr(input, mode); +} + +inline std::tuple qr_out( + Tensor& Q, + Tensor& R, + const Tensor& input, + c10::string_view mode = "reduced") { + return detail::qr_out(Q, R, input, mode); +} + +/// Computes the LDL decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_factor_ex +inline std::tuple ldl_factor_ex( + const Tensor& input, + bool hermitian, + bool check_errors) { + return torch::linalg_ldl_factor_ex(input, hermitian, check_errors); +} + +inline std::tuple ldl_factor_ex_out( + Tensor& LD, + Tensor& pivots, + Tensor& info, + const Tensor& input, + bool hermitian, + bool check_errors) { + return torch::linalg_ldl_factor_ex_out( + LD, pivots, info, input, hermitian, check_errors); +} + +/// Solve a system of linear equations using the LDL decomposition +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.ldl_solve +inline Tensor ldl_solve( + const Tensor& LD, + const Tensor& pivots, + const Tensor& B, + bool hermitian) { + return torch::linalg_ldl_solve(LD, pivots, B, hermitian); +} + +inline Tensor& ldl_solve_out( + Tensor& result, + const Tensor& LD, + const Tensor& pivots, + const Tensor& B, + bool hermitian) { + return torch::linalg_ldl_solve_out(result, LD, pivots, B, hermitian); +} + +/// Solves a system linear system AX = B +/// +/// See 
https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_ex +inline std::tuple solve_ex( + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return detail::solve_ex(input, other, left, check_errors); +} + +inline std::tuple solve_ex_out( + Tensor& result, + Tensor& info, + const Tensor& input, + const Tensor& other, + bool left, + bool check_errors) { + return detail::solve_ex_out(result, info, input, other, left, check_errors); +} + +/// Computes a tensor `x` such that `matmul(input, x) = other`. +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.solve +inline Tensor solve(const Tensor& input, const Tensor& other, bool left) { + return detail::solve(input, other, left); +} + +inline Tensor& solve_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool left) { + return detail::solve_out(result, input, other, left); +} + +/// Computes a solution of a linear system AX = B for input = A and other = B +/// whenever A is square upper or lower triangular and does not have zeros in +/// the diagonal +/// +/// See +/// https://pytorch.org/docs/master/linalg.html#torch.linalg.solve_triangular +inline Tensor solve_triangular( + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return detail::solve_triangular(input, other, upper, left, unitriangular); +} + +inline Tensor& solve_triangular_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + bool upper, + bool left, + bool unitriangular) { + return detail::solve_triangular_out( + result, input, other, upper, left, unitriangular); +} + +/// Computes the singular values and singular vectors +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svd +inline std::tuple svd( + const Tensor& input, + bool full_matrices, + c10::optional driver) { + return detail::svd(input, full_matrices, driver); +} + +inline std::tuple svd_out( + Tensor& U, + Tensor& S, + Tensor& Vh, + const 
Tensor& input, + bool full_matrices, + c10::optional driver) { + return detail::svd_out(U, S, Vh, input, full_matrices, driver); +} + +/// Computes the singular values +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.svdvals +inline Tensor svdvals( + const Tensor& input, + c10::optional driver) { + return detail::svdvals(input, driver); +} + +inline Tensor& svdvals_out( + Tensor& result, + const Tensor& input, + c10::optional driver) { + return detail::svdvals_out(result, input, driver); +} + +/// Computes the inverse of a tensor +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorinv +/// +/// Example: +/// ``` +/// auto a = torch::eye(4*6).reshape({4, 6, 8, 3}); +/// int64_t ind = 2; +/// auto ainv = torch::linalg::tensorinv(a, ind); +/// ``` +inline Tensor tensorinv(const Tensor& self, int64_t ind) { + return detail::tensorinv(self, ind); +} + +inline Tensor& tensorinv_out(Tensor& result, const Tensor& self, int64_t ind) { + return detail::tensorinv_out(result, self, ind); +} + +/// Computes a tensor `x` such that `tensordot(input, x, dims=x.dim()) = other`. +/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.tensorsolve +/// +/// Example: +/// ``` +/// auto a = torch::eye(2*3*4).reshape({2*3, 4, 2, 3, 4}); +/// auto b = torch::randn(2*3, 4); +/// auto x = torch::linalg::tensorsolve(a, b); +/// ``` +inline Tensor tensorsolve( + const Tensor& input, + const Tensor& other, + OptionalIntArrayRef dims) { + return detail::tensorsolve(input, other, dims); +} + +inline Tensor& tensorsolve_out( + Tensor& result, + const Tensor& input, + const Tensor& other, + OptionalIntArrayRef dims) { + return detail::tensorsolve_out(result, input, other, dims); +} + +/// Computes a tensor `inverse_input` such that `dot(input, inverse_input) = +/// eye(input.size(0))`. 
+/// +/// See https://pytorch.org/docs/master/linalg.html#torch.linalg.inv +inline Tensor inv(const Tensor& input) { + return detail::inv(input); +} + +inline Tensor& inv_out(Tensor& result, const Tensor& input) { + return detail::inv_out(result, input); +} + +} // namespace linalg +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h new file mode 100644 index 0000000000000000000000000000000000000000..1b2eabd6832ba8a3c1d07add2e0313ae407c51da --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/mps.h @@ -0,0 +1,44 @@ +#pragma once + +#include + +#include +#include + +#ifdef __OBJC__ +#include +#include +using MTLCommandBuffer_t = id; +using DispatchQueue_t = dispatch_queue_t; +#else +using MTLCommandBuffer_t = void*; +using DispatchQueue_t = void*; +#endif + +namespace torch { +namespace mps { + +/// Returns true if MPS device is available. +bool TORCH_API is_available(); + +/// Sets the RNG seed for the MPS device. +void TORCH_API manual_seed(uint64_t seed); + +/// Waits for all streams on the MPS device to complete. +/// This blocks the calling CPU thread by using the 'waitUntilCompleted()' +/// method to wait for Metal command buffers finish executing all the +/// encoded GPU operations before returning. +void TORCH_API synchronize(); + +/// Submits the currently active command buffer to run on the MPS device. +void TORCH_API commit(); + +/// Get the current command buffer to encode the Metal commands. +MTLCommandBuffer_t TORCH_API get_command_buffer(); + +/// Get the dispatch_queue_t to synchronize encoding the custom kernels +/// with the PyTorch MPS backend. 
+DispatchQueue_t TORCH_API get_dispatch_queue(); + +} // namespace mps +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h new file mode 100644 index 0000000000000000000000000000000000000000..8aef6238ebaa3d9b91b827c88fe5d4c5eb0c765a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/optim.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python.h new file mode 100644 index 0000000000000000000000000000000000000000..15902a026cf597c4f1eacf0063e65b9f0fa2948e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/python.h @@ -0,0 +1,262 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch { +namespace python { +namespace detail { +inline Device py_object_to_device(py::object object) { + PyObject* obj = object.ptr(); + if (THPDevice_Check(obj)) { + return reinterpret_cast(obj)->device; + } + throw TypeError("Expected device"); +} + +inline Dtype py_object_to_dtype(py::object object) { + PyObject* obj = object.ptr(); + if (THPDtype_Check(obj)) { + return reinterpret_cast(obj)->scalar_type; + } + throw TypeError("Expected dtype"); +} + +template +using PyModuleClass = + py::class_>; + +/// Dynamically creates a subclass of `torch.nn.cpp.ModuleWrapper` that is also +/// a subclass of `torch.nn.Module`, and passes it the user-provided C++ module +/// to which it delegates all 
calls. +template +void bind_cpp_module_wrapper( + py::module module, + PyModuleClass cpp_class, + const char* name) { + // Grab the `torch.nn.cpp.ModuleWrapper` class, which we'll subclass + // with a dynamically created class below. + py::object cpp_module = + py::module::import("torch.nn.cpp").attr("ModuleWrapper"); + + // Grab the `type` class which we'll use as a metaclass to create a new class + // dynamically. + py::object type_metaclass = + py::reinterpret_borrow((PyObject*)&PyType_Type); + + // The `ModuleWrapper` constructor copies all functions to its own `__dict__` + // in its constructor, but we do need to give our dynamic class a constructor. + // Inside, we construct an instance of the original C++ module we're binding + // (the `torch::nn::Module` subclass), and then forward it to the + // `ModuleWrapper` constructor. + py::dict attributes; + + // `type()` always needs a `str`, but pybind11's `str()` method always creates + // a `unicode` object. + py::object name_str = py::str(name); + + // Dynamically create the subclass of `ModuleWrapper`, which is a subclass of + // `torch.nn.Module`, and will delegate all calls to the C++ module we're + // binding. + py::object wrapper_class = + type_metaclass(name_str, py::make_tuple(cpp_module), attributes); + + // The constructor of the dynamic class calls `ModuleWrapper.__init__()`, + // which replaces its methods with those of the C++ module. + wrapper_class.attr("__init__") = py::cpp_function( + [cpp_module, cpp_class]( + py::object self, py::args args, py::kwargs kwargs) { + cpp_module.attr("__init__")(self, cpp_class(*args, **kwargs)); + }, + py::is_method(wrapper_class)); + + // Calling `my_module.my_class` now means that `my_class` is a subclass of + // `ModuleWrapper`, and whose methods call into the C++ module we're binding. + module.attr(name) = wrapper_class; +} +} // namespace detail + +/// Adds method bindings for a pybind11 `class_` that binds an `nn::Module` +/// subclass. 
+/// +/// Say you have a pybind11 class object created with `py::class_(m, +/// "Net")`. This function will add all the necessary `.def()` calls to bind the +/// `nn::Module` base class' methods, such as `train()`, `eval()` etc. into +/// Python. +/// +/// Users should prefer to use `bind_module` if possible. +template +py::class_ add_module_bindings( + py::class_ module) { + // clang-format off + return module + .def("train", + [](ModuleType& module, bool mode) { module.train(mode); }, + py::arg("mode") = true) + .def("eval", [](ModuleType& module) { module.eval(); }) + .def("clone", [](ModuleType& module) { return module.clone(); }) + .def_property_readonly( + "training", [](ModuleType& module) { return module.is_training(); }) + .def("zero_grad", [](ModuleType& module) { module.zero_grad(); }) + .def_property_readonly( "_parameters", [](ModuleType& module) { + return module.named_parameters(/*recurse=*/false); + }) + .def("parameters", [](ModuleType& module, bool recurse) { + return module.parameters(recurse); + }, + py::arg("recurse") = true) + .def("named_parameters", [](ModuleType& module, bool recurse) { + return module.named_parameters(recurse); + }, + py::arg("recurse") = true) + .def_property_readonly("_buffers", [](ModuleType& module) { + return module.named_buffers(/*recurse=*/false); + }) + .def("buffers", [](ModuleType& module, bool recurse) { + return module.buffers(recurse); }, + py::arg("recurse") = true) + .def("named_buffers", [](ModuleType& module, bool recurse) { + return module.named_buffers(recurse); + }, + py::arg("recurse") = true) + .def_property_readonly( + "_modules", [](ModuleType& module) { return module.named_children(); }) + .def("modules", [](ModuleType& module) { return module.modules(); }) + .def("named_modules", + [](ModuleType& module, py::object /* unused */, std::string prefix, bool remove_duplicate /* unused */) { + return module.named_modules(std::move(prefix)); + }, + py::arg("memo") = py::none(), + py::arg("prefix") = 
std::string(), + py::arg("remove_duplicate") = true) + .def("children", [](ModuleType& module) { return module.children(); }) + .def("named_children", + [](ModuleType& module) { return module.named_children(); }) + .def("to", [](ModuleType& module, py::object object, bool non_blocking) { + if (THPDevice_Check(object.ptr())) { + module.to( + reinterpret_cast(object.ptr())->device, + non_blocking); + } else { + module.to(detail::py_object_to_dtype(object), non_blocking); + } + }, + py::arg("dtype_or_device"), + py::arg("non_blocking") = false) + .def("to", + [](ModuleType& module, + py::object device, + py::object dtype, + bool non_blocking) { + if (device.is_none()) { + module.to(detail::py_object_to_dtype(dtype), non_blocking); + } else if (dtype.is_none()) { + module.to(detail::py_object_to_device(device), non_blocking); + } else { + module.to( + detail::py_object_to_device(device), + detail::py_object_to_dtype(dtype), + non_blocking); + } + }, + py::arg("device"), + py::arg("dtype"), + py::arg("non_blocking") = false) + .def("cuda", [](ModuleType& module) { module.to(kCUDA); }) + .def("cpu", [](ModuleType& module) { module.to(kCPU); }) + .def("float", [](ModuleType& module) { module.to(kFloat32); }) + .def("double", [](ModuleType& module) { module.to(kFloat64); }) + .def("half", [](ModuleType& module) { module.to(kFloat16); }) + .def("__str__", [](ModuleType& module) { return module.name(); }) + .def("__repr__", [](ModuleType& module) { return module.name(); }); + // clang-format on +} + +/// Creates a pybind11 class object for an `nn::Module` subclass type and adds +/// default bindings. +/// +/// After adding the default bindings, the class object is returned, such that +/// you can add more bindings. +/// +/// Example usage: +/// \rst +/// .. 
code-block:: cpp +/// +/// struct Net : torch::nn::Module { +/// Net(int in, int out) { } +/// torch::Tensor forward(torch::Tensor x) { return x; } +/// }; +/// +/// PYBIND11_MODULE(my_module, m) { +/// torch::python::bind_module(m, "Net") +/// .def(py::init()) +/// .def("forward", &Net::forward); +/// } +/// \endrst +template +torch::disable_if_t< + torch::detail::has_forward::value && !force_enable, + detail::PyModuleClass> +bind_module(py::module module, const char* name) { + py::module cpp = module.def_submodule("cpp"); + auto cpp_class = + add_module_bindings(detail::PyModuleClass(cpp, name)); + detail::bind_cpp_module_wrapper(module, cpp_class, name); + return cpp_class; +} + +/// Creates a pybind11 class object for an `nn::Module` subclass type and adds +/// default bindings. +/// +/// After adding the default bindings, the class object is returned, such that +/// you can add more bindings. +/// +/// If the class has a `forward()` method, it is automatically exposed as +/// `forward()` and `__call__` in Python. +/// +/// Example usage: +/// \rst +/// .. 
code-block:: cpp +/// +/// struct Net : torch::nn::Module { +/// Net(int in, int out) { } +/// torch::Tensor forward(torch::Tensor x) { return x; } +/// }; +/// +/// PYBIND11_MODULE(my_module, m) { +/// torch::python::bind_module(m, "Net") +/// .def(py::init()) +/// .def("forward", &Net::forward); +/// } +/// \endrst +template < + typename ModuleType, + typename = + torch::enable_if_t::value>> +detail::PyModuleClass bind_module( + py::module module, + const char* name) { + return bind_module(module, name) + .def("forward", &ModuleType::forward) + .def("__call__", &ModuleType::forward); +} +} // namespace python +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize.h new file mode 100644 index 0000000000000000000000000000000000000000..60ec25b8ffe7924249afcdbb6d50b6d353850ceb --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/serialize.h @@ -0,0 +1,144 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch { + +/// Serializes the given `value`. +/// There must be an overload of `operator<<` between `serialize::OutputArchive` +/// and `Value` for this method to be well-formed. Currently, such an overload +/// is provided for (subclasses of): +/// +/// - `torch::nn::Module`, +/// - `torch::optim::Optimizer` +/// - `torch::Tensor` +/// +/// To perform the serialization, a `serialize::OutputArchive` is constructed, +/// and all arguments after the `value` are forwarded to its `save_to` method. +/// For example, you can pass a filename, or an `ostream`. +/// +/// \rst +/// .. 
code-block:: cpp +/// +/// torch::nn::Linear model(3, 4); +/// torch::save(model, "model.pt"); +/// +/// torch::optim::SGD sgd(/*lr=*/0.9); +/// std::ostringstream stream; +/// // Note that the same stream cannot be used in multiple torch::save(...) +/// // invocations, otherwise the header will be corrupted. +/// torch::save(sgd, stream); +/// +/// auto tensor = torch::ones({3, 4}); +/// torch::save(tensor, "my_tensor.pt"); +/// \endrst +template +void save(const Value& value, SaveToArgs&&... args) { + serialize::OutputArchive archive(std::make_shared()); + archive << value; + archive.save_to(std::forward(args)...); +} + +/// Serializes the given `tensor_vec` of type `std::vector`. +/// +/// To perform the serialization, a `serialize::OutputArchive` is constructed, +/// and all arguments after the `tensor_vec` are forwarded to its `save_to` +/// method. For example, you can pass a filename, or an `ostream`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// std::vector tensor_vec = { torch::randn({1, 2}), +/// torch::randn({3, 4}) }; torch::save(tensor_vec, "my_tensor_vec.pt"); +/// +/// std::vector tensor_vec = { torch::randn({5, 6}), +/// torch::randn({7, 8}) }; std::ostringstream stream; +/// // Note that the same stream cannot be used in multiple torch::save(...) +/// // invocations, otherwise the header will be corrupted. +/// torch::save(tensor_vec, stream); +/// \endrst +template +void save(const std::vector& tensor_vec, SaveToArgs&&... args) { + serialize::OutputArchive archive(std::make_shared()); + for (const auto i : c10::irange(tensor_vec.size())) { + auto& value = tensor_vec[i]; + archive.write(std::to_string(i), value); + } + archive.save_to(std::forward(args)...); +} + +TORCH_API std::vector pickle_save(const torch::IValue& ivalue); +TORCH_API torch::IValue pickle_load(const std::vector& data); + +/// Deserializes the given `value`. 
+/// There must be an overload of `operator>>` between `serialize::InputArchive` +/// and `Value` for this method to be well-formed. Currently, such an overload +/// is provided for (subclasses of): +/// +/// - `torch::nn::Module`, +/// - `torch::optim::Optimizer` +/// - `torch::Tensor` +/// +/// To perform the serialization, a `serialize::InputArchive` is constructed, +/// and all arguments after the `value` are forwarded to its `load_from` method. +/// For example, you can pass a filename, or an `istream`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// torch::nn::Linear model(3, 4); +/// torch::load(model, "model.pt"); +/// +/// torch::optim::SGD sgd(/*lr=*/0.9); +/// std::istringstream stream("..."); +/// torch::load(sgd, stream); +/// +/// auto tensor = torch::ones({3, 4}); +/// torch::load(tensor, "my_tensor.pt"); +/// \endrst +template +void load(Value& value, LoadFromArgs&&... args) { + serialize::InputArchive archive; + archive.load_from(std::forward(args)...); + archive >> value; +} + +/// Deserializes the given `tensor_vec` of type `std::vector`. +/// +/// To perform the serialization, a `serialize::InputArchive` is constructed, +/// and all arguments after the `value` are forwarded to its `load_from` method. +/// For example, you can pass a filename, or an `istream`. +/// +/// \rst +/// .. code-block:: cpp +/// +/// std::vector tensor_vec; +/// torch::load(tensor_vec, "my_tensor_vec.pt"); +/// +/// std::vector tensor_vec; +/// std::istringstream stream("..."); +/// torch::load(tensor_vec, stream); +/// \endrst +template +void load(std::vector& tensor_vec, LoadFromArgs&&... args) { + serialize::InputArchive archive; + archive.load_from(std::forward(args)...); + + // NOTE: The number of elements in the serialized `std::vector` + // is not known ahead of time, so we need a while-loop to increment the index, + // and use `archive.try_read(...)` to check whether we have reached the end of + // the serialized `std::vector`. 
+ size_t index = 0; + torch::Tensor value; + while (archive.try_read(std::to_string(index), value)) { + tensor_vec.push_back(std::move(value)); + value = torch::Tensor(); + index++; + } +} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/sparse.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/sparse.h new file mode 100644 index 0000000000000000000000000000000000000000..a30e74477e3658ef411fe14d2715cde323ce618b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/sparse.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +namespace torch { +namespace sparse {} +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/torch.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/torch.h new file mode 100644 index 0000000000000000000000000000000000000000..7316af88d2eba7337086b29d099370bf30aa99dc --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/api/include/torch/torch.h @@ -0,0 +1,8 @@ +#pragma once + +#include + +#ifdef TORCH_API_INCLUDE_EXTENSION_H +#include + +#endif // defined(TORCH_API_INCLUDE_EXTENSION_H) diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h new file mode 100644 index 0000000000000000000000000000000000000000..cd8d733da75549add7dbccbe90200ff38face7b2 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/ir/subgraph_matcher.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +#include +#include + +namespace torch { +namespace jit { + +/** + * \brief A structure describing a match of a pattern in a graph. 
+ * + * The structure contains an anchor node, from which the match was found, and + * match-maps for nodes and values. A match-map specifies the correspondance + * between nodes in the pattern graph (match-map keys) with nodes in the actual + * graph (match-map values). We keep such maps for both nodes and values. + */ +struct Match { + Node* anchor; + std::unordered_map nodes_map; + std::unordered_map values_map; +}; + +/** + * \brief Find all matches of a \p PATTERN in a \p GRAPH. + * + * The function returns a vector of match-descriptors (see description of + * `struct Match`). + * + * Matching rules: + * - Pattern graph must contain a single block. + * - Matched subgraphs do not span across different blocks. + * - No uses outside the match are allowed, except for Param and Return nodes. + * Basically, we're matching hammocks, not arbitrary subgraphs. + * - The pattern graph must return only one value (i.e. it must have a single + * node leading to return). + * - Nodes that are not used in computation of the return value in the pattern + * graph are ignored during matching (IOW, we're essentially performing DCE on + * the pattern). + * - Pattern graph nodes cannot alias. TODO: the check not implemented yet. + * - Aliasing nodes in the graph cannot consitute a match (i.e. through all + * found matches, no nodes in the subgraph alias with each other). TODO: check + * not implemented yet. + * - The matcher will not mutate either the pattern graph or the matched graph. + * The matched graph is taken as non-const so that Match may contain non-const + * pointers. This enables clients of this API to use Match to drive mutations. + * + * Note [Multi-output Patterns] + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * Subgraph matcher provides limited support for multi-output patterns. 
With a + * single output pattern, a single scan through the graph is sufficient to + * find all the matches: given a starting node (an "anchor"), we can + * deterministically check whether a pattern matches a subgraph corresponding to + * this anchor node. For a general case of multi-output patterns, we would have + * N anchors, which would result in M^N comparisons (M is the size of the + * graph). Clearly this is computationally prohibitive. + * + * To overcome this, we impose some constraints on the multi-output patterns + * that we accept. We require that checking whether the pattern matches a + * subgraph would still be fully determined by a single node in the graph. To + * achieve this, we designate the first output in the pattern as the "main" + * output and assume that we can traverse up from this node to match the + * entire pattern. + * + * Corrolary 1: the order of outputs in the pattern matters! + * Corollary 2: patterns cannot contain any nodes not participating in the main + * output computation. + */ +std::vector TORCH_API +findPatternMatches(const Graph& pattern, Graph& graph); + +} // namespace jit +} // namespace torch diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h new file mode 100644 index 0000000000000000000000000000000000000000..64d514374f58e69b732133ce324053d6d1bebc4c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/custom_operator.h @@ -0,0 +1,30 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +/// Registration class for new operators. Effectively calls +/// `torch::jit::registerOperator` for every supplied operator, but allows doing +/// so in the global scope when a `RegisterOperators` object is assigned to a +/// static variable. +/// Note: This is *not* the custom operator API. 
If you want to register custom +/// operators, take a look at torch::RegisterOperators. +struct TORCH_API RegisterOperators { + RegisterOperators() = default; + + /// Registers a vector of already created `Operator`s. + /// The operator element is now optional to filter null ops. It's backward + /// compatible and works for selective operator registration. + explicit RegisterOperators(std::vector> operators) { + for (c10::optional& o : operators) { + if (o) { + registerOperator(std::move(o.value())); + } + } + } +}; + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h new file mode 100644 index 0000000000000000000000000000000000000000..e3f00272a999f3d9431528db7d8e74ff0cc3d823 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h @@ -0,0 +1,29 @@ +#pragma once +#include +#include + +namespace torch::jit { + +struct ExceptionMessage { + ExceptionMessage(const std::exception& e) : e_(e) {} + + private: + const std::exception& e_; + friend std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg); +}; + +inline std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg) { + auto c10_error = dynamic_cast(&msg.e_); + if (c10_error) { + out << c10_error->what_without_backtrace(); + } else { + out << msg.e_.what(); + } + return out; +} + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..b0b67c68088389bdd35e72c00cd7d1005399cb1c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h @@ -0,0 +1,86 @@ 
+#pragma once + +#include +#include +#include +#include + +#include + +namespace torch::jit::logging { + +class LoggerBase { + public: + TORCH_API virtual void addStatValue( + const std::string& stat_name, + int64_t val) = 0; + virtual ~LoggerBase() = default; +}; + +TORCH_API LoggerBase* getLogger(); +TORCH_API LoggerBase* setLogger(LoggerBase* logger); + +// No-op logger. This is the default and is meant to incur almost no runtime +// overhead. + +class NoopLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override {} + ~NoopLogger() override = default; +}; + +// Trivial locking logger. Pass in an instance of this to setLogger() to use it. +// This keeps track of the sum of all statistics. +// +// NOTE: this is not written in a scalable way and should probably only be used +// in the single-threaded case or for testing. +class TORCH_API LockingLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override; + virtual int64_t getCounterValue(const std::string& name) const; + enum class AggregationType { SUM = 0, AVG = 1 }; + void setAggregationType(const std::string& stat_name, AggregationType type); + ~LockingLogger() override = default; + + private: + mutable std::mutex m; + struct RawCounter { + RawCounter() : sum(0), count(0) {} + int64_t sum; + size_t count; + }; + std::unordered_map raw_counters; + std::unordered_map agg_types; +}; + +// Make this struct so the timer internals are opaque to the user. 
+struct JITTimePoint { + std::chrono::time_point point; +}; + +TORCH_API JITTimePoint timePoint(); +TORCH_API void recordDurationSince( + const std::string& name, + const JITTimePoint& tp); + +namespace runtime_counters { +constexpr const char* GRAPH_EXECUTORS_CONSTRUCTED = + "pytorch_runtime.graph_executors_constructed"; +constexpr const char* GRAPH_EXECUTOR_INVOCATIONS = + "pytorch_runtime.graph_executor_invocations"; +constexpr const char* EXECUTION_PLAN_CACHE_HIT = + "pytorch_runtime.execution_plan_cache_hit"; +constexpr const char* EXECUTION_PLAN_CACHE_MISS = + "pytorch_runtime.execution_plan_cache_miss"; + +inline std::vector allRuntimeCounters() { + return { + GRAPH_EXECUTORS_CONSTRUCTED, + GRAPH_EXECUTOR_INVOCATIONS, + EXECUTION_PLAN_CACHE_HIT, + EXECUTION_PLAN_CACHE_MISS}; +} + +} // namespace runtime_counters + +} // namespace torch::jit::logging diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h new file mode 100644 index 0000000000000000000000000000000000000000..50c41fc3ad39d44262b4da8e54fd4b75b00d8f2d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator_options.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +using AliasAnalysisKind = c10::AliasAnalysisKind; + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..36feaffb200b655bd452ff822ae7af5149bc2670 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/print_handler.h @@ -0,0 +1,15 @@ +#pragma once + +#include + +#include + +namespace torch::jit { + +using PrintHandler = void 
(*)(const std::string&); + +TORCH_API PrintHandler getDefaultPrintHandler(); +TORCH_API PrintHandler getPrintHandler(); +TORCH_API void setPrintHandler(PrintHandler ph); + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h new file mode 100644 index 0000000000000000000000000000000000000000..8e08255687cf79109dd2247f90a01c9704415f3d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::jit { +namespace profiling { + +struct Datapoint { + using Timepoint = std::chrono::time_point; + SourceRange sourceRange; + Timepoint start; + Timepoint end; + + explicit Datapoint(SourceRange sr) + : sourceRange(std::move(sr)), start(std::chrono::steady_clock::now()) {} +}; + +class TORCH_API InstructionSpan { + public: + explicit InstructionSpan(Node&); + ~InstructionSpan(); + InstructionSpan(InstructionSpan&&) = delete; + InstructionSpan& operator=(InstructionSpan&&) = delete; + + private: + std::unique_ptr datapoint_; +}; + +} // namespace profiling + +struct TORCH_API InstructionStats : public CustomClassHolder { + int64_t count{0}; + std::chrono::nanoseconds duration{0}; +}; + +class TORCH_API SourceStats : public CustomClassHolder { + public: + using LineMap = c10::Dict>; + + SourceStats(SourceRef source, LineMap lineMap) + : source_(std::move(source)), lineMap_(std::move(lineMap)) {} + + const SourceRef& getSourceRef() const { + return source_; + } + + const LineMap& getLineMap() const { + return lineMap_; + } + + private: + SourceRef source_; + LineMap lineMap_; +}; + +/** + * ScriptProfile is an underlying C++ implementation for TorchScript profiling. 
+ * The profiling section is specified by calling enable() and disable(): + * + * ... + * scriptProfile.enable(); + * ... + * (scripts) + * ... + * scriptProfile.disable(); + * ... + * + * To retrieve collected runtime data, users may call dumpStats() and do + * arbitrary filtering on the data they want. Note that dumpStats() should + * not be called inside a profiling section. + * In general, stats are aggregated per source function body, and then by line + * number. + */ +class TORCH_API ScriptProfile : public CustomClassHolder { + // Aggregates datapoints by function source id, then by line number. + using LineMap = std::map; + using SourceMap = std::map>; + + public: + void enable(); + void disable(); + const SourceMap& dumpStats(); + void addDatapoint(std::shared_ptr); + ~ScriptProfile() override; + + private: + bool enabled_{false}; + std::vector> datapoints_; + SourceMap sourceMap_; +}; + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..e822f3f93e3d29d533f27e8565d7a0de787f33b5 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedShapeFunctions(); + +TORCH_API const OperatorMap& GetShapeFunctionMappings(); + +TORCH_API const OperatorMap>& +GetBoundedShapeMappings(); + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h new file mode 100644 index 
0000000000000000000000000000000000000000..34272000f0d1a3e2e808ce2bbe27ec4ab299380e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/simple_graph_executor_impl.h @@ -0,0 +1,23 @@ +#pragma once +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API SimpleGraphExecutorImpl : public GraphExecutorImplBase { + SimpleGraphExecutorImpl( + const std::shared_ptr& graph, + std::string function_name); + + const ExecutionPlan& getPlanFor( + Stack& stack, + c10::optional remaining_bailout_depth) override; + GraphExecutorState getDebugState() override; + ~SimpleGraphExecutorImpl() override = default; + + private: + c10::optional execution_plan_; +}; + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h new file mode 100644 index 0000000000000000000000000000000000000000..720c8b69e5ecd55cbe9a00d13342fa9f5cbc98db --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/slice_indices_adjust.h @@ -0,0 +1,26 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +// Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +// 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software +// Foundation; All Rights Reserved +// +// Stolen (with appropriate modifications) by @agolynski +// (https://github.com/pytorch/pytorch/pull/33019) from cpython repo +// Objects/sliceobject.c with comment: this is harder to get right than you +// might think +// +// This adjusts indexes according to python list semantics and returns number +// of elements in the resulting list. 
+TORCH_API int64_t slice_indices_adjust( + int64_t length, + int64_t* start, + int64_t* stop, + int64_t step); + +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h new file mode 100644 index 0000000000000000000000000000000000000000..64e0d6661baebc3bb0c82831a8566dba3e0112f6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h @@ -0,0 +1,18 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include +#include + +namespace torch::jit { +struct GradientPair { + std::shared_ptr forward; + std::shared_ptr backward; +}; + +TORCH_API c10::optional gradientInfoForSchema( + const FunctionSchema& schema); +TORCH_API bool hasGradientInfoForSchema(const FunctionSchema& schema); +} // namespace torch::jit diff --git a/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/tensor/python_tensor.h b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/tensor/python_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..9040f84ac4b72638d38e3e7bc9aac61914e57e42 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/tensor/python_tensor.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include +#include + +namespace at { +class Tensor; +} // namespace at + +namespace torch { +namespace tensors { + +// Initializes the Python tensor type objects: torch.FloatTensor, +// torch.DoubleTensor, etc. and binds them in their containing modules. 
+void initialize_python_bindings(); + +// Same as set_default_tensor_type() but takes a PyObject* +void py_set_default_tensor_type(PyObject* type_obj); + +// Same as py_set_default_tensor_type, but only changes the dtype (ScalarType). +void py_set_default_dtype(PyObject* dtype_obj); + +// Gets the DispatchKey for the default tensor type. +// +// TODO: This is nuts! There is no reason to let the default tensor type id +// change. Probably only store ScalarType, as that's the only flex point +// we support. +TORCH_API c10::DispatchKey get_default_dispatch_key(); +at::Device get_default_device(); + +// Gets the ScalarType for the default tensor type. +at::ScalarType get_default_scalar_type(); +} // namespace tensors +} // namespace torch diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_adjacent_difference.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_adjacent_difference.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c19cb90079a94b6b814c43c3d74149a3464422be --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_adjacent_difference.cuh @@ -0,0 +1,279 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +template +struct AgentAdjacentDifferencePolicy +{ + static constexpr int BLOCK_THREADS = _BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = _ITEMS_PER_THREAD; + static constexpr int ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD; + + static constexpr cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + static constexpr cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + static constexpr cub::BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; +}; + +namespace detail +{ +namespace adjacent_difference +{ + +template 
+struct AgentDifference +{ + using LoadIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + + using BlockLoad = typename cub::BlockLoadType::type; + using BlockStore = typename cub::BlockStoreType::type; + + using BlockAdjacentDifferenceT = cub::BlockAdjacentDifference; + + union _TempStorage + { + typename BlockLoad::TempStorage load; + typename BlockStore::TempStorage store; + typename BlockAdjacentDifferenceT::TempStorage adjacent_difference; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + static constexpr int BLOCK_THREADS = Policy::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = Policy::ITEMS_PER_THREAD; + static constexpr int ITEMS_PER_TILE = Policy::ITEMS_PER_TILE; + static constexpr int SHARED_MEMORY_SIZE = static_cast(sizeof(TempStorage)); + + _TempStorage& temp_storage; + InputIteratorT input_it; + LoadIt load_it; + InputT* first_tile_previous; + OutputIteratorT result; + DifferenceOpT difference_op; + OffsetT num_items; + + _CCCL_DEVICE _CCCL_FORCEINLINE AgentDifference( + TempStorage& temp_storage, + InputIteratorT input_it, + InputT* first_tile_previous, + OutputIteratorT result, + DifferenceOpT difference_op, + OffsetT num_items) + : temp_storage(temp_storage.Alias()) + , input_it(input_it) + , load_it(THRUST_NS_QUALIFIER::cuda_cub::core::make_load_iterator(Policy(), input_it)) + , first_tile_previous(first_tile_previous) + , result(result) + , difference_op(difference_op) + , num_items(num_items) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void consume_tile_impl(int num_remaining, int tile_idx, OffsetT tile_base) + { + InputT input[ITEMS_PER_THREAD]; + OutputT output[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Fill last elements with the first element + // because collectives are not suffix guarded + BlockLoad(temp_storage.load).Load(load_it + tile_base, input, num_remaining, *(load_it + tile_base)); + } + else + { + 
BlockLoad(temp_storage.load).Load(load_it + tile_base, input); + } + + __syncthreads(); + + if (ReadLeft) + { + if (IS_FIRST_TILE) + { + if (IS_LAST_TILE) + { + BlockAdjacentDifferenceT(temp_storage.adjacent_difference) + .SubtractLeftPartialTile(input, output, difference_op, num_remaining); + } + else + { + BlockAdjacentDifferenceT(temp_storage.adjacent_difference).SubtractLeft(input, output, difference_op); + } + } + else + { + InputT tile_prev_input = MayAlias ? first_tile_previous[tile_idx] : *(input_it + tile_base - 1); + + if (IS_LAST_TILE) + { + BlockAdjacentDifferenceT(temp_storage.adjacent_difference) + .SubtractLeftPartialTile(input, output, difference_op, num_remaining, tile_prev_input); + } + else + { + BlockAdjacentDifferenceT(temp_storage.adjacent_difference) + .SubtractLeft(input, output, difference_op, tile_prev_input); + } + } + } + else + { + if (IS_LAST_TILE) + { + BlockAdjacentDifferenceT(temp_storage.adjacent_difference) + .SubtractRightPartialTile(input, output, difference_op, num_remaining); + } + else + { + InputT tile_next_input = MayAlias ? 
first_tile_previous[tile_idx] : *(input_it + tile_base + ITEMS_PER_TILE); + + BlockAdjacentDifferenceT(temp_storage.adjacent_difference) + .SubtractRight(input, output, difference_op, tile_next_input); + } + } + + __syncthreads(); + + if (IS_LAST_TILE) + { + BlockStore(temp_storage.store).Store(result + tile_base, output, num_remaining); + } + else + { + BlockStore(temp_storage.store).Store(result + tile_base, output); + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void consume_tile(int num_remaining, int tile_idx, OffsetT tile_base) + { + if (tile_idx == 0) + { + consume_tile_impl(num_remaining, tile_idx, tile_base); + } + else + { + consume_tile_impl(num_remaining, tile_idx, tile_base); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void Process(int tile_idx, OffsetT tile_base) + { + OffsetT num_remaining = num_items - tile_base; + + if (num_remaining > ITEMS_PER_TILE) // not a last tile + { + consume_tile(num_remaining, tile_idx, tile_base); + } + else + { + consume_tile(num_remaining, tile_idx, tile_base); + } + } +}; + +template +struct AgentDifferenceInit +{ + static constexpr int BLOCK_THREADS = 128; + + static _CCCL_DEVICE _CCCL_FORCEINLINE void + Process(int tile_idx, InputIteratorT first, InputT* result, OffsetT num_tiles, int items_per_tile) + { + OffsetT tile_base = static_cast(tile_idx) * items_per_tile; + + if (tile_base > 0 && tile_idx < num_tiles) + { + if (ReadLeft) + { + result[tile_idx] = first[tile_base - 1]; + } + else + { + result[tile_idx - 1] = first[tile_base]; + } + } + } +}; + +} // namespace adjacent_difference +} // namespace detail + +template +using AgentDifference CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = detail::adjacent_difference:: + AgentDifference; + +template +using AgentDifferenceInit CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + 
detail::adjacent_difference::AgentDifferenceInit; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_batch_memcpy.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_batch_memcpy.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2b926f582fe63e72992f5dccbba68890c328d44e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_batch_memcpy.cuh @@ -0,0 +1,1180 @@ +/****************************************************************************** + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * cub::AgentBatchMemcpy implements device-wide copying of a batch of device-accessible + * source-buffers to device-accessible destination-buffers. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ +namespace batch_memcpy +{ +template +_CCCL_FORCEINLINE _CCCL_DEVICE void +LoadVectorAndFunnelShiftR(uint32_t const* aligned_ptr, uint32_t bit_shift, uint4& data_out) +{ + data_out = {aligned_ptr[0], aligned_ptr[1], aligned_ptr[2], aligned_ptr[3]}; + + if (!PTR_IS_FOUR_BYTE_ALIGNED) + { + uint32_t tail = aligned_ptr[4]; + data_out.x = __funnelshift_r(data_out.x, data_out.y, bit_shift); + data_out.y = __funnelshift_r(data_out.y, data_out.z, bit_shift); + data_out.z = __funnelshift_r(data_out.z, data_out.w, bit_shift); + data_out.w = __funnelshift_r(data_out.w, tail, bit_shift); + } +} + +template +_CCCL_FORCEINLINE _CCCL_DEVICE void +LoadVectorAndFunnelShiftR(uint32_t const* aligned_ptr, uint32_t bit_shift, uint2& data_out) +{ + data_out = {aligned_ptr[0], 
aligned_ptr[1]}; + + if (!PTR_IS_FOUR_BYTE_ALIGNED) + { + uint32_t tail = aligned_ptr[2]; + data_out.x = __funnelshift_r(data_out.x, data_out.y, bit_shift); + data_out.y = __funnelshift_r(data_out.y, tail, bit_shift); + } +} + +template +_CCCL_FORCEINLINE _CCCL_DEVICE void +LoadVectorAndFunnelShiftR(uint32_t const* aligned_ptr, uint32_t bit_shift, uint32_t& data_out) +{ + data_out = aligned_ptr[0]; + + if (!PTR_IS_FOUR_BYTE_ALIGNED) + { + uint32_t tail = aligned_ptr[1]; + data_out = __funnelshift_r(data_out, tail, bit_shift); + } +} + +/** + * @brief Loads data from \p ptr into \p data_out without requiring \p ptr to be aligned. + * @note If \p ptr isn't aligned to four bytes, the bytes from the last four-byte aligned address up + * to \p ptr are loaded too (but dropped) and, hence, need to be device-accessible. Similarly, if + * \p ptr isn't aligned to four bytes, the bytes from `(ptr + sizeof(VectorT))` up to the following + * four-byte aligned address are loaded too (but dropped), and, hence, need to be device-accessible. 
+ * + * @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t) + * @param ptr The pointer from which the data is supposed to be loaded + * @param data_out The vector type that stores the data loaded from \p ptr + */ +template +_CCCL_FORCEINLINE _CCCL_DEVICE void LoadVector(const char* ptr, VectorT& data_out) +{ + const uint32_t offset = reinterpret_cast(ptr) % 4U; + const uint32_t* aligned_ptr = reinterpret_cast(ptr - offset); + constexpr uint32_t bits_per_byte = 8U; + const uint32_t bit_shift = offset * bits_per_byte; + + // If `ptr` is aligned to four bytes, we can perform a simple uint32_t-aliased load + if (offset == 0) + { + LoadVectorAndFunnelShiftR(aligned_ptr, bit_shift, data_out); + } + // Otherwise, we need to load extra bytes and perform funnel-shifting + else + { + LoadVectorAndFunnelShiftR(aligned_ptr, bit_shift, data_out); + } +} + +/** + * @brief Helper data structure to hold information on the byte range for which we can safely + * perform vectorized copies. + * + * @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t) + */ +template +struct PointerRange +{ + VectorT* out_begin; + VectorT* out_end; + const char* in_begin; + const char* in_end; +}; + +/** + * @brief Both `out_start_aligned` and `out_end_aligned` are indices into `out_ptr`. + * `out_start_aligned` is the first VectorT-aligned memory location after `out_ptr + 3`. + * `out_end_aligned` is the last VectorT-aligned memory location before `out_end - 4`, where out_end + * corresponds to one past the last byte to be copied. Bytes between `[out_start_aligned, + * out_end_aligned)` will be copied using VectorT. `out_ptr + 3` and `out_end - 4` are used instead + * of `out_ptr` and `out_end` to avoid `LoadVector` reading beyond data boundaries. 
+ * + * @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t) + * @tparam ByteOffsetT Type used to index the bytes within the buffers + * @param in_begin Pointer to the beginning of the byte range that shall be copied + * @param out_begin Pointer to the beginning of the byte range that shall be copied + * @param num_bytes Number of bytes that shall be copied + * @return The byte range that can safely be copied using vectorized stores of type VectorT + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE PointerRange +GetAlignedPtrs(const void* in_begin, void* out_begin, ByteOffsetT num_bytes) +{ + // Data type size used for vectorized stores + constexpr size_t out_datatype_size = sizeof(VectorT); + // Data type size used for type-aliased loads + constexpr size_t in_datatype_size = sizeof(uint32_t); + + // char-aliased ptrs to simplify pointer arithmetic + char* out_ptr = reinterpret_cast(out_begin); + const char* in_ptr = reinterpret_cast(in_begin); + + // Number of bytes between the first VectorT-aligned address at or before out_begin and out_begin + const uint32_t alignment_offset = reinterpret_cast(out_ptr) % out_datatype_size; + + // The first VectorT-aligned address before (or at) out_begin + char* out_chars_aligned = reinterpret_cast(out_ptr - alignment_offset); + + // The number of extra bytes preceding `in_ptr` that are loaded but dropped + uint32_t in_extra_bytes = reinterpret_cast(in_ptr) % in_datatype_size; + + // The offset required by `LoadVector`: + // If the input pointer is not aligned, we load data from the last aligned address preceding the + // pointer. 
That is, loading up to (in_datatype_size-1) bytes before `in_ptr` + uint32_t in_offset_req = in_extra_bytes; + + // Bytes after `out_chars_aligned` to the first VectorT-aligned address at or after `out_begin` + uint32_t out_start_aligned = + CUB_QUOTIENT_CEILING(in_offset_req + alignment_offset, out_datatype_size) * out_datatype_size; + + // Compute the beginning of the aligned ranges (output and input pointers) + VectorT* out_aligned_begin = reinterpret_cast(out_chars_aligned + out_start_aligned); + const char* in_aligned_begin = in_ptr + (reinterpret_cast(out_aligned_begin) - out_ptr); + + // If the aligned range is not aligned for the input pointer, we load up to (in_datatype_size-1) + // bytes after the last byte that is copied. That is, we always load four bytes up to the next + // aligned input address at a time. E.g., if the last byte loaded is one byte past the last + // aligned address we'll also load the three bytes after that byte. + uint32_t in_extra_bytes_from_aligned = (reinterpret_cast(in_aligned_begin) % in_datatype_size); + uint32_t in_end_padding_req = (in_datatype_size - in_extra_bytes_from_aligned) % in_datatype_size; + + // Bytes after `out_chars_aligned` to the last VectorT-aligned + // address at (or before) `out_begin` + `num_bytes` + uint32_t out_end_aligned{}; + if (in_end_padding_req + alignment_offset > num_bytes) + { + out_end_aligned = out_start_aligned; + } + else + { + out_end_aligned = (num_bytes - in_end_padding_req + alignment_offset) / out_datatype_size * out_datatype_size; + } + + VectorT* out_aligned_end = reinterpret_cast(out_chars_aligned + out_end_aligned); + const char* in_aligned_end = in_ptr + (reinterpret_cast(out_aligned_end) - out_ptr); + + return {out_aligned_begin, out_aligned_end, in_aligned_begin, in_aligned_end}; +} + +/** + * @brief Cooperatively copies \p num_bytes from \p src to \p dest using vectorized stores of type + * \p VectorT for addresses within [dest, dest + num_bytes) that are aligned to \p VectorT. 
A + * byte-wise copy is used for byte-ranges that are not aligned to \p VectorT. + * + * @tparam LOGICAL_WARP_SIZE The number of threads cooperaing to copy the data; all threads within + * [0, `LOGICAL_WARP_SIZE`) must invoke this method with the same arguments + * @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t) + * @tparam ByteOffsetT Type used to index the bytes within the buffers + * @param thread_rank The thread rank within the group that cooperates to copy the data must be + * within [0, `LOGICAL_WARP_SIZE`) + * @param dest Pointer to the memory location to copy to + * @param num_bytes Number of bytes to copy + * @param src Pointer to the memory location to copy from + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +VectorizedCopy(int32_t thread_rank, void* dest, ByteOffsetT num_bytes, const void* src) +{ + char* out_ptr = reinterpret_cast(dest); + const char* in_ptr = reinterpret_cast(src); + + // Gets the byte range that can safely be copied using vectorized stores of type VectorT + auto aligned_range = GetAlignedPtrs(src, dest, num_bytes); + + // If byte range for which we can use vectorized copies is empty -> use byte-wise copies + if (aligned_range.out_end <= aligned_range.out_begin) + { + for (ByteOffsetT ichar = thread_rank; ichar < num_bytes; ichar += LOGICAL_WARP_SIZE) + { + out_ptr[ichar] = in_ptr[ichar]; + } + } + else + { + // Copy bytes in range `[dest, aligned_range.out_begin)` + out_ptr += thread_rank; + in_ptr += thread_rank; + while (out_ptr < reinterpret_cast(aligned_range.out_begin)) + { + *out_ptr = *in_ptr; + out_ptr += LOGICAL_WARP_SIZE; + in_ptr += LOGICAL_WARP_SIZE; + } + + // Copy bytes in range `[aligned_range.out_begin, aligned_range.out_end)` + VectorT* aligned_range_begin = aligned_range.out_begin + thread_rank; + const char* in_aligned_begin = aligned_range.in_begin + thread_rank * sizeof(VectorT); + while (aligned_range_begin < aligned_range.out_end) + { + VectorT data_in; + 
LoadVector(in_aligned_begin, data_in); + *aligned_range_begin = data_in; + in_aligned_begin += sizeof(VectorT) * LOGICAL_WARP_SIZE; + aligned_range_begin += LOGICAL_WARP_SIZE; + } + + // Copy bytes in range `[aligned_range.out_end, dest + num_bytes)`. + out_ptr = reinterpret_cast(aligned_range.out_end) + thread_rank; + in_ptr = aligned_range.in_end + thread_rank; + while (out_ptr < reinterpret_cast(dest) + num_bytes) + { + *out_ptr = *in_ptr; + out_ptr += LOGICAL_WARP_SIZE; + in_ptr += LOGICAL_WARP_SIZE; + } + } +} + +template ::type = 0> +_CCCL_DEVICE _CCCL_FORCEINLINE void +copy_items(InputBufferT input_buffer, OutputBufferT output_buffer, OffsetT num_bytes, OffsetT offset = 0) +{ + VectorizedCopy( + threadIdx.x % LOGICAL_WARP_SIZE, + &reinterpret_cast(output_buffer)[offset], + num_bytes, + &reinterpret_cast(input_buffer)[offset]); +} + +template ::type = 0> +_CCCL_DEVICE _CCCL_FORCEINLINE void +copy_items(InputBufferT input_buffer, OutputBufferT output_buffer, OffsetT num_items, OffsetT offset = 0) +{ + output_buffer += offset; + input_buffer += offset; + for (OffsetT i = threadIdx.x % LOGICAL_WARP_SIZE; i < num_items; i += LOGICAL_WARP_SIZE) + { + *(output_buffer + i) = *(input_buffer + i); + } +} + +template ::type = 0> +_CCCL_DEVICE _CCCL_FORCEINLINE AliasT read_item(InputIt buffer_src, OffsetT offset) +{ + return *(reinterpret_cast(buffer_src) + offset); +} + +template ::type = 0> +_CCCL_DEVICE _CCCL_FORCEINLINE AliasT read_item(InputIt buffer_src, OffsetT offset) +{ + return *(buffer_src + offset); +} + +template ::type = 0> +_CCCL_DEVICE _CCCL_FORCEINLINE void write_item(OutputIt buffer_dst, OffsetT offset, AliasT value) +{ + *(reinterpret_cast(buffer_dst) + offset) = value; +} + +template ::type = 0> +_CCCL_DEVICE _CCCL_FORCEINLINE void write_item(OutputIt buffer_dst, OffsetT offset, AliasT value) +{ + *(buffer_dst + offset) = value; +} + +/** + * @brief A helper class that allows threads to maintain multiple counters, where the counter that + * shall be 
incremented can be addressed dynamically without incurring register spillage. + * + * @tparam NUM_ITEMS The number of counters to allocate + * @tparam MAX_ITEM_VALUE The maximum count that must be supported. + * @tparam PREFER_POW2_BITS Whether the number of bits to dedicate to each counter should be a + * power-of-two. If enabled, this allows replacing integer multiplication with a bit-shift in + * exchange for higher register pressure. + * @tparam BackingUnitT The data type that is used to provide the bits of all the counters that + * shall be allocated. + */ +template +class BitPackedCounter +{ +private: + /// The minimum number of bits required to represent all values from [0, MAX_ITEM_VALUE] + static constexpr uint32_t MIN_BITS_PER_ITEM = + (MAX_ITEM_VALUE == 0U) ? 1U : cub::Log2(MAX_ITEM_VALUE + 1U)>::VALUE; + + /// The number of bits allocated for each item. For pre-Volta, we prefer a power-of-2 here to + /// have the compiler replace costly integer multiplication with bit-shifting. + static constexpr uint32_t BITS_PER_ITEM = + PREFER_POW2_BITS ? (0x01ULL << (cub::Log2(MIN_BITS_PER_ITEM)>::VALUE)) : MIN_BITS_PER_ITEM; + + /// The number of bits that each backing data type can store + static constexpr uint32_t NUM_BITS_PER_UNIT = sizeof(BackingUnitT) * 8; + + /// The number of items that each backing data type can store + static constexpr uint32_t ITEMS_PER_UNIT = NUM_BITS_PER_UNIT / BITS_PER_ITEM; + + /// The number of bits the backing data type is actually making use of + static constexpr uint32_t USED_BITS_PER_UNIT = ITEMS_PER_UNIT * BITS_PER_ITEM; + + /// The number of backing data types required to store the given number of items + static constexpr uint32_t NUM_TOTAL_UNITS = CUB_QUOTIENT_CEILING(NUM_ITEMS, ITEMS_PER_UNIT); + + /// This is the net number of bit-storage provided by each unit (remainder bits are unused) + static constexpr uint32_t UNIT_MASK = + (USED_BITS_PER_UNIT >= (8U * sizeof(uint32_t))) ? 
0xFFFFFFFF : (0x01U << USED_BITS_PER_UNIT) - 1; + /// This is the bit-mask for each item + static constexpr uint32_t ITEM_MASK = + (BITS_PER_ITEM >= (8U * sizeof(uint32_t))) ? 0xFFFFFFFF : (0x01U << BITS_PER_ITEM) - 1; + + //------------------------------------------------------------------------------ + // ACCESSORS + //------------------------------------------------------------------------------ + +public: + _CCCL_DEVICE _CCCL_FORCEINLINE uint32_t Get(uint32_t index) const + { + const uint32_t target_offset = index * BITS_PER_ITEM; + uint32_t val = 0; + +#pragma unroll + for (uint32_t i = 0; i < NUM_TOTAL_UNITS; ++i) + { + // In case the bit-offset of the counter at is larger than the bit range of the + // current unit, the bit_shift amount will be larger than the bits provided by this unit. As + // C++'s bit-shift has undefined behaviour if the bits being shifted exceed the operand width, + // we use the PTX instruction `shr` to make sure behaviour is well-defined. + // Negative bit-shift amounts wrap around in unsigned integer math and are ultimately clamped. + const uint32_t bit_shift = target_offset - i * USED_BITS_PER_UNIT; + val |= detail::LogicShiftRight(data[i], bit_shift) & ITEM_MASK; + } + return val; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void Add(uint32_t index, uint32_t value) + { + const uint32_t target_offset = index * BITS_PER_ITEM; + +#pragma unroll + for (uint32_t i = 0; i < NUM_TOTAL_UNITS; ++i) + { + // In case the bit-offset of the counter at is larger than the bit range of the + // current unit, the bit_shift amount will be larger than the bits provided by this unit. As + // C++'s bit-shift has undefined behaviour if the bits being shifted exceed the operand width, + // we use the PTX instruction `shl` to make sure behaviour is well-defined. + // Negative bit-shift amounts wrap around in unsigned integer math and are ultimately clamped. 
+ const uint32_t bit_shift = target_offset - i * USED_BITS_PER_UNIT; + data[i] += detail::LogicShiftLeft(value, bit_shift) & UNIT_MASK; + } + } + + _CCCL_DEVICE BitPackedCounter operator+(const BitPackedCounter& rhs) const + { + BitPackedCounter result; +#pragma unroll + for (uint32_t i = 0; i < NUM_TOTAL_UNITS; ++i) + { + result.data[i] = data[i] + rhs.data[i]; + } + return result; + } + + //------------------------------------------------------------------------------ + // MEMBER VARIABLES + //------------------------------------------------------------------------------ + +private: + BackingUnitT data[NUM_TOTAL_UNITS] = {}; +}; + +/** + * Parameterizable tuning policy type for AgentBatchMemcpy + */ +template +struct AgentBatchMemcpyPolicy +{ + /// Threads per thread block + static constexpr uint32_t BLOCK_THREADS = _BLOCK_THREADS; + /// Items per thread (per tile of input) + static constexpr uint32_t BUFFERS_PER_THREAD = _BUFFERS_PER_THREAD; + /// The number of bytes that each thread will work on with each iteration of reading in bytes + /// from one or more + // source-buffers and writing them out to the respective destination-buffers. 
+ static constexpr uint32_t TLEV_BYTES_PER_THREAD = _TLEV_BYTES_PER_THREAD; + /// Whether the BitPackedCounter should prefer allocating a power-of-2 number of bits per + /// counter + static constexpr uint32_t PREFER_POW2_BITS = _PREFER_POW2_BITS; + /// BLEV tile size granularity + static constexpr uint32_t BLOCK_LEVEL_TILE_SIZE = _BLOCK_LEVEL_TILE_SIZE; + + static constexpr uint32_t WARP_LEVEL_THRESHOLD = _WARP_LEVEL_THRESHOLD; + static constexpr uint32_t BLOCK_LEVEL_THRESHOLD = _BLOCK_LEVEL_THRESHOLD; + + using buff_delay_constructor = BuffDelayConstructor; + using block_delay_constructor = BlockDelayConstructor; +}; + +template +class AgentBatchMemcpy +{ +private: + //--------------------------------------------------------------------- + // CONFIGS / CONSTANTS + //--------------------------------------------------------------------- + // Tuning policy-based configurations + static constexpr uint32_t BLOCK_THREADS = AgentMemcpySmallBuffersPolicyT::BLOCK_THREADS; + static constexpr uint32_t BUFFERS_PER_THREAD = AgentMemcpySmallBuffersPolicyT::BUFFERS_PER_THREAD; + static constexpr uint32_t TLEV_BYTES_PER_THREAD = AgentMemcpySmallBuffersPolicyT::TLEV_BYTES_PER_THREAD; + static constexpr bool PREFER_POW2_BITS = AgentMemcpySmallBuffersPolicyT::PREFER_POW2_BITS; + static constexpr uint32_t BLOCK_LEVEL_TILE_SIZE = AgentMemcpySmallBuffersPolicyT::BLOCK_LEVEL_TILE_SIZE; + + // Derived configs + static constexpr uint32_t BUFFERS_PER_BLOCK = BUFFERS_PER_THREAD * BLOCK_THREADS; + static constexpr uint32_t TLEV_BUFFERS_PER_THREAD = BUFFERS_PER_THREAD; + static constexpr uint32_t BLEV_BUFFERS_PER_THREAD = BUFFERS_PER_THREAD; + + static constexpr uint32_t WARP_LEVEL_THRESHOLD = AgentMemcpySmallBuffersPolicyT::WARP_LEVEL_THRESHOLD; + + static constexpr uint32_t BLOCK_LEVEL_THRESHOLD = AgentMemcpySmallBuffersPolicyT::BLOCK_LEVEL_THRESHOLD; + + static constexpr uint32_t BUFFER_STABLE_PARTITION = false; + + // Constants + enum : uint32_t + { + TLEV_SIZE_CLASS = 0, + 
WLEV_SIZE_CLASS, + BLEV_SIZE_CLASS, + NUM_SIZE_CLASSES, + }; + + //--------------------------------------------------------------------- + // TYPE DECLARATIONS + //--------------------------------------------------------------------- + /// Internal load/store type. For byte-wise memcpy, a single-byte type + using AliasT = + typename ::cuda::std::conditional, + std::iterator_traits>>::type::value_type; + + /// Types of the input and output buffers + using InputBufferT = cub::detail::value_t; + using OutputBufferT = cub::detail::value_t; + + /// Type that has to be sufficiently large to hold any of the buffers' sizes. + /// The BufferSizeIteratorT's value type must be convertible to this type. + using BufferSizeT = cub::detail::value_t; + + /// Type used to index into the tile of buffers that this thread block is assigned to. + using BlockBufferOffsetT = uint16_t; + + /// Internal type used to index into the bytes of and represent size of a TLEV buffer + using TLevBufferSizeT = uint16_t; + + /** + * @brief Helper struct to simplify BlockExchange within a single four-byte word + */ + struct ZippedTLevByteAssignment + { + // The buffer id within this tile + BlockBufferOffsetT tile_buffer_id; + + // Byte-offset within that buffer + TLevBufferSizeT buffer_byte_offset; + }; + + /** + * POD to keep track of pairs after having partitioned this tile's + * buffers by their size. + */ + struct BufferTuple + { + // Size is only valid (and relevant) for buffers that are use thread-level collaboration + TLevBufferSizeT size; + + // The buffer id relative to this tile (i.e., the buffer id within this tile) + BlockBufferOffsetT buffer_id; + }; + + // Load buffers in a striped arrangement if we do not want to performa a stable partitioning into + // small, medium, and large buffers, otherwise load them in a blocked arrangement + using BufferLoadT = + BlockLoad(BLOCK_THREADS), + static_cast(BUFFERS_PER_THREAD), + BUFFER_STABLE_PARTITION ? 
BLOCK_LOAD_WARP_TRANSPOSE : BLOCK_LOAD_STRIPED>; + + // A vectorized counter that will count the number of buffers that fall into each of the + // size-classes. Where the size class represents the collaboration level that is required to + // process a buffer. The collaboration level being either: + //-> (1) TLEV (thread-level collaboration), requiring one or multiple threads but not a FULL warp + // to collaborate + //-> (2) WLEV (warp-level collaboration), requiring a full warp to collaborate on a buffer + //-> (3) BLEV (block-level collaboration), requiring one or multiple thread blocks to collaborate + // on a buffer */ + using VectorizedSizeClassCounterT = BitPackedCounter; + + // Block-level scan used to compute the write offsets + using BlockSizeClassScanT = cub::BlockScan(BLOCK_THREADS)>; + + // + using BlockBLevTileCountScanT = cub::BlockScan(BLOCK_THREADS)>; + + // Block-level run-length decode algorithm to evenly distribute work of all buffers requiring + // thread-level collaboration + using BlockRunLengthDecodeT = + cub::BlockRunLengthDecode(BLOCK_THREADS), + static_cast(TLEV_BUFFERS_PER_THREAD), + static_cast(TLEV_BYTES_PER_THREAD)>; + + using BlockExchangeTLevT = + cub::BlockExchange(BLOCK_THREADS), + static_cast(TLEV_BYTES_PER_THREAD)>; + + using BLevBuffScanPrefixCallbackOpT = + TilePrefixCallbackOp, + BLevBufferOffsetTileState, + 0, + typename AgentMemcpySmallBuffersPolicyT::buff_delay_constructor>; + + using BLevBlockScanPrefixCallbackOpT = + TilePrefixCallbackOp, + BLevBlockOffsetTileState, + 0, + typename AgentMemcpySmallBuffersPolicyT::block_delay_constructor>; + + //----------------------------------------------------------------------------- + // SHARED MEMORY DECLARATIONS + //----------------------------------------------------------------------------- + struct _TempStorage + { + union + { + typename BufferLoadT::TempStorage load_storage; + + // Stage 1: histogram over the size classes in preparation for partitioning buffers by size + 
typename BlockSizeClassScanT::TempStorage size_scan_storage; + + // Stage 2: Communicate the number ofer buffers requiring block-level collaboration + typename BLevBuffScanPrefixCallbackOpT::TempStorage buffer_scan_callback; + + // Stage 3; batch memcpy buffers that require only thread-level collaboration + struct + { + BufferTuple buffers_by_size_class[BUFFERS_PER_BLOCK]; + + // Stage 3.1: Write buffers requiring block-level collaboration to queue + union + { + struct + { + typename BLevBlockScanPrefixCallbackOpT::TempStorage block_scan_callback; + typename BlockBLevTileCountScanT::TempStorage block_scan_storage; + } blev; + + // Stage 3.3: run-length decode & block exchange for tlev + // rld_state needs to be persistent across loop iterations (RunLengthDecode calls) and, + // hence, cannot alias block_exchange_storage + struct + { + typename BlockRunLengthDecodeT::TempStorage rld_state; + typename BlockExchangeTLevT::TempStorage block_exchange_storage; + } tlev; + }; + } staged; + }; + BufferOffsetT blev_buffer_offset; + }; + + //----------------------------------------------------------------------------- + // PUBLIC TYPE MEMBERS + //----------------------------------------------------------------------------- + +public: + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //----------------------------------------------------------------------------- + // PRIVATE MEMBER FUNCTIONS + //----------------------------------------------------------------------------- + +private: + /// Shared storage reference + _TempStorage& temp_storage; + + /** + * @brief Loads this tile's buffers' sizes, without any guards (i.e., out-of-bounds checks) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + LoadBufferSizesFullTile(BufferSizeIteratorT tile_buffer_sizes_it, BufferSizeT (&buffer_sizes)[BUFFERS_PER_THREAD]) + { + BufferLoadT(temp_storage.load_storage).Load(tile_buffer_sizes_it, buffer_sizes); + } + + /** + * @brief Loads this tile's buffers' sizes, making sure to read at 
most \p num_valid items. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadBufferSizesPartialTile( + BufferSizeIteratorT tile_buffer_sizes_it, BufferSizeT (&buffer_sizes)[BUFFERS_PER_THREAD], BufferOffsetT num_valid) + { + // Out-of-bounds buffer items are initialized to '0', so those buffers will simply be ignored + // later on + constexpr BufferSizeT OOB_DEFAULT_BUFFER_SIZE = 0U; + + BufferLoadT(temp_storage.load_storage).Load(tile_buffer_sizes_it, buffer_sizes, num_valid, OOB_DEFAULT_BUFFER_SIZE); + } + + /** + * @brief Computes the histogram over the number of buffers belonging to each of the three + * size-classes (TLEV, WLEV, BLEV). + */ + _CCCL_DEVICE _CCCL_FORCEINLINE VectorizedSizeClassCounterT + GetBufferSizeClassHistogram(const BufferSizeT (&buffer_sizes)[BUFFERS_PER_THREAD]) + { + VectorizedSizeClassCounterT vectorized_counters{}; +#pragma unroll + for (uint32_t i = 0; i < BUFFERS_PER_THREAD; i++) + { + // Whether to increment ANY of the buffer size classes at all + const uint32_t increment = buffer_sizes[i] > 0 ? 1U : 0U; + // Identify the buffer's size class + uint32_t buffer_size_class = 0; + buffer_size_class += buffer_sizes[i] > WARP_LEVEL_THRESHOLD ? 1U : 0U; + buffer_size_class += buffer_sizes[i] > BLOCK_LEVEL_THRESHOLD ? 1U : 0U; + + // Increment the count of the respective size class + vectorized_counters.Add(buffer_size_class, increment); + } + return vectorized_counters; + } + + /** + * @brief Scatters the buffers into the respective buffer's size-class partition. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void PartitionBuffersBySize( + const BufferSizeT (&buffer_sizes)[BUFFERS_PER_THREAD], + VectorizedSizeClassCounterT& vectorized_offsets, + BufferTuple (&buffers_by_size_class)[BUFFERS_PER_BLOCK]) + { + // If we intend to perform a stable partitioning, the thread's buffer are in a blocked + // arrangement, otherwise they are in a striped arrangement + BlockBufferOffsetT buffer_id = BUFFER_STABLE_PARTITION ? 
(BUFFERS_PER_THREAD * threadIdx.x) : (threadIdx.x); + constexpr BlockBufferOffsetT BUFFER_STRIDE = + BUFFER_STABLE_PARTITION ? static_cast(1) : static_cast(BLOCK_THREADS); + +#pragma unroll + for (uint32_t i = 0; i < BUFFERS_PER_THREAD; i++) + { + if (buffer_sizes[i] > 0) + { + uint32_t buffer_size_class = 0; + buffer_size_class += buffer_sizes[i] > WARP_LEVEL_THRESHOLD ? 1U : 0U; + buffer_size_class += buffer_sizes[i] > BLOCK_LEVEL_THRESHOLD ? 1U : 0U; + const uint32_t write_offset = vectorized_offsets.Get(buffer_size_class); + buffers_by_size_class[write_offset] = {static_cast(buffer_sizes[i]), buffer_id}; + vectorized_offsets.Add(buffer_size_class, 1U); + } + buffer_id += BUFFER_STRIDE; + } + } + + /** + * @brief Read in all the buffers that require block-level collaboration and put them to a queue + * that will get picked up in a separate, subsequent kernel. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void EnqueueBLEVBuffers( + BufferTuple* buffers_by_size_class, + InputBufferIt tile_buffer_srcs, + OutputBufferIt tile_buffer_dsts, + BufferSizeIteratorT tile_buffer_sizes, + BlockBufferOffsetT num_blev_buffers, + BufferOffsetT tile_buffer_offset, + BufferOffsetT tile_id) + { + BlockOffsetT block_offset[BLEV_BUFFERS_PER_THREAD]; + // Read in the BLEV buffer partition (i.e., the buffers that require block-level collaboration) + uint32_t blev_buffer_offset = threadIdx.x * BLEV_BUFFERS_PER_THREAD; +#pragma unroll + for (uint32_t i = 0; i < BLEV_BUFFERS_PER_THREAD; i++) + { + if (blev_buffer_offset < num_blev_buffers) + { + BlockBufferOffsetT tile_buffer_id = buffers_by_size_class[blev_buffer_offset].buffer_id; + block_offset[i] = CUB_QUOTIENT_CEILING(tile_buffer_sizes[tile_buffer_id], BLOCK_LEVEL_TILE_SIZE); + } + else + { + // Out-of-bounds buffers are assigned a tile count of '0' + block_offset[i] = 0U; + } + blev_buffer_offset++; + } + + if (tile_id == 0) + { + BlockOffsetT block_aggregate; + BlockBLevTileCountScanT(temp_storage.staged.blev.block_scan_storage) + 
.ExclusiveSum(block_offset, block_offset, block_aggregate); + if (threadIdx.x == 0) + { + blev_block_scan_state.SetInclusive(0, block_aggregate); + } + } + else + { + BLevBlockScanPrefixCallbackOpT blev_tile_prefix_op( + blev_block_scan_state, temp_storage.staged.blev.block_scan_callback, ::cuda::std::plus<>{}, tile_id); + BlockBLevTileCountScanT(temp_storage.staged.blev.block_scan_storage) + .ExclusiveSum(block_offset, block_offset, blev_tile_prefix_op); + } + __syncthreads(); + + // Read in the BLEV buffer partition (i.e., the buffers that require block-level collaboration) + blev_buffer_offset = threadIdx.x * BLEV_BUFFERS_PER_THREAD; +#pragma unroll + for (uint32_t i = 0; i < BLEV_BUFFERS_PER_THREAD; i++) + { + if (blev_buffer_offset < num_blev_buffers) + { + BlockBufferOffsetT tile_buffer_id = buffers_by_size_class[blev_buffer_offset].buffer_id; + blev_buffer_srcs[tile_buffer_offset + blev_buffer_offset] = tile_buffer_srcs[tile_buffer_id]; + blev_buffer_dsts[tile_buffer_offset + blev_buffer_offset] = tile_buffer_dsts[tile_buffer_id]; + blev_buffer_sizes[tile_buffer_offset + blev_buffer_offset] = tile_buffer_sizes[tile_buffer_id]; + blev_buffer_tile_offsets[tile_buffer_offset + blev_buffer_offset] = block_offset[i]; + blev_buffer_offset++; + } + } + } + + /** + * @brief Read in all the buffers of this tile that require warp-level collaboration and copy + * their bytes to the corresponding destination buffer + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void BatchMemcpyWLEVBuffers( + BufferTuple* buffers_by_size_class, + InputBufferIt tile_buffer_srcs, + OutputBufferIt tile_buffer_dsts, + BufferSizeIteratorT tile_buffer_sizes, + BlockBufferOffsetT num_wlev_buffers) + { + const int32_t warp_id = threadIdx.x / CUB_PTX_WARP_THREADS; + constexpr uint32_t WARPS_PER_BLOCK = BLOCK_THREADS / CUB_PTX_WARP_THREADS; + + for (BlockBufferOffsetT buffer_offset = warp_id; buffer_offset < num_wlev_buffers; buffer_offset += WARPS_PER_BLOCK) + { + const auto buffer_id = 
buffers_by_size_class[buffer_offset].buffer_id; + copy_items( + tile_buffer_srcs[buffer_id], tile_buffer_dsts[buffer_id], tile_buffer_sizes[buffer_id]); + } + } + + /** + * @brief Read in all the buffers of this tile that require thread-level collaboration and copy + * their bytes to the corresponding destination buffer + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void BatchMemcpyTLEVBuffers( + BufferTuple* buffers_by_size_class, + InputBufferIt tile_buffer_srcs, + OutputBufferIt tile_buffer_dsts, + BlockBufferOffsetT num_tlev_buffers) + { + // Read in the buffers' ids that require thread-level collaboration (where buffer id is the + // buffer within this tile) + BlockBufferOffsetT tlev_buffer_ids[TLEV_BUFFERS_PER_THREAD]; + TLevBufferSizeT tlev_buffer_sizes[TLEV_BUFFERS_PER_THREAD]; + // Currently we do not go over the TLEV buffers in multiple iterations, so we need to make sure + // we are able to be covered for the case that all our buffers are TLEV buffers + static_assert(TLEV_BUFFERS_PER_THREAD >= BUFFERS_PER_THREAD, + "Unsupported confiugraiton: The number of 'thread-level buffers' must be at " + "least as large as the number of overall buffers being processed by each " + "thread."); + + // Read in the TLEV buffer partition (i.e., the buffers that require thread-level collaboration) + uint32_t tlev_buffer_offset = threadIdx.x * TLEV_BUFFERS_PER_THREAD; + + // Pre-populate the buffer sizes to 0 (i.e. 
zero-padding towards the end) to ensure + // out-of-bounds TLEV buffers will not be considered +#pragma unroll + for (uint32_t i = 0; i < TLEV_BUFFERS_PER_THREAD; i++) + { + tlev_buffer_sizes[i] = 0; + } + + // Assign TLEV buffers in a blocked arrangement (each thread is assigned consecutive TLEV + // buffers) +#pragma unroll + for (uint32_t i = 0; i < TLEV_BUFFERS_PER_THREAD; i++) + { + if (tlev_buffer_offset < num_tlev_buffers) + { + tlev_buffer_ids[i] = buffers_by_size_class[tlev_buffer_offset].buffer_id; + tlev_buffer_sizes[i] = buffers_by_size_class[tlev_buffer_offset].size; + } + tlev_buffer_offset++; + } + + // Evenly distribute all the bytes that have to be copied from all the buffers that require + // thread-level collaboration using BlockRunLengthDecode + uint32_t num_total_tlev_bytes = 0U; + BlockRunLengthDecodeT block_run_length_decode( + temp_storage.staged.tlev.rld_state, tlev_buffer_ids, tlev_buffer_sizes, num_total_tlev_bytes); + + // Run-length decode the buffers' sizes into a window buffer of limited size. 
This is repeated + // until we were able to cover all the bytes of TLEV buffers + uint32_t decoded_window_offset = 0U; + while (decoded_window_offset < num_total_tlev_bytes) + { + BlockBufferOffsetT buffer_id[TLEV_BYTES_PER_THREAD]; + TLevBufferSizeT buffer_byte_offset[TLEV_BYTES_PER_THREAD]; + + // Now we have a balanced assignment: buffer_id[i] will hold the tile's buffer id and + // buffer_byte_offset[i] that buffer's byte that this thread supposed to copy + block_run_length_decode.RunLengthDecode(buffer_id, buffer_byte_offset, decoded_window_offset); + + // Zip from SoA to AoS + ZippedTLevByteAssignment zipped_byte_assignment[TLEV_BYTES_PER_THREAD]; +#pragma unroll + for (int32_t i = 0; i < TLEV_BYTES_PER_THREAD; i++) + { + zipped_byte_assignment[i] = {buffer_id[i], buffer_byte_offset[i]}; + } + + // Exchange from blocked to striped arrangement for coalesced memory reads and writes + BlockExchangeTLevT(temp_storage.staged.tlev.block_exchange_storage) + .BlockedToStriped(zipped_byte_assignment, zipped_byte_assignment); + + // Read in the bytes that this thread is assigned to + constexpr uint32_t WINDOW_SIZE = (TLEV_BYTES_PER_THREAD * BLOCK_THREADS); + const bool is_full_window = decoded_window_offset + WINDOW_SIZE < num_total_tlev_bytes; + if (is_full_window) + { + uint32_t absolute_tlev_byte_offset = decoded_window_offset + threadIdx.x; + AliasT src_byte[TLEV_BYTES_PER_THREAD]; +#pragma unroll + for (int32_t i = 0; i < TLEV_BYTES_PER_THREAD; i++) + { + src_byte[i] = read_item( + tile_buffer_srcs[zipped_byte_assignment[i].tile_buffer_id], zipped_byte_assignment[i].buffer_byte_offset); + absolute_tlev_byte_offset += BLOCK_THREADS; + } +#pragma unroll + for (int32_t i = 0; i < TLEV_BYTES_PER_THREAD; i++) + { + write_item( + tile_buffer_dsts[zipped_byte_assignment[i].tile_buffer_id], + zipped_byte_assignment[i].buffer_byte_offset, + src_byte[i]); + } + } + else + { + uint32_t absolute_tlev_byte_offset = decoded_window_offset + threadIdx.x; +#pragma unroll + for 
(int32_t i = 0; i < TLEV_BYTES_PER_THREAD; i++) + { + if (absolute_tlev_byte_offset < num_total_tlev_bytes) + { + const AliasT src_byte = read_item( + tile_buffer_srcs[zipped_byte_assignment[i].tile_buffer_id], zipped_byte_assignment[i].buffer_byte_offset); + write_item( + tile_buffer_dsts[zipped_byte_assignment[i].tile_buffer_id], + zipped_byte_assignment[i].buffer_byte_offset, + src_byte); + } + absolute_tlev_byte_offset += BLOCK_THREADS; + } + } + + decoded_window_offset += WINDOW_SIZE; + + // Ensure all threads finished collaborative BlockExchange so temporary storage can be reused + // with next iteration + __syncthreads(); + } + } + + //----------------------------------------------------------------------------- + // PUBLIC MEMBER FUNCTIONS + //----------------------------------------------------------------------------- + +public: + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile(BufferOffsetT tile_id) + { + // Offset into this tile's buffers + BufferOffsetT buffer_offset = tile_id * BUFFERS_PER_BLOCK; + + // Indicates whether all of this tiles items are within bounds + bool is_full_tile = buffer_offset + BUFFERS_PER_BLOCK < num_buffers; + + // Load the buffer sizes of this tile's buffers + BufferSizeIteratorT tile_buffer_sizes_it = buffer_sizes_it + buffer_offset; + BufferSizeT buffer_sizes[BUFFERS_PER_THREAD]; + if (is_full_tile) + { + LoadBufferSizesFullTile(tile_buffer_sizes_it, buffer_sizes); + } + else + { + LoadBufferSizesPartialTile(tile_buffer_sizes_it, buffer_sizes, num_buffers - buffer_offset); + } + + // Ensure we can repurpose the BlockLoad's temporary storage + __syncthreads(); + + // Count how many buffers fall into each size-class + VectorizedSizeClassCounterT size_class_histogram = GetBufferSizeClassHistogram(buffer_sizes); + + // Compute the prefix sum over the histogram + VectorizedSizeClassCounterT size_class_agg = {}; + BlockSizeClassScanT(temp_storage.size_scan_storage) + .ExclusiveSum(size_class_histogram, size_class_histogram, 
size_class_agg); + + // Ensure we can repurpose the scan's temporary storage for scattering the buffer ids + __syncthreads(); + + // Factor in the per-size-class counts / offsets + // That is, WLEV buffer offset has to be offset by the TLEV buffer count and BLEV buffer offset + // has to be offset by the TLEV+WLEV buffer count + uint32_t buffer_count = 0U; + for (uint32_t i = 0; i < NUM_SIZE_CLASSES; i++) + { + size_class_histogram.Add(i, buffer_count); + buffer_count += size_class_agg.Get(i); + } + + // Signal the number of BLEV buffers we're planning to write out + BufferOffsetT buffer_exclusive_prefix = 0; + if (tile_id == 0) + { + if (threadIdx.x == 0) + { + blev_buffer_scan_state.SetInclusive(tile_id, size_class_agg.Get(BLEV_SIZE_CLASS)); + } + buffer_exclusive_prefix = 0; + } + else + { + BLevBuffScanPrefixCallbackOpT blev_buffer_prefix_op( + blev_buffer_scan_state, temp_storage.buffer_scan_callback, ::cuda::std::plus<>{}, tile_id); + + // Signal our partial prefix and wait for the inclusive prefix of previous tiles + if (threadIdx.x < CUB_PTX_WARP_THREADS) + { + buffer_exclusive_prefix = blev_buffer_prefix_op(size_class_agg.Get(BLEV_SIZE_CLASS)); + } + } + if (threadIdx.x == 0) + { + temp_storage.blev_buffer_offset = buffer_exclusive_prefix; + } + + // Ensure the prefix callback has finished using its temporary storage and that it can be reused + // in the next stage + __syncthreads(); + + // Scatter the buffers into one of the three partitions (TLEV, WLEV, BLEV) depending on their + // size + PartitionBuffersBySize(buffer_sizes, size_class_histogram, temp_storage.staged.buffers_by_size_class); + + // Ensure all buffers have been partitioned by their size class AND + // ensure that blev_buffer_offset has been written to shared memory + __syncthreads(); + + // TODO: think about prefetching tile_buffer_{srcs,dsts} into shmem + InputBufferIt tile_buffer_srcs = input_buffer_it + buffer_offset; + OutputBufferIt tile_buffer_dsts = output_buffer_it + buffer_offset; 
+ BufferSizeIteratorT tile_buffer_sizes = buffer_sizes_it + buffer_offset; + + // Copy block-level buffers + EnqueueBLEVBuffers( + &temp_storage.staged + .buffers_by_size_class[size_class_agg.Get(TLEV_SIZE_CLASS) + size_class_agg.Get(WLEV_SIZE_CLASS)], + tile_buffer_srcs, + tile_buffer_dsts, + tile_buffer_sizes, + size_class_agg.Get(BLEV_SIZE_CLASS), + temp_storage.blev_buffer_offset, + tile_id); + + // Ensure we can repurpose the temporary storage required by EnqueueBLEVBuffers + __syncthreads(); + + // Copy warp-level buffers + BatchMemcpyWLEVBuffers( + &temp_storage.staged.buffers_by_size_class[size_class_agg.Get(TLEV_SIZE_CLASS)], + tile_buffer_srcs, + tile_buffer_dsts, + tile_buffer_sizes, + size_class_agg.Get(WLEV_SIZE_CLASS)); + + // Perform batch memcpy for all the buffers that require thread-level collaboration + uint32_t num_tlev_buffers = size_class_agg.Get(TLEV_SIZE_CLASS); + BatchMemcpyTLEVBuffers( + temp_storage.staged.buffers_by_size_class, tile_buffer_srcs, tile_buffer_dsts, num_tlev_buffers); + } + + //----------------------------------------------------------------------------- + // CONSTRUCTOR + //----------------------------------------------------------------------------- + _CCCL_DEVICE _CCCL_FORCEINLINE AgentBatchMemcpy( + TempStorage& temp_storage, + InputBufferIt input_buffer_it, + OutputBufferIt output_buffer_it, + BufferSizeIteratorT buffer_sizes_it, + BufferOffsetT num_buffers, + BlevBufferSrcsOutItT blev_buffer_srcs, + BlevBufferDstsOutItT blev_buffer_dsts, + BlevBufferSizesOutItT blev_buffer_sizes, + BlevBufferTileOffsetsOutItT blev_buffer_tile_offsets, + BLevBufferOffsetTileState blev_buffer_scan_state, + BLevBlockOffsetTileState blev_block_scan_state) + : temp_storage(temp_storage.Alias()) + , input_buffer_it(input_buffer_it) + , output_buffer_it(output_buffer_it) + , buffer_sizes_it(buffer_sizes_it) + , num_buffers(num_buffers) + , blev_buffer_srcs(blev_buffer_srcs) + , blev_buffer_dsts(blev_buffer_dsts) + , 
blev_buffer_sizes(blev_buffer_sizes) + , blev_buffer_tile_offsets(blev_buffer_tile_offsets) + , blev_buffer_scan_state(blev_buffer_scan_state) + , blev_block_scan_state(blev_block_scan_state) + {} + +private: + // Iterator providing the pointers to the source memory buffers + InputBufferIt input_buffer_it; + // Iterator providing the pointers to the destination memory buffers + OutputBufferIt output_buffer_it; + // Iterator providing the number of bytes to be copied for each pair of buffers + BufferSizeIteratorT buffer_sizes_it; + // The total number of buffer pairs + BufferOffsetT num_buffers; + // Output iterator to which the source pointers of the BLEV buffers are written + BlevBufferSrcsOutItT blev_buffer_srcs; + // Output iterator to which the destination pointers of the BLEV buffers are written + BlevBufferDstsOutItT blev_buffer_dsts; + // Output iterator to which the number of bytes to be copied of the BLEV buffers are written + BlevBufferSizesOutItT blev_buffer_sizes; + // Output iterator to which the mapping of tiles to BLEV buffers is written + BlevBufferTileOffsetsOutItT blev_buffer_tile_offsets; + // The single-pass prefix scan's tile state used for tracking the prefix sum over the number of + // BLEV buffers + BLevBufferOffsetTileState blev_buffer_scan_state; + // The single-pass prefix scan's tile state used for tracking the prefix sum over tiles of BLEV + // buffers + BLevBlockOffsetTileState blev_block_scan_state; +}; +} // namespace batch_memcpy +} // namespace detail + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_for.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_for.cuh new file mode 100644 index 0000000000000000000000000000000000000000..dbbb77a0c613df9766684b2e00020f53070f6965 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_for.cuh @@ -0,0 +1,84 @@ 
+/****************************************************************************** + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ +namespace for_each +{ + +template +struct policy_t +{ + static constexpr int block_threads = BlockThreads; + static constexpr int items_per_thread = ItemsPerThread; +}; + +template +struct agent_block_striped_t +{ + static constexpr int items_per_thread = PolicyT::items_per_thread; + + OffsetT tile_base; + OpT op; + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void consume_tile(int items_in_tile, int block_threads) + { +#pragma unroll + for (int item = 0; item < items_per_thread; item++) + { + const auto idx = static_cast(block_threads * item + threadIdx.x); + + if (IsFullTile || idx < items_in_tile) + { + (void) op(tile_base + idx); + } + } + } +}; + +} // namespace for_each +} // namespace detail + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_histogram.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_histogram.cuh new file mode 100644 index 0000000000000000000000000000000000000000..291bba1ba7a3be7440527b16cf8b0cc2ac47362c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_histogram.cuh @@ -0,0 +1,949 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * cub::AgentHistogram implements a stateful abstraction of CUDA thread blocks for participating in device-wide + * histogram . 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy + ******************************************************************************/ + +/** + * + */ +enum BlockHistogramMemoryPreference +{ + GMEM, + SMEM, + BLEND +}; + +/** + * Parameterizable tuning policy type for AgentHistogram + * + * @tparam _BLOCK_THREADS + * Threads per thread block + * + * @tparam _PIXELS_PER_THREAD + * Pixels per thread (per tile of input) + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading input elements + * + * @tparam _RLE_COMPRESS + * Whether to perform localized RLE to compress samples before histogramming + * + * @tparam _MEM_PREFERENCE + * Whether to prefer privatized shared-memory bins (versus privatized global-memory bins) + * + * @tparam _WORK_STEALING + * Whether to dequeue tiles from a global work queue + * + * @tparam _VEC_SIZE + * Vector size for samples loading (1, 2, 4) + */ +template +struct AgentHistogramPolicy +{ + /// Threads per thread block + static constexpr int BLOCK_THREADS = _BLOCK_THREADS; + /// Pixels per thread (per tile of input) + static constexpr int PIXELS_PER_THREAD = _PIXELS_PER_THREAD; + + /// Whether to perform localized RLE to compress samples before histogramming + static constexpr bool IS_RLE_COMPRESS = _RLE_COMPRESS; + + /// Whether to prefer privatized shared-memory bins (versus privatized global-memory bins) + static constexpr BlockHistogramMemoryPreference MEM_PREFERENCE = _MEM_PREFERENCE; + + /// Whether to dequeue tiles from a global work 
queue + static constexpr bool IS_WORK_STEALING = _WORK_STEALING; + + /// Vector size for samples loading (1, 2, 4) + static constexpr int VEC_SIZE = _VEC_SIZE; + + ///< The BlockLoad algorithm to use + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + + ///< Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace histogram +{ + +/** + * @brief AgentHistogram implements a stateful abstraction of CUDA thread blocks for participating + * in device-wide histogram . + * + * @tparam AgentHistogramPolicyT + * Parameterized AgentHistogramPolicy tuning policy type + * + * @tparam PRIVATIZED_SMEM_BINS + * Number of privatized shared-memory histogram bins of any channel. Zero indicates privatized + * counters to be maintained in device-accessible memory. + * + * @tparam NUM_CHANNELS + * Number of channels interleaved in the input data. Supports up to four channels. 
+ * + * @tparam NUM_ACTIVE_CHANNELS + * Number of channels actively being histogrammed + * + * @tparam SampleIteratorT + * Random-access input iterator type for reading samples + * + * @tparam CounterT + * Integer type for counting sample occurrences per histogram bin + * + * @tparam PrivatizedDecodeOpT + * The transform operator type for determining privatized counter indices from samples, one for + * each channel + * + * @tparam OutputDecodeOpT + * The transform operator type for determining output bin-ids from privatized counter indices, one + * for each channel + * + * @tparam OffsetT + * Signed integer type for global offsets + * + * @tparam LEGACY_PTX_ARCH + * PTX compute capability (unused) + */ +template +struct AgentHistogram +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// The sample type of the input iterator + using SampleT = cub::detail::value_t; + + /// The pixel type of SampleT + using PixelT = typename CubVector::Type; + + /// The vec type of SampleT + static constexpr int VecSize = AgentHistogramPolicyT::VEC_SIZE; + using VecT = typename CubVector::Type; + + /// Constants + static constexpr int BLOCK_THREADS = AgentHistogramPolicyT::BLOCK_THREADS; + + static constexpr int PIXELS_PER_THREAD = AgentHistogramPolicyT::PIXELS_PER_THREAD; + static constexpr int SAMPLES_PER_THREAD = PIXELS_PER_THREAD * NUM_CHANNELS; + static constexpr int VECS_PER_THREAD = SAMPLES_PER_THREAD / VecSize; + + static constexpr int TILE_PIXELS = PIXELS_PER_THREAD * BLOCK_THREADS; + static constexpr int TILE_SAMPLES = SAMPLES_PER_THREAD * BLOCK_THREADS; + + static constexpr bool IS_RLE_COMPRESS = AgentHistogramPolicyT::IS_RLE_COMPRESS; + + static constexpr BlockHistogramMemoryPreference MEM_PREFERENCE = + (PRIVATIZED_SMEM_BINS > 0) ? 
AgentHistogramPolicyT::MEM_PREFERENCE : GMEM; + + static constexpr bool IS_WORK_STEALING = AgentHistogramPolicyT::IS_WORK_STEALING; + + /// Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = AgentHistogramPolicyT::LOAD_MODIFIER; + + /// Input iterator wrapper type (for applying cache modifier) + // Wrap the native input pointer with CacheModifiedInputIterator + // or directly use the supplied input iterator type + using WrappedSampleIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + SampleIteratorT>; + + /// Pixel input iterator type (for applying cache modifier) + using WrappedPixelIteratorT = CacheModifiedInputIterator; + + /// Qaud input iterator type (for applying cache modifier) + using WrappedVecsIteratorT = CacheModifiedInputIterator; + + /// Parameterized BlockLoad type for samples + using BlockLoadSampleT = BlockLoad; + + /// Parameterized BlockLoad type for pixels + using BlockLoadPixelT = BlockLoad; + + /// Parameterized BlockLoad type for vecs + using BlockLoadVecT = BlockLoad; + + /// Shared memory type required by this thread block + struct _TempStorage + { + // Smem needed for block-privatized smem histogram (with 1 word of padding) + CounterT histograms[NUM_ACTIVE_CHANNELS][PRIVATIZED_SMEM_BINS + 1]; + + int tile_idx; + + // Aliasable storage layout + union Aliasable + { + // Smem needed for loading a tile of samples + typename BlockLoadSampleT::TempStorage sample_load; + + // Smem needed for loading a tile of pixels + typename BlockLoadPixelT::TempStorage pixel_load; + + // Smem needed for loading a tile of vecs + typename BlockLoadVecT::TempStorage vec_load; + + } aliasable; + }; + + /// Temporary storage type (unionable) + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + /// Reference to 
temp_storage + _TempStorage& temp_storage; + + /// Sample input iterator (with cache modifier applied, if possible) + WrappedSampleIteratorT d_wrapped_samples; + + /// Native pointer for input samples (possibly nullptr if unavailable) + SampleT* d_native_samples; + + /// The number of output bins for each channel + int* num_output_bins; + + /// The number of privatized bins for each channel + int* num_privatized_bins; + + /// Copy of gmem privatized histograms for each channel + CounterT* d_privatized_histograms[NUM_ACTIVE_CHANNELS]; + + /// Reference to final output histograms (gmem) + CounterT** d_output_histograms; + + /// The transform operator for determining output bin-ids from privatized counter indices, one for each channel + OutputDecodeOpT* output_decode_op; + + /// The transform operator for determining privatized counter indices from samples, one for each channel + PrivatizedDecodeOpT* privatized_decode_op; + + /// Whether to prefer privatized smem counters vs privatized global counters + bool prefer_smem; + + //--------------------------------------------------------------------- + // Initialize privatized bin counters + //--------------------------------------------------------------------- + + // Initialize privatized bin counters + _CCCL_DEVICE _CCCL_FORCEINLINE void InitBinCounters(CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]) + { +// Initialize histogram bin counts to zeros +#pragma unroll + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + for (int privatized_bin = threadIdx.x; privatized_bin < num_privatized_bins[CHANNEL]; + privatized_bin += BLOCK_THREADS) + { + privatized_histograms[CHANNEL][privatized_bin] = 0; + } + } + + // Barrier to make sure all threads are done updating counters + __syncthreads(); + } + + // Initialize privatized bin counters. 
Specialized for privatized shared-memory counters + _CCCL_DEVICE _CCCL_FORCEINLINE void InitSmemBinCounters() + { + CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]; + + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL]; + } + + InitBinCounters(privatized_histograms); + } + + // Initialize privatized bin counters. Specialized for privatized global-memory counters + _CCCL_DEVICE _CCCL_FORCEINLINE void InitGmemBinCounters() + { + InitBinCounters(d_privatized_histograms); + } + + //--------------------------------------------------------------------- + // Update final output histograms + //--------------------------------------------------------------------- + + // Update final output histograms from privatized histograms + _CCCL_DEVICE _CCCL_FORCEINLINE void StoreOutput(CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]) + { + // Barrier to make sure all threads are done updating counters + __syncthreads(); + +// Apply privatized bin counts to output bin counts +#pragma unroll + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + int channel_bins = num_privatized_bins[CHANNEL]; + for (int privatized_bin = threadIdx.x; privatized_bin < channel_bins; privatized_bin += BLOCK_THREADS) + { + int output_bin = -1; + CounterT count = privatized_histograms[CHANNEL][privatized_bin]; + bool is_valid = count > 0; + + output_decode_op[CHANNEL].template BinSelect((SampleT) privatized_bin, output_bin, is_valid); + + if (output_bin >= 0) + { + atomicAdd(&d_output_histograms[CHANNEL][output_bin], count); + } + } + } + } + + // Update final output histograms from privatized histograms. 
Specialized for privatized shared-memory counters + _CCCL_DEVICE _CCCL_FORCEINLINE void StoreSmemOutput() + { + CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]; + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL]; + } + + StoreOutput(privatized_histograms); + } + + // Update final output histograms from privatized histograms. Specialized for privatized global-memory counters + _CCCL_DEVICE _CCCL_FORCEINLINE void StoreGmemOutput() + { + StoreOutput(d_privatized_histograms); + } + + //--------------------------------------------------------------------- + // Tile accumulation + //--------------------------------------------------------------------- + + // Accumulate pixels. Specialized for RLE compression. + _CCCL_DEVICE _CCCL_FORCEINLINE void AccumulatePixels( + SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], + bool is_valid[PIXELS_PER_THREAD], + CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS], + Int2Type is_rle_compress) + { +#pragma unroll + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + // Bin pixels + int bins[PIXELS_PER_THREAD]; + +#pragma unroll + for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) + { + bins[PIXEL] = -1; + privatized_decode_op[CHANNEL].template BinSelect( + samples[PIXEL][CHANNEL], bins[PIXEL], is_valid[PIXEL]); + } + + CounterT accumulator = 1; + +#pragma unroll + for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD - 1; ++PIXEL) + { + if (bins[PIXEL] != bins[PIXEL + 1]) + { + if (bins[PIXEL] >= 0) + { + atomicAdd(privatized_histograms[CHANNEL] + bins[PIXEL], accumulator); + } + + accumulator = 0; + } + accumulator++; + } + + // Last pixel + if (bins[PIXELS_PER_THREAD - 1] >= 0) + { + atomicAdd(privatized_histograms[CHANNEL] + bins[PIXELS_PER_THREAD - 1], accumulator); + } + } + } + + // Accumulate pixels. Specialized for individual accumulation of each pixel. 
+ _CCCL_DEVICE _CCCL_FORCEINLINE void AccumulatePixels( + SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], + bool is_valid[PIXELS_PER_THREAD], + CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS], + Int2Type is_rle_compress) + { +#pragma unroll + for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) + { +#pragma unroll + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + int bin = -1; + privatized_decode_op[CHANNEL].template BinSelect(samples[PIXEL][CHANNEL], bin, is_valid[PIXEL]); + if (bin >= 0) + { + atomicAdd(privatized_histograms[CHANNEL] + bin, 1); + } + } + } + } + + /** + * Accumulate pixel, specialized for smem privatized histogram + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + AccumulateSmemPixels(SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], bool is_valid[PIXELS_PER_THREAD]) + { + CounterT* privatized_histograms[NUM_ACTIVE_CHANNELS]; + + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + privatized_histograms[CHANNEL] = temp_storage.histograms[CHANNEL]; + } + + AccumulatePixels(samples, is_valid, privatized_histograms, Int2Type()); + } + + /** + * Accumulate pixel, specialized for gmem privatized histogram + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + AccumulateGmemPixels(SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS], bool is_valid[PIXELS_PER_THREAD]) + { + AccumulatePixels(samples, is_valid, d_privatized_histograms, Int2Type()); + } + + //--------------------------------------------------------------------- + // Tile loading + //--------------------------------------------------------------------- + + // Load full, aligned tile using pixel iterator (multi-channel) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadFullAlignedTile( + OffsetT block_offset, + int valid_samples, + SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], + Int2Type<_NUM_ACTIVE_CHANNELS> num_active_channels) + { + using AliasedPixels = PixelT[PIXELS_PER_THREAD]; + + WrappedPixelIteratorT d_wrapped_pixels((PixelT*) (d_native_samples 
+ block_offset)); + + // Load using a wrapped pixel iterator + BlockLoadPixelT(temp_storage.aliasable.pixel_load).Load(d_wrapped_pixels, reinterpret_cast(samples)); + } + + // Load full, aligned tile using vec iterator (single-channel) + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadFullAlignedTile( + OffsetT block_offset, + int valid_samples, + SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], + Int2Type<1> num_active_channels) + { + using AliasedVecs = VecT[VECS_PER_THREAD]; + + WrappedVecsIteratorT d_wrapped_vecs((VecT*) (d_native_samples + block_offset)); + + // Load using a wrapped vec iterator + BlockLoadVecT(temp_storage.aliasable.vec_load).Load(d_wrapped_vecs, reinterpret_cast(samples)); + } + + // Load full, aligned tile + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadTile( + OffsetT block_offset, + int valid_samples, + SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], + Int2Type is_full_tile, + Int2Type is_aligned) + { + LoadFullAlignedTile(block_offset, valid_samples, samples, Int2Type()); + } + + // Load full, mis-aligned tile using sample iterator + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadTile( + OffsetT block_offset, + int valid_samples, + SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], + Int2Type is_full_tile, + Int2Type is_aligned) + { + using AliasedSamples = SampleT[SAMPLES_PER_THREAD]; + + // Load using sample iterator + BlockLoadSampleT(temp_storage.aliasable.sample_load) + .Load(d_wrapped_samples + block_offset, reinterpret_cast(samples)); + } + + // Load partially-full, aligned tile using the pixel iterator + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadTile( + OffsetT block_offset, + int valid_samples, + SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], + Int2Type is_full_tile, + Int2Type is_aligned) + { + using AliasedPixels = PixelT[PIXELS_PER_THREAD]; + + WrappedPixelIteratorT d_wrapped_pixels((PixelT*) (d_native_samples + block_offset)); + + int valid_pixels = valid_samples / NUM_CHANNELS; + + // Load using a wrapped pixel iterator + 
BlockLoadPixelT(temp_storage.aliasable.pixel_load) + .Load(d_wrapped_pixels, reinterpret_cast(samples), valid_pixels); + } + + // Load partially-full, mis-aligned tile using sample iterator + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadTile( + OffsetT block_offset, + int valid_samples, + SampleT (&samples)[PIXELS_PER_THREAD][NUM_CHANNELS], + Int2Type is_full_tile, + Int2Type is_aligned) + { + using AliasedSamples = SampleT[SAMPLES_PER_THREAD]; + + BlockLoadSampleT(temp_storage.aliasable.sample_load) + .Load(d_wrapped_samples + block_offset, reinterpret_cast(samples), valid_samples); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + MarkValid(bool (&is_valid)[PIXELS_PER_THREAD], int valid_samples, Int2Type /* is_striped = false */) + { +#pragma unroll + for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) + { + is_valid[PIXEL] = IS_FULL_TILE || (((threadIdx.x * PIXELS_PER_THREAD + PIXEL) * NUM_CHANNELS) < valid_samples); + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + MarkValid(bool (&is_valid)[PIXELS_PER_THREAD], int valid_samples, Int2Type /* is_striped = true */) + { +#pragma unroll + for (int PIXEL = 0; PIXEL < PIXELS_PER_THREAD; ++PIXEL) + { + is_valid[PIXEL] = IS_FULL_TILE || (((threadIdx.x + BLOCK_THREADS * PIXEL) * NUM_CHANNELS) < valid_samples); + } + } + + //--------------------------------------------------------------------- + // Tile processing + //--------------------------------------------------------------------- + + /** + * @brief Consume a tile of data samples + * + * @tparam IS_ALIGNED + * Whether the tile offset is aligned (vec-aligned for single-channel, pixel-aligned for multi-channel) + * + * @tparam IS_FULL_TILE + Whether the tile is full + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile(OffsetT block_offset, int valid_samples) + { + SampleT samples[PIXELS_PER_THREAD][NUM_CHANNELS]; + bool is_valid[PIXELS_PER_THREAD]; + + // Load tile + LoadTile(block_offset, valid_samples, samples, Int2Type(), Int2Type()); 
+ + // Set valid flags + MarkValid( + is_valid, valid_samples, Int2Type{}); + + // Accumulate samples + if (prefer_smem) + { + AccumulateSmemPixels(samples, is_valid); + } + else + { + AccumulateGmemPixels(samples, is_valid); + } + } + + /** + * @brief Consume row tiles. Specialized for work-stealing from queue + * + * @param num_row_pixels + * The number of multi-channel pixels per row in the region of interest + * + * @param num_rows + * The number of rows in the region of interest + * + * @param row_stride_samples + * The number of samples between starts of consecutive rows in the region of interest + * + * @param tiles_per_row + * Number of image tiles per row + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTiles( + OffsetT num_row_pixels, + OffsetT num_rows, + OffsetT row_stride_samples, + int tiles_per_row, + GridQueue tile_queue, + Int2Type is_work_stealing) + { + int num_tiles = num_rows * tiles_per_row; + int tile_idx = (blockIdx.y * gridDim.x) + blockIdx.x; + OffsetT num_even_share_tiles = gridDim.x * gridDim.y; + + while (tile_idx < num_tiles) + { + int row = tile_idx / tiles_per_row; + int col = tile_idx - (row * tiles_per_row); + OffsetT row_offset = row * row_stride_samples; + OffsetT col_offset = (col * TILE_SAMPLES); + OffsetT tile_offset = row_offset + col_offset; + + if (col == tiles_per_row - 1) + { + // Consume a partially-full tile at the end of the row + OffsetT num_remaining = (num_row_pixels * NUM_CHANNELS) - col_offset; + ConsumeTile(tile_offset, num_remaining); + } + else + { + // Consume full tile + ConsumeTile(tile_offset, TILE_SAMPLES); + } + + __syncthreads(); + + // Get next tile + if (threadIdx.x == 0) + { + temp_storage.tile_idx = tile_queue.Drain(1) + num_even_share_tiles; + } + + __syncthreads(); + + tile_idx = temp_storage.tile_idx; + } + } + + /** + * @brief Consume row tiles. 
Specialized for even-share (striped across thread blocks) + * + * @param num_row_pixels + * The number of multi-channel pixels per row in the region of interest + * + * @param num_rows + * The number of rows in the region of interest + * + * @param row_stride_samples + * The number of samples between starts of consecutive rows in the region of interest + * + * @param tiles_per_row + * Number of image tiles per row + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTiles( + OffsetT num_row_pixels, + OffsetT num_rows, + OffsetT row_stride_samples, + int tiles_per_row, + GridQueue tile_queue, + Int2Type is_work_stealing) + { + for (int row = blockIdx.y; row < num_rows; row += gridDim.y) + { + OffsetT row_begin = row * row_stride_samples; + OffsetT row_end = row_begin + (num_row_pixels * NUM_CHANNELS); + OffsetT tile_offset = row_begin + (blockIdx.x * TILE_SAMPLES); + + while (tile_offset < row_end) + { + OffsetT num_remaining = row_end - tile_offset; + + if (num_remaining < TILE_SAMPLES) + { + // Consume partial tile + ConsumeTile(tile_offset, num_remaining); + break; + } + + // Consume full tile + ConsumeTile(tile_offset, TILE_SAMPLES); + tile_offset += gridDim.x * TILE_SAMPLES; + } + } + } + + //--------------------------------------------------------------------- + // Parameter extraction + //--------------------------------------------------------------------- + + // Return a native pixel pointer (specialized for CacheModifiedInputIterator types) + template + _CCCL_DEVICE _CCCL_FORCEINLINE SampleT* NativePointer(CacheModifiedInputIterator<_MODIFIER, _ValueT, _OffsetT> itr) + { + return itr.ptr; + } + + // Return a native pixel pointer (specialized for other types) + template + _CCCL_DEVICE _CCCL_FORCEINLINE SampleT* NativePointer(IteratorT itr) + { + return nullptr; + } + + //--------------------------------------------------------------------- + // Interface + //--------------------------------------------------------------------- + + /** + * @brief 
Constructor + * + * @param temp_storage + * Reference to temp_storage + * + * @param d_samples + * Input data to reduce + * + * @param num_output_bins + * The number bins per final output histogram + * + * @param num_privatized_bins + * The number bins per privatized histogram + * + * @param d_output_histograms + * Reference to final output histograms + * + * @param d_privatized_histograms + * Reference to privatized histograms + * + * @param output_decode_op + * The transform operator for determining output bin-ids from privatized counter indices, one for each channel + * + * @param privatized_decode_op + * The transform operator for determining privatized counter indices from samples, one for each channel + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentHistogram( + TempStorage& temp_storage, + SampleIteratorT d_samples, + int* num_output_bins, + int* num_privatized_bins, + CounterT** d_output_histograms, + CounterT** d_privatized_histograms, + OutputDecodeOpT* output_decode_op, + PrivatizedDecodeOpT* privatized_decode_op) + : temp_storage(temp_storage.Alias()) + , d_wrapped_samples(d_samples) + , d_native_samples(NativePointer(d_wrapped_samples)) + , num_output_bins(num_output_bins) + , num_privatized_bins(num_privatized_bins) + , d_output_histograms(d_output_histograms) + , output_decode_op(output_decode_op) + , privatized_decode_op(privatized_decode_op) + , prefer_smem((MEM_PREFERENCE == SMEM) ? true : // prefer smem privatized histograms + (MEM_PREFERENCE == GMEM) ? 
false + : // prefer gmem privatized histograms + blockIdx.x & 1) // prefer blended privatized histograms + { + int blockId = (blockIdx.y * gridDim.x) + blockIdx.x; + + // Initialize the locations of this block's privatized histograms + for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) + { + this->d_privatized_histograms[CHANNEL] = + d_privatized_histograms[CHANNEL] + (blockId * num_privatized_bins[CHANNEL]); + } + } + + /** + * @brief Consume image + * + * @param num_row_pixels + * The number of multi-channel pixels per row in the region of interest + * + * @param num_rows + * The number of rows in the region of interest + * + * @param row_stride_samples + * The number of samples between starts of consecutive rows in the region of interest + * + * @param tiles_per_row + * Number of image tiles per row + * + * @param tile_queue + * Queue descriptor for assigning tiles of work to thread blocks + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTiles( + OffsetT num_row_pixels, OffsetT num_rows, OffsetT row_stride_samples, int tiles_per_row, GridQueue tile_queue) + { + // Check whether all row starting offsets are vec-aligned (in single-channel) or pixel-aligned (in multi-channel) + int vec_mask = AlignBytes::ALIGN_BYTES - 1; + int pixel_mask = AlignBytes::ALIGN_BYTES - 1; + size_t row_bytes = sizeof(SampleT) * row_stride_samples; + + bool vec_aligned_rows = + (NUM_CHANNELS == 1) && (SAMPLES_PER_THREAD % VecSize == 0) && // Single channel + ((size_t(d_native_samples) & vec_mask) == 0) && // ptr is quad-aligned + ((num_rows == 1) || ((row_bytes & vec_mask) == 0)); // number of row-samples is a multiple of the alignment of the + // quad + + bool pixel_aligned_rows = + (NUM_CHANNELS > 1) && // Multi channel + ((size_t(d_native_samples) & pixel_mask) == 0) && // ptr is pixel-aligned + ((row_bytes & pixel_mask) == 0); // number of row-samples is a multiple of the alignment of the pixel + + // Whether rows are aligned and can be vectorized + if 
((d_native_samples != nullptr) && (vec_aligned_rows || pixel_aligned_rows)) + { + ConsumeTiles( + num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue, Int2Type()); + } + else + { + ConsumeTiles( + num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue, Int2Type()); + } + } + + /** + * Initialize privatized bin counters. Specialized for privatized shared-memory counters + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void InitBinCounters() + { + if (prefer_smem) + { + InitSmemBinCounters(); + } + else + { + InitGmemBinCounters(); + } + } + + /** + * Store privatized histogram to device-accessible memory. Specialized for privatized shared-memory counters + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void StoreOutput() + { + if (prefer_smem) + { + StoreSmemOutput(); + } + else + { + StoreGmemOutput(); + } + } +}; + +} // namespace histogram +} // namespace detail + +template +using AgentHistogram CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::histogram::AgentHistogram< + AgentHistogramPolicyT, + PRIVATIZED_SMEM_BINS, + NUM_CHANNELS, + NUM_ACTIVE_CHANNELS, + SampleIteratorT, + CounterT, + PrivatizedDecodeOpT, + OutputDecodeOpT, + OffsetT, + LEGACY_PTX_ARCH>; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_merge.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_merge.cuh new file mode 100644 index 0000000000000000000000000000000000000000..9ae14c3e42e5daa8a04695a4d4568e5333510dae --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_merge.cuh @@ -0,0 +1,232 @@ +// SPDX-FileCopyrightText: Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +namespace merge +{ +template +struct agent_policy_t +{ + // do not change data member names, policy_wrapper_t depends on it + static constexpr int BLOCK_THREADS = ThreadsPerBlock; + static constexpr int ITEMS_PER_THREAD = ItemsPerThread; + static constexpr int ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD; + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = LoadAlgorithm; + static constexpr CacheLoadModifier LOAD_MODIFIER = LoadCacheModifier; + static constexpr BlockStoreAlgorithm STORE_ALGORITHM = StoreAlgorithm; +}; + +// TODO(bgruber): can we unify this one with AgentMerge in agent_merge_sort.cuh? 
+template +struct agent_t +{ + using policy = Policy; + + // key and value type are taken from the first input sequence (consistent with old Thrust behavior) + using key_type = typename ::cuda::std::iterator_traits::value_type; + using item_type = typename ::cuda::std::iterator_traits::value_type; + + using keys_load_it1 = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using keys_load_it2 = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using items_load_it1 = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using items_load_it2 = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + + using block_load_keys1 = typename BlockLoadType::type; + using block_load_keys2 = typename BlockLoadType::type; + using block_load_items1 = typename BlockLoadType::type; + using block_load_items2 = typename BlockLoadType::type; + + using block_store_keys = typename BlockStoreType::type; + using block_store_items = typename BlockStoreType::type; + + union temp_storages + { + typename block_load_keys1::TempStorage load_keys1; + typename block_load_keys2::TempStorage load_keys2; + typename block_load_items1::TempStorage load_items1; + typename block_load_items2::TempStorage load_items2; + typename block_store_keys::TempStorage store_keys; + typename block_store_items::TempStorage store_items; + + key_type keys_shared[Policy::ITEMS_PER_TILE + 1]; + item_type items_shared[Policy::ITEMS_PER_TILE + 1]; + }; + + struct TempStorage : Uninitialized + {}; + + static constexpr int items_per_thread = Policy::ITEMS_PER_THREAD; + static constexpr int threads_per_block = Policy::BLOCK_THREADS; + static constexpr Offset items_per_tile = Policy::ITEMS_PER_TILE; + + // Per thread data + temp_storages& storage; + keys_load_it1 keys1_in; + items_load_it1 items1_in; + Offset keys1_count; + keys_load_it2 keys2_in; + items_load_it2 items2_in; + Offset keys2_count; + KeysOutputIt keys_out; + ItemsOutputIt items_out; + CompareOp 
compare_op; + Offset* merge_partitions; + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void consume_tile(Offset tile_idx, Offset tile_base, int num_remaining) + { + const Offset partition_beg = merge_partitions[tile_idx + 0]; + const Offset partition_end = merge_partitions[tile_idx + 1]; + + const Offset diag0 = items_per_tile * tile_idx; + const Offset diag1 = (::cuda::std::min)(keys1_count + keys2_count, diag0 + items_per_tile); + + // compute bounding box for keys1 & keys2 + const Offset keys1_beg = partition_beg; + const Offset keys1_end = partition_end; + const Offset keys2_beg = diag0 - keys1_beg; + const Offset keys2_end = diag1 - keys1_end; + + // number of keys per tile + const int num_keys1 = static_cast(keys1_end - keys1_beg); + const int num_keys2 = static_cast(keys2_end - keys2_beg); + + key_type keys_loc[items_per_thread]; + merge_sort::gmem_to_reg( + keys_loc, keys1_in + keys1_beg, keys2_in + keys2_beg, num_keys1, num_keys2); + merge_sort::reg_to_shared(&storage.keys_shared[0], keys_loc); + __syncthreads(); + + // use binary search in shared memory to find merge path for each of thread. 
+ // we can use int type here, because the number of items in shared memory is limited + const int diag0_loc = (::cuda::std::min)(num_keys1 + num_keys2, static_cast(items_per_thread * threadIdx.x)); + + const int keys1_beg_loc = + MergePath(&storage.keys_shared[0], &storage.keys_shared[num_keys1], num_keys1, num_keys2, diag0_loc, compare_op); + const int keys1_end_loc = num_keys1; + const int keys2_beg_loc = diag0_loc - keys1_beg_loc; + const int keys2_end_loc = num_keys2; + + const int num_keys1_loc = keys1_end_loc - keys1_beg_loc; + const int num_keys2_loc = keys2_end_loc - keys2_beg_loc; + + // perform serial merge + int indices[items_per_thread]; + cub::SerialMerge( + &storage.keys_shared[0], + keys1_beg_loc, + keys2_beg_loc + num_keys1, + num_keys1_loc, + num_keys2_loc, + keys_loc, + indices, + compare_op); + __syncthreads(); + + // write keys + if (IsFullTile) + { + block_store_keys{storage.store_keys}.Store(keys_out + tile_base, keys_loc); + } + else + { + block_store_keys{storage.store_keys}.Store(keys_out + tile_base, keys_loc, num_remaining); + } + + // if items are provided, merge them + static constexpr bool have_items = !std::is_same::value; +#if _CCCL_CUDACC_BELOW(11, 8) + if (have_items) // nvcc 11.1 cannot handle #pragma unroll inside if constexpr but 11.8 can. 
+ // nvcc versions between may work +#else // ^^^ _CCCL_CUDACC_BELOW(11, 8) ^^^ / vvv _CCCL_CUDACC_AT_LEAST(11, 8) + _CCCL_IF_CONSTEXPR (have_items) +#endif // _CCCL_CUDACC_AT_LEAST(11, 8) + { + item_type items_loc[items_per_thread]; + merge_sort::gmem_to_reg( + items_loc, items1_in + keys1_beg, items2_in + keys2_beg, num_keys1, num_keys2); + __syncthreads(); // block_store_keys above uses shared memory, so make sure all threads are done before we write + // to it + merge_sort::reg_to_shared(&storage.items_shared[0], items_loc); + __syncthreads(); + + // gather items from shared mem +#pragma unroll + for (int i = 0; i < items_per_thread; ++i) + { + items_loc[i] = storage.items_shared[indices[i]]; + } + __syncthreads(); + + // write from reg to gmem + if (IsFullTile) + { + block_store_items{storage.store_items}.Store(items_out + tile_base, items_loc); + } + else + { + block_store_items{storage.store_items}.Store(items_out + tile_base, items_loc, num_remaining); + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + // XXX with 8.5 changing type to Offset (or long long) results in error! + // TODO(bgruber): is the above still true? 
+ const int tile_idx = static_cast(blockIdx.x); + const Offset tile_base = tile_idx * items_per_tile; + // TODO(bgruber): random mixing of int and Offset + const int items_in_tile = + static_cast((::cuda::std::min)(static_cast(items_per_tile), keys1_count + keys2_count - tile_base)); + if (items_in_tile == items_per_tile) + { + consume_tile(tile_idx, tile_base, items_per_tile); // full tile + } + else + { + consume_tile(tile_idx, tile_base, items_in_tile); // partial tile + } + } +}; +} // namespace merge +} // namespace detail +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_merge_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_merge_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..4c74b73baf240f975831aaecf63800e59d679d01 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_merge_sort.cuh @@ -0,0 +1,792 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +template +struct AgentMergeSortPolicy +{ + static constexpr int BLOCK_THREADS = _BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = _ITEMS_PER_THREAD; + static constexpr int ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD; + + static constexpr cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + static constexpr cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + static constexpr cub::BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; +}; + +namespace detail +{ +namespace merge_sort +{ + +template +struct AgentBlockSort +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + 
static constexpr bool KEYS_ONLY = std::is_same::value; + + using BlockMergeSortT = BlockMergeSort; + + using KeysLoadIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using ItemsLoadIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + + using BlockLoadKeys = typename cub::BlockLoadType::type; + using BlockLoadItems = typename cub::BlockLoadType::type; + + using BlockStoreKeysIt = typename cub::BlockStoreType::type; + using BlockStoreItemsIt = typename cub::BlockStoreType::type; + using BlockStoreKeysRaw = typename cub::BlockStoreType::type; + using BlockStoreItemsRaw = typename cub::BlockStoreType::type; + + union _TempStorage + { + typename BlockLoadKeys::TempStorage load_keys; + typename BlockLoadItems::TempStorage load_items; + typename BlockStoreKeysIt::TempStorage store_keys_it; + typename BlockStoreItemsIt::TempStorage store_items_it; + typename BlockStoreKeysRaw::TempStorage store_keys_raw; + typename BlockStoreItemsRaw::TempStorage store_items_raw; + typename BlockMergeSortT::TempStorage block_merge; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + static constexpr int BLOCK_THREADS = Policy::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = Policy::ITEMS_PER_THREAD; + static constexpr int ITEMS_PER_TILE = Policy::ITEMS_PER_TILE; + + //--------------------------------------------------------------------- + // Per thread data + //--------------------------------------------------------------------- + + bool ping; + _TempStorage& storage; + KeysLoadIt keys_in; + ItemsLoadIt items_in; + OffsetT keys_count; + KeyIteratorT keys_out_it; + ValueIteratorT items_out_it; + KeyT* keys_out_raw; + ValueT* items_out_raw; + CompareOpT compare_op; + + _CCCL_DEVICE _CCCL_FORCEINLINE AgentBlockSort( + bool ping_, + TempStorage& storage_, + KeysLoadIt keys_in_, + ItemsLoadIt items_in_, + OffsetT keys_count_, + KeyIteratorT keys_out_it_, + ValueIteratorT 
items_out_it_, + KeyT* keys_out_raw_, + ValueT* items_out_raw_, + CompareOpT compare_op_) + : ping(ping_) + , storage(storage_.Alias()) + , keys_in(keys_in_) + , items_in(items_in_) + , keys_count(keys_count_) + , keys_out_it(keys_out_it_) + , items_out_it(items_out_it_) + , keys_out_raw(keys_out_raw_) + , items_out_raw(items_out_raw_) + , compare_op(compare_op_) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE void Process() + { + auto tile_idx = static_cast(blockIdx.x); + auto num_tiles = static_cast(gridDim.x); + auto tile_base = tile_idx * ITEMS_PER_TILE; + int items_in_tile = (::cuda::std::min)(static_cast(keys_count - tile_base), int{ITEMS_PER_TILE}); + + if (tile_idx < num_tiles - 1) + { + consume_tile(tile_base, ITEMS_PER_TILE); + } + else + { + consume_tile(tile_base, items_in_tile); + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void consume_tile(OffsetT tile_base, int num_remaining) + { + ValueT items_local[ITEMS_PER_THREAD]; + + _CCCL_PDL_GRID_DEPENDENCY_SYNC(); + + _CCCL_IF_CONSTEXPR (!KEYS_ONLY) + { + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + BlockLoadItems(storage.load_items) + .Load(items_in + tile_base, items_local, num_remaining, *(items_in + tile_base)); + } + else + { + BlockLoadItems(storage.load_items).Load(items_in + tile_base, items_local); + } + + __syncthreads(); + } + + KeyT keys_local[ITEMS_PER_THREAD]; + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + BlockLoadKeys(storage.load_keys).Load(keys_in + tile_base, keys_local, num_remaining, *(keys_in + tile_base)); + } + else + { + BlockLoadKeys(storage.load_keys).Load(keys_in + tile_base, keys_local); + } + + __syncthreads(); + _CCCL_PDL_TRIGGER_NEXT_LAUNCH(); + + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + BlockMergeSortT(storage.block_merge).Sort(keys_local, items_local, compare_op, num_remaining, keys_local[0]); + } + else + { + BlockMergeSortT(storage.block_merge).Sort(keys_local, items_local, compare_op); + } + + __syncthreads(); + + if (ping) + { + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + 
BlockStoreKeysIt(storage.store_keys_it).Store(keys_out_it + tile_base, keys_local, num_remaining); + } + else + { + BlockStoreKeysIt(storage.store_keys_it).Store(keys_out_it + tile_base, keys_local); + } + + _CCCL_IF_CONSTEXPR (!KEYS_ONLY) + { + __syncthreads(); + + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + BlockStoreItemsIt(storage.store_items_it).Store(items_out_it + tile_base, items_local, num_remaining); + } + else + { + BlockStoreItemsIt(storage.store_items_it).Store(items_out_it + tile_base, items_local); + } + } + } + else + { + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + BlockStoreKeysRaw(storage.store_keys_raw).Store(keys_out_raw + tile_base, keys_local, num_remaining); + } + else + { + BlockStoreKeysRaw(storage.store_keys_raw).Store(keys_out_raw + tile_base, keys_local); + } + + _CCCL_IF_CONSTEXPR (!KEYS_ONLY) + { + __syncthreads(); + + _CCCL_IF_CONSTEXPR (IS_LAST_TILE) + { + BlockStoreItemsRaw(storage.store_items_raw).Store(items_out_raw + tile_base, items_local, num_remaining); + } + else + { + BlockStoreItemsRaw(storage.store_items_raw).Store(items_out_raw + tile_base, items_local); + } + } + } + } +}; + +/** + * \brief This agent is responsible for partitioning a merge path into equal segments + * + * There are two sorted arrays to be merged into one array. If the first array + * is partitioned between parallel workers by slicing it into ranges of equal + * size, there could be a significant workload imbalance. The imbalance is + * caused by the fact that the distribution of elements from the second array + * is unknown beforehand. Instead, the MergePath is partitioned between workers. + * This approach guarantees an equal amount of work being assigned to each worker. 
+ * + * This approach is outlined in the paper: + * Odeh et al, "Merge Path - Parallel Merging Made Simple" + * doi:10.1109/IPDPSW.2012.202 + */ +template +struct AgentPartition +{ + bool ping; + KeyIteratorT keys_ping; + KeyT* keys_pong; + OffsetT keys_count; + OffsetT partition_idx; + OffsetT* merge_partitions; + CompareOpT compare_op; + OffsetT target_merged_tiles_number; + int items_per_tile; + OffsetT num_partitions; + + _CCCL_DEVICE _CCCL_FORCEINLINE AgentPartition( + bool ping, + KeyIteratorT keys_ping, + KeyT* keys_pong, + OffsetT keys_count, + OffsetT partition_idx, + OffsetT* merge_partitions, + CompareOpT compare_op, + OffsetT target_merged_tiles_number, + int items_per_tile, + OffsetT num_partitions) + : ping(ping) + , keys_ping(keys_ping) + , keys_pong(keys_pong) + , keys_count(keys_count) + , partition_idx(partition_idx) + , merge_partitions(merge_partitions) + , compare_op(compare_op) + , target_merged_tiles_number(target_merged_tiles_number) + , items_per_tile(items_per_tile) + , num_partitions(num_partitions) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE void Process() + { + const OffsetT merged_tiles_number = target_merged_tiles_number / 2; + + // target_merged_tiles_number is a power of two. 
+ const OffsetT mask = target_merged_tiles_number - 1; + + // The first tile number in the tiles group being merged, equal to: + // target_merged_tiles_number * (partition_idx / target_merged_tiles_number) + const OffsetT list = ~mask & partition_idx; + const OffsetT start = items_per_tile * list; + const OffsetT size = items_per_tile * merged_tiles_number; + + // Tile number within the tile group being merged, equal to: + // partition_idx / target_merged_tiles_number + const OffsetT local_tile_idx = mask & partition_idx; + + const OffsetT keys1_beg = (::cuda::std::min)(keys_count, start); + const OffsetT keys1_end = (::cuda::std::min)(keys_count, detail::safe_add_bound_to_max(start, size)); + const OffsetT keys2_beg = keys1_end; + const OffsetT keys2_end = (::cuda::std::min)(keys_count, detail::safe_add_bound_to_max(keys2_beg, size)); + + _CCCL_PDL_GRID_DEPENDENCY_SYNC(); + + // The last partition (which is one-past-the-last-tile) is only to mark the end of keys1_end for the merge stage + if (partition_idx + 1 == num_partitions) + { + merge_partitions[partition_idx] = keys1_end; + } + else + { + const OffsetT partition_at = (::cuda::std::min)(keys2_end - keys1_beg, items_per_tile * local_tile_idx); + + OffsetT partition_diag = + ping + ? MergePath(keys_ping + keys1_beg, + keys_ping + keys2_beg, + keys1_end - keys1_beg, + keys2_end - keys2_beg, + partition_at, + compare_op) + : MergePath(keys_pong + keys1_beg, + keys_pong + keys2_beg, + keys1_end - keys1_beg, + keys2_end - keys2_beg, + partition_at, + compare_op); + + merge_partitions[partition_idx] = keys1_beg + partition_diag; + } + } +}; + +/** + * \brief Concatenates up to ITEMS_PER_THREAD elements from input{1,2} into output array + * + * Reads data in a coalesced fashion [BLOCK_THREADS * item + tid] and + * stores the result in output[item]. 
+ */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +gmem_to_reg(T (&output)[ITEMS_PER_THREAD], It1 input1, It2 input2, int count1, int count2) +{ + _CCCL_IF_CONSTEXPR (IS_FULL_TILE) + { +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + const int idx = BLOCK_THREADS * item + threadIdx.x; + // It1 and It2 could have different value types. Convert after load. + output[item] = (idx < count1) ? static_cast(input1[idx]) : static_cast(input2[idx - count1]); + } + } + else + { +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + const int idx = BLOCK_THREADS * item + threadIdx.x; + if (idx < count1 + count2) + { + output[item] = (idx < count1) ? static_cast(input1[idx]) : static_cast(input2[idx - count1]); + } + } + } +} + +/// \brief Stores data in a coalesced fashion in[item] -> out[BLOCK_THREADS * item + tid] +template +_CCCL_DEVICE _CCCL_FORCEINLINE void reg_to_shared(It output, T (&input)[ITEMS_PER_THREAD]) +{ +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + const int idx = BLOCK_THREADS * item + threadIdx.x; + output[idx] = input[item]; + } +} + +/// \brief The agent is responsible for merging N consecutive sorted arrays into N/2 sorted arrays. 
+template +struct AgentMerge +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + using KeysLoadPingIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using ItemsLoadPingIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using KeysLoadPongIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using ItemsLoadPongIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + + using KeysOutputPongIt = KeyIteratorT; + using ItemsOutputPongIt = ValueIteratorT; + using KeysOutputPingIt = KeyT*; + using ItemsOutputPingIt = ValueT*; + + using BlockStoreKeysPong = typename BlockStoreType::type; + using BlockStoreItemsPong = typename BlockStoreType::type; + using BlockStoreKeysPing = typename BlockStoreType::type; + using BlockStoreItemsPing = typename BlockStoreType::type; + + /// Parameterized BlockReduce primitive + + union _TempStorage + { + typename BlockStoreKeysPing::TempStorage store_keys_ping; + typename BlockStoreItemsPing::TempStorage store_items_ping; + typename BlockStoreKeysPong::TempStorage store_keys_pong; + typename BlockStoreItemsPong::TempStorage store_items_pong; + + KeyT keys_shared[Policy::ITEMS_PER_TILE + 1]; + ValueT items_shared[Policy::ITEMS_PER_TILE + 1]; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + static constexpr bool KEYS_ONLY = std::is_same::value; + static constexpr int BLOCK_THREADS = Policy::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = Policy::ITEMS_PER_THREAD; + static constexpr int ITEMS_PER_TILE = Policy::ITEMS_PER_TILE; + + //--------------------------------------------------------------------- + // Per thread data + //--------------------------------------------------------------------- + + bool ping; + _TempStorage& storage; + + KeysLoadPingIt 
keys_in_ping; + ItemsLoadPingIt items_in_ping; + KeysLoadPongIt keys_in_pong; + ItemsLoadPongIt items_in_pong; + + OffsetT keys_count; + + KeysOutputPongIt keys_out_pong; + ItemsOutputPongIt items_out_pong; + KeysOutputPingIt keys_out_ping; + ItemsOutputPingIt items_out_ping; + + CompareOpT compare_op; + OffsetT* merge_partitions; + OffsetT target_merged_tiles_number; + + //--------------------------------------------------------------------- + // Utility functions + //--------------------------------------------------------------------- + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void consume_tile(int tid, OffsetT tile_idx, OffsetT tile_base, int count) + { + const OffsetT partition_beg = merge_partitions[tile_idx + 0]; + const OffsetT partition_end = merge_partitions[tile_idx + 1]; + + // target_merged_tiles_number is a power of two. + const OffsetT merged_tiles_number = target_merged_tiles_number / 2; + + const OffsetT mask = target_merged_tiles_number - 1; + + // The first tile number in the tiles group being merged, equal to: + // target_merged_tiles_number * (tile_idx / target_merged_tiles_number) + const OffsetT list = ~mask & tile_idx; + const OffsetT start = ITEMS_PER_TILE * list; + const OffsetT size = ITEMS_PER_TILE * merged_tiles_number; + + const OffsetT diag = ITEMS_PER_TILE * tile_idx - start; + + const OffsetT keys1_beg = partition_beg - start; + OffsetT keys1_end = partition_end - start; + + const OffsetT keys_end_dist_from_start = keys_count - start; + const OffsetT max_keys2 = (keys_end_dist_from_start > size) ? 
(keys_end_dist_from_start - size) : 0; + + // We have the following invariants: + // diag >= keys1_beg, because diag is the distance of the total merge path so far (keys1 + keys2) + // diag+ITEMS_PER_TILE >= keys1_end, because diag+ITEMS_PER_TILE is the distance of the merge path for the next tile + // and keys1_end is key1's component of that path + const OffsetT keys2_beg = (::cuda::std::min)(max_keys2, diag - keys1_beg); + OffsetT keys2_end = (::cuda::std::min)( + max_keys2, detail::safe_add_bound_to_max(diag, static_cast(ITEMS_PER_TILE)) - keys1_end); + + // Check if it's the last tile in the tile group being merged + if (mask == (mask & tile_idx)) + { + keys1_end = (::cuda::std::min)(keys_count - start, size); + keys2_end = (::cuda::std::min)(max_keys2, size); + } + + // number of keys per tile + const int num_keys1 = static_cast(keys1_end - keys1_beg); + const int num_keys2 = static_cast(keys2_end - keys2_beg); + + _CCCL_PDL_GRID_DEPENDENCY_SYNC(); + + // load keys1 & keys2 + KeyT keys_local[ITEMS_PER_THREAD]; + if (ping) + { + gmem_to_reg( + keys_local, keys_in_ping + start + keys1_beg, keys_in_ping + start + size + keys2_beg, num_keys1, num_keys2); + } + else + { + gmem_to_reg( + keys_local, keys_in_pong + start + keys1_beg, keys_in_pong + start + size + keys2_beg, num_keys1, num_keys2); + } + reg_to_shared(&storage.keys_shared[0], keys_local); + + // preload items into registers already + // + ValueT items_local[ITEMS_PER_THREAD]; + (void) items_local; // TODO(bgruber): replace by [[maybe_unused]] in C++17 + _CCCL_IF_CONSTEXPR (!KEYS_ONLY) + { + if (ping) + { + gmem_to_reg( + items_local, + items_in_ping + start + keys1_beg, + items_in_ping + start + size + keys2_beg, + num_keys1, + num_keys2); + } + else + { + gmem_to_reg( + items_local, + items_in_pong + start + keys1_beg, + items_in_pong + start + size + keys2_beg, + num_keys1, + num_keys2); + } + } + + __syncthreads(); + _CCCL_PDL_TRIGGER_NEXT_LAUNCH(); + + // use binary search in shared memory + // to 
find merge path for each of thread + // we can use int type here, because the number of + // items in shared memory is limited + // + const int diag0_local = (::cuda::std::min)(num_keys1 + num_keys2, ITEMS_PER_THREAD * tid); + + const int keys1_beg_local = MergePath( + &storage.keys_shared[0], &storage.keys_shared[num_keys1], num_keys1, num_keys2, diag0_local, compare_op); + const int keys1_end_local = num_keys1; + const int keys2_beg_local = diag0_local - keys1_beg_local; + const int keys2_end_local = num_keys2; + + const int num_keys1_local = keys1_end_local - keys1_beg_local; + const int num_keys2_local = keys2_end_local - keys2_beg_local; + + // perform serial merge + // + int indices[ITEMS_PER_THREAD]; + + SerialMerge( + &storage.keys_shared[0], + keys1_beg_local, + keys2_beg_local + num_keys1, + num_keys1_local, + num_keys2_local, + keys_local, + indices, + compare_op); + + __syncthreads(); + + // write keys + if (ping) + { + _CCCL_IF_CONSTEXPR (IS_FULL_TILE) + { + BlockStoreKeysPing(storage.store_keys_ping).Store(keys_out_ping + tile_base, keys_local); + } + else + { + BlockStoreKeysPing(storage.store_keys_ping).Store(keys_out_ping + tile_base, keys_local, num_keys1 + num_keys2); + } + } + else + { + _CCCL_IF_CONSTEXPR (IS_FULL_TILE) + { + BlockStoreKeysPong(storage.store_keys_pong).Store(keys_out_pong + tile_base, keys_local); + } + else + { + BlockStoreKeysPong(storage.store_keys_pong).Store(keys_out_pong + tile_base, keys_local, num_keys1 + num_keys2); + } + } + + // if items are provided, merge them +#if _CCCL_CUDACC_BELOW(11, 8) + if (!KEYS_ONLY) // nvcc 11.1 cannot handle #pragma unroll inside if constexpr but 11.8 can. 
+ // nvcc versions between may work +#else // ^^^ _CCCL_CUDACC_BELOW(11, 8) ^^^ / vvv _CCCL_CUDACC_AT_LEAST(11, 8) + _CCCL_IF_CONSTEXPR (!KEYS_ONLY) +#endif // _CCCL_CUDACC_AT_LEAST(11, 8) + { + __syncthreads(); + + reg_to_shared(&storage.items_shared[0], items_local); + + __syncthreads(); + + // gather items from shared mem + // +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + items_local[item] = storage.items_shared[indices[item]]; + } + + __syncthreads(); + + // write from reg to gmem + // + if (ping) + { + _CCCL_IF_CONSTEXPR (IS_FULL_TILE) + { + BlockStoreItemsPing(storage.store_items_ping).Store(items_out_ping + tile_base, items_local); + } + else + { + BlockStoreItemsPing(storage.store_items_ping).Store(items_out_ping + tile_base, items_local, count); + } + } + else + { + _CCCL_IF_CONSTEXPR (IS_FULL_TILE) + { + BlockStoreItemsPong(storage.store_items_pong).Store(items_out_pong + tile_base, items_local); + } + else + { + BlockStoreItemsPong(storage.store_items_pong).Store(items_out_pong + tile_base, items_local, count); + } + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE AgentMerge( + bool ping_, + TempStorage& storage_, + KeysLoadPingIt keys_in_ping_, + ItemsLoadPingIt items_in_ping_, + KeysLoadPongIt keys_in_pong_, + ItemsLoadPongIt items_in_pong_, + OffsetT keys_count_, + KeysOutputPingIt keys_out_ping_, + ItemsOutputPingIt items_out_ping_, + KeysOutputPongIt keys_out_pong_, + ItemsOutputPongIt items_out_pong_, + CompareOpT compare_op_, + OffsetT* merge_partitions_, + OffsetT target_merged_tiles_number_) + : ping(ping_) + , storage(storage_.Alias()) + , keys_in_ping(keys_in_ping_) + , items_in_ping(items_in_ping_) + , keys_in_pong(keys_in_pong_) + , items_in_pong(items_in_pong_) + , keys_count(keys_count_) + , keys_out_pong(keys_out_pong_) + , items_out_pong(items_out_pong_) + , keys_out_ping(keys_out_ping_) + , items_out_ping(items_out_ping_) + , compare_op(compare_op_) + , merge_partitions(merge_partitions_) + , 
target_merged_tiles_number(target_merged_tiles_number_) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE void Process() + { + const int tile_idx = static_cast(blockIdx.x); + const int num_tiles = static_cast(gridDim.x); + const OffsetT tile_base = OffsetT(tile_idx) * ITEMS_PER_TILE; + const int tid = static_cast(threadIdx.x); + const int items_in_tile = + static_cast((::cuda::std::min)(static_cast(ITEMS_PER_TILE), keys_count - tile_base)); + + if (tile_idx < num_tiles - 1) + { + consume_tile(tid, tile_idx, tile_base, ITEMS_PER_TILE); + } + else + { + consume_tile(tid, tile_idx, tile_base, items_in_tile); + } + } +}; + +} // namespace merge_sort +} // namespace detail + +template +using AgentBlockSort CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::merge_sort::AgentBlockSort< + Policy, + KeyInputIteratorT, + ValueInputIteratorT, + KeyIteratorT, + ValueIteratorT, + OffsetT, + CompareOpT, + KeyT, + ValueT>; + +template +using AgentPartition CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::merge_sort::AgentPartition; + +template +using AgentMerge CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public interface " + "will be removed.") = + detail::merge_sort::AgentMerge; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_downsweep.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_downsweep.cuh new file mode 100644 index 0000000000000000000000000000000000000000..cc6e5c18f11c673b4890ab7df70e73c096150c51 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_downsweep.cuh @@ -0,0 +1,777 @@ +/****************************************************************************** + * Copyright (c) 
2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread + * blocks for participating in device-wide radix sort downsweep . 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * @brief Parameterizable tuning policy type for AgentRadixSortDownsweep + * + * @tparam NOMINAL_BLOCK_THREADS_4B + * Threads per thread block + * + * @tparam NOMINAL_ITEMS_PER_THREAD_4B + * Items per thread (per tile of input) + * + * @tparam ComputeT + * Dominant compute type + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading keys (and values) + * + * @tparam _RANK_ALGORITHM + * The radix ranking algorithm to use + * + * @tparam _SCAN_ALGORITHM + * The block scan algorithm to use + * + * @tparam _RADIX_BITS + * The number of radix bits, i.e., log2(bins) + */ +template > +struct AgentRadixSortDownsweepPolicy : ScalingType +{ + enum + { + /// The number of radix bits, i.e., log2(bins) + RADIX_BITS = _RADIX_BITS, + }; + + /// The BlockLoad algorithm to use + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + + /// Cache load modifier for reading keys (and values) + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + + /// The radix ranking algorithm to use + static constexpr RadixRankAlgorithm RANK_ALGORITHM = _RANK_ALGORITHM; + + /// The BlockScan algorithm to use + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; +}; + +/****************************************************************************** + * Thread block 
abstractions + ******************************************************************************/ + +namespace detail +{ +namespace radix_sort +{ + +/** + * @brief AgentRadixSortDownsweep implements a stateful abstraction of CUDA thread blocks for participating in + * device-wide radix sort downsweep . + * + * @tparam AgentRadixSortDownsweepPolicy + * Parameterized AgentRadixSortDownsweepPolicy tuning policy type + * + * @tparam IS_DESCENDING + * Whether or not the sorted-order is high-to-low + * + * @tparam KeyT + * KeyT type + * + * @tparam ValueT + * ValueT type + * + * @tparam OffsetT + * Signed integer type for global offsets + */ +template +struct AgentRadixSortDownsweep +{ + //--------------------------------------------------------------------- + // Type definitions and constants + //--------------------------------------------------------------------- + + using traits = radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = AgentRadixSortDownsweepPolicy::LOAD_ALGORITHM; + static constexpr CacheLoadModifier LOAD_MODIFIER = AgentRadixSortDownsweepPolicy::LOAD_MODIFIER; + static constexpr RadixRankAlgorithm RANK_ALGORITHM = AgentRadixSortDownsweepPolicy::RANK_ALGORITHM; + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = AgentRadixSortDownsweepPolicy::SCAN_ALGORITHM; + + enum + { + BLOCK_THREADS = AgentRadixSortDownsweepPolicy::BLOCK_THREADS, + ITEMS_PER_THREAD = AgentRadixSortDownsweepPolicy::ITEMS_PER_THREAD, + RADIX_BITS = AgentRadixSortDownsweepPolicy::RADIX_BITS, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + + RADIX_DIGITS = 1 << RADIX_BITS, + KEYS_ONLY = std::is_same::value, + LOAD_WARP_STRIPED = RANK_ALGORITHM == RADIX_RANK_MATCH || RANK_ALGORITHM == RADIX_RANK_MATCH_EARLY_COUNTS_ANY + || RANK_ALGORITHM == RADIX_RANK_MATCH_EARLY_COUNTS_ATOMIC_OR, + }; + + // Input iterator wrapper type 
(for applying cache modifier)s + using KeysItr = CacheModifiedInputIterator; + using ValuesItr = CacheModifiedInputIterator; + + // Radix ranking type to use + using BlockRadixRankT = block_radix_rank_t; + + // Digit extractor type + using fundamental_digit_extractor_t = BFEDigitExtractor; + using digit_extractor_t = typename traits::template digit_extractor_t; + + enum + { + /// Number of bin-starting offsets tracked per thread + BINS_TRACKED_PER_THREAD = BlockRadixRankT::BINS_TRACKED_PER_THREAD + }; + + // BlockLoad type (keys) + using BlockLoadKeysT = BlockLoad; + + // BlockLoad type (values) + using BlockLoadValuesT = BlockLoad; + + // Value exchange array type + using ValueExchangeT = ValueT[TILE_ITEMS]; + + /** + * Shared memory storage layout + */ + union __align__(16) _TempStorage + { + typename BlockLoadKeysT::TempStorage load_keys; + typename BlockLoadValuesT::TempStorage load_values; + typename BlockRadixRankT::TempStorage radix_rank; + + struct KeysAndOffsets + { + bit_ordered_type exchange_keys[TILE_ITEMS]; + OffsetT relative_bin_offsets[RADIX_DIGITS]; + } keys_and_offsets; + + Uninitialized exchange_values; + + OffsetT exclusive_digit_prefix[RADIX_DIGITS]; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Thread fields + //--------------------------------------------------------------------- + + // Shared storage for this CTA + _TempStorage& temp_storage; + + // Input and output device pointers + KeysItr d_keys_in; + ValuesItr d_values_in; + bit_ordered_type* d_keys_out; + ValueT* d_values_out; + + // The global scatter base offset for each digit (valid in the first RADIX_DIGITS threads) + OffsetT bin_offset[BINS_TRACKED_PER_THREAD]; + + std::uint32_t current_bit; + std::uint32_t num_bits; + + // Whether to short-circuit + int short_circuit; + + DecomposerT decomposer; + + 
//--------------------------------------------------------------------- + // Utility methods + //--------------------------------------------------------------------- + + _CCCL_DEVICE _CCCL_FORCEINLINE digit_extractor_t digit_extractor() + { + return traits::template digit_extractor(current_bit, num_bits, decomposer); + } + + /** + * Scatter ranked keys through shared memory, then to device-accessible memory + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterKeys( + bit_ordered_type (&twiddled_keys)[ITEMS_PER_THREAD], + OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + OffsetT valid_items) + { +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + temp_storage.keys_and_offsets.exchange_keys[ranks[ITEM]] = twiddled_keys[ITEM]; + } + + __syncthreads(); + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + bit_ordered_type key = temp_storage.keys_and_offsets.exchange_keys[threadIdx.x + (ITEM * BLOCK_THREADS)]; + std::uint32_t digit = digit_extractor().Digit(key); + relative_bin_offsets[ITEM] = temp_storage.keys_and_offsets.relative_bin_offsets[digit]; + + key = bit_ordered_conversion::from_bit_ordered(decomposer, key); + + if (FULL_TILE || (static_cast(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items)) + { + d_keys_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = key; + } + } + } + + /** + * Scatter ranked values through shared memory, then to device-accessible memory + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterValues( + ValueT (&values)[ITEMS_PER_THREAD], + OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + OffsetT valid_items) + { + __syncthreads(); + + ValueExchangeT& exchange_values = temp_storage.exchange_values.Alias(); + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + exchange_values[ranks[ITEM]] = values[ITEM]; + } + + __syncthreads(); + +#pragma unroll + for (int 
ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + ValueT value = exchange_values[threadIdx.x + (ITEM * BLOCK_THREADS)]; + + if (FULL_TILE || (static_cast(threadIdx.x + (ITEM * BLOCK_THREADS)) < valid_items)) + { + d_values_out[relative_bin_offsets[ITEM] + threadIdx.x + (ITEM * BLOCK_THREADS)] = value; + } + } + } + + /** + * Load a tile of keys (specialized for full tile, block load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadKeys( + bit_ordered_type (&keys)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + bit_ordered_type oob_item, + Int2Type is_full_tile, + Int2Type warp_striped) + { + BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + block_offset, keys); + + __syncthreads(); + } + + /** + * Load a tile of keys (specialized for partial tile, block load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadKeys( + bit_ordered_type (&keys)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + bit_ordered_type oob_item, + Int2Type is_full_tile, + Int2Type warp_striped) + { + // Register pressure work-around: moving valid_items through shfl prevents compiler + // from reusing guards/addressing from prior guarded loads + valid_items = ShuffleIndex(valid_items, 0, 0xffffffff); + + BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + block_offset, keys, valid_items, oob_item); + + __syncthreads(); + } + + /** + * Load a tile of keys (specialized for full tile, warp-striped load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadKeys( + bit_ordered_type (&keys)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + bit_ordered_type oob_item, + Int2Type is_full_tile, + Int2Type warp_striped) + { + LoadDirectWarpStriped(threadIdx.x, d_keys_in + block_offset, keys); + } + + /** + * Load a tile of keys (specialized for partial tile, warp-striped load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadKeys( + bit_ordered_type (&keys)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + bit_ordered_type oob_item, + 
Int2Type is_full_tile, + Int2Type warp_striped) + { + // Register pressure work-around: moving valid_items through shfl prevents compiler + // from reusing guards/addressing from prior guarded loads + valid_items = ShuffleIndex(valid_items, 0, 0xffffffff); + + LoadDirectWarpStriped(threadIdx.x, d_keys_in + block_offset, keys, valid_items, oob_item); + } + + /** + * Load a tile of values (specialized for full tile, block load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadValues( + ValueT (&values)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + Int2Type is_full_tile, + Int2Type warp_striped) + { + BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + block_offset, values); + + __syncthreads(); + } + + /** + * Load a tile of values (specialized for partial tile, block load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadValues( + ValueT (&values)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + Int2Type is_full_tile, + Int2Type warp_striped) + { + // Register pressure work-around: moving valid_items through shfl prevents compiler + // from reusing guards/addressing from prior guarded loads + valid_items = ShuffleIndex(valid_items, 0, 0xffffffff); + + BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + block_offset, values, valid_items); + + __syncthreads(); + } + + /** + * Load a tile of items (specialized for full tile, warp-striped load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadValues( + ValueT (&values)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + Int2Type is_full_tile, + Int2Type warp_striped) + { + LoadDirectWarpStriped(threadIdx.x, d_values_in + block_offset, values); + } + + /** + * Load a tile of items (specialized for partial tile, warp-striped load) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadValues( + ValueT (&values)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + Int2Type is_full_tile, + Int2Type warp_striped) + { + // Register pressure 
work-around: moving valid_items through shfl prevents compiler + // from reusing guards/addressing from prior guarded loads + valid_items = ShuffleIndex(valid_items, 0, 0xffffffff); + + LoadDirectWarpStriped(threadIdx.x, d_values_in + block_offset, values, valid_items); + } + + /** + * Truck along associated values + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void GatherScatterValues( + OffsetT (&relative_bin_offsets)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + OffsetT block_offset, + OffsetT valid_items, + Int2Type /*is_keys_only*/) + { + ValueT values[ITEMS_PER_THREAD]; + + __syncthreads(); + + LoadValues(values, block_offset, valid_items, Int2Type(), Int2Type()); + + ScatterValues(values, relative_bin_offsets, ranks, valid_items); + } + + /** + * Truck along associated values (specialized for key-only sorting) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void GatherScatterValues( + OffsetT (& /*relative_bin_offsets*/)[ITEMS_PER_THREAD], + int (& /*ranks*/)[ITEMS_PER_THREAD], + OffsetT /*block_offset*/, + OffsetT /*valid_items*/, + Int2Type /*is_keys_only*/) + {} + + /** + * Process tile + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessTile(OffsetT block_offset, const OffsetT& valid_items = TILE_ITEMS) + { + bit_ordered_type keys[ITEMS_PER_THREAD]; + int ranks[ITEMS_PER_THREAD]; + OffsetT relative_bin_offsets[ITEMS_PER_THREAD]; + + // Assign default (min/max) value to all keys + bit_ordered_type default_key = + IS_DESCENDING ? 
traits::min_raw_binary_key(decomposer) : traits::max_raw_binary_key(decomposer); + + // Load tile of keys + LoadKeys(keys, block_offset, valid_items, default_key, Int2Type(), Int2Type()); + +#pragma unroll + for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) + { + keys[KEY] = bit_ordered_conversion::to_bit_ordered(decomposer, keys[KEY]); + } + + // Rank the twiddled keys + int exclusive_digit_prefix[BINS_TRACKED_PER_THREAD]; + BlockRadixRankT(temp_storage.radix_rank).RankKeys(keys, ranks, digit_extractor(), exclusive_digit_prefix); + + __syncthreads(); + +// Share exclusive digit prefix +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + // Store exclusive prefix + temp_storage.exclusive_digit_prefix[bin_idx] = exclusive_digit_prefix[track]; + } + } + + __syncthreads(); + + // Get inclusive digit prefix + int inclusive_digit_prefix[BINS_TRACKED_PER_THREAD]; + +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + if (IS_DESCENDING) + { + // Get inclusive digit prefix from exclusive prefix (higher bins come first) + inclusive_digit_prefix[track] = + (bin_idx == 0) ? (BLOCK_THREADS * ITEMS_PER_THREAD) : temp_storage.exclusive_digit_prefix[bin_idx - 1]; + } + else + { + // Get inclusive digit prefix from exclusive prefix (lower bins come first) + inclusive_digit_prefix[track] = + (bin_idx == RADIX_DIGITS - 1) + ? 
(BLOCK_THREADS * ITEMS_PER_THREAD) + : temp_storage.exclusive_digit_prefix[bin_idx + 1]; + } + } + } + + __syncthreads(); + +// Update global scatter base offsets for each digit +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + bin_offset[track] -= exclusive_digit_prefix[track]; + temp_storage.keys_and_offsets.relative_bin_offsets[bin_idx] = bin_offset[track]; + bin_offset[track] += inclusive_digit_prefix[track]; + } + } + + __syncthreads(); + + // Scatter keys + ScatterKeys(keys, relative_bin_offsets, ranks, valid_items); + + // Gather/scatter values + GatherScatterValues(relative_bin_offsets, ranks, block_offset, valid_items, Int2Type()); + } + + //--------------------------------------------------------------------- + // Copy shortcut + //--------------------------------------------------------------------- + + /** + * Copy tiles within the range of input + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Copy(InputIteratorT d_in, T* d_out, OffsetT block_offset, OffsetT block_end) + { + // Simply copy the input + while (block_end - block_offset >= TILE_ITEMS) + { + T items[ITEMS_PER_THREAD]; + + LoadDirectStriped(threadIdx.x, d_in + block_offset, items); + __syncthreads(); + StoreDirectStriped(threadIdx.x, d_out + block_offset, items); + + block_offset += TILE_ITEMS; + } + + // Clean up last partial tile with guarded-I/O + if (block_offset < block_end) + { + OffsetT valid_items = block_end - block_offset; + + T items[ITEMS_PER_THREAD]; + + LoadDirectStriped(threadIdx.x, d_in + block_offset, items, valid_items); + __syncthreads(); + StoreDirectStriped(threadIdx.x, d_out + block_offset, items, valid_items); + } + } + + /** + * Copy tiles within the range of input (specialized for NullType) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Copy(InputIteratorT /*d_in*/, NullType* 
/*d_out*/, OffsetT /*block_offset*/, OffsetT /*block_end*/) + {} + + //--------------------------------------------------------------------- + // Interface + //--------------------------------------------------------------------- + + /** + * Constructor + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentRadixSortDownsweep( + TempStorage& temp_storage, + OffsetT (&bin_offset)[BINS_TRACKED_PER_THREAD], + OffsetT num_items, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + int current_bit, + int num_bits, + DecomposerT decomposer = {}) + : temp_storage(temp_storage.Alias()) + , d_keys_in(reinterpret_cast(d_keys_in)) + , d_values_in(d_values_in) + , d_keys_out(reinterpret_cast(d_keys_out)) + , d_values_out(d_values_out) + , current_bit(current_bit) + , num_bits(num_bits) + , short_circuit(1) + , decomposer(decomposer) + { +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + this->bin_offset[track] = bin_offset[track]; + + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + // Short circuit if the histogram has only bin counts of only zeros or problem-size + short_circuit = short_circuit && ((bin_offset[track] == 0) || (bin_offset[track] == num_items)); + } + } + + short_circuit = __syncthreads_and(short_circuit); + } + + /** + * Constructor + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentRadixSortDownsweep( + TempStorage& temp_storage, + OffsetT num_items, + OffsetT* d_spine, + const KeyT* d_keys_in, + KeyT* d_keys_out, + const ValueT* d_values_in, + ValueT* d_values_out, + int current_bit, + int num_bits, + DecomposerT decomposer = {}) + : temp_storage(temp_storage.Alias()) + , d_keys_in(reinterpret_cast(d_keys_in)) + , d_values_in(d_values_in) + , d_keys_out(reinterpret_cast(d_keys_out)) + , d_values_out(d_values_out) + , current_bit(current_bit) + , num_bits(num_bits) + , short_circuit(1) + , decomposer(decomposer) 
+ { +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + + // Load digit bin offsets (each of the first RADIX_DIGITS threads will load an offset for that digit) + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + if (IS_DESCENDING) + { + bin_idx = RADIX_DIGITS - bin_idx - 1; + } + + // Short circuit if the first block's histogram has only bin counts of only zeros or problem-size + OffsetT first_block_bin_offset = d_spine[gridDim.x * bin_idx]; + short_circuit = short_circuit && ((first_block_bin_offset == 0) || (first_block_bin_offset == num_items)); + + // Load my block's bin offset for my bin + bin_offset[track] = d_spine[(gridDim.x * bin_idx) + blockIdx.x]; + } + } + + short_circuit = __syncthreads_and(short_circuit); + } + + /** + * Distribute keys from a segment of input tiles. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessRegion(OffsetT block_offset, OffsetT block_end) + { + if (short_circuit) + { + // Copy keys + Copy(d_keys_in, d_keys_out, block_offset, block_end); + + // Copy values + Copy(d_values_in, d_values_out, block_offset, block_end); + } + else + { +// Process full tiles of tile_items +#pragma unroll 1 + while (block_end - block_offset >= TILE_ITEMS) + { + ProcessTile(block_offset); + block_offset += TILE_ITEMS; + + __syncthreads(); + } + + // Clean up last partial tile with guarded-I/O + if (block_offset < block_end) + { + ProcessTile(block_offset, block_end - block_offset); + } + } + } +}; + +} // namespace radix_sort +} // namespace detail + +template +using AgentRadixSortDownsweep CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public " + "interface will be removed.") = detail::radix_sort:: + AgentRadixSortDownsweep; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_histogram.cuh 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_histogram.cuh new file mode 100644 index 0000000000000000000000000000000000000000..29580897764edd11869ee70aaadf943148576213 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_histogram.cuh @@ -0,0 +1,296 @@ +/****************************************************************************** + * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * agent_radix_sort_histogram.cuh implements a stateful abstraction of CUDA + * thread blocks for participating in the device histogram kernel used for + * one-sweep radix sorting. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +template +struct AgentRadixSortHistogramPolicy +{ + enum + { + BLOCK_THREADS = _BLOCK_THREADS, + ITEMS_PER_THREAD = _ITEMS_PER_THREAD, + /** NUM_PARTS is the number of private histograms (parts) each histogram is split + * into. Each warp lane is assigned to a specific part based on the lane + * ID. However, lanes with the same ID in different warp use the same private + * histogram. This arrangement helps reduce the degree of conflicts in atomic + * operations. 
*/ + NUM_PARTS = CUB_MAX(1, NOMINAL_4B_NUM_PARTS * 4 / CUB_MAX(sizeof(ComputeT), 4)), + RADIX_BITS = _RADIX_BITS, + }; +}; + +template +struct AgentRadixSortExclusiveSumPolicy +{ + enum + { + BLOCK_THREADS = _BLOCK_THREADS, + RADIX_BITS = _RADIX_BITS, + }; +}; + +namespace detail +{ +namespace radix_sort +{ + +template +struct AgentRadixSortHistogram +{ + // constants + enum + { + ITEMS_PER_THREAD = AgentRadixSortHistogramPolicy::ITEMS_PER_THREAD, + BLOCK_THREADS = AgentRadixSortHistogramPolicy::BLOCK_THREADS, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + RADIX_BITS = AgentRadixSortHistogramPolicy::RADIX_BITS, + RADIX_DIGITS = 1 << RADIX_BITS, + MAX_NUM_PASSES = (sizeof(KeyT) * 8 + RADIX_BITS - 1) / RADIX_BITS, + NUM_PARTS = AgentRadixSortHistogramPolicy::NUM_PARTS, + }; + + using traits = radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + using Twiddle = RadixSortTwiddle; + using ShmemCounterT = std::uint32_t; + using ShmemAtomicCounterT = ShmemCounterT; + + using fundamental_digit_extractor_t = ShiftDigitExtractor; + using digit_extractor_t = typename traits::template digit_extractor_t; + + struct _TempStorage + { + ShmemAtomicCounterT bins[MAX_NUM_PASSES][RADIX_DIGITS][NUM_PARTS]; + }; + + struct TempStorage : Uninitialized<_TempStorage> + {}; + + // thread fields + // shared memory storage + _TempStorage& s; + + // bins for the histogram + OffsetT* d_bins_out; + + // data to compute the histogram + const bit_ordered_type* d_keys_in; + + // number of data items + OffsetT num_items; + + // begin and end bits for sorting + int begin_bit, end_bit; + + // number of sorting passes + int num_passes; + + DecomposerT decomposer; + + _CCCL_DEVICE _CCCL_FORCEINLINE AgentRadixSortHistogram( + TempStorage& temp_storage, + OffsetT* d_bins_out, + const KeyT* d_keys_in, + OffsetT num_items, + int begin_bit, + int end_bit, + DecomposerT decomposer = {}) + : 
s(temp_storage.Alias()) + , d_bins_out(d_bins_out) + , d_keys_in(reinterpret_cast(d_keys_in)) + , num_items(num_items) + , begin_bit(begin_bit) + , end_bit(end_bit) + , num_passes((end_bit - begin_bit + RADIX_BITS - 1) / RADIX_BITS) + , decomposer(decomposer) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE void Init() + { +// Initialize bins to 0. +#pragma unroll + for (int bin = threadIdx.x; bin < RADIX_DIGITS; bin += BLOCK_THREADS) + { +#pragma unroll + for (int pass = 0; pass < num_passes; ++pass) + { +#pragma unroll + for (int part = 0; part < NUM_PARTS; ++part) + { + s.bins[pass][bin][part] = 0; + } + } + } + __syncthreads(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadTileKeys(OffsetT tile_offset, bit_ordered_type (&keys)[ITEMS_PER_THREAD]) + { + // tile_offset < num_items always, hence the line below works + bool full_tile = num_items - tile_offset >= TILE_ITEMS; + if (full_tile) + { + LoadDirectStriped(threadIdx.x, d_keys_in + tile_offset, keys); + } + else + { + LoadDirectStriped( + threadIdx.x, d_keys_in + tile_offset, keys, num_items - tile_offset, Twiddle::DefaultKey(decomposer)); + } + +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + keys[u] = Twiddle::In(keys[u], decomposer); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + AccumulateSharedHistograms(OffsetT tile_offset, bit_ordered_type (&keys)[ITEMS_PER_THREAD]) + { + int part = ::cuda::ptx::get_sreg_laneid() % NUM_PARTS; +#pragma unroll + for (int current_bit = begin_bit, pass = 0; current_bit < end_bit; current_bit += RADIX_BITS, ++pass) + { + int num_bits = CUB_MIN(RADIX_BITS, end_bit - current_bit); +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + std::uint32_t bin = digit_extractor(current_bit, num_bits).Digit(keys[u]); + // Using cuda::atomic<> results in lower performance on GP100, + // so atomicAdd() is used instead. 
+ atomicAdd(&s.bins[pass][bin][part], 1); + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void AccumulateGlobalHistograms() + { +#pragma unroll + for (int bin = threadIdx.x; bin < RADIX_DIGITS; bin += BLOCK_THREADS) + { +#pragma unroll + for (int pass = 0; pass < num_passes; ++pass) + { + OffsetT count = cub::ThreadReduce(s.bins[pass][bin], ::cuda::std::plus<>{}); + if (count > 0) + { + // Using cuda::atomic<> here would also require using it in + // other kernels. However, other kernels of onesweep sorting + // (ExclusiveSum, Onesweep) don't need atomic + // access. Therefore, atomicAdd() is used, until + // cuda::atomic_ref<> becomes available. + atomicAdd(&d_bins_out[pass * RADIX_DIGITS + bin], count); + } + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void Process() + { + // Within a portion, avoid overflowing (u)int32 counters. + // Between portions, accumulate results in global memory. + constexpr OffsetT MAX_PORTION_SIZE = 1 << 30; + OffsetT num_portions = ::cuda::ceil_div(num_items, MAX_PORTION_SIZE); + for (OffsetT portion = 0; portion < num_portions; ++portion) + { + // Reset the counters. + Init(); + __syncthreads(); + + // Process the tiles. + OffsetT portion_offset = portion * MAX_PORTION_SIZE; + OffsetT portion_size = CUB_MIN(MAX_PORTION_SIZE, num_items - portion_offset); + for (OffsetT offset = blockIdx.x * TILE_ITEMS; offset < portion_size; offset += TILE_ITEMS * gridDim.x) + { + OffsetT tile_offset = portion_offset + offset; + bit_ordered_type keys[ITEMS_PER_THREAD]; + LoadTileKeys(tile_offset, keys); + AccumulateSharedHistograms(tile_offset, keys); + } + __syncthreads(); + + // Accumulate the result in global memory. 
+ AccumulateGlobalHistograms(); + __syncthreads(); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE digit_extractor_t digit_extractor(int current_bit, int num_bits) + { + return traits::template digit_extractor(current_bit, num_bits, decomposer); + } +}; + +} // namespace radix_sort +} // namespace detail + +template +using AgentRadixSortHistogram CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::radix_sort::AgentRadixSortHistogram; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_onesweep.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_onesweep.cuh new file mode 100644 index 0000000000000000000000000000000000000000..331012d36b9da68a88a8586595a8fa2e7f76abb7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_onesweep.cuh @@ -0,0 +1,706 @@ +/****************************************************************************** + * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * agent_radix_sort_onesweep.cuh implements a stateful abstraction of CUDA + * thread blocks for participating in the device one-sweep radix sort kernel. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +/** \brief cub::RadixSortStoreAlgorithm enumerates different algorithms to write + * partitioned elements (keys, values) stored in shared memory into global + * memory. Currently applies only to writing 4B keys in full tiles; in all other cases, + * RADIX_SORT_STORE_DIRECT is used. + */ +enum RadixSortStoreAlgorithm +{ + /** \brief Elements are statically distributed among block threads, which write them + * into the appropriate partition in global memory. 
This results in fewer instructions + * and more writes in flight at a given moment, but may generate more transactions. */ + RADIX_SORT_STORE_DIRECT, + /** \brief Elements are distributed among warps in a block distribution. Each warp + * goes through its elements and tries to write them while minimizing the number of + * memory transactions. This results in fewer memory transactions, but more + * instructions and less writes in flight at a given moment. */ + RADIX_SORT_STORE_ALIGNED +}; + +template > +struct AgentRadixSortOnesweepPolicy : ScalingType +{ + enum + { + RANK_NUM_PARTS = _RANK_NUM_PARTS, + RADIX_BITS = _RADIX_BITS, + }; + static constexpr RadixRankAlgorithm RANK_ALGORITHM = _RANK_ALGORITHM; + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + static constexpr RadixSortStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; +}; + +namespace detail +{ +namespace radix_sort +{ + +template +struct AgentRadixSortOnesweep +{ + // constants + enum + { + ITEMS_PER_THREAD = AgentRadixSortOnesweepPolicy::ITEMS_PER_THREAD, + KEYS_ONLY = std::is_same::value, + BLOCK_THREADS = AgentRadixSortOnesweepPolicy::BLOCK_THREADS, + RANK_NUM_PARTS = AgentRadixSortOnesweepPolicy::RANK_NUM_PARTS, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + RADIX_BITS = AgentRadixSortOnesweepPolicy::RADIX_BITS, + RADIX_DIGITS = 1 << RADIX_BITS, + BINS_PER_THREAD = (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS, + FULL_BINS = BINS_PER_THREAD * BLOCK_THREADS == RADIX_DIGITS, + WARP_THREADS = CUB_PTX_WARP_THREADS, + BLOCK_WARPS = BLOCK_THREADS / WARP_THREADS, + WARP_MASK = ~0, + LOOKBACK_PARTIAL_MASK = 1 << (PortionOffsetT(sizeof(PortionOffsetT)) * 8 - 2), + LOOKBACK_GLOBAL_MASK = 1 << (PortionOffsetT(sizeof(PortionOffsetT)) * 8 - 1), + LOOKBACK_KIND_MASK = LOOKBACK_PARTIAL_MASK | LOOKBACK_GLOBAL_MASK, + LOOKBACK_VALUE_MASK = ~LOOKBACK_KIND_MASK, + }; + + using traits = radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + using 
bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + using fundamental_digit_extractor_t = ShiftDigitExtractor; + using digit_extractor_t = typename traits::template digit_extractor_t; + + using AtomicOffsetT = PortionOffsetT; + + static constexpr RadixRankAlgorithm RANK_ALGORITHM = AgentRadixSortOnesweepPolicy::RANK_ALGORITHM; + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = AgentRadixSortOnesweepPolicy::SCAN_ALGORITHM; + static constexpr RadixSortStoreAlgorithm STORE_ALGORITHM = + sizeof(bit_ordered_type) == sizeof(uint32_t) + ? AgentRadixSortOnesweepPolicy::STORE_ALGORITHM + : RADIX_SORT_STORE_DIRECT; + + using Twiddle = RadixSortTwiddle; + + static_assert(RANK_ALGORITHM == RADIX_RANK_MATCH || RANK_ALGORITHM == RADIX_RANK_MATCH_EARLY_COUNTS_ANY + || RANK_ALGORITHM == RADIX_RANK_MATCH_EARLY_COUNTS_ATOMIC_OR, + "for onesweep agent, the ranking algorithm must warp-strided key arrangement"); + + using BlockRadixRankT = ::cuda::std::_If< + RANK_ALGORITHM == RADIX_RANK_MATCH_EARLY_COUNTS_ATOMIC_OR, + BlockRadixRankMatchEarlyCounts, + ::cuda::std::_If< + RANK_ALGORITHM == RADIX_RANK_MATCH, + BlockRadixRankMatch, + BlockRadixRankMatchEarlyCounts>>; + + // temporary storage + struct TempStorage_ + { + union + { + bit_ordered_type keys_out[TILE_ITEMS]; + ValueT values_out[TILE_ITEMS]; + typename BlockRadixRankT::TempStorage rank_temp_storage; + }; + union + { + OffsetT global_offsets[RADIX_DIGITS]; + PortionOffsetT block_idx; + }; + }; + + using TempStorage = Uninitialized; + + // thread variables + TempStorage_& s; + + // kernel parameters + AtomicOffsetT* d_lookback; + AtomicOffsetT* d_ctrs; + OffsetT* d_bins_out; + const OffsetT* d_bins_in; + bit_ordered_type* d_keys_out; + const bit_ordered_type* d_keys_in; + ValueT* d_values_out; + const ValueT* d_values_in; + PortionOffsetT num_items; + int current_bit; + int num_bits; + + // other thread variables + int warp; + int lane; + DecomposerT decomposer; + PortionOffsetT block_idx; + bool 
full_block; + + _CCCL_DEVICE _CCCL_FORCEINLINE digit_extractor_t digit_extractor() + { + return traits::template digit_extractor(current_bit, num_bits, decomposer); + } + + // helper methods + _CCCL_DEVICE _CCCL_FORCEINLINE std::uint32_t Digit(bit_ordered_type key) + { + return digit_extractor().Digit(key); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE int ThreadBin(int u) + { + return threadIdx.x * BINS_PER_THREAD + u; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void LookbackPartial(int (&bins)[BINS_PER_THREAD]) + { +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + int bin = ThreadBin(u); + if (FULL_BINS || bin < RADIX_DIGITS) + { + // write the local sum into the bin + AtomicOffsetT& loc = d_lookback[block_idx * RADIX_DIGITS + bin]; + PortionOffsetT value = bins[u] | LOOKBACK_PARTIAL_MASK; + ThreadStore(&loc, value); + } + } + } + + struct CountsCallback + { + using AgentT = + AgentRadixSortOnesweep; + AgentT& agent; + int (&bins)[BINS_PER_THREAD]; + bit_ordered_type (&keys)[ITEMS_PER_THREAD]; + static constexpr bool EMPTY = false; + _CCCL_DEVICE _CCCL_FORCEINLINE + CountsCallback(AgentT& agent, int (&bins)[BINS_PER_THREAD], bit_ordered_type (&keys)[ITEMS_PER_THREAD]) + : agent(agent) + , bins(bins) + , keys(keys) + {} + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()(int (&other_bins)[BINS_PER_THREAD]) + { +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + bins[u] = other_bins[u]; + } + agent.LookbackPartial(bins); + + agent.TryShortCircuit(keys, bins); + } + }; + + _CCCL_DEVICE _CCCL_FORCEINLINE void LookbackGlobal(int (&bins)[BINS_PER_THREAD]) + { +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + int bin = ThreadBin(u); + if (FULL_BINS || bin < RADIX_DIGITS) + { + PortionOffsetT inc_sum = bins[u]; + int want_mask = ~0; + // backtrack as long as necessary + for (PortionOffsetT block_jdx = block_idx - 1; block_jdx >= 0; --block_jdx) + { + // wait for some value to appear + PortionOffsetT value_j = 0; + AtomicOffsetT& loc_j = 
d_lookback[block_jdx * RADIX_DIGITS + bin]; + do + { + __threadfence_block(); // prevent hoisting loads from loop + value_j = ThreadLoad(&loc_j); + } while (value_j == 0); + + inc_sum += value_j & LOOKBACK_VALUE_MASK; + want_mask = __ballot_sync(want_mask, (value_j & LOOKBACK_GLOBAL_MASK) == 0); + if (value_j & LOOKBACK_GLOBAL_MASK) + { + break; + } + } + AtomicOffsetT& loc_i = d_lookback[block_idx * RADIX_DIGITS + bin]; + PortionOffsetT value_i = inc_sum | LOOKBACK_GLOBAL_MASK; + ThreadStore(&loc_i, value_i); + s.global_offsets[bin] += inc_sum - bins[u]; + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadKeys(OffsetT tile_offset, bit_ordered_type (&keys)[ITEMS_PER_THREAD]) + { + if (full_block) + { + LoadDirectWarpStriped(threadIdx.x, d_keys_in + tile_offset, keys); + } + else + { + LoadDirectWarpStriped( + threadIdx.x, d_keys_in + tile_offset, keys, num_items - tile_offset, Twiddle::DefaultKey(decomposer)); + } + +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + keys[u] = Twiddle::In(keys[u], decomposer); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadValues(OffsetT tile_offset, ValueT (&values)[ITEMS_PER_THREAD]) + { + if (full_block) + { + LoadDirectWarpStriped(threadIdx.x, d_values_in + tile_offset, values); + } + else + { + int tile_items = num_items - tile_offset; + LoadDirectWarpStriped(threadIdx.x, d_values_in + tile_offset, values, tile_items); + } + } + + /** Checks whether "short-circuiting" is possible. Short-circuiting happens + * if all TILE_ITEMS keys fall into the same bin, i.e. have the same digit + * value (note that it only happens for full tiles). If short-circuiting is + * performed, the part of the ranking algorithm after the CountsCallback, as + * well as the rest of the sorting (e.g. scattering keys and values to + * shared and global memory) are skipped; updates related to decoupled + * look-back are still performed. 
Instead, the keys assigned to the current + * thread block are written cooperatively into a contiguous location in + * d_keys_out corresponding to their digit. The values (if also sorting + * values) assigned to the current thread block are similarly copied from + * d_values_in to d_values_out. */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + TryShortCircuit(bit_ordered_type (&keys)[ITEMS_PER_THREAD], int (&bins)[BINS_PER_THREAD]) + { + // check if any bin can be short-circuited + bool short_circuit = false; +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + if (FULL_BINS || ThreadBin(u) < RADIX_DIGITS) + { + short_circuit = short_circuit || bins[u] == TILE_ITEMS; + } + } + short_circuit = __syncthreads_or(short_circuit); + if (!short_circuit) + { + return; + } + + ShortCircuitCopy(keys, bins); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + ShortCircuitCopy(bit_ordered_type (&keys)[ITEMS_PER_THREAD], int (&bins)[BINS_PER_THREAD]) + { + // short-circuit handling; note that global look-back is still required + + // compute offsets + std::uint32_t common_bin = Digit(keys[0]); + int offsets[BINS_PER_THREAD]; +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + int bin = ThreadBin(u); + offsets[u] = bin > common_bin ? 
TILE_ITEMS : 0; + } + + // global lookback + LoadBinsToOffsetsGlobal(offsets); + LookbackGlobal(bins); + UpdateBinsGlobal(bins, offsets); + __syncthreads(); + + // scatter the keys + OffsetT global_offset = s.global_offsets[common_bin]; +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + keys[u] = Twiddle::Out(keys[u], decomposer); + } + if (full_block) + { + StoreDirectWarpStriped(threadIdx.x, d_keys_out + global_offset, keys); + } + else + { + int tile_items = num_items - block_idx * TILE_ITEMS; + StoreDirectWarpStriped(threadIdx.x, d_keys_out + global_offset, keys, tile_items); + } + + if (!KEYS_ONLY) + { + // gather and scatter the values + ValueT values[ITEMS_PER_THREAD]; + LoadValues(block_idx * TILE_ITEMS, values); + if (full_block) + { + StoreDirectWarpStriped(threadIdx.x, d_values_out + global_offset, values); + } + else + { + int tile_items = num_items - block_idx * TILE_ITEMS; + StoreDirectWarpStriped(threadIdx.x, d_values_out + global_offset, values, tile_items); + } + } + + // exit early + ThreadExit(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + ScatterKeysShared(bit_ordered_type (&keys)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD]) + { +// write to shared memory +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + s.keys_out[ranks[u]] = keys[u]; + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + ScatterValuesShared(ValueT (&values)[ITEMS_PER_THREAD], int (&ranks)[ITEMS_PER_THREAD]) + { +// write to shared memory +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + s.values_out[ranks[u]] = values[u]; + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void LoadBinsToOffsetsGlobal(int (&offsets)[BINS_PER_THREAD]) + { +// global offset - global part +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + int bin = ThreadBin(u); + if (FULL_BINS || bin < RADIX_DIGITS) + { + s.global_offsets[bin] = d_bins_in[bin] - offsets[u]; + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void UpdateBinsGlobal(int 
(&bins)[BINS_PER_THREAD], int (&offsets)[BINS_PER_THREAD]) + { + bool last_block = (block_idx + 1) * TILE_ITEMS >= num_items; + if (d_bins_out != nullptr && last_block) + { +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + int bin = ThreadBin(u); + if (FULL_BINS || bin < RADIX_DIGITS) + { + d_bins_out[bin] = s.global_offsets[bin] + offsets[u] + bins[u]; + } + } + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterKeysGlobalDirect() + { + int tile_items = FULL_TILE ? TILE_ITEMS : num_items - block_idx * TILE_ITEMS; +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + int idx = threadIdx.x + u * BLOCK_THREADS; + bit_ordered_type key = s.keys_out[idx]; + OffsetT global_idx = idx + s.global_offsets[Digit(key)]; + if (FULL_TILE || idx < tile_items) + { + d_keys_out[global_idx] = Twiddle::Out(key, decomposer); + } + __syncwarp(WARP_MASK); + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterValuesGlobalDirect(int (&digits)[ITEMS_PER_THREAD]) + { + int tile_items = FULL_TILE ? 
TILE_ITEMS : num_items - block_idx * TILE_ITEMS; +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + int idx = threadIdx.x + u * BLOCK_THREADS; + ValueT value = s.values_out[idx]; + OffsetT global_idx = idx + s.global_offsets[digits[u]]; + if (FULL_TILE || idx < tile_items) + { + d_values_out[global_idx] = value; + } + __syncwarp(WARP_MASK); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterKeysGlobalAligned() + { + // this only works with full tiles + constexpr int ITEMS_PER_WARP = TILE_ITEMS / BLOCK_WARPS; + constexpr int ALIGN = 8; + constexpr auto CACHE_MODIFIER = STORE_CG; + + int warp_start = warp * ITEMS_PER_WARP; + int warp_end = (warp + 1) * ITEMS_PER_WARP; + int warp_offset = warp_start; + while (warp_offset < warp_end - WARP_THREADS) + { + int idx = warp_offset + lane; + bit_ordered_type key = s.keys_out[idx]; + bit_ordered_type key_out = Twiddle::Out(key, decomposer); + OffsetT global_idx = idx + s.global_offsets[Digit(key)]; + int last_lane = WARP_THREADS - 1; + int num_writes = WARP_THREADS; + if (lane == last_lane) + { + num_writes -= int(global_idx + 1) % ALIGN; + } + num_writes = __shfl_sync(WARP_MASK, num_writes, last_lane); + if (lane < num_writes) + { + ThreadStore(&d_keys_out[global_idx], key_out); + } + warp_offset += num_writes; + } + { + int num_writes = warp_end - warp_offset; + if (lane < num_writes) + { + int idx = warp_offset + lane; + bit_ordered_type key = s.keys_out[idx]; + OffsetT global_idx = idx + s.global_offsets[Digit(key)]; + ThreadStore(&d_keys_out[global_idx], Twiddle::Out(key, decomposer)); + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterKeysGlobal() + { + // write block data to global memory + if (full_block) + { + if (STORE_ALGORITHM == RADIX_SORT_STORE_ALIGNED) + { + ScatterKeysGlobalAligned(); + } + else + { + ScatterKeysGlobalDirect(); + } + } + else + { + ScatterKeysGlobalDirect(); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterValuesGlobal(int (&digits)[ITEMS_PER_THREAD]) + { + // 
write block data to global memory + if (full_block) + { + ScatterValuesGlobalDirect(digits); + } + else + { + ScatterValuesGlobalDirect(digits); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ComputeKeyDigits(int (&digits)[ITEMS_PER_THREAD]) + { +#pragma unroll + for (int u = 0; u < ITEMS_PER_THREAD; ++u) + { + int idx = threadIdx.x + u * BLOCK_THREADS; + digits[u] = Digit(s.keys_out[idx]); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void GatherScatterValues(int (&ranks)[ITEMS_PER_THREAD], Int2Type keys_only) + { + // compute digits corresponding to the keys + int digits[ITEMS_PER_THREAD]; + ComputeKeyDigits(digits); + + // load values + ValueT values[ITEMS_PER_THREAD]; + LoadValues(block_idx * TILE_ITEMS, values); + + // scatter values + __syncthreads(); + ScatterValuesShared(values, ranks); + + __syncthreads(); + ScatterValuesGlobal(digits); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void GatherScatterValues(int (&ranks)[ITEMS_PER_THREAD], Int2Type keys_only) {} + + _CCCL_DEVICE _CCCL_FORCEINLINE void Process() + { + // load keys + // if warp1 < warp2, all elements of warp1 occur before those of warp2 + // in the source array + bit_ordered_type keys[ITEMS_PER_THREAD]; + LoadKeys(block_idx * TILE_ITEMS, keys); + + // rank keys + int ranks[ITEMS_PER_THREAD]; + int exclusive_digit_prefix[BINS_PER_THREAD]; + int bins[BINS_PER_THREAD]; + BlockRadixRankT(s.rank_temp_storage) + .RankKeys(keys, ranks, digit_extractor(), exclusive_digit_prefix, CountsCallback(*this, bins, keys)); + + // scatter keys in shared memory + __syncthreads(); + ScatterKeysShared(keys, ranks); + + // compute global offsets + LoadBinsToOffsetsGlobal(exclusive_digit_prefix); + LookbackGlobal(bins); + UpdateBinsGlobal(bins, exclusive_digit_prefix); + + // scatter keys in global memory + __syncthreads(); + ScatterKeysGlobal(); + + // scatter values if necessary + GatherScatterValues(ranks, Int2Type()); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE // + AgentRadixSortOnesweep( + TempStorage& temp_storage, + 
AtomicOffsetT* d_lookback, + AtomicOffsetT* d_ctrs, + OffsetT* d_bins_out, + const OffsetT* d_bins_in, + KeyT* d_keys_out, + const KeyT* d_keys_in, + ValueT* d_values_out, + const ValueT* d_values_in, + PortionOffsetT num_items, + int current_bit, + int num_bits, + DecomposerT decomposer = {}) + : s(temp_storage.Alias()) + , d_lookback(d_lookback) + , d_ctrs(d_ctrs) + , d_bins_out(d_bins_out) + , d_bins_in(d_bins_in) + , d_keys_out(reinterpret_cast(d_keys_out)) + , d_keys_in(reinterpret_cast(d_keys_in)) + , d_values_out(d_values_out) + , d_values_in(d_values_in) + , num_items(num_items) + , current_bit(current_bit) + , num_bits(num_bits) + , warp(threadIdx.x / WARP_THREADS) + , lane(::cuda::ptx::get_sreg_laneid()) + , decomposer(decomposer) + { + // initialization + if (threadIdx.x == 0) + { + s.block_idx = atomicAdd(d_ctrs, 1); + } + __syncthreads(); + block_idx = s.block_idx; + full_block = (block_idx + 1) * TILE_ITEMS <= num_items; + } +}; + +} // namespace radix_sort +} // namespace detail + +template +using AgentRadixSortOnesweep CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = detail::radix_sort:: + AgentRadixSortOnesweep; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_upsweep.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_upsweep.cuh new file mode 100644 index 0000000000000000000000000000000000000000..cc0c10464f32ccea488e8f1652a18f266d8fd63c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_radix_sort_upsweep.cuh @@ -0,0 +1,563 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for participating in device-wide radix + * sort upsweep . 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * @brief Parameterizable tuning policy type for AgentRadixSortUpsweep + * + * @tparam NOMINAL_BLOCK_THREADS_4B + * Threads per thread block + * + * @tparam NOMINAL_ITEMS_PER_THREAD_4B + * Items per thread (per tile of input) + * + * @tparam ComputeT + * Dominant compute type + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading keys + * + * @tparam _RADIX_BITS + * The number of radix bits, i.e., log2(bins) + */ +template > +struct AgentRadixSortUpsweepPolicy : ScalingType +{ + enum + { + /// The number of radix bits, i.e., log2(bins) + RADIX_BITS = _RADIX_BITS, + }; + + /// Cache load modifier for reading keys + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace radix_sort +{ + +/** + * @brief AgentRadixSortUpsweep implements a stateful abstraction of CUDA thread blocks for + * participating in device-wide radix sort upsweep . 
+ * + * @tparam AgentRadixSortUpsweepPolicy + * Parameterized AgentRadixSortUpsweepPolicy tuning policy type + * + * @tparam KeyT + * KeyT type + * + * @tparam DecomposerT = identity_decomposer_t + * Signed integer type for global offsets + */ +template +struct AgentRadixSortUpsweep +{ + //--------------------------------------------------------------------- + // Type definitions and constants + //--------------------------------------------------------------------- + using traits = radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + // Integer type for digit counters (to be packed into words of PackedCounters) + using DigitCounter = unsigned char; + + // Integer type for packing DigitCounters into columns of shared memory banks + using PackedCounter = unsigned int; + + static constexpr CacheLoadModifier LOAD_MODIFIER = AgentRadixSortUpsweepPolicy::LOAD_MODIFIER; + + enum + { + RADIX_BITS = AgentRadixSortUpsweepPolicy::RADIX_BITS, + BLOCK_THREADS = AgentRadixSortUpsweepPolicy::BLOCK_THREADS, + KEYS_PER_THREAD = AgentRadixSortUpsweepPolicy::ITEMS_PER_THREAD, + + RADIX_DIGITS = 1 << RADIX_BITS, + + LOG_WARP_THREADS = CUB_PTX_LOG_WARP_THREADS, + WARP_THREADS = 1 << LOG_WARP_THREADS, + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + + TILE_ITEMS = BLOCK_THREADS * KEYS_PER_THREAD, + + BYTES_PER_COUNTER = sizeof(DigitCounter), + LOG_BYTES_PER_COUNTER = Log2::VALUE, + + PACKING_RATIO = sizeof(PackedCounter) / sizeof(DigitCounter), + LOG_PACKING_RATIO = Log2::VALUE, + + LOG_COUNTER_LANES = CUB_MAX(0, int(RADIX_BITS) - int(LOG_PACKING_RATIO)), + COUNTER_LANES = 1 << LOG_COUNTER_LANES, + + // To prevent counter overflow, we must periodically unpack and aggregate the + // digit counters back into registers. Each counter lane is assigned to a + // warp for aggregation. 
+ + LANES_PER_WARP = CUB_MAX(1, (COUNTER_LANES + WARPS - 1) / WARPS), + + // Unroll tiles in batches without risk of counter overflow + UNROLL_COUNT = CUB_MIN(64, 255 / KEYS_PER_THREAD), + UNROLLED_ELEMENTS = UNROLL_COUNT * TILE_ITEMS, + }; + + // Input iterator wrapper type (for applying cache modifier)s + using KeysItr = CacheModifiedInputIterator; + + // Digit extractor type + using fundamental_digit_extractor_t = BFEDigitExtractor; + using digit_extractor_t = typename traits::template digit_extractor_t; + + /** + * Shared memory storage layout + */ + union __align__(16) _TempStorage + { + DigitCounter thread_counters[COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO]; + PackedCounter packed_thread_counters[COUNTER_LANES][BLOCK_THREADS]; + OffsetT block_counters[WARP_THREADS][RADIX_DIGITS]; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Thread fields (aggregate state bundle) + //--------------------------------------------------------------------- + + // Shared storage for this CTA + _TempStorage& temp_storage; + + // Thread-local counters for periodically aggregating composite-counter lanes + OffsetT local_counts[LANES_PER_WARP][PACKING_RATIO]; + + // Input and output device pointers + KeysItr d_keys_in; + + // Target bits + int current_bit; + int num_bits; + DecomposerT decomposer; + + //--------------------------------------------------------------------- + // Helper structure for templated iteration + //--------------------------------------------------------------------- + + // Iterate + template + struct Iterate + { + // BucketKeys + static _CCCL_DEVICE _CCCL_FORCEINLINE void + BucketKeys(AgentRadixSortUpsweep& cta, bit_ordered_type keys[KEYS_PER_THREAD]) + { + cta.Bucket(keys[COUNT]); + + // Next + Iterate::BucketKeys(cta, keys); + } + }; + + // Terminate + template + struct Iterate + { + // BucketKeys + static 
_CCCL_DEVICE _CCCL_FORCEINLINE void + BucketKeys(AgentRadixSortUpsweep& /*cta*/, bit_ordered_type /*keys*/[KEYS_PER_THREAD]) + {} + }; + + //--------------------------------------------------------------------- + // Utility methods + //--------------------------------------------------------------------- + _CCCL_DEVICE _CCCL_FORCEINLINE digit_extractor_t digit_extractor() + { + return traits::template digit_extractor(current_bit, num_bits, decomposer); + } + + /** + * Decode a key and increment corresponding smem digit counter + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void Bucket(bit_ordered_type key) + { + // Perform transform op + bit_ordered_type converted_key = bit_ordered_conversion::to_bit_ordered(decomposer, key); + + // Extract current digit bits + std::uint32_t digit = digit_extractor().Digit(converted_key); + + // Get sub-counter offset + std::uint32_t sub_counter = digit & (PACKING_RATIO - 1); + + // Get row offset + std::uint32_t row_offset = digit >> LOG_PACKING_RATIO; + + // Increment counter + temp_storage.thread_counters[row_offset][threadIdx.x][sub_counter]++; + } + + /** + * Reset composite counters + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ResetDigitCounters() + { +#pragma unroll + for (int LANE = 0; LANE < COUNTER_LANES; LANE++) + { + temp_storage.packed_thread_counters[LANE][threadIdx.x] = 0; + } + } + + /** + * Reset the unpacked counters in each thread + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ResetUnpackedCounters() + { +#pragma unroll + for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) + { +#pragma unroll + for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) + { + local_counts[LANE][UNPACKED_COUNTER] = 0; + } + } + } + + /** + * Extracts and aggregates the digit counters for each counter lane + * owned by this warp + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void UnpackDigitCounts() + { + unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; + unsigned int warp_tid = ::cuda::ptx::get_sreg_laneid(); + +#pragma 
unroll + for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) + { + const int counter_lane = (LANE * WARPS) + warp_id; + if (counter_lane < COUNTER_LANES) + { +#pragma unroll + for (int PACKED_COUNTER = 0; PACKED_COUNTER < BLOCK_THREADS; PACKED_COUNTER += WARP_THREADS) + { +#pragma unroll + for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) + { + OffsetT counter = temp_storage.thread_counters[counter_lane][warp_tid + PACKED_COUNTER][UNPACKED_COUNTER]; + local_counts[LANE][UNPACKED_COUNTER] += counter; + } + } + } + } + } + + /** + * Processes a single, full tile + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessFullTile(OffsetT block_offset) + { + // Tile of keys + bit_ordered_type keys[KEYS_PER_THREAD]; + + LoadDirectStriped(threadIdx.x, d_keys_in + block_offset, keys); + + // Prevent hoisting + __syncthreads(); + + // Bucket tile of keys + Iterate<0, KEYS_PER_THREAD>::BucketKeys(*this, keys); + } + + /** + * Processes a single load (may have some threads masked off) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessPartialTile(OffsetT block_offset, const OffsetT& block_end) + { + // Process partial tile if necessary using single loads + for (OffsetT offset = threadIdx.x; offset < block_end - block_offset; offset += BLOCK_THREADS) + { + // Load and bucket key + bit_ordered_type key = d_keys_in[block_offset + offset]; + Bucket(key); + } + } + + //--------------------------------------------------------------------- + // Interface + //--------------------------------------------------------------------- + + /** + * Constructor + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentRadixSortUpsweep( + TempStorage& temp_storage, const KeyT* d_keys_in, int current_bit, int num_bits, DecomposerT decomposer = {}) + : temp_storage(temp_storage.Alias()) + , d_keys_in(reinterpret_cast(d_keys_in)) + , current_bit(current_bit) + , num_bits(num_bits) + , decomposer(decomposer) + {} + + /** + * Compute radix digit histograms from a segment of input tiles. 
+ */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessRegion(OffsetT block_offset, const OffsetT& block_end) + { + // Reset digit counters in smem and unpacked counters in registers + ResetDigitCounters(); + ResetUnpackedCounters(); + + // Unroll batches of full tiles + while (block_end - block_offset >= UNROLLED_ELEMENTS) + { + for (int i = 0; i < UNROLL_COUNT; ++i) + { + ProcessFullTile(block_offset); + block_offset += TILE_ITEMS; + } + + __syncthreads(); + + // Aggregate back into local_count registers to prevent overflow + UnpackDigitCounts(); + + __syncthreads(); + + // Reset composite counters in lanes + ResetDigitCounters(); + } + + // Unroll single full tiles + while (block_end - block_offset >= TILE_ITEMS) + { + ProcessFullTile(block_offset); + block_offset += TILE_ITEMS; + } + + // Process partial tile if necessary + ProcessPartialTile(block_offset, block_end); + + __syncthreads(); + + // Aggregate back into local_count registers + UnpackDigitCounts(); + } + + /** + * Extract counts (saving them to the external array) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExtractCounts(OffsetT* counters, int bin_stride = 1, int bin_offset = 0) + { + unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; + unsigned int warp_tid = ::cuda::ptx::get_sreg_laneid(); + +// Place unpacked digit counters in shared memory +#pragma unroll + for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) + { + int counter_lane = (LANE * WARPS) + warp_id; + if (counter_lane < COUNTER_LANES) + { + int digit_row = counter_lane << LOG_PACKING_RATIO; + +#pragma unroll + for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) + { + int bin_idx = digit_row + UNPACKED_COUNTER; + + temp_storage.block_counters[warp_tid][bin_idx] = local_counts[LANE][UNPACKED_COUNTER]; + } + } + } + + __syncthreads(); + +// Rake-reduce bin_count reductions + +// Whole blocks +#pragma unroll + for (int BIN_BASE = RADIX_DIGITS % BLOCK_THREADS; (BIN_BASE + BLOCK_THREADS) <= 
RADIX_DIGITS; + BIN_BASE += BLOCK_THREADS) + { + int bin_idx = BIN_BASE + threadIdx.x; + + OffsetT bin_count = 0; +#pragma unroll + for (int i = 0; i < WARP_THREADS; ++i) + { + bin_count += temp_storage.block_counters[i][bin_idx]; + } + + if (IS_DESCENDING) + { + bin_idx = RADIX_DIGITS - bin_idx - 1; + } + + counters[(bin_stride * bin_idx) + bin_offset] = bin_count; + } + + // Remainder + if ((RADIX_DIGITS % BLOCK_THREADS != 0) && (threadIdx.x < RADIX_DIGITS)) + { + int bin_idx = threadIdx.x; + + OffsetT bin_count = 0; +#pragma unroll + for (int i = 0; i < WARP_THREADS; ++i) + { + bin_count += temp_storage.block_counters[i][bin_idx]; + } + + if (IS_DESCENDING) + { + bin_idx = RADIX_DIGITS - bin_idx - 1; + } + + counters[(bin_stride * bin_idx) + bin_offset] = bin_count; + } + } + + /** + * @brief Extract counts + * + * @param[out] bin_count + * The exclusive prefix sum for the digits + * [(threadIdx.x * BINS_TRACKED_PER_THREAD) ... (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - + * 1] + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExtractCounts(OffsetT (&bin_count)[BINS_TRACKED_PER_THREAD]) + { + unsigned int warp_id = threadIdx.x >> LOG_WARP_THREADS; + unsigned int warp_tid = ::cuda::ptx::get_sreg_laneid(); + +// Place unpacked digit counters in shared memory +#pragma unroll + for (int LANE = 0; LANE < LANES_PER_WARP; LANE++) + { + int counter_lane = (LANE * WARPS) + warp_id; + if (counter_lane < COUNTER_LANES) + { + int digit_row = counter_lane << LOG_PACKING_RATIO; + +#pragma unroll + for (int UNPACKED_COUNTER = 0; UNPACKED_COUNTER < PACKING_RATIO; UNPACKED_COUNTER++) + { + int bin_idx = digit_row + UNPACKED_COUNTER; + + temp_storage.block_counters[warp_tid][bin_idx] = local_counts[LANE][UNPACKED_COUNTER]; + } + } + } + + __syncthreads(); + +// Rake-reduce bin_count reductions +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + + if 
((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + bin_count[track] = 0; + +#pragma unroll + for (int i = 0; i < WARP_THREADS; ++i) + { + bin_count[track] += temp_storage.block_counters[i][bin_idx]; + } + } + } + } +}; + +} // namespace radix_sort +} // namespace detail + +template +using AgentRadixSortUpsweep CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::radix_sort::AgentRadixSortUpsweep; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_reduce.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_reduce.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b10e2fbee8229dd05e780186b7067e608339aa95 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_reduce.cuh @@ -0,0 +1,466 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file cub::AgentReduce implements a stateful abstraction of CUDA thread + * blocks for participating in device-wide reduction. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include + +#include + +_CCCL_SUPPRESS_DEPRECATED_PUSH +#include +_CCCL_SUPPRESS_DEPRECATED_POP + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * Parameterizable tuning policy type for AgentReduce + * @tparam NOMINAL_BLOCK_THREADS_4B Threads per thread block + * @tparam NOMINAL_ITEMS_PER_THREAD_4B Items per thread (per tile of input) + * @tparam ComputeT Dominant compute type + * @tparam _VECTOR_LOAD_LENGTH Number of items per vectorized load + * @tparam _BLOCK_ALGORITHM Cooperative block-wide reduction algorithm to use + * @tparam _LOAD_MODIFIER Cache load modifier for reading input elements + */ +template > +struct AgentReducePolicy : ScalingType +{ + /// Number of items per vectorized load + static constexpr int VECTOR_LOAD_LENGTH = _VECTOR_LOAD_LENGTH; + + /// Cooperative block-wide reduction algorithm to use + static constexpr BlockReduceAlgorithm BLOCK_ALGORITHM = _BLOCK_ALGORITHM; + + /// Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace reduce +{ + +/** + * @brief AgentReduce implements a stateful abstraction of CUDA thread blocks + * for participating in device-wide reduction . + * + * Each thread reduces only the values it loads. 
If `FIRST_TILE`, this partial + * reduction is stored into `thread_aggregate`. Otherwise it is accumulated + * into `thread_aggregate`. + * + * @tparam AgentReducePolicy + * Parameterized AgentReducePolicy tuning policy type + * + * @tparam InputIteratorT + * Random-access iterator type for input + * + * @tparam OutputIteratorT + * Random-access iterator type for output + * + * @tparam OffsetT + * Signed integer type for global offsets + * + * @tparam ReductionOp + * Binary reduction operator type having member + * `auto operator()(T &&a, U &&b)` + * + * @tparam AccumT + * The type of intermediate accumulator (according to P2322R6) + */ +template +struct AgentReduce +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// The input value type + using InputT = value_t; + + /// Vector type of InputT for data movement + using VectorT = typename CubVector::Type; + + /// Input iterator wrapper type (for applying cache modifier) + // Wrap the native input pointer with CacheModifiedInputIterator + // or directly use the supplied input iterator type + using WrappedInputIteratorT = + ::cuda::std::_If<::cuda::std::is_pointer::value, + CacheModifiedInputIterator, + InputIteratorT>; + + /// Constants + static constexpr int BLOCK_THREADS = AgentReducePolicy::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = AgentReducePolicy::ITEMS_PER_THREAD; + static constexpr int TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD; + static constexpr int VECTOR_LOAD_LENGTH = CUB_MIN(ITEMS_PER_THREAD, AgentReducePolicy::VECTOR_LOAD_LENGTH); + + // Can vectorize according to the policy if the input iterator is a native + // pointer to a primitive type + static constexpr bool ATTEMPT_VECTORIZATION = + (VECTOR_LOAD_LENGTH > 1) && (ITEMS_PER_THREAD % VECTOR_LOAD_LENGTH == 0) + && (::cuda::std::is_pointer::value) && Traits::PRIMITIVE; + + static constexpr 
CacheLoadModifier LOAD_MODIFIER = AgentReducePolicy::LOAD_MODIFIER; + + static constexpr BlockReduceAlgorithm BLOCK_ALGORITHM = AgentReducePolicy::BLOCK_ALGORITHM; + + /// Parameterized BlockReduce primitive + using BlockReduceT = BlockReduce; + + /// Shared memory type required by this thread block + struct _TempStorage + { + typename BlockReduceT::TempStorage reduce; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; ///< Reference to temp_storage + InputIteratorT d_in; ///< Input data to reduce + WrappedInputIteratorT d_wrapped_in; ///< Wrapped input data to reduce + ReductionOp reduction_op; ///< Binary reduction operator + TransformOp transform_op; ///< Transform operator + + //--------------------------------------------------------------------- + // Utility + //--------------------------------------------------------------------- + + // Whether or not the input is aligned with the vector type (specialized for + // types we can vectorize) + template + static _CCCL_DEVICE _CCCL_FORCEINLINE bool IsAligned(Iterator d_in, Int2Type /*can_vectorize*/) + { + return (size_t(d_in) & (sizeof(VectorT) - 1)) == 0; + } + + // Whether or not the input is aligned with the vector type (specialized for + // types we cannot vectorize) + template + static _CCCL_DEVICE _CCCL_FORCEINLINE bool IsAligned(Iterator /*d_in*/, Int2Type /*can_vectorize*/) + { + return false; + } + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + /** + * @brief Constructor + * @param temp_storage Reference to temp_storage + * @param d_in Input data to reduce + * @param reduction_op Binary reduction operator + */ + 
_CCCL_DEVICE _CCCL_FORCEINLINE + AgentReduce(TempStorage& temp_storage, InputIteratorT d_in, ReductionOp reduction_op, TransformOp transform_op = {}) + : temp_storage(temp_storage.Alias()) + , d_in(d_in) + , d_wrapped_in(d_in) + , reduction_op(reduction_op) + , transform_op(transform_op) + {} + + //--------------------------------------------------------------------- + // Tile consumption + //--------------------------------------------------------------------- + + /** + * @brief Consume a full tile of input (non-vectorized) + * @param block_offset The offset the tile to consume + * @param valid_items The number of valid items in the tile + * @param is_full_tile Whether or not this is a full tile + * @param can_vectorize Whether or not we can vectorize loads + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile( + AccumT& thread_aggregate, + OffsetT block_offset, + int /*valid_items*/, + Int2Type /*is_full_tile*/, + Int2Type /*can_vectorize*/) + { + AccumT items[ITEMS_PER_THREAD]; + + // Load items in striped fashion + load_transform_direct_striped(threadIdx.x, d_wrapped_in + block_offset, items, transform_op); + + // Reduce items within each thread stripe + thread_aggregate = (IS_FIRST_TILE) ? 
cub::ThreadReduce(items, reduction_op) + : cub::ThreadReduce(items, reduction_op, thread_aggregate); + } + + /** + * Consume a full tile of input (vectorized) + * @param block_offset The offset the tile to consume + * @param valid_items The number of valid items in the tile + * @param is_full_tile Whether or not this is a full tile + * @param can_vectorize Whether or not we can vectorize loads + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile( + AccumT& thread_aggregate, + OffsetT block_offset, + int /*valid_items*/, + Int2Type /*is_full_tile*/, + Int2Type /*can_vectorize*/) + { + // Alias items as an array of VectorT and load it in striped fashion + enum + { + WORDS = ITEMS_PER_THREAD / VECTOR_LOAD_LENGTH + }; + + // Fabricate a vectorized input iterator + InputT* d_in_unqualified = const_cast(d_in) + block_offset + (threadIdx.x * VECTOR_LOAD_LENGTH); + CacheModifiedInputIterator d_vec_in( + reinterpret_cast(d_in_unqualified)); + + // Load items as vector items + InputT input_items[ITEMS_PER_THREAD]; + VectorT* vec_items = reinterpret_cast(input_items); +#pragma unroll + for (int i = 0; i < WORDS; ++i) + { + vec_items[i] = d_vec_in[BLOCK_THREADS * i]; + } + + // Convert from input type to output type + AccumT items[ITEMS_PER_THREAD]; +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; ++i) + { + items[i] = transform_op(input_items[i]); + } + + // Reduce items within each thread stripe + thread_aggregate = (IS_FIRST_TILE) ? 
cub::ThreadReduce(items, reduction_op) + : cub::ThreadReduce(items, reduction_op, thread_aggregate); + } + + /** + * Consume a partial tile of input + * @param block_offset The offset the tile to consume + * @param valid_items The number of valid items in the tile + * @param is_full_tile Whether or not this is a full tile + * @param can_vectorize Whether or not we can vectorize loads + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile( + AccumT& thread_aggregate, + OffsetT block_offset, + int valid_items, + Int2Type /*is_full_tile*/, + Int2Type /*can_vectorize*/) + { + // Partial tile + int thread_offset = threadIdx.x; + + // Read first item + if ((IS_FIRST_TILE) && (thread_offset < valid_items)) + { + thread_aggregate = transform_op(d_wrapped_in[block_offset + thread_offset]); + thread_offset += BLOCK_THREADS; + } + + // Continue reading items (block-striped) + while (thread_offset < valid_items) + { + InputT item(d_wrapped_in[block_offset + thread_offset]); + + thread_aggregate = reduction_op(thread_aggregate, transform_op(item)); + thread_offset += BLOCK_THREADS; + } + } + + //--------------------------------------------------------------- + // Consume a contiguous segment of tiles + //--------------------------------------------------------------------- + + /** + * @brief Reduce a contiguous segment of input tiles + * @param even_share GridEvenShare descriptor + * @param can_vectorize Whether or not we can vectorize loads + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE AccumT + ConsumeRange(GridEvenShare& even_share, Int2Type can_vectorize) + { + AccumT thread_aggregate{}; + + if (even_share.block_end - even_share.block_offset < TILE_ITEMS) + { + // First tile isn't full (not all threads have valid items) + int valid_items = even_share.block_end - even_share.block_offset; + ConsumeTile(thread_aggregate, even_share.block_offset, valid_items, Int2Type(), can_vectorize); + return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, 
reduction_op, valid_items); + } + + // Extracting this into a function saves 8% of generated kernel size by allowing to reuse + // the block reduction below. This also workaround hang in nvcc. + ConsumeFullTileRange(thread_aggregate, even_share, can_vectorize); + + // Compute block-wide reduction (all threads have valid items) + return BlockReduceT(temp_storage.reduce).Reduce(thread_aggregate, reduction_op); + } + + /** + * @brief Reduce a contiguous segment of input tiles + * @param[in] block_offset Threadblock begin offset (inclusive) + * @param[in] block_end Threadblock end offset (exclusive) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AccumT ConsumeRange(OffsetT block_offset, OffsetT block_end) + { + GridEvenShare even_share; + even_share.template BlockInit(block_offset, block_end); + + return (IsAligned(d_in + block_offset, Int2Type())) + ? ConsumeRange(even_share, Int2Type < true && ATTEMPT_VECTORIZATION > ()) + : ConsumeRange(even_share, Int2Type < false && ATTEMPT_VECTORIZATION > ()); + } + + /** + * Reduce a contiguous segment of input tiles + * @param[in] even_share GridEvenShare descriptor + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AccumT ConsumeTiles(GridEvenShare& even_share) + { + // Initialize GRID_MAPPING_STRIP_MINE even-share descriptor for this thread block + even_share.template BlockInit(); + + return (IsAligned(d_in, Int2Type())) + ? 
ConsumeRange(even_share, Int2Type < true && ATTEMPT_VECTORIZATION > ()) + : ConsumeRange(even_share, Int2Type < false && ATTEMPT_VECTORIZATION > ()); + } + +private: + /** + * @brief Reduce a contiguous segment of input tiles with more than `TILE_ITEMS` elements + * @param even_share GridEvenShare descriptor + * @param can_vectorize Whether or not we can vectorize loads + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeFullTileRange( + AccumT& thread_aggregate, GridEvenShare& even_share, Int2Type can_vectorize) + { + // At least one full block + ConsumeTile(thread_aggregate, even_share.block_offset, TILE_ITEMS, Int2Type(), can_vectorize); + + if (even_share.block_end - even_share.block_offset < even_share.block_stride) + { + // Exit early to handle offset overflow + return; + } + + even_share.block_offset += even_share.block_stride; + + // Consume subsequent full tiles of input, at least one full tile was processed, so + // `even_share.block_end >= TILE_ITEMS` + while (even_share.block_offset <= even_share.block_end - TILE_ITEMS) + { + ConsumeTile(thread_aggregate, even_share.block_offset, TILE_ITEMS, Int2Type(), can_vectorize); + + if (even_share.block_end - even_share.block_offset < even_share.block_stride) + { + // Exit early to handle offset overflow + return; + } + + even_share.block_offset += even_share.block_stride; + } + + // Consume a partially-full tile + if (even_share.block_offset < even_share.block_end) + { + int valid_items = even_share.block_end - even_share.block_offset; + ConsumeTile(thread_aggregate, even_share.block_offset, valid_items, Int2Type(), can_vectorize); + } + } +}; + +} // namespace reduce +} // namespace detail + +template +using AgentReduce CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public interface " + "will be removed.") = detail::reduce:: + AgentReduce; + +CUB_NAMESPACE_END diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_reduce_by_key.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_reduce_by_key.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a90399f432538843d2ab6cf02454850e97f4ea89 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_reduce_by_key.cuh @@ -0,0 +1,728 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file cub::AgentReduceByKey implements a stateful abstraction of CUDA thread + * blocks for participating in device-wide reduce-value-by-key. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * @brief Parameterizable tuning policy type for AgentReduceByKey + * + * @tparam _BLOCK_THREADS + * Threads per thread block + * + * @tparam _ITEMS_PER_THREAD + * Items per thread (per tile of input) + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading input elements + * + * @tparam _SCAN_ALGORITHM + * The BlockScan algorithm to use + * + * @tparam DelayConstructorT + * Implementation detail, do not specify directly, requirements on the + * content of this type are subject to breaking change. 
+ */ +template > +struct AgentReduceByKeyPolicy +{ + ///< Threads per thread block + static constexpr int BLOCK_THREADS = _BLOCK_THREADS; + + ///< Items per thread (per tile of input) + static constexpr int ITEMS_PER_THREAD = _ITEMS_PER_THREAD; + + ///< The BlockLoad algorithm to use + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + + ///< Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + + ///< The BlockScan algorithm to use + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace reduce +{ + +/** + * @brief AgentReduceByKey implements a stateful abstraction of CUDA thread + * blocks for participating in device-wide reduce-value-by-key + * + * @tparam AgentReduceByKeyPolicyT + * Parameterized AgentReduceByKeyPolicy tuning policy type + * + * @tparam KeysInputIteratorT + * Random-access input iterator type for keys + * + * @tparam UniqueOutputIteratorT + * Random-access output iterator type for keys + * + * @tparam ValuesInputIteratorT + * Random-access input iterator type for values + * + * @tparam AggregatesOutputIteratorT + * Random-access output iterator type for values + * + * @tparam NumRunsOutputIteratorT + * Output iterator type for recording number of items selected + * + * @tparam EqualityOpT + * KeyT equality operator type + * + * @tparam ReductionOpT + * ValueT reduction operator type + * + * @tparam OffsetT + * Signed integer type for global offsets + * + * @tparam AccumT + * The type of intermediate accumulator (according to P2322R6) + */ +template +struct AgentReduceByKey +{ + 
//--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + // The input keys type + using KeyInputT = value_t; + + // The output keys type + using KeyOutputT = non_void_value_t; + + // The input values type + using ValueInputT = value_t; + + // Tuple type for scanning (pairs accumulated segment-value with + // segment-index) + using OffsetValuePairT = KeyValuePair; + + // Tuple type for pairing keys and values + using KeyValuePairT = KeyValuePair; + + // Tile status descriptor interface type + using ScanTileStateT = ReduceByKeyScanTileState; + + // Guarded inequality functor + template + struct GuardedInequalityWrapper + { + /// Wrapped equality operator + _EqualityOpT op; + + /// Items remaining + int num_remaining; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE GuardedInequalityWrapper(_EqualityOpT op, int num_remaining) + : op(op) + , num_remaining(num_remaining) + {} + + /// Boolean inequality operator, returns (a != b) + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator()(const T& a, const T& b, int idx) const + { + if (idx < num_remaining) + { + return !op(a, b); // In bounds + } + + // Return true if first out-of-bounds item, false otherwise + return (idx == num_remaining); + } + }; + + // Constants + static constexpr int BLOCK_THREADS = AgentReduceByKeyPolicyT::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = AgentReduceByKeyPolicyT::ITEMS_PER_THREAD; + static constexpr int TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD; + static constexpr int TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1); + + // Whether or not the scan operation has a zero-valued identity value (true + // if we're performing addition on a primitive type) + static constexpr int HAS_IDENTITY_ZERO = + (std::is_same>::value) && (Traits::PRIMITIVE); + + // Cache-modified Input iterator wrapper type (for applying cache modifier) + // for keys Wrap the 
native input pointer with + // CacheModifiedValuesInputIterator or directly use the supplied input + // iterator type + using WrappedKeysInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + KeysInputIteratorT>; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) + // for values Wrap the native input pointer with + // CacheModifiedValuesInputIterator or directly use the supplied input + // iterator type + using WrappedValuesInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + ValuesInputIteratorT>; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) + // for fixup values Wrap the native input pointer with + // CacheModifiedValuesInputIterator or directly use the supplied input + // iterator type + using WrappedFixupInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + AggregatesOutputIteratorT>; + + // Reduce-value-by-segment scan operator + using ReduceBySegmentOpT = ReduceBySegmentOp; + + // Parameterized BlockLoad type for keys + using BlockLoadKeysT = + BlockLoad; + + // Parameterized BlockLoad type for values + using BlockLoadValuesT = BlockLoad; + + // Parameterized BlockDiscontinuity type for keys + using BlockDiscontinuityKeys = BlockDiscontinuity; + + // Parameterized BlockScan type + using BlockScanT = BlockScan; + + // Callback type for obtaining tile prefix during block scan + using DelayConstructorT = typename AgentReduceByKeyPolicyT::detail::delay_constructor_t; + using TilePrefixCallbackOpT = + TilePrefixCallbackOp; + + // Key and value exchange types + using KeyExchangeT = KeyOutputT[TILE_ITEMS + 1]; + using ValueExchangeT = AccumT[TILE_ITEMS + 1]; + + // Shared memory type for this thread block + union _TempStorage + { + struct ScanStorage + { + // Smem needed for tile scanning + typename BlockScanT::TempStorage scan; + + // Smem needed for cooperative prefix callback + typename TilePrefixCallbackOpT::TempStorage prefix; + + // 
Smem needed for discontinuity detection + typename BlockDiscontinuityKeys::TempStorage discontinuity; + } scan_storage; + + // Smem needed for loading keys + typename BlockLoadKeysT::TempStorage load_keys; + + // Smem needed for loading values + typename BlockLoadValuesT::TempStorage load_values; + + // Smem needed for compacting key value pairs(allows non POD items in this + // union) + Uninitialized raw_exchange; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + /// Reference to temp_storage + _TempStorage& temp_storage; + + /// Input keys + WrappedKeysInputIteratorT d_keys_in; + + /// Unique output keys + UniqueOutputIteratorT d_unique_out; + + /// Input values + WrappedValuesInputIteratorT d_values_in; + + /// Output value aggregates + AggregatesOutputIteratorT d_aggregates_out; + + /// Output pointer for total number of segments identified + NumRunsOutputIteratorT d_num_runs_out; + + /// KeyT equality operator + EqualityOpT equality_op; + + /// Reduction operator + ReductionOpT reduction_op; + + /// Reduce-by-segment scan operator + ReduceBySegmentOpT scan_op; + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + /** + * @param temp_storage + * Reference to temp_storage + * + * @param d_keys_in + * Input keys + * + * @param d_unique_out + * Unique output keys + * + * @param d_values_in + * Input values + * + * @param d_aggregates_out + * Output value aggregates + * + * @param d_num_runs_out + * Output pointer for total number of segments identified + * + * @param equality_op + * KeyT equality operator + * + * @param reduction_op + * ValueT reduction operator + */ + _CCCL_DEVICE _CCCL_FORCEINLINE 
AgentReduceByKey( + TempStorage& temp_storage, + KeysInputIteratorT d_keys_in, + UniqueOutputIteratorT d_unique_out, + ValuesInputIteratorT d_values_in, + AggregatesOutputIteratorT d_aggregates_out, + NumRunsOutputIteratorT d_num_runs_out, + EqualityOpT equality_op, + ReductionOpT reduction_op) + : temp_storage(temp_storage.Alias()) + , d_keys_in(d_keys_in) + , d_unique_out(d_unique_out) + , d_values_in(d_values_in) + , d_aggregates_out(d_aggregates_out) + , d_num_runs_out(d_num_runs_out) + , equality_op(equality_op) + , reduction_op(reduction_op) + , scan_op(reduction_op) + {} + + //--------------------------------------------------------------------- + // Scatter utility methods + //--------------------------------------------------------------------- + + /** + * Directly scatter flagged items to output offsets + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterDirect( + KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD], + OffsetT (&segment_flags)[ITEMS_PER_THREAD], + OffsetT (&segment_indices)[ITEMS_PER_THREAD]) + { +// Scatter flagged keys and values +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (segment_flags[ITEM]) + { + d_unique_out[segment_indices[ITEM]] = scatter_items[ITEM].key; + d_aggregates_out[segment_indices[ITEM]] = scatter_items[ITEM].value; + } + } + } + + /** + * 2-phase scatter flagged items to output offsets + * + * The exclusive scan causes each head flag to be paired with the previous + * value aggregate: the scatter offsets must be decremented for value + * aggregates + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterTwoPhase( + KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD], + OffsetT (&segment_flags)[ITEMS_PER_THREAD], + OffsetT (&segment_indices)[ITEMS_PER_THREAD], + OffsetT num_tile_segments, + OffsetT num_tile_segments_prefix) + { + __syncthreads(); + +// Compact and scatter pairs +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (segment_flags[ITEM]) + { + 
temp_storage.raw_exchange.Alias()[segment_indices[ITEM] - num_tile_segments_prefix] = scatter_items[ITEM]; + } + } + + __syncthreads(); + + for (int item = threadIdx.x; item < num_tile_segments; item += BLOCK_THREADS) + { + KeyValuePairT pair = temp_storage.raw_exchange.Alias()[item]; + d_unique_out[num_tile_segments_prefix + item] = pair.key; + d_aggregates_out[num_tile_segments_prefix + item] = pair.value; + } + } + + /** + * Scatter flagged items + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void Scatter( + KeyValuePairT (&scatter_items)[ITEMS_PER_THREAD], + OffsetT (&segment_flags)[ITEMS_PER_THREAD], + OffsetT (&segment_indices)[ITEMS_PER_THREAD], + OffsetT num_tile_segments, + OffsetT num_tile_segments_prefix) + { + // Do a one-phase scatter if (a) two-phase is disabled or (b) the average + // number of selected items per thread is less than one + if (TWO_PHASE_SCATTER && (num_tile_segments > BLOCK_THREADS)) + { + ScatterTwoPhase(scatter_items, segment_flags, segment_indices, num_tile_segments, num_tile_segments_prefix); + } + else + { + ScatterDirect(scatter_items, segment_flags, segment_indices); + } + } + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * @brief Process a tile of input (dynamic chained scan) + * + * @tparam IS_LAST_TILE + * Whether the current tile is the last tile + * + * @param num_remaining + * Number of global input items remaining (including this tile) + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeTile(OffsetT num_remaining, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_state) + { + // Tile keys + KeyOutputT keys[ITEMS_PER_THREAD]; + + // Tile keys shuffled up + KeyOutputT prev_keys[ITEMS_PER_THREAD]; 
+ + // Tile values + AccumT values[ITEMS_PER_THREAD]; + + // Segment head flags + OffsetT head_flags[ITEMS_PER_THREAD]; + + // Segment indices + OffsetT segment_indices[ITEMS_PER_THREAD]; + + // Zipped values and segment flags|indices + OffsetValuePairT scan_items[ITEMS_PER_THREAD]; + + // Zipped key value pairs for scattering + KeyValuePairT scatter_items[ITEMS_PER_THREAD]; + + // Load keys + if (IS_LAST_TILE) + { + BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys, num_remaining); + } + else + { + BlockLoadKeysT(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys); + } + + // Load tile predecessor key in first thread + KeyOutputT tile_predecessor; + if (threadIdx.x == 0) + { + // if (tile_idx == 0) + // first tile gets repeat of first item (thus first item will not + // be flagged as a head) + // else + // Subsequent tiles get last key from previous tile + tile_predecessor = (tile_idx == 0) ? keys[0] : d_keys_in[tile_offset - 1]; + } + + __syncthreads(); + + // Load values + if (IS_LAST_TILE) + { + BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + tile_offset, values, num_remaining); + } + else + { + BlockLoadValuesT(temp_storage.load_values).Load(d_values_in + tile_offset, values); + } + + __syncthreads(); + + // Initialize head-flags and shuffle up the previous keys + if (IS_LAST_TILE) + { + // Use custom flag operator to additionally flag the first out-of-bounds + // item + GuardedInequalityWrapper flag_op(equality_op, num_remaining); + BlockDiscontinuityKeys(temp_storage.scan_storage.discontinuity) + .FlagHeads(head_flags, keys, prev_keys, flag_op, tile_predecessor); + } + else + { + InequalityWrapper flag_op(equality_op); + BlockDiscontinuityKeys(temp_storage.scan_storage.discontinuity) + .FlagHeads(head_flags, keys, prev_keys, flag_op, tile_predecessor); + } + + // Reset head-flag on the very first item to make sure we don't start a new run for data where + // (key[0] == key[0]) is false (e.g., when key[0] is NaN) 
+ if (threadIdx.x == 0 && tile_idx == 0) + { + head_flags[0] = 0; + } + + // Zip values and head flags +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + scan_items[ITEM].value = values[ITEM]; + scan_items[ITEM].key = head_flags[ITEM]; + } + + // Perform exclusive tile scan + // Inclusive block-wide scan aggregate + OffsetValuePairT block_aggregate; + + // Number of segments prior to this tile + OffsetT num_segments_prefix; + + // The tile prefix folded with block_aggregate + OffsetValuePairT total_aggregate; + + if (tile_idx == 0) + { + // Scan first tile + BlockScanT(temp_storage.scan_storage.scan).ExclusiveScan(scan_items, scan_items, scan_op, block_aggregate); + num_segments_prefix = 0; + total_aggregate = block_aggregate; + + // Update tile status if there are successor tiles + if ((!IS_LAST_TILE) && (threadIdx.x == 0)) + { + tile_state.SetInclusive(0, block_aggregate); + } + } + else + { + // Scan non-first tile + TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.scan_storage.prefix, scan_op, tile_idx); + BlockScanT(temp_storage.scan_storage.scan).ExclusiveScan(scan_items, scan_items, scan_op, prefix_op); + + block_aggregate = prefix_op.GetBlockAggregate(); + num_segments_prefix = prefix_op.GetExclusivePrefix().key; + total_aggregate = prefix_op.GetInclusivePrefix(); + } + +// Rezip scatter items and segment indices +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + scatter_items[ITEM].key = prev_keys[ITEM]; + scatter_items[ITEM].value = scan_items[ITEM].value; + segment_indices[ITEM] = scan_items[ITEM].key; + } + + // At this point, each flagged segment head has: + // - The key for the previous segment + // - The reduced value from the previous segment + // - The segment index for the reduced value + + // Scatter flagged keys and values + OffsetT num_tile_segments = block_aggregate.key; + Scatter(scatter_items, head_flags, segment_indices, num_tile_segments, num_segments_prefix); + + // Last thread in 
last tile will output final count (and last pair, if + // necessary) + if ((IS_LAST_TILE) && (threadIdx.x == BLOCK_THREADS - 1)) + { + OffsetT num_segments = num_segments_prefix + num_tile_segments; + + // If the last tile is a whole tile, output the final_value + if (num_remaining == TILE_ITEMS) + { + d_unique_out[num_segments] = keys[ITEMS_PER_THREAD - 1]; + d_aggregates_out[num_segments] = total_aggregate.value; + num_segments++; + } + + // Output the total number of items selected + *d_num_runs_out = num_segments; + } + } + + /** + * @brief Scan tiles of items as part of a dynamic chained scan + * + * @param num_items + * Total number of input items + * + * @param tile_state + * Global tile state descriptor + * + * @param start_tile + * The starting tile for the current grid + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeRange(OffsetT num_items, ScanTileStateT& tile_state, int start_tile) + { + // Blocks are launched in increasing order, so just assign one tile per + // block + + // Current tile index + int tile_idx = start_tile + blockIdx.x; + + // Global offset for the current tile + OffsetT tile_offset = OffsetT(TILE_ITEMS) * tile_idx; + + // Remaining items (including this tile) + OffsetT num_remaining = num_items - tile_offset; + + if (num_remaining > TILE_ITEMS) + { + // Not last tile + ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); + } + else if (num_remaining > 0) + { + // Last tile + ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); + } + } +}; + +} // namespace reduce +} // namespace detail + +template +using AgentReduceByKey CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::reduce::AgentReduceByKey< + AgentReduceByKeyPolicyT, + KeysInputIteratorT, + UniqueOutputIteratorT, + ValuesInputIteratorT, + AggregatesOutputIteratorT, + NumRunsOutputIteratorT, + EqualityOpT, + ReductionOpT, + OffsetT, + AccumT>; + +CUB_NAMESPACE_END diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_rle.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_rle.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2ea0729db92a44c5fd23b387ce00dffe2610ece1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_rle.cuh @@ -0,0 +1,1010 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * cub::AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide + * run-length-encode. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * Parameterizable tuning policy type for AgentRle + * + * @tparam _BLOCK_THREADS + * Threads per thread block + * + * @tparam _ITEMS_PER_THREAD + * Items per thread (per tile of input) + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading input elements + * + * @tparam _STORE_WARP_TIME_SLICING + * Whether or not only one warp's worth of shared memory should be allocated and time-sliced among + * block-warps during any store-related data transpositions + * (versus each warp having its own storage) + * + * @tparam _SCAN_ALGORITHM + 
* The BlockScan algorithm to use + * + * @tparam DelayConstructorT + * Implementation detail, do not specify directly, requirements on the + * content of this type are subject to breaking change. + */ +template > +struct AgentRlePolicy +{ + enum + { + /// Threads per thread block + BLOCK_THREADS = _BLOCK_THREADS, + + /// Items per thread (per tile of input) + ITEMS_PER_THREAD = _ITEMS_PER_THREAD, + + /// Whether or not only one warp's worth of shared memory should be allocated and time-sliced + /// among block-warps during any store-related data transpositions (versus each warp having its + /// own storage) + STORE_WARP_TIME_SLICING = _STORE_WARP_TIME_SLICING, + }; + + /// The BlockLoad algorithm to use + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + + /// Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + + /// The BlockScan algorithm to use + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace rle +{ + +/** + * @brief AgentRle implements a stateful abstraction of CUDA thread blocks for participating in device-wide + * run-length-encode + * + * @tparam AgentRlePolicyT + * Parameterized AgentRlePolicyT tuning policy type + * + * @tparam InputIteratorT + * Random-access input iterator type for data + * + * @tparam OffsetsOutputIteratorT + * Random-access output iterator type for offset values + * + * @tparam LengthsOutputIteratorT + * Random-access output iterator type for length values + * + * @tparam EqualityOpT + * T equality operator type + * + * @tparam OffsetT + * Signed integer type for global offsets + */ +template +struct AgentRle +{ + 
//--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// The input value type + using T = cub::detail::value_t; + + /// The lengths output value type + using LengthT = cub::detail::non_void_value_t; + + /// Tuple type for scanning (pairs run-length and run-index) + using LengthOffsetPair = KeyValuePair; + + /// Tile status descriptor interface type + using ScanTileStateT = ReduceByKeyScanTileState; + + // Constants + enum + { + WARP_THREADS = CUB_WARP_THREADS(0), + BLOCK_THREADS = AgentRlePolicyT::BLOCK_THREADS, + ITEMS_PER_THREAD = AgentRlePolicyT::ITEMS_PER_THREAD, + WARP_ITEMS = WARP_THREADS * ITEMS_PER_THREAD, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + + /// Whether or not to sync after loading data + SYNC_AFTER_LOAD = (AgentRlePolicyT::LOAD_ALGORITHM != BLOCK_LOAD_DIRECT), + + /// Whether or not only one warp's worth of shared memory should be allocated and time-sliced + /// among block-warps during any store-related data transpositions (versus each warp having + /// its own storage) + STORE_WARP_TIME_SLICING = AgentRlePolicyT::STORE_WARP_TIME_SLICING, + ACTIVE_EXCHANGE_WARPS = (STORE_WARP_TIME_SLICING) ? 1 : WARPS, + }; + + /** + * Special operator that signals all out-of-bounds items are not equal to everything else, + * forcing both (1) the last item to be tail-flagged and (2) all oob items to be marked + * trivial. 
+ */ + template + struct OobInequalityOp + { + OffsetT num_remaining; + EqualityOpT equality_op; + + _CCCL_DEVICE _CCCL_FORCEINLINE OobInequalityOp(OffsetT num_remaining, EqualityOpT equality_op) + : num_remaining(num_remaining) + , equality_op(equality_op) + {} + + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator()(T first, T second, Index idx) + { + if (!LAST_TILE || (idx < num_remaining)) + { + return !equality_op(first, second); + } + else + { + return true; + } + } + }; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) for data + // Wrap the native input pointer with CacheModifiedVLengthnputIterator + // Directly use the supplied input iterator type + using WrappedInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + InputIteratorT>; + + // Parameterized BlockLoad type for data + using BlockLoadT = + BlockLoad; + + // Parameterized BlockDiscontinuity type for data + using BlockDiscontinuityT = BlockDiscontinuity; + + // Parameterized WarpScan type + using WarpScanPairs = WarpScan; + + // Reduce-length-by-run scan operator + using ReduceBySegmentOpT = ReduceBySegmentOp<::cuda::std::plus<>>; + + // Callback type for obtaining tile prefix during block scan + using DelayConstructorT = typename AgentRlePolicyT::detail::delay_constructor_t; + using TilePrefixCallbackOpT = + TilePrefixCallbackOp; + + // Warp exchange types + using WarpExchangePairs = WarpExchange; + + using WarpExchangePairsStorage = + ::cuda::std::_If; + + using WarpExchangeOffsets = WarpExchange; + using WarpExchangeLengths = WarpExchange; + + using WarpAggregates = LengthOffsetPair[WARPS]; + + // Shared memory type for this thread block + struct _TempStorage + { + // Aliasable storage layout + union Aliasable + { + struct ScanStorage + { + // Smem needed for discontinuity detection + typename BlockDiscontinuityT::TempStorage discontinuity; + + // Smem needed for warp-synchronous scans + typename WarpScanPairs::TempStorage 
warp_scan[WARPS]; + + // Smem needed for sharing warp-wide aggregates + Uninitialized warp_aggregates; + + // Smem needed for cooperative prefix callback + typename TilePrefixCallbackOpT::TempStorage prefix; + } scan_storage; + + // Smem needed for input loading + typename BlockLoadT::TempStorage load; + + // Aliasable layout needed for two-phase scatter + union ScatterAliasable + { + unsigned long long align; + WarpExchangePairsStorage exchange_pairs[ACTIVE_EXCHANGE_WARPS]; + typename WarpExchangeOffsets::TempStorage exchange_offsets[ACTIVE_EXCHANGE_WARPS]; + typename WarpExchangeLengths::TempStorage exchange_lengths[ACTIVE_EXCHANGE_WARPS]; + } scatter_aliasable; + + } aliasable; + + OffsetT tile_idx; // Shared tile index + LengthOffsetPair tile_inclusive; // Inclusive tile prefix + LengthOffsetPair tile_exclusive; // Exclusive tile prefix + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; ///< Reference to temp_storage + + WrappedInputIteratorT d_in; ///< Pointer to input sequence of data items + OffsetsOutputIteratorT d_offsets_out; ///< Input run offsets + LengthsOutputIteratorT d_lengths_out; ///< Output run lengths + + EqualityOpT equality_op; ///< T equality operator + ReduceBySegmentOpT scan_op; ///< Reduce-length-by-flag scan operator + OffsetT num_items; ///< Total number of input items + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + /** + * @param[in] temp_storage + * Reference to temp_storage + * + * @param[in] d_in + * Pointer to input sequence of data items + * + * @param[out] d_offsets_out + * Pointer to output sequence of run offsets + * + * @param[out] 
d_lengths_out + * Pointer to output sequence of run lengths + * + * @param[in] equality_op + * Equality operator + * + * @param[in] num_items + * Total number of input items + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentRle( + TempStorage& temp_storage, + InputIteratorT d_in, + OffsetsOutputIteratorT d_offsets_out, + LengthsOutputIteratorT d_lengths_out, + EqualityOpT equality_op, + OffsetT num_items) + : temp_storage(temp_storage.Alias()) + , d_in(d_in) + , d_offsets_out(d_offsets_out) + , d_lengths_out(d_lengths_out) + , equality_op(equality_op) + , scan_op(::cuda::std::plus<>{}) + , num_items(num_items) + {} + + //--------------------------------------------------------------------- + // Utility methods for initializing the selections + //--------------------------------------------------------------------- + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeSelections( + OffsetT tile_offset, + OffsetT num_remaining, + T (&items)[ITEMS_PER_THREAD], + LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD]) + { + bool head_flags[ITEMS_PER_THREAD]; + bool tail_flags[ITEMS_PER_THREAD]; + + OobInequalityOp inequality_op(num_remaining, equality_op); + + if (FIRST_TILE && LAST_TILE) + { + // First-and-last-tile always head-flags the first item and tail-flags the last item + + BlockDiscontinuityT(temp_storage.aliasable.scan_storage.discontinuity) + .FlagHeadsAndTails(head_flags, tail_flags, items, inequality_op); + } + else if (FIRST_TILE) + { + // First-tile always head-flags the first item + + // Get the first item from the next tile + T tile_successor_item; + if (threadIdx.x == BLOCK_THREADS - 1) + { + tile_successor_item = d_in[tile_offset + TILE_ITEMS]; + } + + BlockDiscontinuityT(temp_storage.aliasable.scan_storage.discontinuity) + .FlagHeadsAndTails(head_flags, tail_flags, tile_successor_item, items, inequality_op); + } + else if (LAST_TILE) + { + // Last-tile always flags the last item + + // Get the last item from the previous tile + T 
tile_predecessor_item; + if (threadIdx.x == 0) + { + tile_predecessor_item = d_in[tile_offset - 1]; + } + + BlockDiscontinuityT(temp_storage.aliasable.scan_storage.discontinuity) + .FlagHeadsAndTails(head_flags, tile_predecessor_item, tail_flags, items, inequality_op); + } + else + { + // Get the first item from the next tile + T tile_successor_item; + if (threadIdx.x == BLOCK_THREADS - 1) + { + tile_successor_item = d_in[tile_offset + TILE_ITEMS]; + } + + // Get the last item from the previous tile + T tile_predecessor_item; + if (threadIdx.x == 0) + { + tile_predecessor_item = d_in[tile_offset - 1]; + } + + BlockDiscontinuityT(temp_storage.aliasable.scan_storage.discontinuity) + .FlagHeadsAndTails(head_flags, tile_predecessor_item, tail_flags, tile_successor_item, items, inequality_op); + } + +// Zip counts and runs +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // input output + // items [ 0 0 0 1 2 3 3 ] + // heads [ 1 0 0 1 1 1 0 ] + // tails [ 0 0 1 1 1 0 1 ] + // key [ 1 0 0 0 0 1 0 ] head && !tail - heads of non-trivial (length > 1) runs + // value [ 1 1 1 0 0 1 1 ] !head || !tail - elements of non-trivial runs + lengths_and_num_runs[ITEM].key = head_flags[ITEM] && (!tail_flags[ITEM]); + lengths_and_num_runs[ITEM].value = ((!head_flags[ITEM]) || (!tail_flags[ITEM])); + } + } + + //--------------------------------------------------------------------- + // Scan utility methods + //--------------------------------------------------------------------- + + /** + * Scan of allocations + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void WarpScanAllocations( + LengthOffsetPair& tile_aggregate, + LengthOffsetPair& warp_aggregate, + LengthOffsetPair& warp_exclusive_in_tile, + LengthOffsetPair& thread_exclusive_in_warp, + LengthOffsetPair (&lengths_and_num_runs)[ITEMS_PER_THREAD]) + { + // Perform warpscans + unsigned int warp_id = ((WARPS == 1) ? 
0 : threadIdx.x / WARP_THREADS); + int lane_id = ::cuda::ptx::get_sreg_laneid(); + + LengthOffsetPair identity; + identity.key = 0; + identity.value = 0; + + LengthOffsetPair thread_inclusive; + + // `thread_exclusive_in_warp.key`: + // number of non-trivial runs starts in previous threads + // `thread_exclusive_in_warp.val`: + // number of items in the last non-trivial run in previous threads + + // `thread_aggregate.key`: + // number of non-trivial runs starts in this thread + // `thread_aggregate.val`: + // number of items in the last non-trivial run in this thread + LengthOffsetPair thread_aggregate = cub::ThreadReduce(lengths_and_num_runs, scan_op); + WarpScanPairs(temp_storage.aliasable.scan_storage.warp_scan[warp_id]) + .Scan(thread_aggregate, thread_inclusive, thread_exclusive_in_warp, identity, scan_op); + + // `thread_inclusive.key`: + // number of non-trivial runs starts in this and previous warp threads + // `thread_inclusive.val`: + // number of items in the last non-trivial run in this or previous warp threads + + // Last lane in each warp shares its warp-aggregate + if (lane_id == WARP_THREADS - 1) + { + // `temp_storage.aliasable.scan_storage.warp_aggregates[warp_id].key`: + // number of non-trivial runs starts in this warp + // `temp_storage.aliasable.scan_storage.warp_aggregates[warp_id].val`: + // number of items in the last non-trivial run in this warp + temp_storage.aliasable.scan_storage.warp_aggregates.Alias()[warp_id] = thread_inclusive; + } + + __syncthreads(); + + // Accumulate total selected and the warp-wide prefix + + // `warp_exclusive_in_tile.key`: + // number of non-trivial runs starts in previous warps + // `warp_exclusive_in_tile.val`: + // number of items in the last non-trivial run in previous warps + warp_exclusive_in_tile = identity; + warp_aggregate = temp_storage.aliasable.scan_storage.warp_aggregates.Alias()[warp_id]; + + // `tile_aggregate.key`: + // number of non-trivial runs starts in this CTA + // `tile_aggregate.val`: + 
// number of items in the last non-trivial run in this CTA + tile_aggregate = temp_storage.aliasable.scan_storage.warp_aggregates.Alias()[0]; + +#pragma unroll + for (int WARP = 1; WARP < WARPS; ++WARP) + { + if (warp_id == WARP) + { + warp_exclusive_in_tile = tile_aggregate; + } + + tile_aggregate = scan_op(tile_aggregate, temp_storage.aliasable.scan_storage.warp_aggregates.Alias()[WARP]); + } + + // Ensure all threads have read warp aggregates before temp_storage is repurposed in the + // subsequent scatter stage + __syncthreads(); + } + + //--------------------------------------------------------------------- + // Utility methods for scattering selections + //--------------------------------------------------------------------- + + /** + * Two-phase scatter, specialized for warp time-slicing + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterTwoPhase( + OffsetT tile_num_runs_exclusive_in_global, + OffsetT warp_num_runs_aggregate, + OffsetT warp_num_runs_exclusive_in_tile, + OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], + LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD], + Int2Type is_warp_time_slice) + { + unsigned int warp_id = ((WARPS == 1) ? 
0 : threadIdx.x / WARP_THREADS); + int lane_id = ::cuda::ptx::get_sreg_laneid(); + + // Locally compact items within the warp (first warp) + if (warp_id == 0) + { + WarpExchangePairs(temp_storage.aliasable.scatter_aliasable.exchange_pairs[0]) + .ScatterToStriped(lengths_and_offsets, thread_num_runs_exclusive_in_warp); + } + +// Locally compact items within the warp (remaining warps) +#pragma unroll + for (int SLICE = 1; SLICE < WARPS; ++SLICE) + { + __syncthreads(); + + if (warp_id == SLICE) + { + WarpExchangePairs(temp_storage.aliasable.scatter_aliasable.exchange_pairs[0]) + .ScatterToStriped(lengths_and_offsets, thread_num_runs_exclusive_in_warp); + } + } + +// Global scatter +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + // warp_num_runs_aggregate - number of non-trivial runs starts in current warp + if ((ITEM * WARP_THREADS) < warp_num_runs_aggregate - lane_id) + { + OffsetT item_offset = + tile_num_runs_exclusive_in_global + warp_num_runs_exclusive_in_tile + (ITEM * WARP_THREADS) + lane_id; + + // Scatter offset + d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key; + + // Scatter length if not the first (global) length + if ((ITEM != 0) || (item_offset > 0)) + { + d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value; + } + } + } + } + + /** + * Two-phase scatter + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterTwoPhase( + OffsetT tile_num_runs_exclusive_in_global, + OffsetT warp_num_runs_aggregate, + OffsetT warp_num_runs_exclusive_in_tile, + OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], + LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD], + Int2Type is_warp_time_slice) + { + unsigned int warp_id = ((WARPS == 1) ? 
0 : threadIdx.x / WARP_THREADS); + int lane_id = ::cuda::ptx::get_sreg_laneid(); + + // Unzip + OffsetT run_offsets[ITEMS_PER_THREAD]; + LengthT run_lengths[ITEMS_PER_THREAD]; + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + run_offsets[ITEM] = lengths_and_offsets[ITEM].key; + run_lengths[ITEM] = lengths_and_offsets[ITEM].value; + } + + WarpExchangeOffsets(temp_storage.aliasable.scatter_aliasable.exchange_offsets[warp_id]) + .ScatterToStriped(run_offsets, thread_num_runs_exclusive_in_warp); + + __syncwarp(0xffffffff); + + WarpExchangeLengths(temp_storage.aliasable.scatter_aliasable.exchange_lengths[warp_id]) + .ScatterToStriped(run_lengths, thread_num_runs_exclusive_in_warp); + +// Global scatter +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + if ((ITEM * WARP_THREADS) + lane_id < warp_num_runs_aggregate) + { + OffsetT item_offset = + tile_num_runs_exclusive_in_global + warp_num_runs_exclusive_in_tile + (ITEM * WARP_THREADS) + lane_id; + + // Scatter offset + d_offsets_out[item_offset] = run_offsets[ITEM]; + + // Scatter length if not the first (global) length + if ((ITEM != 0) || (item_offset > 0)) + { + d_lengths_out[item_offset - 1] = run_lengths[ITEM]; + } + } + } + } + + /** + * Direct scatter + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterDirect( + OffsetT tile_num_runs_exclusive_in_global, + OffsetT warp_num_runs_aggregate, + OffsetT warp_num_runs_exclusive_in_tile, + OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], + LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD]) + { +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (thread_num_runs_exclusive_in_warp[ITEM] < warp_num_runs_aggregate) + { + OffsetT item_offset = + tile_num_runs_exclusive_in_global + warp_num_runs_exclusive_in_tile + thread_num_runs_exclusive_in_warp[ITEM]; + + // Scatter offset + d_offsets_out[item_offset] = lengths_and_offsets[ITEM].key; + + // Scatter length if not 
the first (global) length + if (item_offset > 0) + { + d_lengths_out[item_offset - 1] = lengths_and_offsets[ITEM].value; + } + } + } + } + + /** + * Scatter + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Scatter( + OffsetT tile_num_runs_aggregate, + OffsetT tile_num_runs_exclusive_in_global, + OffsetT warp_num_runs_aggregate, + OffsetT warp_num_runs_exclusive_in_tile, + OffsetT (&thread_num_runs_exclusive_in_warp)[ITEMS_PER_THREAD], + LengthOffsetPair (&lengths_and_offsets)[ITEMS_PER_THREAD]) + { + if ((ITEMS_PER_THREAD == 1) || (tile_num_runs_aggregate < BLOCK_THREADS)) + { + // Direct scatter if the warp has any items + if (warp_num_runs_aggregate) + { + ScatterDirect( + tile_num_runs_exclusive_in_global, + warp_num_runs_aggregate, + warp_num_runs_exclusive_in_tile, + thread_num_runs_exclusive_in_warp, + lengths_and_offsets); + } + } + else + { + // Scatter two phase + ScatterTwoPhase( + tile_num_runs_exclusive_in_global, + warp_num_runs_aggregate, + warp_num_runs_exclusive_in_tile, + thread_num_runs_exclusive_in_warp, + lengths_and_offsets, + Int2Type()); + } + } + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * @brief Process a tile of input (dynamic chained scan) + * + * @param num_items + * Total number of global input items + * + * @param num_remaining + * Number of global input items remaining (including this tile) + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param &tile_status + * Global list of tile status + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE LengthOffsetPair + ConsumeTile(OffsetT num_items, OffsetT num_remaining, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_status) + { + if (tile_idx == 0) + { + // First tile + + // Load items + T items[ITEMS_PER_THREAD]; + if (LAST_TILE) + { + 
BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items, num_remaining, T()); + } + else + { + BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items); + } + + if (SYNC_AFTER_LOAD) + { + __syncthreads(); + } + + // Set flags + LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD]; + + InitializeSelections(tile_offset, num_remaining, items, lengths_and_num_runs); + + // Exclusive scan of lengths and runs + LengthOffsetPair tile_aggregate; + LengthOffsetPair warp_aggregate; + LengthOffsetPair warp_exclusive_in_tile; + LengthOffsetPair thread_exclusive_in_warp; + + WarpScanAllocations( + tile_aggregate, warp_aggregate, warp_exclusive_in_tile, thread_exclusive_in_warp, lengths_and_num_runs); + + // Update tile status if this is not the last tile + if (!LAST_TILE && (threadIdx.x == 0)) + { + tile_status.SetInclusive(0, tile_aggregate); + } + + // Update thread_exclusive_in_warp to fold in warp run-length + if (thread_exclusive_in_warp.key == 0) + { + // If there are no non-trivial runs starts in the previous warp threads, then + // `thread_exclusive_in_warp.val` denotes the number of items in the last + // non-trivial run of the previous CTA threads, so the better name for it is + // `thread_exclusive_in_tile`. 
+ thread_exclusive_in_warp.value += warp_exclusive_in_tile.value; + } + + LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD]; + OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD]; + LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD]; + + // Downsweep scan through lengths_and_num_runs + internal::ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp); + + // Zip + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value; + lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM; + thread_num_runs_exclusive_in_warp[ITEM] = + (lengths_and_num_runs[ITEM].key) ? lengths_and_num_runs2[ITEM].key : // keep + WARP_THREADS * ITEMS_PER_THREAD; // discard + } + + OffsetT tile_num_runs_aggregate = tile_aggregate.key; + OffsetT tile_num_runs_exclusive_in_global = 0; + OffsetT warp_num_runs_aggregate = warp_aggregate.key; + OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key; + + // Scatter + Scatter( + tile_num_runs_aggregate, + tile_num_runs_exclusive_in_global, + warp_num_runs_aggregate, + warp_num_runs_exclusive_in_tile, + thread_num_runs_exclusive_in_warp, + lengths_and_offsets); + + // Return running total (inclusive of this tile) + return tile_aggregate; + } + else + { + // Not first tile + + // Load items + T items[ITEMS_PER_THREAD]; + if (LAST_TILE) + { + BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items, num_remaining, T()); + } + else + { + BlockLoadT(temp_storage.aliasable.load).Load(d_in + tile_offset, items); + } + + if (SYNC_AFTER_LOAD) + { + __syncthreads(); + } + + // Set flags + LengthOffsetPair lengths_and_num_runs[ITEMS_PER_THREAD]; + + InitializeSelections(tile_offset, num_remaining, items, lengths_and_num_runs); + + // Exclusive scan of lengths and runs + LengthOffsetPair tile_aggregate; + LengthOffsetPair warp_aggregate; + LengthOffsetPair 
warp_exclusive_in_tile; + LengthOffsetPair thread_exclusive_in_warp; + + WarpScanAllocations( + tile_aggregate, warp_aggregate, warp_exclusive_in_tile, thread_exclusive_in_warp, lengths_and_num_runs); + + // First warp computes tile prefix in lane 0 + TilePrefixCallbackOpT prefix_op( + tile_status, temp_storage.aliasable.scan_storage.prefix, ::cuda::std::plus<>{}, tile_idx); + unsigned int warp_id = ((WARPS == 1) ? 0 : threadIdx.x / WARP_THREADS); + if (warp_id == 0) + { + prefix_op(tile_aggregate); + if (threadIdx.x == 0) + { + temp_storage.tile_exclusive = prefix_op.exclusive_prefix; + } + } + + __syncthreads(); + + LengthOffsetPair tile_exclusive_in_global = temp_storage.tile_exclusive; + + // Update thread_exclusive_in_warp to fold in warp and tile run-lengths + LengthOffsetPair thread_exclusive = scan_op(tile_exclusive_in_global, warp_exclusive_in_tile); + if (thread_exclusive_in_warp.key == 0) + { + // If there are no non-trivial runs starts in the previous warp threads, then + // `thread_exclusive_in_warp.val` denotes the number of items in the last + // non-trivial run of the previous grid threads, so the better name for it is + // `thread_exclusive_in_grid`. 
+ thread_exclusive_in_warp.value += thread_exclusive.value; + } + + // Downsweep scan through lengths_and_num_runs + + // `lengths_and_num_runs2.key`: + // number of non-trivial runs starts in previous grid threads + // `lengths_and_num_runs2.val`: + // number of items in the last non-trivial run in previous grid threads + LengthOffsetPair lengths_and_num_runs2[ITEMS_PER_THREAD]; + + // `lengths_and_offsets.key`: + // offset to the item in the input sequence + // `lengths_and_offsets.val`: + // number of items in the last non-trivial run in previous grid threads + LengthOffsetPair lengths_and_offsets[ITEMS_PER_THREAD]; + OffsetT thread_num_runs_exclusive_in_warp[ITEMS_PER_THREAD]; + + internal::ThreadScanExclusive(lengths_and_num_runs, lengths_and_num_runs2, scan_op, thread_exclusive_in_warp); + +// Zip +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + lengths_and_offsets[ITEM].value = lengths_and_num_runs2[ITEM].value; + lengths_and_offsets[ITEM].key = tile_offset + (threadIdx.x * ITEMS_PER_THREAD) + ITEM; + thread_num_runs_exclusive_in_warp[ITEM] = + (lengths_and_num_runs[ITEM].key) ? 
lengths_and_num_runs2[ITEM].key : // keep + WARP_THREADS * ITEMS_PER_THREAD; // discard + } + + OffsetT tile_num_runs_aggregate = tile_aggregate.key; + OffsetT tile_num_runs_exclusive_in_global = tile_exclusive_in_global.key; + OffsetT warp_num_runs_aggregate = warp_aggregate.key; + OffsetT warp_num_runs_exclusive_in_tile = warp_exclusive_in_tile.key; + + // Scatter + Scatter( + tile_num_runs_aggregate, + tile_num_runs_exclusive_in_global, + warp_num_runs_aggregate, + warp_num_runs_exclusive_in_tile, + thread_num_runs_exclusive_in_warp, + lengths_and_offsets); + + // Return running total (inclusive of this tile) + return prefix_op.inclusive_prefix; + } + } + + /** + * @brief Scan tiles of items as part of a dynamic chained scan + * + * @param num_tiles + * Total number of input tiles + * + * @param tile_status + * Global list of tile status + * + * @param d_num_runs_out + * Output pointer for total number of runs identified + * + * @tparam NumRunsIteratorT + * Output iterator type for recording number of items selected + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeRange(int num_tiles, ScanTileStateT& tile_status, NumRunsIteratorT d_num_runs_out) + { + // Blocks are launched in increasing order, so just assign one tile per block + int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index + OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile + OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) + + if (tile_idx < num_tiles - 1) + { + // Not the last tile (full) + ConsumeTile(num_items, num_remaining, tile_idx, tile_offset, tile_status); + } + else if (num_remaining > 0) + { + // The last tile (possibly partially-full) + LengthOffsetPair running_total = ConsumeTile(num_items, num_remaining, tile_idx, tile_offset, tile_status); + + if (threadIdx.x == 0) + { + // Output the total number of items selected + *d_num_runs_out = running_total.key; + + // The inclusive 
prefix contains accumulated length reduction for the last run + if (running_total.key > 0) + { + d_lengths_out[running_total.key - 1] = running_total.value; + } + } + } + } +}; + +} // namespace rle +} // namespace detail + +template +using AgentRle CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public interface " + "will be removed.") = detail::rle:: + AgentRle; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_scan.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_scan.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c7b518b0c65adf74d4fcf2bc637e850366ba4e5a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_scan.cuh @@ -0,0 +1,607 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file cub::AgentScan implements a stateful abstraction of CUDA thread blocks + * for participating in device-wide prefix scan . 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * @brief Parameterizable tuning policy type for AgentScan + * + * @tparam NOMINAL_BLOCK_THREADS_4B + * Threads per thread block + * + * @tparam NOMINAL_ITEMS_PER_THREAD_4B + * Items per thread (per tile of input) + * + * @tparam ComputeT + * Dominant compute type + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading input elements + * + * @tparam _STORE_ALGORITHM + * The BlockStore algorithm to use + * + * @tparam _SCAN_ALGORITHM + * The BlockScan algorithm to use + * + * @tparam DelayConstructorT + * Implementation detail, do not specify directly, requirements on the + * content of this type are subject to breaking change. 
+ */ +template , + typename DelayConstructorT = detail::default_delay_constructor_t> +struct AgentScanPolicy : ScalingType +{ + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + static constexpr BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace scan +{ + +/** + * @brief AgentScan implements a stateful abstraction of CUDA thread blocks for + * participating in device-wide prefix scan. + * @tparam AgentScanPolicyT + * Parameterized AgentScanPolicyT tuning policy type + * + * @tparam InputIteratorT + * Random-access input iterator type + * + * @tparam OutputIteratorT + * Random-access output iterator type + * + * @tparam ScanOpT + * Scan functor type + * + * @tparam InitValueT + * The init_value element for ScanOpT type (cub::NullType for inclusive scan) + * + * @tparam OffsetT + * Signed integer type for global offsets + * + * @tparam AccumT + * The type of intermediate accumulator (according to P2322R6) + */ +template +struct AgentScan +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + // The input value type + using InputT = cub::detail::value_t; + + // Tile status descriptor interface type + using ScanTileStateT = ScanTileState; + + // Input iterator wrapper type (for applying cache modifier) + // Wrap the native input pointer with CacheModifiedInputIterator + // or directly use the supplied input iterator type + using WrappedInputIteratorT = + ::cuda::std::_If::value, 
+ CacheModifiedInputIterator, + InputIteratorT>; + + // Constants + enum + { + // Inclusive scan if no init_value type is provided + HAS_INIT = !std::is_same::value, + IS_INCLUSIVE = ForceInclusive || !HAS_INIT, // We are relying on either initial value not being `NullType` + // or the ForceInclusive tag to be true for inclusive scan + // to get picked up. + BLOCK_THREADS = AgentScanPolicyT::BLOCK_THREADS, + ITEMS_PER_THREAD = AgentScanPolicyT::ITEMS_PER_THREAD, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + }; + + // Parameterized BlockLoad type + using BlockLoadT = + BlockLoad; + + // Parameterized BlockStore type + using BlockStoreT = + BlockStore; + + // Parameterized BlockScan type + using BlockScanT = BlockScan; + + // Callback type for obtaining tile prefix during block scan + using DelayConstructorT = typename AgentScanPolicyT::detail::delay_constructor_t; + using TilePrefixCallbackOpT = TilePrefixCallbackOp; + + // Stateful BlockScan prefix callback type for managing a running total while + // scanning consecutive tiles + using RunningPrefixCallbackOp = BlockScanRunningPrefixOp; + + // Shared memory type for this thread block + union _TempStorage + { + // Smem needed for tile loading + typename BlockLoadT::TempStorage load; + + // Smem needed for tile storing + typename BlockStoreT::TempStorage store; + + struct ScanStorage + { + // Smem needed for cooperative prefix callback + typename TilePrefixCallbackOpT::TempStorage prefix; + + // Smem needed for tile scanning + typename BlockScanT::TempStorage scan; + } scan_storage; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; ///< Reference to temp_storage + WrappedInputIteratorT d_in; ///< Input data + OutputIteratorT d_out; ///< Output data 
+ ScanOpT scan_op; ///< Binary scan operator + InitValueT init_value; ///< The init_value element for ScanOpT + + //--------------------------------------------------------------------- + // Block scan utility methods + //--------------------------------------------------------------------- + + /** + * Exclusive scan specialization (first tile) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + AccumT (&items)[ITEMS_PER_THREAD], + AccumT init_value, + ScanOpT scan_op, + AccumT& block_aggregate, + Int2Type /*is_inclusive*/) + { + BlockScanT(temp_storage.scan_storage.scan).ExclusiveScan(items, items, init_value, scan_op, block_aggregate); + block_aggregate = scan_op(init_value, block_aggregate); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTileInclusive( + AccumT (&items)[ITEMS_PER_THREAD], + AccumT init_value, + ScanOpT scan_op, + AccumT& block_aggregate, + Int2Type /*has_init*/) + { + BlockScanT(temp_storage.scan_storage.scan).InclusiveScan(items, items, init_value, scan_op, block_aggregate); + block_aggregate = scan_op(init_value, block_aggregate); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTileInclusive( + AccumT (&items)[ITEMS_PER_THREAD], + InitValueT /*init_value*/, + ScanOpT scan_op, + AccumT& block_aggregate, + Int2Type /*has_init*/) + + { + BlockScanT(temp_storage.scan_storage.scan).InclusiveScan(items, items, scan_op, block_aggregate); + } + + /** + * Inclusive scan specialization (first tile) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + AccumT (&items)[ITEMS_PER_THREAD], + InitValueT init_value, + ScanOpT scan_op, + AccumT& block_aggregate, + Int2Type /*is_inclusive*/) + { + ScanTileInclusive(items, init_value, scan_op, block_aggregate, Int2Type()); + } + + /** + * Exclusive scan specialization (subsequent tiles) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + AccumT (&items)[ITEMS_PER_THREAD], ScanOpT scan_op, PrefixCallback& prefix_op, Int2Type /*is_inclusive*/) + { + 
BlockScanT(temp_storage.scan_storage.scan).ExclusiveScan(items, items, scan_op, prefix_op); + } + + /** + * Inclusive scan specialization (subsequent tiles) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + AccumT (&items)[ITEMS_PER_THREAD], ScanOpT scan_op, PrefixCallback& prefix_op, Int2Type /*is_inclusive*/) + { + BlockScanT(temp_storage.scan_storage.scan).InclusiveScan(items, items, scan_op, prefix_op); + } + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + /** + * @param temp_storage + * Reference to temp_storage + * + * @param d_in + * Input data + * + * @param d_out + * Output data + * + * @param scan_op + * Binary scan operator + * + * @param init_value + * Initial value to seed the exclusive scan + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentScan( + TempStorage& temp_storage, InputIteratorT d_in, OutputIteratorT d_out, ScanOpT scan_op, InitValueT init_value) + : temp_storage(temp_storage.Alias()) + , d_in(d_in) + , d_out(d_out) + , scan_op(scan_op) + , init_value(init_value) + {} + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * Process a tile of input (dynamic chained scan) + * @tparam IS_LAST_TILE + * Whether the current tile is the last tile + * + * @param num_remaining + * Number of global input items remaining (including this tile) + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeTile(OffsetT num_remaining, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_state) + { + // Load items + AccumT items[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Fill last element with the first 
element because collectives are + // not suffix guarded. + BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, num_remaining, *(d_in + tile_offset)); + } + else + { + BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items); + } + + __syncthreads(); + + // Perform tile scan + if (tile_idx == 0) + { + // Scan first tile + AccumT block_aggregate; + ScanTile(items, init_value, scan_op, block_aggregate, Int2Type()); + + if ((!IS_LAST_TILE) && (threadIdx.x == 0)) + { + tile_state.SetInclusive(0, block_aggregate); + } + } + else + { + // Scan non-first tile + TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.scan_storage.prefix, scan_op, tile_idx); + ScanTile(items, scan_op, prefix_op, Int2Type()); + } + + __syncthreads(); + + // Store items + if (IS_LAST_TILE) + { + BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items, num_remaining); + } + else + { + BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items); + } + } + + /** + * @brief Scan tiles of items as part of a dynamic chained scan + * + * @param num_items + * Total number of input items + * + * @param tile_state + * Global tile state descriptor + * + * @param start_tile + * The starting tile for the current grid + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeRange(OffsetT num_items, ScanTileStateT& tile_state, int start_tile) + { + // Blocks are launched in increasing order, so just assign one tile per + // block + + // Current tile index + int tile_idx = start_tile + blockIdx.x; + + // Global offset for the current tile + OffsetT tile_offset = OffsetT(TILE_ITEMS) * tile_idx; + + // Remaining items (including this tile) + OffsetT num_remaining = num_items - tile_offset; + + if (num_remaining > TILE_ITEMS) + { + // Not last tile + ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); + } + else if (num_remaining > 0) + { + // Last tile + ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); + } + } + + 
//--------------------------------------------------------------------------- + // Scan an sequence of consecutive tiles (independent of other thread blocks) + //--------------------------------------------------------------------------- + + /** + * @brief Process a tile of input + * + * @param tile_offset + * Tile offset + * + * @param prefix_op + * Running prefix operator + * + * @param valid_items + * Number of valid items in the tile + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeTile(OffsetT tile_offset, RunningPrefixCallbackOp& prefix_op, int valid_items = TILE_ITEMS) + { + // Load items + AccumT items[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Fill last element with the first element because collectives are + // not suffix guarded. + BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items, valid_items, *(d_in + tile_offset)); + } + else + { + BlockLoadT(temp_storage.load).Load(d_in + tile_offset, items); + } + + __syncthreads(); + + // Block scan + if (IS_FIRST_TILE) + { + AccumT block_aggregate; + ScanTile(items, init_value, scan_op, block_aggregate, Int2Type()); + prefix_op.running_total = block_aggregate; + } + else + { + ScanTile(items, scan_op, prefix_op, Int2Type()); + } + + __syncthreads(); + + // Store items + if (IS_LAST_TILE) + { + BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items, valid_items); + } + else + { + BlockStoreT(temp_storage.store).Store(d_out + tile_offset, items); + } + } + + /** + * @brief Scan a consecutive share of input tiles + * + * @param[in] range_offset + * Threadblock begin offset (inclusive) + * + * @param[in] range_end + * Threadblock end offset (exclusive) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeRange(OffsetT range_offset, OffsetT range_end) + { + BlockScanRunningPrefixOp prefix_op(scan_op); + + if (range_offset + TILE_ITEMS <= range_end) + { + // Consume first tile of input (full) + ConsumeTile(range_offset, prefix_op); + range_offset += TILE_ITEMS; + + // Consume 
subsequent full tiles of input + while (range_offset + TILE_ITEMS <= range_end) + { + ConsumeTile(range_offset, prefix_op); + range_offset += TILE_ITEMS; + } + + // Consume a partially-full tile + if (range_offset < range_end) + { + int valid_items = range_end - range_offset; + ConsumeTile(range_offset, prefix_op, valid_items); + } + } + else + { + // Consume the first tile of input (partially-full) + int valid_items = range_end - range_offset; + ConsumeTile(range_offset, prefix_op, valid_items); + } + } + + /** + * @brief Scan a consecutive share of input tiles, seeded with the + * specified prefix value + * @param[in] range_offset + * Threadblock begin offset (inclusive) + * + * @param[in] range_end + * Threadblock end offset (exclusive) + * + * @param[in] prefix + * The prefix to apply to the scan segment + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeRange(OffsetT range_offset, OffsetT range_end, AccumT prefix) + { + BlockScanRunningPrefixOp prefix_op(prefix, scan_op); + + // Consume full tiles of input + while (range_offset + TILE_ITEMS <= range_end) + { + ConsumeTile(range_offset, prefix_op); + range_offset += TILE_ITEMS; + } + + // Consume a partially-full tile + if (range_offset < range_end) + { + int valid_items = range_end - range_offset; + ConsumeTile(range_offset, prefix_op, valid_items); + } + } +}; + +} // namespace scan +} // namespace detail + +template +using AgentScan CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public interface " + "will be removed.") = detail::scan:: + AgentScan; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_scan_by_key.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_scan_by_key.cuh new file mode 100644 index 0000000000000000000000000000000000000000..722a44ac0746fd9bffb7a20a13c548039c15ef7e --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_scan_by_key.cuh @@ -0,0 +1,493 @@ +/****************************************************************************** + * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file AgentScanByKey implements a stateful abstraction of CUDA thread blocks + * for participating in device-wide prefix scan by key. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * Parameterizable tuning policy type for AgentScanByKey + * + * @tparam DelayConstructorT + * Implementation detail, do not specify directly, requirements on the + * content of this type are subject to breaking change. + */ +template > +struct AgentScanByKeyPolicy +{ + static constexpr int BLOCK_THREADS = _BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = _ITEMS_PER_THREAD; + + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + static constexpr BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace scan_by_key +{ + +/** + * @brief AgentScanByKey implements a stateful abstraction of CUDA thread + * blocks for participating in device-wide prefix scan by key. 
+ * + * @tparam AgentScanByKeyPolicyT + * Parameterized AgentScanPolicyT tuning policy type + * + * @tparam KeysInputIteratorT + * Random-access input iterator type + * + * @tparam ValuesInputIteratorT + * Random-access input iterator type + * + * @tparam ValuesOutputIteratorT + * Random-access output iterator type + * + * @tparam EqualityOp + * Equality functor type + * + * @tparam ScanOpT + * Scan functor type + * + * @tparam InitValueT + * The init_value element for ScanOpT type (cub::NullType for inclusive scan) + * + * @tparam OffsetT + * Signed integer type for global offsets + * + * @tparam AccumT + * The type of intermediate accumulator (according to P2322R6) + */ +template +struct AgentScanByKey +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + using KeyT = value_t; + using InputT = value_t; + using FlagValuePairT = KeyValuePair; + using ReduceBySegmentOpT = ScanBySegmentOp; + + using ScanTileStateT = ReduceByKeyScanTileState; + + // Constants + // Inclusive scan if no init_value type is provided + static constexpr int IS_INCLUSIVE = std::is_same::value; + static constexpr int BLOCK_THREADS = AgentScanByKeyPolicyT::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = AgentScanByKeyPolicyT::ITEMS_PER_THREAD; + static constexpr int ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD; + + using WrappedKeysInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + KeysInputIteratorT>; + + using WrappedValuesInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + ValuesInputIteratorT>; + + using BlockLoadKeysT = BlockLoad; + + using BlockLoadValuesT = BlockLoad; + + using BlockStoreValuesT = BlockStore; + + using BlockDiscontinuityKeysT = BlockDiscontinuity; + + using DelayConstructorT = typename AgentScanByKeyPolicyT::detail::delay_constructor_t; + using TilePrefixCallbackT = + 
TilePrefixCallbackOp; + + using BlockScanT = BlockScan; + + union TempStorage_ + { + struct ScanStorage + { + typename BlockScanT::TempStorage scan; + typename TilePrefixCallbackT::TempStorage prefix; + typename BlockDiscontinuityKeysT::TempStorage discontinuity; + } scan_storage; + + typename BlockLoadKeysT::TempStorage load_keys; + typename BlockLoadValuesT::TempStorage load_values; + typename BlockStoreValuesT::TempStorage store_values; + }; + + struct TempStorage : cub::Uninitialized + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + TempStorage_& storage; + WrappedKeysInputIteratorT d_keys_in; + KeyT* d_keys_prev_in; + WrappedValuesInputIteratorT d_values_in; + ValuesOutputIteratorT d_values_out; + InequalityWrapper inequality_op; + ScanOpT scan_op; + ReduceBySegmentOpT pair_scan_op; + InitValueT init_value; + + //--------------------------------------------------------------------- + // Block scan utility methods (first tile) + //--------------------------------------------------------------------- + + // Exclusive scan specialization + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + FlagValuePairT (&scan_items)[ITEMS_PER_THREAD], FlagValuePairT& tile_aggregate, Int2Type /* is_inclusive */) + { + BlockScanT(storage.scan_storage.scan).ExclusiveScan(scan_items, scan_items, pair_scan_op, tile_aggregate); + } + + // Inclusive scan specialization + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + FlagValuePairT (&scan_items)[ITEMS_PER_THREAD], FlagValuePairT& tile_aggregate, Int2Type /* is_inclusive */) + { + BlockScanT(storage.scan_storage.scan).InclusiveScan(scan_items, scan_items, pair_scan_op, tile_aggregate); + } + + //--------------------------------------------------------------------- + // Block scan utility methods (subsequent tiles) + //--------------------------------------------------------------------- + + // Exclusive 
scan specialization (with prefix from predecessors) + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + FlagValuePairT (&scan_items)[ITEMS_PER_THREAD], + FlagValuePairT& tile_aggregate, + TilePrefixCallbackT& prefix_op, + Int2Type /* is_inclusive */) + { + BlockScanT(storage.scan_storage.scan).ExclusiveScan(scan_items, scan_items, pair_scan_op, prefix_op); + tile_aggregate = prefix_op.GetBlockAggregate(); + } + + // Inclusive scan specialization (with prefix from predecessors) + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanTile( + FlagValuePairT (&scan_items)[ITEMS_PER_THREAD], + FlagValuePairT& tile_aggregate, + TilePrefixCallbackT& prefix_op, + Int2Type /* is_inclusive */) + { + BlockScanT(storage.scan_storage.scan).InclusiveScan(scan_items, scan_items, pair_scan_op, prefix_op); + tile_aggregate = prefix_op.GetBlockAggregate(); + } + + //--------------------------------------------------------------------- + // Zip utility methods + //--------------------------------------------------------------------- + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ZipValuesAndFlags( + OffsetT num_remaining, + AccumT (&values)[ITEMS_PER_THREAD], + OffsetT (&segment_flags)[ITEMS_PER_THREAD], + FlagValuePairT (&scan_items)[ITEMS_PER_THREAD]) + { +// Zip values and segment_flags +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Set segment_flags for first out-of-bounds item, zero for others + if (IS_LAST_TILE && OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM == num_remaining) + { + segment_flags[ITEM] = 1; + } + + scan_items[ITEM].value = values[ITEM]; + scan_items[ITEM].key = segment_flags[ITEM]; + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + UnzipValues(AccumT (&values)[ITEMS_PER_THREAD], FlagValuePairT (&scan_items)[ITEMS_PER_THREAD]) + { +// Unzip values and segment_flags +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + values[ITEM] = scan_items[ITEM].value; + } + } + + template ::value, typename std::enable_if::type = 0> 
+ _CCCL_DEVICE _CCCL_FORCEINLINE void + AddInitToScan(AccumT (&items)[ITEMS_PER_THREAD], OffsetT (&flags)[ITEMS_PER_THREAD]) + { +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + items[ITEM] = flags[ITEM] ? init_value : scan_op(init_value, items[ITEM]); + } + } + + template ::value, typename std::enable_if::type = 0> + _CCCL_DEVICE _CCCL_FORCEINLINE void + AddInitToScan(AccumT (& /*items*/)[ITEMS_PER_THREAD], OffsetT (& /*flags*/)[ITEMS_PER_THREAD]) + {} + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + // Process a tile of input (dynamic chained scan) + // + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeTile(OffsetT /*num_items*/, OffsetT num_remaining, int tile_idx, OffsetT tile_base, ScanTileStateT& tile_state) + { + // Load items + KeyT keys[ITEMS_PER_THREAD]; + AccumT values[ITEMS_PER_THREAD]; + OffsetT segment_flags[ITEMS_PER_THREAD]; + FlagValuePairT scan_items[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Fill last element with the first element + // because collectives are not suffix guarded + BlockLoadKeysT(storage.load_keys).Load(d_keys_in + tile_base, keys, num_remaining, *(d_keys_in + tile_base)); + } + else + { + BlockLoadKeysT(storage.load_keys).Load(d_keys_in + tile_base, keys); + } + + __syncthreads(); + + if (IS_LAST_TILE) + { + // Fill last element with the first element + // because collectives are not suffix guarded + BlockLoadValuesT(storage.load_values) + .Load(d_values_in + tile_base, values, num_remaining, *(d_values_in + tile_base)); + } + else + { + BlockLoadValuesT(storage.load_values).Load(d_values_in + tile_base, values); + } + + __syncthreads(); + + // first tile + if (tile_idx == 0) + { + BlockDiscontinuityKeysT(storage.scan_storage.discontinuity).FlagHeads(segment_flags, keys, inequality_op); + + // Zip values and 
segment_flags + ZipValuesAndFlags(num_remaining, values, segment_flags, scan_items); + + // Exclusive scan of values and segment_flags + FlagValuePairT tile_aggregate; + ScanTile(scan_items, tile_aggregate, Int2Type()); + + if (threadIdx.x == 0) + { + if (!IS_LAST_TILE) + { + tile_state.SetInclusive(0, tile_aggregate); + } + + scan_items[0].key = 0; + } + } + else + { + KeyT tile_pred_key = (threadIdx.x == 0) ? d_keys_prev_in[tile_idx] : KeyT(); + + BlockDiscontinuityKeysT(storage.scan_storage.discontinuity) + .FlagHeads(segment_flags, keys, inequality_op, tile_pred_key); + + // Zip values and segment_flags + ZipValuesAndFlags(num_remaining, values, segment_flags, scan_items); + + FlagValuePairT tile_aggregate; + TilePrefixCallbackT prefix_op(tile_state, storage.scan_storage.prefix, pair_scan_op, tile_idx); + ScanTile(scan_items, tile_aggregate, prefix_op, Int2Type()); + } + + __syncthreads(); + + UnzipValues(values, scan_items); + + AddInitToScan(values, segment_flags); + + // Store items + if (IS_LAST_TILE) + { + BlockStoreValuesT(storage.store_values).Store(d_values_out + tile_base, values, num_remaining); + } + else + { + BlockStoreValuesT(storage.store_values).Store(d_values_out + tile_base, values); + } + } + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + // Dequeue and scan tiles of items as part of a dynamic chained scan + // with Init functor + _CCCL_DEVICE _CCCL_FORCEINLINE AgentScanByKey( + TempStorage& storage, + KeysInputIteratorT d_keys_in, + KeyT* d_keys_prev_in, + ValuesInputIteratorT d_values_in, + ValuesOutputIteratorT d_values_out, + EqualityOp equality_op, + ScanOpT scan_op, + InitValueT init_value) + : storage(storage.Alias()) + , d_keys_in(d_keys_in) + , d_keys_prev_in(d_keys_prev_in) + , d_values_in(d_values_in) + , d_values_out(d_values_out) + , inequality_op(equality_op) + , scan_op(scan_op) + , pair_scan_op(scan_op) + , 
init_value(init_value) + {} + + /** + * Scan tiles of items as part of a dynamic chained scan + * + * @param num_items + * Total number of input items + * + * @param tile_state + * Global tile state descriptor + * + * start_tile + * The starting tile for the current grid + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeRange(OffsetT num_items, ScanTileStateT& tile_state, int start_tile) + { + int tile_idx = blockIdx.x; + OffsetT tile_base = OffsetT(ITEMS_PER_TILE) * tile_idx; + OffsetT num_remaining = num_items - tile_base; + + if (num_remaining > ITEMS_PER_TILE) + { + // Not the last tile (full) + ConsumeTile(num_items, num_remaining, tile_idx, tile_base, tile_state); + } + else if (num_remaining > 0) + { + // The last tile (possibly partially-full) + ConsumeTile(num_items, num_remaining, tile_idx, tile_base, tile_state); + } + } +}; + +} // namespace scan_by_key +} // namespace detail + +template +using AgentScanByKey CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::scan_by_key::AgentScanByKey< + AgentScanByKeyPolicyT, + KeysInputIteratorT, + ValuesInputIteratorT, + ValuesOutputIteratorT, + EqualityOp, + ScanOpT, + InitValueT, + OffsetT, + AccumT>; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_segment_fixup.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_segment_fixup.cuh new file mode 100644 index 0000000000000000000000000000000000000000..515b34d7c726a555abd1ccdd1d08b44d572369b2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_segment_fixup.cuh @@ -0,0 +1,495 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * cub::AgentSegmentFixup implements a stateful abstraction of CUDA thread blocks for participating in device-wide + * reduce-value-by-key. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * @brief Parameterizable tuning policy type for AgentSegmentFixup + * + * @tparam _BLOCK_THREADS + * Threads per thread block + * + * @tparam _ITEMS_PER_THREAD + * Items per thread (per tile of input) + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading input elements + * + * @tparam _SCAN_ALGORITHM + * The BlockScan algorithm to use + */ +template +struct CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") AgentSegmentFixupPolicy +{ + enum + { + /// Threads per thread block + BLOCK_THREADS = _BLOCK_THREADS, + + /// Items per thread (per tile of input) + ITEMS_PER_THREAD = _ITEMS_PER_THREAD, + }; + + /// The BlockLoad algorithm to use + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + + /// Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + + /// The BlockScan algorithm to use + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace segment_fixup +{ + +/** + * @brief AgentSegmentFixup implements 
a stateful abstraction of CUDA thread blocks for + * participating in device-wide reduce-value-by-key + * + * @tparam AgentSegmentFixupPolicyT + * Parameterized AgentSegmentFixupPolicy tuning policy type + * + * @tparam PairsInputIteratorT + * Random-access input iterator type for keys + * + * @tparam AggregatesOutputIteratorT + * Random-access output iterator type for values + * + * @tparam EqualityOpT + * KeyT equality operator type + * + * @tparam ReductionOpT + * ValueT reduction operator type + * + * @tparam OffsetT + * Signed integer type for global offsets + */ +template +struct AgentSegmentFixup +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + // Data type of key-value input iterator + using KeyValuePairT = value_t; + + // Value type + using ValueT = typename KeyValuePairT::Value; + + // Tile status descriptor interface type + using ScanTileStateT = ReduceByKeyScanTileState; + + // Constants + enum + { + BLOCK_THREADS = AgentSegmentFixupPolicyT::BLOCK_THREADS, + ITEMS_PER_THREAD = AgentSegmentFixupPolicyT::ITEMS_PER_THREAD, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + + // Whether or not do fixup using RLE + global atomics + USE_ATOMIC_FIXUP = + (std::is_same::value || std::is_same::value + || std::is_same::value || std::is_same::value), + + // Whether or not the scan operation has a zero-valued identity value + // (true if we're performing addition on a primitive type) + HAS_IDENTITY_ZERO = (std::is_same>::value) && (Traits::PRIMITIVE), + }; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) for keys + // Wrap the native input pointer with CacheModifiedValuesInputIterator + // or directly use the supplied input iterator type + using WrappedPairsInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + PairsInputIteratorT>; + + // Cache-modified Input iterator wrapper type 
(for applying cache modifier) for fixup values + // Wrap the native input pointer with CacheModifiedValuesInputIterator + // or directly use the supplied input iterator type + using WrappedFixupInputIteratorT = + ::cuda::std::_If::value, + CacheModifiedInputIterator, + AggregatesOutputIteratorT>; + + // Reduce-value-by-segment scan operator + using ReduceBySegmentOpT = ReduceByKeyOp<::cuda::std::plus<>>; + + // Parameterized BlockLoad type for pairs + using BlockLoadPairs = + BlockLoad; + + // Parameterized BlockScan type + using BlockScanT = BlockScan; + + // Callback type for obtaining tile prefix during block scan + using TilePrefixCallbackOpT = TilePrefixCallbackOp; + + // Shared memory type for this thread block + union _TempStorage + { + struct ScanStorage + { + // Smem needed for tile scanning + typename BlockScanT::TempStorage scan; + + // Smem needed for cooperative prefix callback + typename TilePrefixCallbackOpT::TempStorage prefix; + } scan_storage; + + // Smem needed for loading keys + typename BlockLoadPairs::TempStorage load_pairs; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; ///< Reference to temp_storage + WrappedPairsInputIteratorT d_pairs_in; ///< Input keys + AggregatesOutputIteratorT d_aggregates_out; ///< Output value aggregates + WrappedFixupInputIteratorT d_fixup_in; ///< Fixup input values + InequalityWrapper inequality_op; ///< KeyT inequality operator + ReductionOpT reduction_op; ///< Reduction operator + ReduceBySegmentOpT scan_op; ///< Reduce-by-segment scan operator + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + /** + * @param temp_storage + * 
Reference to temp_storage + * + * @param d_pairs_in + * Input keys + * + * @param d_aggregates_out + * Output value aggregates + * + * @param equality_op + * KeyT equality operator + * + * @param reduction_op + * ValueT reduction operator + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentSegmentFixup( + TempStorage& temp_storage, + PairsInputIteratorT d_pairs_in, + AggregatesOutputIteratorT d_aggregates_out, + EqualityOpT equality_op, + ReductionOpT reduction_op) + : temp_storage(temp_storage.Alias()) + , d_pairs_in(d_pairs_in) + , d_aggregates_out(d_aggregates_out) + , d_fixup_in(d_aggregates_out) + , inequality_op(equality_op) + , reduction_op(reduction_op) + , scan_op(reduction_op) + {} + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * @brief Process input tile. Specialized for atomic-fixup + * + * @param num_remaining + * Number of global input items remaining (including this tile) + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + * + * @param use_atomic_fixup + * Marker whether to use atomicAdd (instead of reduce-by-key) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile( + OffsetT num_remaining, + int tile_idx, + OffsetT tile_offset, + ScanTileStateT& tile_state, + Int2Type use_atomic_fixup) + { + KeyValuePairT pairs[ITEMS_PER_THREAD]; + + // Load pairs + KeyValuePairT oob_pair; + oob_pair.key = -1; + + if (IS_LAST_TILE) + { + BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair); + } + else + { + BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs); + } + +// RLE +#pragma unroll + for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + ValueT* d_scatter = d_aggregates_out + pairs[ITEM - 1].key; + if 
(pairs[ITEM].key != pairs[ITEM - 1].key) + { + atomicAdd(d_scatter, pairs[ITEM - 1].value); + } + else + { + pairs[ITEM].value = reduction_op(pairs[ITEM - 1].value, pairs[ITEM].value); + } + } + + // Flush last item if valid + ValueT* d_scatter = d_aggregates_out + pairs[ITEMS_PER_THREAD - 1].key; + if ((!IS_LAST_TILE) || (pairs[ITEMS_PER_THREAD - 1].key >= 0)) + { + atomicAdd(d_scatter, pairs[ITEMS_PER_THREAD - 1].value); + } + } + + /** + * @brief Process input tile. Specialized for reduce-by-key fixup + * + * @param num_remaining + * Number of global input items remaining (including this tile) + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + * + * @param use_atomic_fixup + * Marker whether to use atomicAdd (instead of reduce-by-key) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeTile( + OffsetT num_remaining, + int tile_idx, + OffsetT tile_offset, + ScanTileStateT& tile_state, + Int2Type use_atomic_fixup) + { + KeyValuePairT pairs[ITEMS_PER_THREAD]; + KeyValuePairT scatter_pairs[ITEMS_PER_THREAD]; + + // Load pairs + KeyValuePairT oob_pair; + oob_pair.key = -1; + + if (IS_LAST_TILE) + { + BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs, num_remaining, oob_pair); + } + else + { + BlockLoadPairs(temp_storage.load_pairs).Load(d_pairs_in + tile_offset, pairs); + } + + __syncthreads(); + + KeyValuePairT tile_aggregate; + if (tile_idx == 0) + { + // Exclusive scan of values and segment_flags + BlockScanT(temp_storage.scan_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, tile_aggregate); + + // Update tile status if this is not the last tile + if (threadIdx.x == 0) + { + // Set first segment id to not trigger a flush (invalid from exclusive scan) + scatter_pairs[0].key = pairs[0].key; + + if (!IS_LAST_TILE) + { + tile_state.SetInclusive(0, tile_aggregate); + } + } + } + else + { + // Exclusive scan of values and 
segment_flags + TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.scan_storage.prefix, scan_op, tile_idx); + BlockScanT(temp_storage.scan_storage.scan).ExclusiveScan(pairs, scatter_pairs, scan_op, prefix_op); + tile_aggregate = prefix_op.GetBlockAggregate(); + } + +// Scatter updated values +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (scatter_pairs[ITEM].key != pairs[ITEM].key) + { + // Update the value at the key location + ValueT value = d_fixup_in[scatter_pairs[ITEM].key]; + value = reduction_op(value, scatter_pairs[ITEM].value); + + d_aggregates_out[scatter_pairs[ITEM].key] = value; + } + } + + // Finalize the last item + if (IS_LAST_TILE) + { + // Last thread will output final count and last item, if necessary + if (threadIdx.x == BLOCK_THREADS - 1) + { + // If the last tile is a whole tile, the inclusive prefix contains accumulated value reduction for the last + // segment + if (num_remaining == TILE_ITEMS) + { + // Update the value at the key location + OffsetT last_key = pairs[ITEMS_PER_THREAD - 1].key; + d_aggregates_out[last_key] = reduction_op(tile_aggregate.value, d_fixup_in[last_key]); + } + } + } + } + + /** + * @brief Scan tiles of items as part of a dynamic chained scan + * + * @param num_items + * Total number of input items + * + * @param num_tiles + * Total number of input tiles + * + * @param tile_state + * Global tile state descriptor + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeRange(OffsetT num_items, int num_tiles, ScanTileStateT& tile_state) + { + // Blocks are launched in increasing order, so just assign one tile per block + int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index + OffsetT tile_offset = tile_idx * TILE_ITEMS; // Global offset for the current tile + OffsetT num_remaining = num_items - tile_offset; // Remaining items (including this tile) + + if (num_remaining > TILE_ITEMS) + { + // Not the last tile (full) + ConsumeTile(num_remaining, tile_idx, tile_offset, 
tile_state, Int2Type()); + } + else if (num_remaining > 0) + { + // The last tile (possibly partially-full) + ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state, Int2Type()); + } + } +}; + +} // namespace segment_fixup +} // namespace detail + +template +using AgentSegmentFixup CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::segment_fixup::AgentSegmentFixup< + AgentSegmentFixupPolicyT, + PairsInputIteratorT, + AggregatesOutputIteratorT, + EqualityOpT, + ReductionOpT, + OffsetT>; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_segmented_radix_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_segmented_radix_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e8921aaf045f0b6fa7cb0b465041aea53b2530e0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_segmented_radix_sort.cuh @@ -0,0 +1,297 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ +namespace radix_sort +{ + +/** + * This agent will be implementing the `DeviceSegmentedRadixSort` when the + * https://github.com/NVIDIA/cub/issues/383 is addressed. 
+ * + * @tparam IS_DESCENDING + * Whether or not the sorted-order is high-to-low + * + * @tparam SegmentedPolicyT + * Chained tuning policy + * + * @tparam KeyT + * Key type + * + * @tparam ValueT + * Value type + * + * @tparam OffsetT + * Signed integer type for global offsets + */ +template +struct AgentSegmentedRadixSort +{ + OffsetT num_items; + + static constexpr int ITEMS_PER_THREAD = SegmentedPolicyT::ITEMS_PER_THREAD; + static constexpr int BLOCK_THREADS = SegmentedPolicyT::BLOCK_THREADS; + static constexpr int RADIX_BITS = SegmentedPolicyT::RADIX_BITS; + static constexpr int RADIX_DIGITS = 1 << RADIX_BITS; + static constexpr int KEYS_ONLY = std::is_same::value; + + using traits = radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + + // Huge segment handlers + using BlockUpsweepT = AgentRadixSortUpsweep; + using DigitScanT = BlockScan; + using BlockDownsweepT = AgentRadixSortDownsweep; + + /// Number of bin-starting offsets tracked per thread + static constexpr int BINS_TRACKED_PER_THREAD = BlockDownsweepT::BINS_TRACKED_PER_THREAD; + + // Small segment handlers + using BlockRadixSortT = + BlockRadixSort; + + using BlockKeyLoadT = BlockLoad; + + using BlockValueLoadT = BlockLoad; + + union _TempStorage + { + // Huge segment handlers + typename BlockUpsweepT::TempStorage upsweep; + typename BlockDownsweepT::TempStorage downsweep; + + struct UnboundBlockSort + { + OffsetT reverse_counts_in[RADIX_DIGITS]; + OffsetT reverse_counts_out[RADIX_DIGITS]; + typename DigitScanT::TempStorage scan; + } unbound_sort; + + // Small segment handlers + typename BlockKeyLoadT::TempStorage keys_load; + typename BlockValueLoadT::TempStorage values_load; + typename BlockRadixSortT::TempStorage sort; + }; + + using TempStorage = Uninitialized<_TempStorage>; + _TempStorage& temp_storage; + + DecomposerT decomposer; + + _CCCL_DEVICE _CCCL_FORCEINLINE + AgentSegmentedRadixSort(OffsetT num_items, TempStorage& temp_storage, DecomposerT decomposer = {}) + : 
num_items(num_items) + , temp_storage(temp_storage.Alias()) + , decomposer(decomposer) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessSinglePass( + int begin_bit, int end_bit, const KeyT* d_keys_in, const ValueT* d_values_in, KeyT* d_keys_out, ValueT* d_values_out) + { + KeyT thread_keys[ITEMS_PER_THREAD]; + ValueT thread_values[ITEMS_PER_THREAD]; + + // For FP64 the difference is: + // Lowest() -> -1.79769e+308 = 00...00b -> TwiddleIn -> -0 = 10...00b + // LOWEST -> -nan = 11...11b -> TwiddleIn -> 0 = 00...00b + + bit_ordered_type default_key_bits = + IS_DESCENDING ? traits::min_raw_binary_key(decomposer) : traits::max_raw_binary_key(decomposer); + KeyT oob_default = reinterpret_cast(default_key_bits); + + if (!KEYS_ONLY) + { + BlockValueLoadT(temp_storage.values_load).Load(d_values_in, thread_values, num_items); + + __syncthreads(); + } + + { + BlockKeyLoadT(temp_storage.keys_load).Load(d_keys_in, thread_keys, num_items, oob_default); + + __syncthreads(); + } + + BlockRadixSortT(temp_storage.sort) + .SortBlockedToStriped( + thread_keys, thread_values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + + cub::StoreDirectStriped(threadIdx.x, d_keys_out, thread_keys, num_items); + + if (!KEYS_ONLY) + { + cub::StoreDirectStriped(threadIdx.x, d_values_out, thread_values, num_items); + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ProcessIterative( + int current_bit, + int pass_bits, + const KeyT* d_keys_in, + const ValueT* d_values_in, + KeyT* d_keys_out, + ValueT* d_values_out) + { + // Upsweep + BlockUpsweepT upsweep(temp_storage.upsweep, d_keys_in, current_bit, pass_bits, decomposer); + upsweep.ProcessRegion(OffsetT{}, num_items); + + __syncthreads(); + + // The count of each digit value in this pass (valid in the first RADIX_DIGITS threads) + OffsetT bin_count[BINS_TRACKED_PER_THREAD]; + upsweep.ExtractCounts(bin_count); + + __syncthreads(); + + if (IS_DESCENDING) + { +// Reverse bin counts +#pragma unroll + for (int track = 0; track < 
BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + temp_storage.unbound_sort.reverse_counts_in[bin_idx] = bin_count[track]; + } + } + + __syncthreads(); + +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + bin_count[track] = temp_storage.unbound_sort.reverse_counts_in[RADIX_DIGITS - bin_idx - 1]; + } + } + } + + // Scan + // The global scatter base offset for each digit value in this pass + // (valid in the first RADIX_DIGITS threads) + OffsetT bin_offset[BINS_TRACKED_PER_THREAD]; + DigitScanT(temp_storage.unbound_sort.scan).ExclusiveSum(bin_count, bin_offset); + + if (IS_DESCENDING) + { +// Reverse bin offsets +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + temp_storage.unbound_sort.reverse_counts_out[threadIdx.x] = bin_offset[track]; + } + } + + __syncthreads(); + +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + bin_offset[track] = temp_storage.unbound_sort.reverse_counts_out[RADIX_DIGITS - bin_idx - 1]; + } + } + } + + __syncthreads(); + + // Downsweep + BlockDownsweepT downsweep( + temp_storage.downsweep, + bin_offset, + num_items, + d_keys_in, + d_keys_out, + d_values_in, + d_values_out, + current_bit, + pass_bits, + decomposer); + downsweep.ProcessRegion(OffsetT{}, num_items); + } +}; + +} // namespace radix_sort +} // namespace detail + +template +using AgentSegmentedRadixSort CCCL_DEPRECATED_BECAUSE( + "This class is 
considered an implementation detail and the public " + "interface will be removed.") = + detail::radix_sort::AgentSegmentedRadixSort; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_select_if.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_select_if.cuh new file mode 100644 index 0000000000000000000000000000000000000000..37e7b838adf5a6c60f93054a2a4ba79b0837023d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_select_if.cuh @@ -0,0 +1,1051 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * cub::AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in device-wide select. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * Parameterizable tuning policy type for AgentSelectIf + * + * @tparam _BLOCK_THREADS + * Threads per thread block + * + * @tparam _ITEMS_PER_THREAD + * Items per thread (per tile of input) + * + * @tparam _LOAD_ALGORITHM + * The BlockLoad algorithm to use + * + * @tparam _LOAD_MODIFIER + * Cache load modifier for reading input elements + * + * @tparam _SCAN_ALGORITHM + * The BlockScan algorithm to use + * + * @tparam DelayConstructorT + * Implementation detail, do not specify directly, requirements on the + * content of this type are subject to breaking change. 
+ */ +template > +struct AgentSelectIfPolicy +{ + enum + { + /// Threads per thread block + BLOCK_THREADS = _BLOCK_THREADS, + + /// Items per thread (per tile of input) + ITEMS_PER_THREAD = _ITEMS_PER_THREAD, + }; + + /// The BlockLoad algorithm to use + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + + /// Cache load modifier for reading input elements + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + + /// The BlockScan algorithm to use + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace select +{ + +template +struct partition_distinct_output_t +{ + using selected_iterator_t = SelectedOutputItT; + using rejected_iterator_t = RejectedOutputItT; + + selected_iterator_t selected_it; + rejected_iterator_t rejected_it; +}; + +/** + * @brief AgentSelectIf implements a stateful abstraction of CUDA thread blocks for participating in + * device-wide selection + * + * Performs functor-based selection if SelectOpT functor type != NullType + * Otherwise performs flag-based selection if FlagsInputIterator's value type != NullType + * Otherwise performs discontinuity selection (keep unique) + * + * @tparam AgentSelectIfPolicyT + * Parameterized AgentSelectIfPolicy tuning policy type + * + * @tparam InputIteratorT + * Random-access input iterator type for selection items + * + * @tparam FlagsInputIteratorT + * Random-access input iterator type for selections (NullType* if a selection functor or + * discontinuity flagging is to be used for selection) + * + * @tparam OutputIteratorWrapperT + * Either a random-access iterator or an instance of the `partition_distinct_output_t` template. 
+ * + * @tparam SelectOpT + * Selection operator type (NullType if selections or discontinuity flagging is to be used for + * selection) + * + * @tparam EqualityOpT + * Equality operator type (NullType if selection functor or selections is to be used for + * selection) + * + * @tparam OffsetT + * Signed integer type for offsets within a partition + * + * @tparam StreamingContextT + * Type providing the context information for the current partition, with the following member functions: + * input_offset() -> base offset for the input (and flags) iterator + * is_first_partition() -> [Select::Unique-only] whether this is the first partition + * num_previously_selected() -> base offset for the output iterator for selected items + * num_previously_rejected() -> base offset for the output iterator for rejected items (partition only) + * num_total_items() -> total number of items across all partitions (partition only) + * update_num_selected(d_num_sel_out, num_selected) -> invoked by last CTA with number of selected + * + * @tparam KeepRejects + * Whether or not we push rejected items to the back of the output + */ +template +struct AgentSelectIf +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + using ScanTileStateT = ScanTileState; + + // Indicates whether the BlockLoad algorithm uses shared memory to load or exchange the data + static constexpr bool loads_via_smem = + !(AgentSelectIfPolicyT::LOAD_ALGORITHM == BLOCK_LOAD_DIRECT + || AgentSelectIfPolicyT::LOAD_ALGORITHM == BLOCK_LOAD_STRIPED + || AgentSelectIfPolicyT::LOAD_ALGORITHM == BLOCK_LOAD_VECTORIZE); + + // If this may be an *in-place* stream compaction, we need to ensure that all of a tile's items have been loaded + // before signalling a subsequent thread block's partial or inclusive state, hence we need a store release when + // updating a tile state. 
Similarly, we need to make sure that the load of previous tile states precede writing of + // the stream-compacted items and, hence, we need a load acquire when reading those tile states. + static constexpr MemoryOrder memory_order = + ((!KeepRejects) && MayAlias && (!loads_via_smem)) ? MemoryOrder::acquire_release : MemoryOrder::relaxed; + + // If we need to enforce memory order for in-place stream compaction, wrap the default decoupled look-back tile + // state in a helper class that enforces memory order on reads and writes + using MemoryOrderedTileStateT = tile_state_with_memory_order; + + // The input value type + using InputT = value_t; + + // The flag value type + using FlagT = value_t; + + // Constants + enum + { + USE_SELECT_OP, + USE_SELECT_FLAGS, + USE_DISCONTINUITY, + USE_STENCIL_WITH_OP + }; + + static constexpr ::cuda::std::int32_t BLOCK_THREADS = AgentSelectIfPolicyT::BLOCK_THREADS; + static constexpr ::cuda::std::int32_t ITEMS_PER_THREAD = AgentSelectIfPolicyT::ITEMS_PER_THREAD; + static constexpr ::cuda::std::int32_t TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD; + static constexpr bool TWO_PHASE_SCATTER = (ITEMS_PER_THREAD > 1); + + static constexpr bool has_select_op = (!::cuda::std::is_same::value); + static constexpr bool has_flags_it = (!::cuda::std::is_same::value); + static constexpr bool use_stencil_with_op = has_select_op && has_flags_it; + static constexpr auto SELECT_METHOD = + use_stencil_with_op ? USE_STENCIL_WITH_OP + : has_select_op ? USE_SELECT_OP + : has_flags_it ? 
USE_SELECT_FLAGS + : USE_DISCONTINUITY; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) for items + // Wrap the native input pointer with CacheModifiedValuesInputIterator + // or directly use the supplied input iterator type + using WrappedInputIteratorT = + ::cuda::std::_If<::cuda::std::is_pointer::value, + CacheModifiedInputIterator, + InputIteratorT>; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) for values + // Wrap the native input pointer with CacheModifiedValuesInputIterator + // or directly use the supplied input iterator type + using WrappedFlagsInputIteratorT = + ::cuda::std::_If<::cuda::std::is_pointer::value, + CacheModifiedInputIterator, + FlagsInputIteratorT>; + + // Parameterized BlockLoad type for input data + using BlockLoadT = BlockLoad; + + // Parameterized BlockLoad type for flags + using BlockLoadFlags = BlockLoad; + + // Parameterized BlockDiscontinuity type for items + using BlockDiscontinuityT = BlockDiscontinuity; + + // Parameterized BlockScan type + using BlockScanT = BlockScan; + + // Callback type for obtaining tile prefix during block scan + using DelayConstructorT = typename AgentSelectIfPolicyT::detail::delay_constructor_t; + using TilePrefixCallbackOpT = + TilePrefixCallbackOp, MemoryOrderedTileStateT, 0, DelayConstructorT>; + + // Item exchange type + using ItemExchangeT = InputT[TILE_ITEMS]; + + // Shared memory type for this thread block + union _TempStorage + { + struct ScanStorage + { + // Smem needed for tile scanning + typename BlockScanT::TempStorage scan; + + // Smem needed for cooperative prefix callback + typename TilePrefixCallbackOpT::TempStorage prefix; + + // Smem needed for discontinuity detection + typename BlockDiscontinuityT::TempStorage discontinuity; + } scan_storage; + + // Smem needed for loading items + typename BlockLoadT::TempStorage load_items; + + // Smem needed for loading values + typename BlockLoadFlags::TempStorage load_flags; + + // 
Smem needed for compacting items (allows non POD items in this union) + Uninitialized raw_exchange; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; ///< Reference to temp_storage + WrappedInputIteratorT d_in; ///< Input items + OutputIteratorWrapperT d_selected_out; ///< Output iterator for the selected items + WrappedFlagsInputIteratorT d_flags_in; ///< Input selection flags (if applicable) + InequalityWrapper inequality_op; ///< T inequality operator + SelectOpT select_op; ///< Selection operator + OffsetT num_items; ///< Total number of input items + + // Note: This is a const reference because we have seen double-digit percentage perf regressions otherwise + const StreamingContextT& streaming_context; ///< Context for the current partition + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + /** + * @param temp_storage + * Reference to temp_storage + * + * @param d_in + * Input data + * + * @param d_flags_in + * Input selection flags (if applicable) + * + * @param d_selected_out + * Output data + * + * @param select_op + * Selection operator + * + * @param equality_op + * Equality operator + * + * @param num_items + * Total number of input items + * + * @param streaming_context + * Context for the current partition + */ + _CCCL_DEVICE _CCCL_FORCEINLINE AgentSelectIf( + TempStorage& temp_storage, + InputIteratorT d_in, + FlagsInputIteratorT d_flags_in, + OutputIteratorWrapperT d_selected_out, + SelectOpT select_op, + EqualityOpT equality_op, + OffsetT num_items, + const StreamingContextT& streaming_context) + : temp_storage(temp_storage.Alias()) + , d_in(d_in) + , 
d_selected_out(d_selected_out) + , d_flags_in(d_flags_in) + , inequality_op(equality_op) + , select_op(select_op) + , num_items(num_items) + , streaming_context(streaming_context) + {} + + //--------------------------------------------------------------------- + // Utility methods for initializing the selections + //--------------------------------------------------------------------- + + /** + * Initialize selections (specialized for selection operator) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeSelections( + OffsetT /*tile_offset*/, + OffsetT num_tile_items, + InputT (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + Int2Type /*select_method*/) + { +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Out-of-bounds items are selection_flags + selection_flags[ITEM] = 1; + + if (!IS_LAST_TILE || (static_cast(threadIdx.x * ITEMS_PER_THREAD + ITEM) < num_tile_items)) + { + selection_flags[ITEM] = static_cast(select_op(items[ITEM])); + } + } + } + + /** + * Initialize selections (specialized for selection_op applied to d_flags_in) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeSelections( + OffsetT tile_offset, + OffsetT num_tile_items, + InputT (& /*items*/)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + Int2Type /*select_method*/) + { + __syncthreads(); + + FlagT flags[ITEMS_PER_THREAD]; + if (IS_LAST_TILE) + { + // Initialize the out-of-bounds flags +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + selection_flags[ITEM] = true; + } + // Guarded loads + BlockLoadFlags(temp_storage.load_flags) + .Load((d_flags_in + streaming_context.input_offset()) + tile_offset, flags, num_tile_items); + } + else + { + BlockLoadFlags(temp_storage.load_flags).Load((d_flags_in + streaming_context.input_offset()) + tile_offset, flags); + } + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Set selection_flags for 
out-of-bounds items + if ((!IS_LAST_TILE) || (static_cast(threadIdx.x * ITEMS_PER_THREAD + ITEM) < num_tile_items)) + { + selection_flags[ITEM] = static_cast(select_op(flags[ITEM])); + } + } + } + + /** + * Initialize selections (specialized for valid flags) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeSelections( + OffsetT tile_offset, + OffsetT num_tile_items, + InputT (& /*items*/)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + Int2Type /*select_method*/) + { + __syncthreads(); + + FlagT flags[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Out-of-bounds items are selection_flags + BlockLoadFlags(temp_storage.load_flags) + .Load((d_flags_in + streaming_context.input_offset()) + tile_offset, flags, num_tile_items, 1); + } + else + { + BlockLoadFlags(temp_storage.load_flags).Load((d_flags_in + streaming_context.input_offset()) + tile_offset, flags); + } + +// Convert flag type to selection_flags type +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + selection_flags[ITEM] = static_cast(flags[ITEM]); + } + } + + /** + * Initialize selections (specialized for discontinuity detection) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeSelections( + OffsetT tile_offset, + OffsetT num_tile_items, + InputT (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + Int2Type /*select_method*/) + { + if (IS_FIRST_TILE && streaming_context.is_first_partition()) + { + __syncthreads(); + + // Set head selection_flags. 
First tile sets the first flag for the first item + BlockDiscontinuityT(temp_storage.scan_storage.discontinuity).FlagHeads(selection_flags, items, inequality_op); + } + else + { + InputT tile_predecessor; + if (threadIdx.x == 0) + { + tile_predecessor = d_in[tile_offset + streaming_context.input_offset() - 1]; + } + + __syncthreads(); + + BlockDiscontinuityT(temp_storage.scan_storage.discontinuity) + .FlagHeads(selection_flags, items, inequality_op, tile_predecessor); + } + +// Set selection flags for out-of-bounds items +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Set selection_flags for out-of-bounds items + if ((IS_LAST_TILE) && (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items)) + { + selection_flags[ITEM] = 1; + } + } + } + + //--------------------------------------------------------------------- + // Scatter utility methods + //--------------------------------------------------------------------- + + /** + * Scatter flagged items to output offsets (specialized for direct scattering). 
+ */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterSelectedDirect( + InputT (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + OffsetT (&selection_indices)[ITEMS_PER_THREAD], + OffsetT num_selections) + { +// Scatter flagged items +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (selection_flags[ITEM]) + { + if ((!IS_LAST_TILE) || selection_indices[ITEM] < num_selections) + { + *((d_selected_out + streaming_context.num_previously_selected()) + selection_indices[ITEM]) = items[ITEM]; + } + } + } + } + + /** + * @brief Scatter flagged items to output offsets (specialized for two-phase scattering) + * + * @param num_tile_items + * Number of valid items in this tile + * + * @param num_tile_selections + * Number of selections in this tile + * + * @param num_selections_prefix + * Total number of selections prior to this tile + * + * @param num_rejected_prefix + * Total number of rejections prior to this tile + * + * @param is_keep_rejects + * Marker type indicating whether to keep rejected items in the second partition + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterSelectedTwoPhase( + InputT (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + OffsetT (&selection_indices)[ITEMS_PER_THREAD], + int num_tile_selections, + OffsetT num_selections_prefix) + { + __syncthreads(); + +// Compact and scatter items +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int local_scatter_offset = selection_indices[ITEM] - num_selections_prefix; + if (selection_flags[ITEM]) + { + temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; + } + } + + __syncthreads(); + + for (int item = threadIdx.x; item < num_tile_selections; item += BLOCK_THREADS) + { + *((d_selected_out + streaming_context.num_previously_selected()) + (num_selections_prefix + item)) = + temp_storage.raw_exchange.Alias()[item]; + } + } + + /** + * @brief Scatter flagged items. 
Specialized for selection algorithm that simply discards rejected items + * + * @param num_tile_items + * Number of valid items in this tile + * + * @param num_tile_selections + * Number of selections in this tile + * + * @param num_selections_prefix + * Total number of selections prior to this tile + * + * @param num_rejected_prefix + * Total number of rejections prior to this tile + * + * @param num_selections + * Total number of selections including this tile + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Scatter( + InputT (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + OffsetT (&selection_indices)[ITEMS_PER_THREAD], + int num_tile_items, + int num_tile_selections, + OffsetT num_selections_prefix, + OffsetT num_rejected_prefix, + OffsetT num_selections, + Int2Type /*is_keep_rejects*/) + { + // Do a two-phase scatter if two-phase is enabled and the average number of selection_flags items per thread is + // greater than one + if (TWO_PHASE_SCATTER && (num_tile_selections > BLOCK_THREADS)) + { + ScatterSelectedTwoPhase( + items, selection_flags, selection_indices, num_tile_selections, num_selections_prefix); + } + else + { + ScatterSelectedDirect(items, selection_flags, selection_indices, num_selections); + } + } + + /** + * @brief Scatter flagged items. Specialized for partitioning algorithm that writes rejected items to a second + * partition. 
+ * + * @param num_tile_items + * Number of valid items in this tile + * + * @param num_tile_selections + * Number of selections in this tile + * + * @param num_selections_prefix + * Total number of selections prior to this tile + * + * @param num_rejected_prefix + * Total number of rejections prior to this tile + * + * @param is_keep_rejects + * Marker type indicating whether to keep rejected items in the second partition + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Scatter( + InputT (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + OffsetT (&selection_indices)[ITEMS_PER_THREAD], + int num_tile_items, + int num_tile_selections, + OffsetT num_selections_prefix, + OffsetT num_rejected_prefix, + OffsetT num_selections, + Int2Type /*is_keep_rejects*/) + { + __syncthreads(); + + int tile_num_rejections = num_tile_items - num_tile_selections; + +// Scatter items to shared memory (rejections first) +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int item_idx = (threadIdx.x * ITEMS_PER_THREAD) + ITEM; + int local_selection_idx = selection_indices[ITEM] - num_selections_prefix; + int local_rejection_idx = item_idx - local_selection_idx; + int local_scatter_offset = + (selection_flags[ITEM]) ? tile_num_rejections + local_selection_idx : local_rejection_idx; + + temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; + } + + // Ensure all threads finished scattering to shared memory + __syncthreads(); + + // Gather items from shared memory and scatter to global + ScatterPartitionsToGlobal( + num_tile_items, tile_num_rejections, num_selections_prefix, num_rejected_prefix, d_selected_out); + } + + /** + * @brief Second phase of scattering partitioned items to global memory. Specialized for partitioning to two + * distinct partitions. 
+ */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterPartitionsToGlobal( + int num_tile_items, + int tile_num_rejections, + OffsetT num_selections_prefix, + OffsetT num_rejected_prefix, + partition_distinct_output_t partitioned_out_wrapper) + { + auto selected_out_it = partitioned_out_wrapper.selected_it + streaming_context.num_previously_selected(); + auto rejected_out_it = partitioned_out_wrapper.rejected_it + streaming_context.num_previously_rejected(); + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int item_idx = (ITEM * BLOCK_THREADS) + threadIdx.x; + int rejection_idx = item_idx; + int selection_idx = item_idx - tile_num_rejections; + OffsetT scatter_offset = + (item_idx < tile_num_rejections) ? num_rejected_prefix + rejection_idx : num_selections_prefix + selection_idx; + + InputT item = temp_storage.raw_exchange.Alias()[item_idx]; + + if (!IS_LAST_TILE || (item_idx < num_tile_items)) + { + if (item_idx >= tile_num_rejections) + { + selected_out_it[scatter_offset] = item; + } + else + { + rejected_out_it[scatter_offset] = item; + } + } + } + } + + /** + * @brief Second phase of scattering partitioned items to global memory. Specialized for partitioning to a single + * iterator, where selected items are written in order from the beginning of the iterator and rejected items are + * writtem from the iterators end backwards. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterPartitionsToGlobal( + int num_tile_items, + int tile_num_rejections, + OffsetT num_selections_prefix, + OffsetT num_rejected_prefix, + PartitionedOutputItT partitioned_out_it) + { + using total_offset_t = typename StreamingContextT::total_num_items_t; + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int item_idx = (ITEM * BLOCK_THREADS) + threadIdx.x; + int rejection_idx = item_idx; + int selection_idx = item_idx - tile_num_rejections; + total_offset_t scatter_offset = + (item_idx < tile_num_rejections) + ? 
(streaming_context.num_total_items(num_items) - streaming_context.num_previously_rejected() + - static_cast(num_rejected_prefix) - static_cast(rejection_idx) + - total_offset_t{1}) + : (streaming_context.num_previously_selected() + static_cast(num_selections_prefix) + + static_cast(selection_idx)); + + InputT item = temp_storage.raw_exchange.Alias()[item_idx]; + if (!IS_LAST_TILE || (item_idx < num_tile_items)) + { + partitioned_out_it[scatter_offset] = item; + } + } + } + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * @brief Process first tile of input (dynamic chained scan). + * + * @param num_tile_items + * Number of input items comprising this tile + * + * @param tile_offset + * Tile offset + * + * @param tile_state_wrapper + * A global tile state descriptor wrapped in a MemoryOrderedTileStateT that ensures consistent memory order across + * all tile status updates and loads + * + * @return The running count of selections (including this tile) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT + ConsumeFirstTile(int num_tile_items, OffsetT tile_offset, MemoryOrderedTileStateT& tile_state_wrapper) + { + InputT items[ITEMS_PER_THREAD]; + OffsetT selection_flags[ITEMS_PER_THREAD]; + OffsetT selection_indices[ITEMS_PER_THREAD]; + + // Load items + if (IS_LAST_TILE) + { + BlockLoadT(temp_storage.load_items) + .Load((d_in + streaming_context.input_offset()) + tile_offset, items, num_tile_items); + } + else + { + BlockLoadT(temp_storage.load_items).Load((d_in + streaming_context.input_offset()) + tile_offset, items); + } + + // Initialize selection_flags + InitializeSelections( + tile_offset, num_tile_items, items, selection_flags, Int2Type()); + + // Ensure temporary storage used during block load can be reused + // Also, in case of in-place stream compaction, this is needed to 
order the loads of + // *all threads of this thread block* before the st.release of the thread writing this thread block's tile state + __syncthreads(); + + // Exclusive scan of selection_flags + OffsetT num_tile_selections; + BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_tile_selections); + + if (threadIdx.x == 0) + { + // Update tile status if this is not the last tile + if (!IS_LAST_TILE) + { + tile_state_wrapper.SetInclusive(0, num_tile_selections); + } + } + + // Discount any out-of-bounds selections + if (IS_LAST_TILE) + { + num_tile_selections -= (TILE_ITEMS - num_tile_items); + } + + // Scatter flagged items + Scatter( + items, + selection_flags, + selection_indices, + num_tile_items, + num_tile_selections, + 0, + 0, + num_tile_selections, + cub::Int2Type{}); + + return num_tile_selections; + } + + /** + * @brief Process subsequent tile of input (dynamic chained scan). + * + * @param num_tile_items + * Number of input items comprising this tile + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state_wrapper + * A global tile state descriptor wrapped in a MemoryOrderedTileStateT that ensures consistent memory order across + * all tile status updates and loads + * + * @return The running count of selections (including this tile) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT ConsumeSubsequentTile( + int num_tile_items, int tile_idx, OffsetT tile_offset, MemoryOrderedTileStateT& tile_state_wrapper) + { + InputT items[ITEMS_PER_THREAD]; + OffsetT selection_flags[ITEMS_PER_THREAD]; + OffsetT selection_indices[ITEMS_PER_THREAD]; + + // Load items + if (IS_LAST_TILE) + { + BlockLoadT(temp_storage.load_items) + .Load((d_in + streaming_context.input_offset()) + tile_offset, items, num_tile_items); + } + else + { + BlockLoadT(temp_storage.load_items).Load((d_in + streaming_context.input_offset()) + tile_offset, items); + } + + // Initialize selection_flags 
+ InitializeSelections( + tile_offset, num_tile_items, items, selection_flags, Int2Type()); + + // Ensure temporary storage used during block load can be reused + // Also, in case of in-place stream compaction, this is needed to order the loads of + // *all threads of this thread block* before the st.release of the thread writing this thread block's tile state + __syncthreads(); + + // Exclusive scan of values and selection_flags + TilePrefixCallbackOpT prefix_op( + tile_state_wrapper, temp_storage.scan_storage.prefix, ::cuda::std::plus<>{}, tile_idx); + BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(selection_flags, selection_indices, prefix_op); + + OffsetT num_tile_selections = prefix_op.GetBlockAggregate(); + OffsetT num_selections = prefix_op.GetInclusivePrefix(); + OffsetT num_selections_prefix = prefix_op.GetExclusivePrefix(); + OffsetT num_rejected_prefix = tile_offset - num_selections_prefix; + + // Discount any out-of-bounds selections + if (IS_LAST_TILE) + { + int num_discount = TILE_ITEMS - num_tile_items; + num_selections -= num_discount; + num_tile_selections -= num_discount; + } + + // note (only applies to in-place stream compaction): We can avoid having to introduce explicit memory order between + // the look-back (i.e., loading previous tiles' states) and scattering items (which means, potentially overwriting + // previous tiles' input items, in case of in-place compaction), because this is implicitly ensured through + // execution dependency: The scatter stage requires the offset from the prefix-sum and it can only know the + // prefix-sum after having read that from the decoupled look-back. 
Scatter flagged items + Scatter( + items, + selection_flags, + selection_indices, + num_tile_items, + num_tile_selections, + num_selections_prefix, + num_rejected_prefix, + num_selections, + cub::Int2Type{}); + + return num_selections; + } + + /** + * @brief Process a tile of input + * + * @param num_tile_items + * Number of input items comprising this tile + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state_wrapper + * A global tile state descriptor wrapped in a MemoryOrderedTileStateT that ensures consistent memory order across + * all tile status updates and loads + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT + ConsumeTile(int num_tile_items, int tile_idx, OffsetT tile_offset, MemoryOrderedTileStateT& tile_state_wrapper) + { + OffsetT num_selections; + if (tile_idx == 0) + { + num_selections = ConsumeFirstTile(num_tile_items, tile_offset, tile_state_wrapper); + } + else + { + num_selections = ConsumeSubsequentTile(num_tile_items, tile_idx, tile_offset, tile_state_wrapper); + } + + return num_selections; + } + + /** + * @brief Scan tiles of items as part of a dynamic chained scan + * + * @param num_tiles + * Total number of input tiles + * + * @param tile_state + * Global tile state descriptor + * + * @param d_num_selected_out + * Output total number selection_flags + * + * @tparam NumSelectedIteratorT + * Output iterator type for recording number of items selection_flags + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeRange(int num_tiles, ScanTileStateT& tile_state, NumSelectedIteratorT d_num_selected_out) + { + // Ensure consistent memory order across all tile status updates and loads + auto tile_state_wrapper = MemoryOrderedTileStateT{tile_state}; + + // Blocks are launched in increasing order, so just assign one tile per block + // TODO (elstehle): replacing this term with just `blockIdx.x` degrades perf for partition. 
Once we get to re-tune + // the algorithm, we want to replace this term with `blockIdx.x` + int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index + OffsetT tile_offset = static_cast(tile_idx) * static_cast(TILE_ITEMS); + + if (tile_idx < num_tiles - 1) + { + // Not the last tile (full) + ConsumeTile(TILE_ITEMS, tile_idx, tile_offset, tile_state_wrapper); + } + else + { + // The last tile (possibly partially-full) + OffsetT num_remaining = num_items - tile_offset; + OffsetT num_selections = ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state_wrapper); + + if (threadIdx.x == 0) + { + // Update the number of selected items with this partition's selections + streaming_context.update_num_selected(d_num_selected_out, num_selections); + } + } + } +}; + +} // namespace select +} // namespace detail + +template +using partition_distinct_output_t CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the " + "public interface will be removed.") = + detail::select::partition_distinct_output_t; + +template +using AgentSelectIf CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::select::AgentSelectIf< + AgentSelectIfPolicyT, + InputIteratorT, + FlagsInputIteratorT, + OutputIteratorWrapperT, + SelectOpT, + EqualityOpT, + OffsetT, + StreamingContextT, + KeepRejects, + MayAlias>; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_spmv_orig.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_spmv_orig.cuh new file mode 100644 index 0000000000000000000000000000000000000000..90a5e3aa6c9cad75393cff975d8e9b4ce19cf5cc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_spmv_orig.cuh @@ -0,0 +1,764 @@ +/****************************************************************************** + * Copyright 
(c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * cub::AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy + ******************************************************************************/ + +/** + * @param Parameterizable tuning policy type for AgentSpmv + * + * @tparam _BLOCK_THREADS + * Threads per thread block + * + * @tparam _ITEMS_PER_THREAD + * Items per thread (per tile of input) + * + * @tparam _ROW_OFFSETS_SEARCH_LOAD_MODIFIER + * Cache load modifier for reading CSR row-offsets during search + * + * @tparam _ROW_OFFSETS_LOAD_MODIFIER + * Cache load modifier for reading CSR row-offsets + * + * @tparam _COLUMN_INDICES_LOAD_MODIFIER + * Cache load modifier for reading CSR column-indices + * + * @tparam _VALUES_LOAD_MODIFIER + * Cache load modifier for reading CSR values + * + * @tparam _VECTOR_VALUES_LOAD_MODIFIER + * Cache load modifier for reading vector values + * + * @tparam _DIRECT_LOAD_NONZEROS + * Whether to load nonzeros directly from global during sequential merging (vs. 
pre-staged through + * shared memory) + * + * @tparam _SCAN_ALGORITHM + * The BlockScan algorithm to use + */ +template +struct CCCL_DEPRECATED_BECAUSE("Use the cuSPARSE library instead") AgentSpmvPolicy +{ + enum + { + /// Threads per thread block + BLOCK_THREADS = _BLOCK_THREADS, + + /// Items per thread (per tile of input) + ITEMS_PER_THREAD = _ITEMS_PER_THREAD, + + /// Whether to load nonzeros directly from global during sequential merging (pre-staged through + /// shared memory) + DIRECT_LOAD_NONZEROS = _DIRECT_LOAD_NONZEROS, + }; + + /// Cache load modifier for reading CSR row-offsets + static constexpr CacheLoadModifier ROW_OFFSETS_SEARCH_LOAD_MODIFIER = _ROW_OFFSETS_SEARCH_LOAD_MODIFIER; + + /// Cache load modifier for reading CSR row-offsets + static constexpr CacheLoadModifier ROW_OFFSETS_LOAD_MODIFIER = _ROW_OFFSETS_LOAD_MODIFIER; + + /// Cache load modifier for reading CSR column-indices + static constexpr CacheLoadModifier COLUMN_INDICES_LOAD_MODIFIER = _COLUMN_INDICES_LOAD_MODIFIER; + + /// Cache load modifier for reading CSR values + static constexpr CacheLoadModifier VALUES_LOAD_MODIFIER = _VALUES_LOAD_MODIFIER; + + /// Cache load modifier for reading vector values + static constexpr CacheLoadModifier VECTOR_VALUES_LOAD_MODIFIER = _VECTOR_VALUES_LOAD_MODIFIER; + + /// The BlockScan algorithm to use + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +/** + * @tparam ValueT + * Matrix and vector value type + * + * @tparam OffsetT + * Signed integer type for sequence offsets + */ +template +struct +// with NVHPC, we get a deprecation warning in the implementation of cudaLaunchKernelEx, which we cannot suppress :/ +#if !_CCCL_COMPILER(NVHPC) + CCCL_DEPRECATED_BECAUSE("Use the cuSPARSE library instead") +#endif + SpmvParams +{ + /// 
Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix + /// A. + const ValueT* d_values; + + /// Pointer to the array of \p m offsets demarcating the end of every row in \p d_column_indices + /// and \p d_values + const OffsetT* d_row_end_offsets; + + /// Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements + /// of matrix A. (Indices are zero-valued.) + const OffsetT* d_column_indices; + + /// Pointer to the array of \p num_cols values corresponding to the dense input vector x + const ValueT* d_vector_x; + + /// Pointer to the array of \p num_rows values corresponding to the dense output vector y + ValueT* d_vector_y; + + /// Number of rows of matrix A. + int num_rows; + + /// Number of columns of matrix A. + int num_cols; + + /// Number of nonzero elements of matrix A. + int num_nonzeros; + + /// Alpha multiplicand + ValueT alpha; + + /// Beta addend-multiplicand + ValueT beta; +}; + +/** + * @brief AgentSpmv implements a stateful abstraction of CUDA thread blocks for participating in device-wide SpMV. 
+ * + * @tparam AgentSpmvPolicyT + * Parameterized AgentSpmvPolicy tuning policy type + * + * @tparam ValueT + * Matrix and vector value type + * + * @tparam OffsetT + * Signed integer type for sequence offsets + * + * @tparam HAS_ALPHA + * Whether the input parameter \p alpha is 1 + * + * @tparam HAS_BETA + * Whether the input parameter \p beta is 0 + * + * @tparam LEGACY_PTX_ARCH + * PTX compute capability (unused) + */ +template +struct CCCL_DEPRECATED_BECAUSE("Use the cuSPARSE library instead") AgentSpmv +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// Constants + enum + { + BLOCK_THREADS = AgentSpmvPolicyT::BLOCK_THREADS, + ITEMS_PER_THREAD = AgentSpmvPolicyT::ITEMS_PER_THREAD, + TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, + }; + + /// 2D merge path coordinate type + using CoordinateT = typename CubVector::Type; + + /// Input iterator wrapper types (for applying cache modifiers) + + using RowOffsetsSearchIteratorT = + CacheModifiedInputIterator; + + using RowOffsetsIteratorT = CacheModifiedInputIterator; + + using ColumnIndicesIteratorT = + CacheModifiedInputIterator; + + using ValueIteratorT = CacheModifiedInputIterator; + + using VectorValueIteratorT = + CacheModifiedInputIterator; + + // Tuple type for scanning (pairs accumulated segment-value with segment-index) + using KeyValuePairT = KeyValuePair; + + // Reduce-value-by-segment scan operator + using ReduceBySegmentOpT = ReduceByKeyOp<::cuda::std::plus<>>; + + // BlockReduce specialization + using BlockReduceT = BlockReduce; + + // BlockScan specialization + using BlockScanT = BlockScan; + + // BlockScan specialization + using BlockPrefixSumT = BlockScan; + + // BlockExchange specialization + using BlockExchangeT = BlockExchange; + + /// Merge item type (either a non-zero value or a row-end offset) + union MergeItem + { + // Value type to pair with index type OffsetT + 
// (NullType if loading values directly during merge) + using MergeValueT = ::cuda::std::_If; + + OffsetT row_end_offset; + MergeValueT nonzero; + }; + + /// Shared memory type required by this thread block + struct _TempStorage + { + CoordinateT tile_coords[2]; + + union Aliasable + { + // Smem needed for tile of merge items + MergeItem merge_items[ITEMS_PER_THREAD + TILE_ITEMS + 1]; + + // Smem needed for block exchange + typename BlockExchangeT::TempStorage exchange; + + // Smem needed for block-wide reduction + typename BlockReduceT::TempStorage reduce; + + // Smem needed for tile scanning + typename BlockScanT::TempStorage scan; + + // Smem needed for tile prefix sum + typename BlockPrefixSumT::TempStorage prefix_sum; + + } aliasable; + }; + + /// Temporary storage type (unionable) + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + /// Reference to temp_storage + _TempStorage& temp_storage; + + _CCCL_SUPPRESS_DEPRECATED_PUSH + SpmvParams& spmv_params; + _CCCL_SUPPRESS_DEPRECATED_POP + + /// Wrapped pointer to the array of \p num_nonzeros values of the corresponding nonzero elements + /// of matrix A. + ValueIteratorT wd_values; + + /// Wrapped Pointer to the array of \p m offsets demarcating the end of every row in \p + /// d_column_indices and \p d_values + RowOffsetsIteratorT wd_row_end_offsets; + + /// Wrapped Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero + /// elements of matrix A. (Indices are zero-valued.) 
+ ColumnIndicesIteratorT wd_column_indices; + + /// Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector + /// x + VectorValueIteratorT wd_vector_x; + + /// Wrapped Pointer to the array of \p num_cols values corresponding to the dense input vector + /// x + VectorValueIteratorT wd_vector_y; + + //--------------------------------------------------------------------- + // Interface + //--------------------------------------------------------------------- + + /** + * @param temp_storage + * Reference to temp_storage + * + * @param spmv_params + * SpMV input parameter bundle + */ + _CCCL_SUPPRESS_DEPRECATED_PUSH + _CCCL_DEVICE _CCCL_FORCEINLINE AgentSpmv(TempStorage& temp_storage, SpmvParams& spmv_params) + : temp_storage(temp_storage.Alias()) + , spmv_params(spmv_params) + , wd_values(spmv_params.d_values) + , wd_row_end_offsets(spmv_params.d_row_end_offsets) + , wd_column_indices(spmv_params.d_column_indices) + , wd_vector_x(spmv_params.d_vector_x) + , wd_vector_y(spmv_params.d_vector_y) + {} + _CCCL_SUPPRESS_DEPRECATED_POP + + /** + * @brief Consume a merge tile, specialized for direct-load of nonzeros + * + * @param is_direct_load + * Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch + */ + _CCCL_DEVICE _CCCL_FORCEINLINE KeyValuePairT + ConsumeTile(int tile_idx, CoordinateT tile_start_coord, CoordinateT tile_end_coord, Int2Type is_direct_load) + { + int tile_num_rows = tile_end_coord.x - tile_start_coord.x; + int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; + OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset; + + // Gather the row end-offsets for the merge tile into shared memory + for (int item = threadIdx.x; item < tile_num_rows + ITEMS_PER_THREAD; item += BLOCK_THREADS) + { + const OffsetT offset = (::cuda::std::min)( + static_cast(tile_start_coord.x + item), static_cast(spmv_params.num_rows - 1)); + 
s_tile_row_end_offsets[item] = wd_row_end_offsets[offset]; + } + + __syncthreads(); + + // Search for the thread's starting coordinate within the merge tile + _CCCL_SUPPRESS_DEPRECATED_PUSH + CountingInputIterator tile_nonzero_indices(tile_start_coord.y); + _CCCL_SUPPRESS_DEPRECATED_POP + CoordinateT thread_start_coord; + + MergePathSearch( + OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal + s_tile_row_end_offsets, // List A + tile_nonzero_indices, // List B + tile_num_rows, + tile_num_nonzeros, + thread_start_coord); + + __syncthreads(); // Perf-sync + + // Compute the thread's merge path segment + CoordinateT thread_current_coord = thread_start_coord; + KeyValuePairT scan_segment[ITEMS_PER_THREAD]; + + ValueT running_total = 0.0; + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + OffsetT nonzero_idx = CUB_MIN(tile_nonzero_indices[thread_current_coord.y], spmv_params.num_nonzeros - 1); + OffsetT column_idx = wd_column_indices[nonzero_idx]; + ValueT value = wd_values[nonzero_idx]; + + ValueT vector_value = wd_vector_x[column_idx]; + + ValueT nonzero = value * vector_value; + + OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; + + if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) + { + // Move down (accumulate) + running_total += nonzero; + scan_segment[ITEM].value = running_total; + scan_segment[ITEM].key = tile_num_rows; + ++thread_current_coord.y; + } + else + { + // Move right (reset) + scan_segment[ITEM].value = running_total; + scan_segment[ITEM].key = thread_current_coord.x; + running_total = 0.0; + ++thread_current_coord.x; + } + } + + __syncthreads(); + + // Block-wide reduce-value-by-segment + KeyValuePairT tile_carry; + ReduceBySegmentOpT scan_op; + KeyValuePairT scan_item; + + scan_item.value = running_total; + scan_item.key = thread_current_coord.x; + + BlockScanT(temp_storage.aliasable.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); + + if (tile_num_rows > 0) + { + 
if (threadIdx.x == 0) + { + scan_item.key = -1; + } + +// Direct scatter +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (scan_segment[ITEM].key < tile_num_rows) + { + if (scan_item.key == scan_segment[ITEM].key) + { + scan_segment[ITEM].value = scan_item.value + scan_segment[ITEM].value; + } + + if (HAS_ALPHA) + { + scan_segment[ITEM].value *= spmv_params.alpha; + } + + if (HAS_BETA) + { + // Update the output vector element + ValueT addend = spmv_params.beta * wd_vector_y[tile_start_coord.x + scan_segment[ITEM].key]; + scan_segment[ITEM].value += addend; + } + + // Set the output vector element + spmv_params.d_vector_y[tile_start_coord.x + scan_segment[ITEM].key] = scan_segment[ITEM].value; + } + } + } + + // Return the tile's running carry-out + return tile_carry; + } + + /** + * @brief Consume a merge tile, specialized for indirect load of nonzeros + * + * @param is_direct_load + * Marker type indicating whether to load nonzeros directly during path-discovery or beforehand in batch + */ + _CCCL_DEVICE _CCCL_FORCEINLINE KeyValuePairT + ConsumeTile(int tile_idx, CoordinateT tile_start_coord, CoordinateT tile_end_coord, Int2Type is_direct_load) + { + int tile_num_rows = tile_end_coord.x - tile_start_coord.x; + int tile_num_nonzeros = tile_end_coord.y - tile_start_coord.y; + +#if (CUB_PTX_ARCH >= 520) + + OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset; + ValueT* s_tile_nonzeros = &temp_storage.aliasable.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; + +// Gather the nonzeros for the merge tile into shared memory +# pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); + + ValueIteratorT a = wd_values + tile_start_coord.y + nonzero_idx; + ColumnIndicesIteratorT ci = wd_column_indices + tile_start_coord.y + nonzero_idx; + ValueT* s = s_tile_nonzeros + nonzero_idx; + + if (nonzero_idx < tile_num_nonzeros) + { + 
OffsetT column_idx = *ci; + ValueT value = *a; + + ValueT vector_value = wd_vector_x[column_idx]; + + ValueT nonzero = value * vector_value; + + *s = nonzero; + } + } + +#else + + OffsetT* s_tile_row_end_offsets = &temp_storage.aliasable.merge_items[0].row_end_offset; + ValueT* s_tile_nonzeros = &temp_storage.aliasable.merge_items[tile_num_rows + ITEMS_PER_THREAD].nonzero; + + // Gather the nonzeros for the merge tile into shared memory + if (tile_num_nonzeros > 0) + { +# pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int nonzero_idx = threadIdx.x + (ITEM * BLOCK_THREADS); + nonzero_idx = CUB_MIN(nonzero_idx, tile_num_nonzeros - 1); + + OffsetT column_idx = wd_column_indices[tile_start_coord.y + nonzero_idx]; + ValueT value = wd_values[tile_start_coord.y + nonzero_idx]; + + ValueT vector_value = wd_vector_x[column_idx]; + + ValueT nonzero = value * vector_value; + + s_tile_nonzeros[nonzero_idx] = nonzero; + } + } + +#endif + +// Gather the row end-offsets for the merge tile into shared memory +#pragma unroll 1 + for (int item = threadIdx.x; item < tile_num_rows + ITEMS_PER_THREAD; item += BLOCK_THREADS) + { + const OffsetT offset = (::cuda::std::min)( + static_cast(tile_start_coord.x + item), static_cast(spmv_params.num_rows - 1)); + s_tile_row_end_offsets[item] = wd_row_end_offsets[offset]; + } + + __syncthreads(); + + // Search for the thread's starting coordinate within the merge tile + _CCCL_SUPPRESS_DEPRECATED_PUSH + CountingInputIterator tile_nonzero_indices(tile_start_coord.y); + _CCCL_SUPPRESS_DEPRECATED_POP + CoordinateT thread_start_coord; + + MergePathSearch( + OffsetT(threadIdx.x * ITEMS_PER_THREAD), // Diagonal + s_tile_row_end_offsets, // List A + tile_nonzero_indices, // List B + tile_num_rows, + tile_num_nonzeros, + thread_start_coord); + + __syncthreads(); // Perf-sync + + // Compute the thread's merge path segment + CoordinateT thread_current_coord = thread_start_coord; + KeyValuePairT scan_segment[ITEMS_PER_THREAD]; + 
ValueT running_total = 0.0; + + OffsetT row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; + ValueT nonzero = s_tile_nonzeros[thread_current_coord.y]; + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (tile_nonzero_indices[thread_current_coord.y] < row_end_offset) + { + // Move down (accumulate) + scan_segment[ITEM].value = nonzero; + running_total += nonzero; + ++thread_current_coord.y; + nonzero = s_tile_nonzeros[thread_current_coord.y]; + } + else + { + // Move right (reset) + scan_segment[ITEM].value = 0.0; + running_total = 0.0; + ++thread_current_coord.x; + row_end_offset = s_tile_row_end_offsets[thread_current_coord.x]; + } + + scan_segment[ITEM].key = thread_current_coord.x; + } + + __syncthreads(); + + // Block-wide reduce-value-by-segment + KeyValuePairT tile_carry; + ReduceBySegmentOpT scan_op; + KeyValuePairT scan_item; + + scan_item.value = running_total; + scan_item.key = thread_current_coord.x; + + BlockScanT(temp_storage.aliasable.scan).ExclusiveScan(scan_item, scan_item, scan_op, tile_carry); + + if (threadIdx.x == 0) + { + scan_item.key = thread_start_coord.x; + scan_item.value = 0.0; + } + + if (tile_num_rows > 0) + { + __syncthreads(); + + // Scan downsweep and scatter + ValueT* s_partials = &temp_storage.aliasable.merge_items[0].nonzero; + + if (scan_item.key != scan_segment[0].key) + { + s_partials[scan_item.key] = scan_item.value; + } + else + { + scan_segment[0].value += scan_item.value; + } + +#pragma unroll + for (int ITEM = 1; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + if (scan_segment[ITEM - 1].key != scan_segment[ITEM].key) + { + s_partials[scan_segment[ITEM - 1].key] = scan_segment[ITEM - 1].value; + } + else + { + scan_segment[ITEM].value += scan_segment[ITEM - 1].value; + } + } + + __syncthreads(); + +#pragma unroll 1 + for (int item = threadIdx.x; item < tile_num_rows; item += BLOCK_THREADS) + { + spmv_params.d_vector_y[tile_start_coord.x + item] = s_partials[item]; + } + } + + // Return the 
tile's running carry-out + return tile_carry; + } + + /** + * @brief Consume input tile + * + * @param[in] d_tile_coordinates + * Pointer to the temporary array of tile starting coordinates + * + * @param[out] d_tile_carry_pairs + * Pointer to the temporary array carry-out dot product row-ids, one per block + * + * @param[in] num_merge_tiles + * Number of merge tiles + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeTile(CoordinateT* d_tile_coordinates, KeyValuePairT* d_tile_carry_pairs, int num_merge_tiles) + { + int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index + + if (tile_idx >= num_merge_tiles) + { + return; + } + + // Read our starting coordinates + if (threadIdx.x < 2) + { + if (d_tile_coordinates == nullptr) + { + // Search our starting coordinates + OffsetT diagonal = (tile_idx + threadIdx.x) * TILE_ITEMS; + CoordinateT tile_coord; + _CCCL_SUPPRESS_DEPRECATED_PUSH + CountingInputIterator nonzero_indices(0); + _CCCL_SUPPRESS_DEPRECATED_POP + + // Search the merge path + MergePathSearch( + diagonal, + RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), + nonzero_indices, + spmv_params.num_rows, + spmv_params.num_nonzeros, + tile_coord); + + temp_storage.tile_coords[threadIdx.x] = tile_coord; + } + else + { + temp_storage.tile_coords[threadIdx.x] = d_tile_coordinates[tile_idx + threadIdx.x]; + } + } + + __syncthreads(); + + CoordinateT tile_start_coord = temp_storage.tile_coords[0]; + CoordinateT tile_end_coord = temp_storage.tile_coords[1]; + + // Consume multi-segment tile + KeyValuePairT tile_carry = + ConsumeTile(tile_idx, tile_start_coord, tile_end_coord, Int2Type()); + + // Output the tile's carry-out + if (threadIdx.x == 0) + { + if (HAS_ALPHA) + { + tile_carry.value *= spmv_params.alpha; + } + + tile_carry.key += tile_start_coord.x; + if (tile_carry.key >= spmv_params.num_rows) + { + // FIXME: This works around an invalid memory access in the + // fixup kernel. 
The underlying issue needs to be debugged and + // properly fixed, but this hack prevents writes to + // out-of-bounds addresses. It doesn't appear to have an effect + // on the validity of the results, since this only affects the + // carry-over from last tile in the input. + tile_carry.key = spmv_params.num_rows - 1; + tile_carry.value = ValueT{}; + }; + + d_tile_carry_pairs[tile_idx] = tile_carry; + } + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_sub_warp_merge_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_sub_warp_merge_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b10f1cda3ead118d26fe2e28704c80597f9804af --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_sub_warp_merge_sort.cuh @@ -0,0 +1,347 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +template +struct AgentSubWarpMergeSortPolicy +{ + static constexpr int WARP_THREADS = WARP_THREADS_ARG; + static constexpr int ITEMS_PER_THREAD = ITEMS_PER_THREAD_ARG; + static constexpr int ITEMS_PER_TILE = WARP_THREADS * ITEMS_PER_THREAD; + + static constexpr cub::WarpLoadAlgorithm LOAD_ALGORITHM = LOAD_ALGORITHM_ARG; + static constexpr cub::CacheLoadModifier LOAD_MODIFIER = LOAD_MODIFIER_ARG; + static constexpr cub::WarpStoreAlgorithm STORE_ALGORITHM = STORE_ALGORITHM_ARG; +}; + +template +struct AgentSmallAndMediumSegmentedSortPolicy +{ + static constexpr int BLOCK_THREADS = BLOCK_THREADS_ARG; + using SmallPolicyT = SmallPolicy; + using MediumPolicyT = MediumPolicy; + + static constexpr int SEGMENTS_PER_MEDIUM_BLOCK = 
BLOCK_THREADS / MediumPolicyT::WARP_THREADS; + + static constexpr int SEGMENTS_PER_SMALL_BLOCK = BLOCK_THREADS / SmallPolicyT::WARP_THREADS; +}; + +namespace detail +{ +namespace sub_warp_merge_sort +{ + +/** + * @brief AgentSubWarpSort implements a sub-warp merge sort. + * + * This agent can work with any power of two number of threads, not exceeding + * 32. The number of threads is defined in the `PolicyT::WARP_THREADS`. Virtual + * warp of `PolicyT::WARP_THREADS` will efficiently load data using + * `PolicyT::LOAD_ALGORITHM`, sort it using `WarpMergeSort`, and store it back + * using `PolicyT::STORE_ALGORITHM`. + * + * @tparam IS_DESCENDING + * Whether or not the sorted-order is high-to-low + * + * @tparam PolicyT + * Chained tuning policy + * + * @tparam KeyT + * Key type + * + * @tparam ValueT + * Value type + * + * @tparam OffsetT + * Signed integer type for global offsets + */ +template +class AgentSubWarpSort +{ + using traits = detail::radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + + struct BinaryOpT + { + template + _CCCL_DEVICE bool operator()(T lhs, T rhs) const noexcept + { + _CCCL_IF_CONSTEXPR (IS_DESCENDING) + { + return lhs > rhs; + } + else + { + return lhs < rhs; + } + _CCCL_UNREACHABLE(); + } + +#if defined(__CUDA_FP16_TYPES_EXIST__) + _CCCL_DEVICE bool operator()(__half lhs, __half rhs) const noexcept + { + // Need to explicitly cast to float for SM <= 52. + _CCCL_IF_CONSTEXPR (IS_DESCENDING) + { + NV_IF_TARGET(NV_PROVIDES_SM_53, (return __hgt(lhs, rhs);), (return __half2float(lhs) > __half2float(rhs);)); + } + else + { + NV_IF_TARGET(NV_PROVIDES_SM_53, (return __hlt(lhs, rhs);), (return __half2float(lhs) < __half2float(rhs);)); + } + _CCCL_UNREACHABLE(); + } +#endif // __CUDA_FP16_TYPES_EXIST__ + }; + +#if defined(__CUDA_FP16_TYPES_EXIST__) + _CCCL_DEVICE static bool equal(__half lhs, __half rhs) + { + // Need to explicitly cast to float for SM <= 52. 
+ NV_IF_TARGET(NV_PROVIDES_SM_53, (return __heq(lhs, rhs);), (return __half2float(lhs) == __half2float(rhs);)); + } +#endif // __CUDA_FP16_TYPES_EXIST__ + + template + _CCCL_DEVICE static bool equal(T lhs, T rhs) + { + return lhs == rhs; + } + + _CCCL_DEVICE static bool get_oob_default(Int2Type /* is bool */) + { + // Traits::MAX_KEY for `bool` is 0xFF which is different from `true` and makes + // comparison with oob unreliable. + return !IS_DESCENDING; + } + + _CCCL_DEVICE static KeyT get_oob_default(Int2Type /* is bool */) + { + // For FP64 the difference is: + // Lowest() -> -1.79769e+308 = 00...00b -> TwiddleIn -> -0 = 10...00b + // LOWEST -> -nan = 11...11b -> TwiddleIn -> 0 = 00...00b + + // Segmented sort doesn't support custom types at the moment. + bit_ordered_type default_key_bits = IS_DESCENDING ? traits::min_raw_binary_key(identity_decomposer_t{}) + : traits::max_raw_binary_key(identity_decomposer_t{}); + return reinterpret_cast(default_key_bits); + } + +public: + static constexpr bool KEYS_ONLY = std::is_same::value; + + using WarpMergeSortT = WarpMergeSort; + + using KeysLoadItT = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + using ItemsLoadItT = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator::type; + + using WarpLoadKeysT = cub::WarpLoad; + using WarpLoadItemsT = + cub::WarpLoad; + + using WarpStoreKeysT = + cub::WarpStore; + using WarpStoreItemsT = + cub::WarpStore; + + union _TempStorage + { + typename WarpLoadKeysT::TempStorage load_keys; + typename WarpLoadItemsT::TempStorage load_items; + typename WarpMergeSortT::TempStorage sort; + typename WarpStoreKeysT::TempStorage store_keys; + typename WarpStoreItemsT::TempStorage store_items; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + _TempStorage& storage; + + _CCCL_DEVICE _CCCL_FORCEINLINE explicit AgentSubWarpSort(TempStorage& temp_storage) + : storage(temp_storage.Alias()) + {} + + 
_CCCL_DEVICE _CCCL_FORCEINLINE void ProcessSegment( + int segment_size, KeysLoadItT keys_input, KeyT* keys_output, ItemsLoadItT values_input, ValueT* values_output) + { + WarpMergeSortT warp_merge_sort(storage.sort); + + if (segment_size < 3) + { + ShortCircuit( + warp_merge_sort.get_linear_tid(), + segment_size, + keys_input, + keys_output, + values_input, + values_output, + BinaryOpT{}); + } + else + { + KeyT keys[PolicyT::ITEMS_PER_THREAD]; + ValueT values[PolicyT::ITEMS_PER_THREAD]; + + KeyT oob_default = AgentSubWarpSort::get_oob_default(Int2Type::value>{}); + + WarpLoadKeysT(storage.load_keys).Load(keys_input, keys, segment_size, oob_default); + __syncwarp(warp_merge_sort.get_member_mask()); + + if (!KEYS_ONLY) + { + WarpLoadItemsT(storage.load_items).Load(values_input, values, segment_size); + + __syncwarp(warp_merge_sort.get_member_mask()); + } + + warp_merge_sort.Sort(keys, values, BinaryOpT{}, segment_size, oob_default); + __syncwarp(warp_merge_sort.get_member_mask()); + + WarpStoreKeysT(storage.store_keys).Store(keys_output, keys, segment_size); + + if (!KEYS_ONLY) + { + __syncwarp(warp_merge_sort.get_member_mask()); + WarpStoreItemsT(storage.store_items).Store(values_output, values, segment_size); + } + } + } + +private: + /** + * This method implements a shortcut for sorting less than three items. + * Only the first thread of a virtual warp is used for soring. 
+ */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ShortCircuit( + unsigned int linear_tid, + OffsetT segment_size, + KeysLoadItT keys_input, + KeyT* keys_output, + ItemsLoadItT values_input, + ValueT* values_output, + CompareOpT binary_op) + { + if (segment_size == 1) + { + if (linear_tid == 0) + { + if (keys_input.ptr != keys_output) + { + keys_output[0] = keys_input[0]; + } + + if (!KEYS_ONLY) + { + if (values_input.ptr != values_output) + { + values_output[0] = values_input[0]; + } + } + } + } + else if (segment_size == 2) + { + if (linear_tid == 0) + { + KeyT lhs = keys_input[0]; + KeyT rhs = keys_input[1]; + + if (equal(lhs, rhs) || binary_op(lhs, rhs)) + { + keys_output[0] = lhs; + keys_output[1] = rhs; + + if (!KEYS_ONLY) + { + if (values_output != values_input.ptr) + { + values_output[0] = values_input[0]; + values_output[1] = values_input[1]; + } + } + } + else + { + keys_output[0] = rhs; + keys_output[1] = lhs; + + if (!KEYS_ONLY) + { + // values_output might be an alias for values_input, so + // we have to use registers here + + const ValueT lhs_val = values_input[0]; + const ValueT rhs_val = values_input[1]; + + values_output[0] = rhs_val; + values_output[1] = lhs_val; + } + } + } + } + } +}; + +} // namespace sub_warp_merge_sort +} // namespace detail + +template +using AgentSubWarpSort CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::sub_warp_merge_sort::AgentSubWarpSort; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_three_way_partition.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_three_way_partition.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d0a37d13e797e00cb5f26138cd435a3c2b3b8923 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_three_way_partition.cuh @@ -0,0 
+1,583 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +namespace detail +{ + +namespace three_way_partition +{ + +template +struct pair_pack_t +{ + OffsetT x, y; + + _CCCL_DEVICE pair_pack_t operator+(const pair_pack_t& other) const + { + return {x + other.x, y + other.y}; + } +}; + +template +struct accumulator_pack_base_t +{ + using pack_t = pair_pack_t; + + _CCCL_DEVICE static pack_t pack(OffsetT f, OffsetT s) + { + return {f, s}; + } + _CCCL_DEVICE static OffsetT first(pack_t packed) + { + return packed.x; + } + _CCCL_DEVICE static OffsetT second(pack_t packed) + { + return packed.y; + } +}; + +template +struct accumulator_pack_base_t::type> +{ + using pack_t = std::uint64_t; + + _CCCL_DEVICE static pack_t pack(OffsetT f, OffsetT s) + { + return (static_cast(f) << 32) | static_cast(s); + } + + _CCCL_DEVICE static OffsetT first(pack_t packed) + { + return static_cast(packed >> 32); + } + + _CCCL_DEVICE static OffsetT second(pack_t packed) + { + return static_cast(packed & 0xFFFFFFFF); + } +}; + +template +struct accumulator_pack_t : accumulator_pack_base_t +{ + using base = accumulator_pack_base_t; + using typename base::pack_t; + + _CCCL_DEVICE static void subtract(pack_t& packed, OffsetT val) + { + packed = base::pack(base::first(packed) - val, base::second(packed) - val); + } + + _CCCL_DEVICE static OffsetT sum(pack_t& packed) + { 
+ return base::first(packed) + base::second(packed); + } + + _CCCL_DEVICE static pack_t zero() + { + return {}; + } +}; + +} // namespace three_way_partition + +} // namespace detail + +template > +struct AgentThreeWayPartitionPolicy +{ + static constexpr int BLOCK_THREADS = _BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = _ITEMS_PER_THREAD; + static constexpr BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + static constexpr CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + static constexpr BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/** + * \brief Implements a device-wide three-way partitioning + * + * Splits input data into three parts based on the selection functors. If the + * first functor selects an item, the algorithm places it in the first part. + * Otherwise, if the second functor selects an item, the algorithm places it in + * the second part. If both functors don't select an item, the algorithm places + * it into the unselected part. 
+ */ +template +struct AgentThreeWayPartition +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + // The input value type + using InputT = cub::detail::value_t; + + using AccumPackHelperT = detail::three_way_partition::accumulator_pack_t; + using AccumPackT = typename AccumPackHelperT::pack_t; + + // Tile status descriptor interface type + using ScanTileStateT = cub::ScanTileState; + + // Constants + static constexpr int BLOCK_THREADS = PolicyT::BLOCK_THREADS; + static constexpr int ITEMS_PER_THREAD = PolicyT::ITEMS_PER_THREAD; + static constexpr int TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD; + + using WrappedInputIteratorT = + ::cuda::std::_If::value, + cub::CacheModifiedInputIterator, + InputIteratorT>; + + // Parameterized BlockLoad type for input data + using BlockLoadT = cub::BlockLoad; + + // Parameterized BlockScan type + using BlockScanT = cub::BlockScan; + + // Callback type for obtaining tile prefix during block scan + using DelayConstructorT = typename PolicyT::detail::delay_constructor_t; + using TilePrefixCallbackOpT = + cub::TilePrefixCallbackOp, ScanTileStateT, 0, DelayConstructorT>; + + // Item exchange type + using ItemExchangeT = InputT[TILE_ITEMS]; + + // Shared memory type for this thread block + union _TempStorage + { + struct ScanStorage + { + // Smem needed for tile scanning + typename BlockScanT::TempStorage scan; + + // Smem needed for cooperative prefix callback + typename TilePrefixCallbackOpT::TempStorage prefix; + } scan_storage; + + // Smem needed for loading items + typename BlockLoadT::TempStorage load_items; + + // Smem needed for compacting items (allows non POD items in this union) + cub::Uninitialized raw_exchange; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : cub::Uninitialized<_TempStorage> + {}; + + 
//--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; ///< Reference to temp_storage + WrappedInputIteratorT d_in; ///< Input items + FirstOutputIteratorT d_first_part_out; + SecondOutputIteratorT d_second_part_out; + UnselectedOutputIteratorT d_unselected_out; + SelectFirstPartOp select_first_part_op; + SelectSecondPartOp select_second_part_op; + OffsetT num_items; ///< Total number of input items + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + // Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE AgentThreeWayPartition( + TempStorage& temp_storage, + InputIteratorT d_in, + FirstOutputIteratorT d_first_part_out, + SecondOutputIteratorT d_second_part_out, + UnselectedOutputIteratorT d_unselected_out, + SelectFirstPartOp select_first_part_op, + SelectSecondPartOp select_second_part_op, + OffsetT num_items) + : temp_storage(temp_storage.Alias()) + , d_in(d_in) + , d_first_part_out(d_first_part_out) + , d_second_part_out(d_second_part_out) + , d_unselected_out(d_unselected_out) + , select_first_part_op(select_first_part_op) + , select_second_part_op(select_second_part_op) + , num_items(num_items) + {} + + //--------------------------------------------------------------------- + // Utility methods for initializing the selections + //--------------------------------------------------------------------- + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Initialize( + OffsetT num_tile_items, InputT (&items)[ITEMS_PER_THREAD], AccumPackT (&items_selection_flags)[ITEMS_PER_THREAD]) + { + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Out-of-bounds items are selection_flags + items_selection_flags[ITEM] = AccumPackHelperT::pack(1, 1); + + if (!IS_LAST_TILE || (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + 
ITEM < num_tile_items)) + { + OffsetT first_item_selected = select_first_part_op(items[ITEM]); + items_selection_flags[ITEM] = + AccumPackHelperT::pack(first_item_selected, first_item_selected ? 0 : select_second_part_op(items[ITEM])); + } + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Scatter( + InputT (&items)[ITEMS_PER_THREAD], + AccumPackT (&items_selection_flags)[ITEMS_PER_THREAD], + AccumPackT (&items_selection_indices)[ITEMS_PER_THREAD], + int num_tile_items, + AccumPackT num_tile_selected, + AccumPackT num_tile_selected_prefix, + OffsetT num_rejected_prefix) + { + __syncthreads(); + + const OffsetT num_first_selections_prefix = AccumPackHelperT::first(num_tile_selected_prefix); + const OffsetT num_second_selections_prefix = AccumPackHelperT::second(num_tile_selected_prefix); + + const int first_item_end = AccumPackHelperT::first(num_tile_selected); + const int second_item_end = first_item_end + AccumPackHelperT::second(num_tile_selected); + + // Scatter items to shared memory (rejections first) + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int item_idx = (threadIdx.x * ITEMS_PER_THREAD) + ITEM; + + const OffsetT first_items_selection_indices = AccumPackHelperT::first(items_selection_indices[ITEM]); + const OffsetT second_items_selection_indices = AccumPackHelperT::second(items_selection_indices[ITEM]); + + if (!IS_LAST_TILE || (item_idx < num_tile_items)) + { + int local_scatter_offset = 0; + + if (AccumPackHelperT::first(items_selection_flags[ITEM])) + { + local_scatter_offset = first_items_selection_indices - num_first_selections_prefix; + } + else if (AccumPackHelperT::second(items_selection_flags[ITEM])) + { + local_scatter_offset = first_item_end + second_items_selection_indices - num_second_selections_prefix; + } + else + { + // Medium item + int local_selection_idx = (first_items_selection_indices - num_first_selections_prefix) + + (second_items_selection_indices - num_second_selections_prefix); + local_scatter_offset = 
second_item_end + item_idx - local_selection_idx; + } + + temp_storage.raw_exchange.Alias()[local_scatter_offset] = items[ITEM]; + } + } + + __syncthreads(); + + // Gather items from shared memory and scatter to global + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int item_idx = (ITEM * BLOCK_THREADS) + threadIdx.x; + + if (!IS_LAST_TILE || (item_idx < num_tile_items)) + { + InputT item = temp_storage.raw_exchange.Alias()[item_idx]; + + if (item_idx < first_item_end) + { + d_first_part_out[num_first_selections_prefix + item_idx] = item; + } + else if (item_idx < second_item_end) + { + d_second_part_out[num_second_selections_prefix + item_idx - first_item_end] = item; + } + else + { + int rejection_idx = item_idx - second_item_end; + d_unselected_out[num_rejected_prefix + rejection_idx] = item; + } + } + } + } + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * Process first tile of input (dynamic chained scan). 
+ * Returns the running count of selections (including this tile) + * + * @param num_tile_items Number of input items comprising this tile + * @param tile_offset Tile offset + * @param first_tile_state Global tile state descriptor + * @param second_tile_state Global tile state descriptor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeFirstTile(int num_tile_items, OffsetT tile_offset, ScanTileStateT& tile_state, AccumPackT& num_items_selected) + { + InputT items[ITEMS_PER_THREAD]; + + AccumPackT items_selection_flags[ITEMS_PER_THREAD]; + AccumPackT items_selection_indices[ITEMS_PER_THREAD]; + + // Load items + if (IS_LAST_TILE) + { + BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items); + } + else + { + BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items); + } + + // Initialize selection_flags + Initialize(num_tile_items, items, items_selection_flags); + __syncthreads(); + + // Exclusive scan of selection_flags + BlockScanT(temp_storage.scan_storage.scan) + .ExclusiveSum(items_selection_flags, items_selection_indices, num_items_selected); + + if (threadIdx.x == 0) + { + // Update tile status if this is not the last tile + if (!IS_LAST_TILE) + { + tile_state.SetInclusive(0, num_items_selected); + } + } + + // Discount any out-of-bounds selections + if (IS_LAST_TILE) + { + AccumPackHelperT::subtract(num_items_selected, TILE_ITEMS - num_tile_items); + } + + // Scatter flagged items + Scatter( + items, + items_selection_flags, + items_selection_indices, + num_tile_items, + num_items_selected, + // all the prefixes equal to 0 because it's the first tile + AccumPackHelperT::zero(), + 0); + } + + /** + * Process subsequent tile of input (dynamic chained scan). 
+ * Returns the running count of selections (including this tile) + * + * @param num_tile_items Number of input items comprising this tile + * @param tile_idx Tile index + * @param tile_offset Tile offset + * @param first_tile_state Global tile state descriptor + * @param second_tile_state Global tile state descriptor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ConsumeSubsequentTile( + int num_tile_items, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_state, AccumPackT& num_items_selected) + { + InputT items[ITEMS_PER_THREAD]; + + AccumPackT items_selected_flags[ITEMS_PER_THREAD]; + AccumPackT items_selected_indices[ITEMS_PER_THREAD]; + + // Load items + if (IS_LAST_TILE) + { + BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items, num_tile_items); + } + else + { + BlockLoadT(temp_storage.load_items).Load(d_in + tile_offset, items); + } + + // Initialize selection_flags + Initialize(num_tile_items, items, items_selected_flags); + __syncthreads(); + + // Exclusive scan of values and selection_flags + TilePrefixCallbackOpT prefix_op(tile_state, temp_storage.scan_storage.prefix, ::cuda::std::plus<>{}, tile_idx); + + BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(items_selected_flags, items_selected_indices, prefix_op); + + num_items_selected = prefix_op.GetInclusivePrefix(); + AccumPackT num_items_in_tile_selected = prefix_op.GetBlockAggregate(); + AccumPackT num_items_selected_prefix = prefix_op.GetExclusivePrefix(); + + __syncthreads(); + + OffsetT num_rejected_prefix = (tile_idx * TILE_ITEMS) - AccumPackHelperT::sum(num_items_selected_prefix); + + // Discount any out-of-bounds selections. There are exactly + // TILE_ITEMS - num_tile_items elements like that because we + // marked them as selected in Initialize method. 
+ if (IS_LAST_TILE) + { + const int num_discount = TILE_ITEMS - num_tile_items; + + AccumPackHelperT::subtract(num_items_selected, num_discount); + AccumPackHelperT::subtract(num_items_in_tile_selected, num_discount); + } + + // Scatter flagged items + Scatter( + items, + items_selected_flags, + items_selected_indices, + num_tile_items, + num_items_in_tile_selected, + num_items_selected_prefix, + num_rejected_prefix); + } + + /** + * Process a tile of input + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeTile(int num_tile_items, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_state, AccumPackT& accum) + { + if (tile_idx == 0) + { + ConsumeFirstTile(num_tile_items, tile_offset, tile_state, accum); + } + else + { + ConsumeSubsequentTile(num_tile_items, tile_idx, tile_offset, tile_state, accum); + } + } + + /** + * Scan tiles of items as part of a dynamic chained scan + * + * @tparam NumSelectedIteratorT + * Output iterator type for recording number of items selection_flags + * + * @param num_tiles + * Total number of input tiles + * + * @param first_tile_state + * Global tile state descriptor + * + * @param second_tile_state + * Global tile state descriptor + * + * @param d_num_selected_out + * Output total number selection_flags + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeRange(int num_tiles, ScanTileStateT& tile_state, NumSelectedIteratorT d_num_selected_out) + { + // Blocks are launched in increasing order, so just assign one tile per block + // Current tile index + const int tile_idx = static_cast((blockIdx.x * gridDim.y) + blockIdx.y); + + // Global offset for the current tile + const OffsetT tile_offset = tile_idx * TILE_ITEMS; + + AccumPackT accum; + + if (tile_idx < num_tiles - 1) + { + // Not the last tile (full) + ConsumeTile(TILE_ITEMS, tile_idx, tile_offset, tile_state, accum); + } + else + { + // The last tile (possibly partially-full) + const OffsetT num_remaining = num_items - tile_offset; + + 
ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state, accum); + + if (threadIdx.x == 0) + { + // Output the total number of items selection_flags + d_num_selected_out[0] = AccumPackHelperT::first(accum); + d_num_selected_out[1] = AccumPackHelperT::second(accum); + } + } + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_unique_by_key.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_unique_by_key.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a1a731f150ff5b469dc767fed1ce97bf1e58688c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/agent_unique_by_key.cuh @@ -0,0 +1,635 @@ +/****************************************************************************** + * Copyright (c) NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::AgentUniqueByKey implements a stateful abstraction of CUDA thread blocks for participating in device-wide + * unique-by-key. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Tuning policy types + ******************************************************************************/ + +/** + * Parameterizable tuning policy type for AgentUniqueByKey + * + * @tparam DelayConstructorT + * Implementation detail, do not specify directly, requirements on the + * content of this type are subject to breaking change. 
+ */ +template > +struct AgentUniqueByKeyPolicy +{ + enum + { + BLOCK_THREADS = _BLOCK_THREADS, + ITEMS_PER_THREAD = _ITEMS_PER_THREAD, + }; + static constexpr cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; + static constexpr cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; + static constexpr cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; + + struct detail + { + using delay_constructor_t = DelayConstructorT; + }; +}; + +/****************************************************************************** + * Thread block abstractions + ******************************************************************************/ + +namespace detail +{ +namespace unique_by_key +{ + +/** + * @brief AgentUniqueByKey implements a stateful abstraction of CUDA thread blocks for participating + * in device-wide unique-by-key + * + * @tparam AgentUniqueByKeyPolicyT + * Parameterized AgentUniqueByKeyPolicy tuning policy type + * + * @tparam KeyInputIteratorT + * Random-access input iterator type for keys + * + * @tparam ValueInputIteratorT + * Random-access input iterator type for values + * + * @tparam KeyOutputIteratorT + * Random-access output iterator type for keys + * + * @tparam ValueOutputIteratorT + * Random-access output iterator type for values + * + * @tparam EqualityOpT + * Equality operator type + * + * @tparam OffsetT + * Signed integer type for global offsets + */ +template +struct AgentUniqueByKey +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + // The input key and value type + using KeyT = typename std::iterator_traits::value_type; + using ValueT = typename std::iterator_traits::value_type; + + // Tile status descriptor interface type + using ScanTileStateT = ScanTileState; + + // Constants + enum + { + BLOCK_THREADS = AgentUniqueByKeyPolicyT::BLOCK_THREADS, + ITEMS_PER_THREAD = AgentUniqueByKeyPolicyT::ITEMS_PER_THREAD, 
+ ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD, + }; + + // Cache-modified Input iterator wrapper type (for applying cache modifier) for keys + using WrappedKeyInputIteratorT = typename std::conditional< + std::is_pointer::value, + CacheModifiedInputIterator, // Wrap the native input pointer + // with + // CacheModifiedValuesInputIterator + KeyInputIteratorT>::type; // Directly use the supplied input iterator type + + // Cache-modified Input iterator wrapper type (for applying cache modifier) for values + using WrappedValueInputIteratorT = typename std::conditional< + std::is_pointer::value, + CacheModifiedInputIterator, // Wrap the native input + // pointer with + // CacheModifiedValuesInputIterator + ValueInputIteratorT>::type; // Directly use the supplied input iterator type + + // Parameterized BlockLoad type for input data + using BlockLoadKeys = BlockLoad; + + // Parameterized BlockLoad type for flags + using BlockLoadValues = BlockLoad; + + // Parameterized BlockDiscontinuity type for items + using BlockDiscontinuityKeys = cub::BlockDiscontinuity; + + // Parameterized BlockScan type + using BlockScanT = cub::BlockScan; + + // Parameterized BlockDiscontinuity type for items + using DelayConstructorT = typename AgentUniqueByKeyPolicyT::detail::delay_constructor_t; + using TilePrefixCallback = + cub::TilePrefixCallbackOp, ScanTileStateT, 0, DelayConstructorT>; + + // Key exchange type + using KeyExchangeT = KeyT[ITEMS_PER_TILE]; + + // Value exchange type + using ValueExchangeT = ValueT[ITEMS_PER_TILE]; + + // Shared memory type for this thread block + union _TempStorage + { + struct ScanStorage + { + typename BlockScanT::TempStorage scan; + typename TilePrefixCallback::TempStorage prefix; + typename BlockDiscontinuityKeys::TempStorage discontinuity; + } scan_storage; + + // Smem needed for loading keys + typename BlockLoadKeys::TempStorage load_keys; + + // Smem needed for loading values + typename BlockLoadValues::TempStorage load_values; + + // Smem 
needed for compacting items (allows non POD items in this union) + Uninitialized shared_keys; + Uninitialized shared_values; + }; + + // Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + _TempStorage& temp_storage; + WrappedKeyInputIteratorT d_keys_in; + WrappedValueInputIteratorT d_values_in; + KeyOutputIteratorT d_keys_out; + ValueOutputIteratorT d_values_out; + cub::InequalityWrapper inequality_op; + OffsetT num_items; + + //--------------------------------------------------------------------- + // Constructor + //--------------------------------------------------------------------- + + // Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE AgentUniqueByKey( + TempStorage& temp_storage_, + WrappedKeyInputIteratorT d_keys_in_, + WrappedValueInputIteratorT d_values_in_, + KeyOutputIteratorT d_keys_out_, + ValueOutputIteratorT d_values_out_, + EqualityOpT equality_op_, + OffsetT num_items_) + : temp_storage(temp_storage_.Alias()) + , d_keys_in(d_keys_in_) + , d_values_in(d_values_in_) + , d_keys_out(d_keys_out_) + , d_values_out(d_values_out_) + , inequality_op(equality_op_) + , num_items(num_items_) + {} + + //--------------------------------------------------------------------- + // Utility functions + //--------------------------------------------------------------------- + + struct KeyTagT + {}; + struct ValueTagT + {}; + + _CCCL_DEVICE _CCCL_FORCEINLINE KeyExchangeT& GetShared(KeyTagT) + { + return temp_storage.shared_keys.Alias(); + } + _CCCL_DEVICE _CCCL_FORCEINLINE ValueExchangeT& GetShared(ValueTagT) + { + return temp_storage.shared_values.Alias(); + } + + //--------------------------------------------------------------------- + // Scatter utility methods + //--------------------------------------------------------------------- + template 
+ _CCCL_DEVICE _CCCL_FORCEINLINE void Scatter( + Tag tag, + OutputIt items_out, + T (&items)[ITEMS_PER_THREAD], + OffsetT (&selection_flags)[ITEMS_PER_THREAD], + OffsetT (&selection_indices)[ITEMS_PER_THREAD], + int /*num_tile_items*/, + int num_tile_selections, + OffsetT num_selections_prefix, + OffsetT /*num_selections*/) + { +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + int local_scatter_offset = selection_indices[ITEM] - num_selections_prefix; + if (selection_flags[ITEM]) + { + GetShared(tag)[local_scatter_offset] = items[ITEM]; + } + } + + __syncthreads(); + +// Preventing loop unrolling helps avoid perf degradation when switching from signed to unsigned 32-bit offset +// types +#pragma unroll 1 + for (int item = threadIdx.x; item < num_tile_selections; item += BLOCK_THREADS) + { + items_out[num_selections_prefix + item] = GetShared(tag)[item]; + } + + __syncthreads(); + } + + //--------------------------------------------------------------------- + // Cooperatively scan a device-wide sequence of tiles with other CTAs + //--------------------------------------------------------------------- + + /** + * @brief Process first tile of input (dynamic chained scan). 
+ * + * @param num_tile_items + * Number of input items comprising this tile + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + * + * @return The running count of selections (including this tile) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT + ConsumeFirstTile(int num_tile_items, OffsetT tile_offset, ScanTileStateT& tile_state) + { + KeyT keys[ITEMS_PER_THREAD]; + OffsetT selection_flags[ITEMS_PER_THREAD]; + OffsetT selection_idx[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Fill last elements with the first element + // because collectives are not suffix guarded + BlockLoadKeys(temp_storage.load_keys) + .Load(d_keys_in + tile_offset, keys, num_tile_items, *(d_keys_in + tile_offset)); + } + else + { + BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys); + } + + __syncthreads(); + + ValueT values[ITEMS_PER_THREAD]; + if (IS_LAST_TILE) + { + // Fill last elements with the first element + // because collectives are not suffix guarded + BlockLoadValues(temp_storage.load_values) + .Load(d_values_in + tile_offset, values, num_tile_items, *(d_values_in + tile_offset)); + } + else + { + BlockLoadValues(temp_storage.load_values).Load(d_values_in + tile_offset, values); + } + + __syncthreads(); + + BlockDiscontinuityKeys(temp_storage.scan_storage.discontinuity).FlagHeads(selection_flags, keys, inequality_op); +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Set selection_flags for out-of-bounds items + if ((IS_LAST_TILE) && (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items)) + { + selection_flags[ITEM] = 1; + } + } + + __syncthreads(); + + OffsetT num_tile_selections = 0; + OffsetT num_selections = 0; + OffsetT num_selections_prefix = 0; + + BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(selection_flags, selection_idx, num_tile_selections); + + if (threadIdx.x == 0) + { + // Update tile status if this is not the last tile + if 
(!IS_LAST_TILE) + { + tile_state.SetInclusive(0, num_tile_selections); + } + } + + // Do not count any out-of-bounds selections + if (IS_LAST_TILE) + { + int num_discount = ITEMS_PER_TILE - num_tile_items; + num_tile_selections -= num_discount; + } + num_selections = num_tile_selections; + + __syncthreads(); + + Scatter(KeyTagT(), + d_keys_out, + keys, + selection_flags, + selection_idx, + num_tile_items, + num_tile_selections, + num_selections_prefix, + num_selections); + + __syncthreads(); + + Scatter(ValueTagT(), + d_values_out, + values, + selection_flags, + selection_idx, + num_tile_items, + num_tile_selections, + num_selections_prefix, + num_selections); + + return num_selections; + } + + /** + * @brief Process subsequent tile of input (dynamic chained scan). + * + * @param num_tile_items + * Number of input items comprising this tile + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + * + * @return Returns the running count of selections (including this tile) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT + ConsumeSubsequentTile(int num_tile_items, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_state) + { + KeyT keys[ITEMS_PER_THREAD]; + OffsetT selection_flags[ITEMS_PER_THREAD]; + OffsetT selection_idx[ITEMS_PER_THREAD]; + + if (IS_LAST_TILE) + { + // Fill last elements with the first element + // because collectives are not suffix guarded + BlockLoadKeys(temp_storage.load_keys) + .Load(d_keys_in + tile_offset, keys, num_tile_items, *(d_keys_in + tile_offset)); + } + else + { + BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in + tile_offset, keys); + } + + __syncthreads(); + + ValueT values[ITEMS_PER_THREAD]; + if (IS_LAST_TILE) + { + // Fill last elements with the first element + // because collectives are not suffix guarded + BlockLoadValues(temp_storage.load_values) + .Load(d_values_in + tile_offset, values, num_tile_items, *(d_values_in + 
tile_offset)); + } + else + { + BlockLoadValues(temp_storage.load_values).Load(d_values_in + tile_offset, values); + } + + __syncthreads(); + + KeyT tile_predecessor = d_keys_in[tile_offset - 1]; + BlockDiscontinuityKeys(temp_storage.scan_storage.discontinuity) + .FlagHeads(selection_flags, keys, inequality_op, tile_predecessor); + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) + { + // Set selection_flags for out-of-bounds items + if ((IS_LAST_TILE) && (OffsetT(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items)) + { + selection_flags[ITEM] = 1; + } + } + + __syncthreads(); + + OffsetT num_tile_selections = 0; + OffsetT num_selections = 0; + OffsetT num_selections_prefix = 0; + + TilePrefixCallback prefix_cb(tile_state, temp_storage.scan_storage.prefix, ::cuda::std::plus<>{}, tile_idx); + BlockScanT(temp_storage.scan_storage.scan).ExclusiveSum(selection_flags, selection_idx, prefix_cb); + + num_selections = prefix_cb.GetInclusivePrefix(); + num_tile_selections = prefix_cb.GetBlockAggregate(); + num_selections_prefix = prefix_cb.GetExclusivePrefix(); + + if (IS_LAST_TILE) + { + int num_discount = ITEMS_PER_TILE - num_tile_items; + num_tile_selections -= num_discount; + num_selections -= num_discount; + } + + __syncthreads(); + + Scatter(KeyTagT(), + d_keys_out, + keys, + selection_flags, + selection_idx, + num_tile_items, + num_tile_selections, + num_selections_prefix, + num_selections); + + __syncthreads(); + + Scatter(ValueTagT(), + d_values_out, + values, + selection_flags, + selection_idx, + num_tile_items, + num_tile_selections, + num_selections_prefix, + num_selections); + + return num_selections; + } + + /** + * @brief Process a tile of input + * + * @param num_tile_items + * Number of input items comprising this tile + * + * @param tile_idx + * Tile index + * + * @param tile_offset + * Tile offset + * + * @param tile_state + * Global tile state descriptor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT + ConsumeTile(int 
num_tile_items, int tile_idx, OffsetT tile_offset, ScanTileStateT& tile_state) + { + OffsetT num_selections; + if (tile_idx == 0) + { + num_selections = ConsumeFirstTile(num_tile_items, tile_offset, tile_state); + } + else + { + num_selections = ConsumeSubsequentTile(num_tile_items, tile_idx, tile_offset, tile_state); + } + + return num_selections; + } + + /** + * @brief Scan tiles of items as part of a dynamic chained scan + * + * @param num_tiles + * Total number of input tiles + * + * @param tile_state + * Global tile state descriptor + * + * @param d_num_selected_out + * Output total number selection_flags + * + * @tparam NumSelectedIteratorT + * Output iterator type for recording number of items selection_flags + * + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ConsumeRange(int num_tiles, ScanTileStateT& tile_state, NumSelectedIteratorT d_num_selected_out) + { + // Blocks are launched in increasing order, so just assign one tile per block + int tile_idx = (blockIdx.x * gridDim.y) + blockIdx.y; // Current tile index + + // Global offset for the current tile + OffsetT tile_offset = static_cast(tile_idx) * static_cast(ITEMS_PER_TILE); + + if (tile_idx < num_tiles - 1) + { + ConsumeTile(ITEMS_PER_TILE, tile_idx, tile_offset, tile_state); + } + else + { + int num_remaining = static_cast(num_items - tile_offset); + OffsetT num_selections = ConsumeTile(num_remaining, tile_idx, tile_offset, tile_state); + if (threadIdx.x == 0) + { + *d_num_selected_out = num_selections; + } + } + } +}; + +} // namespace unique_by_key +} // namespace detail + +template +using AgentUniqueByKey CCCL_DEPRECATED_BECAUSE("This class is considered an implementation detail and the public " + "interface will be removed.") = + detail::unique_by_key::AgentUniqueByKey< + AgentUniqueByKeyPolicyT, + KeyInputIteratorT, + ValueInputIteratorT, + KeyOutputIteratorT, + ValueOutputIteratorT, + EqualityOpT, + OffsetT>; + +CUB_NAMESPACE_END diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/single_pass_scan_operators.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/single_pass_scan_operators.cuh new file mode 100644 index 0000000000000000000000000000000000000000..92e7c35b46a0df0be61227de50e13c127994df6d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/agent/single_pass_scan_operators.cuh @@ -0,0 +1,1321 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * Callback operator types for supplying BlockScan prefixes + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Prefix functor type for maintaining a running prefix while scanning a + * region independent of other thread blocks + ******************************************************************************/ + +/** + * Stateful callback operator type for supplying BlockScan prefixes. + * Maintains a running prefix that can be applied to consecutive + * BlockScan operations. 
+ * + * @tparam T + * BlockScan value type + * + * @tparam ScanOpT + * Wrapped scan operator type + */ +template +struct BlockScanRunningPrefixOp +{ + /// Wrapped scan operator + ScanOpT op; + + /// Running block-wide prefix + T running_total; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockScanRunningPrefixOp(ScanOpT op) + : op(op) + {} + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockScanRunningPrefixOp(T starting_prefix, ScanOpT op) + : op(op) + , running_total(starting_prefix) + {} + + /** + * Prefix callback operator. Returns the block-wide running_total in thread-0. + * + * @param block_aggregate + * The aggregate sum of the BlockScan inputs + */ + _CCCL_DEVICE _CCCL_FORCEINLINE T operator()(const T& block_aggregate) + { + T retval = running_total; + running_total = op(running_total, block_aggregate); + return retval; + } +}; + +/****************************************************************************** + * Generic tile status interface types for block-cooperative scans + ******************************************************************************/ + +/** + * Enumerations of tile status + */ +enum ScanTileStatus +{ + SCAN_TILE_OOB, // Out-of-bounds (e.g., padding) + SCAN_TILE_INVALID = 99, // Not yet processed + SCAN_TILE_PARTIAL, // Tile aggregate is available + SCAN_TILE_INCLUSIVE, // Inclusive tile prefix is available +}; + +/** + * Enum class used for specifying the memory order that shall be enforced while reading and writing the tile status. 
+ */ +enum class MemoryOrder +{ + // Uses relaxed loads when reading a tile's status and relaxed stores when updating a tile's status + relaxed, + // Uses load acquire when reading a tile's status and store release when updating a tile's status + acquire_release +}; + +namespace detail +{ +template +_CCCL_DEVICE _CCCL_FORCEINLINE void delay() +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (if (Delay > 0) { + if (gridDim.x < GridThreshold) + { + __threadfence_block(); + } + else + { + __nanosleep(Delay); + } + })); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void delay(int ns) +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (if (ns > 0) { + if (gridDim.x < GridThreshold) + { + __threadfence_block(); + } + else + { + __nanosleep(ns); + } + })); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void always_delay() +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (__nanosleep(Delay);)); +} + +_CCCL_DEVICE _CCCL_FORCEINLINE void always_delay(int ns) +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (__nanosleep(ns);), ((void) ns;)); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void delay_or_prevent_hoisting() +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (delay();), (__threadfence_block();)); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void delay_or_prevent_hoisting(int ns) +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (delay(ns);), ((void) ns; __threadfence_block();)); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void always_delay_or_prevent_hoisting() +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (always_delay(Delay);), (__threadfence_block();)); +} + +_CCCL_DEVICE _CCCL_FORCEINLINE void always_delay_or_prevent_hoisting(int ns) +{ + NV_IF_TARGET(NV_PROVIDES_SM_70, (always_delay(ns);), ((void) ns; __threadfence_block();)); +} + +template +struct no_delay_constructor_t +{ + struct delay_t + { + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + NV_IF_TARGET(NV_PROVIDES_SM_70, (), (__threadfence_block();)); + } + }; + + _CCCL_DEVICE _CCCL_FORCEINLINE no_delay_constructor_t(unsigned int /* seed */) + { + delay(); + } + + 
_CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + return {}; + } +}; + +template +struct reduce_by_key_delay_constructor_t +{ + struct delay_t + { + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + NV_DISPATCH_TARGET( + NV_IS_EXACTLY_SM_80, + (delay();), + NV_PROVIDES_SM_70, + (delay<0, GridThreshold>();), + NV_IS_DEVICE, + (__threadfence_block();)); + } + }; + + _CCCL_DEVICE _CCCL_FORCEINLINE reduce_by_key_delay_constructor_t(unsigned int /* seed */) + { + delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + return {}; + } +}; + +template +struct fixed_delay_constructor_t +{ + struct delay_t + { + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + delay_or_prevent_hoisting(); + } + }; + + _CCCL_DEVICE _CCCL_FORCEINLINE fixed_delay_constructor_t(unsigned int /* seed */) + { + delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + return {}; + } +}; + +template +struct exponential_backoff_constructor_t +{ + struct delay_t + { + int delay; + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + always_delay_or_prevent_hoisting(delay); + delay <<= 1; + } + }; + + _CCCL_DEVICE _CCCL_FORCEINLINE exponential_backoff_constructor_t(unsigned int /* seed */) + { + always_delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + return {InitialDelay}; + } +}; + +template +struct exponential_backoff_jitter_constructor_t +{ + struct delay_t + { + static constexpr unsigned int a = 16807; + static constexpr unsigned int c = 0; + static constexpr unsigned int m = 1u << 31; + + unsigned int max_delay; + unsigned int& seed; + + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int next(unsigned int min, unsigned int max) + { + return (seed = (a * seed + c) % m) % (max + 1 - min) + min; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + always_delay_or_prevent_hoisting(next(0, max_delay)); + max_delay <<= 1; + } + }; + + unsigned int seed; + + _CCCL_DEVICE _CCCL_FORCEINLINE 
exponential_backoff_jitter_constructor_t(unsigned int seed) + : seed(seed) + { + always_delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + return {InitialDelay, seed}; + } +}; + +template +struct exponential_backoff_jitter_window_constructor_t +{ + struct delay_t + { + static constexpr unsigned int a = 16807; + static constexpr unsigned int c = 0; + static constexpr unsigned int m = 1u << 31; + + unsigned int max_delay; + unsigned int& seed; + + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int next(unsigned int min, unsigned int max) + { + return (seed = (a * seed + c) % m) % (max + 1 - min) + min; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + unsigned int next_max_delay = max_delay << 1; + always_delay_or_prevent_hoisting(next(max_delay, next_max_delay)); + max_delay = next_max_delay; + } + }; + + unsigned int seed; + _CCCL_DEVICE _CCCL_FORCEINLINE exponential_backoff_jitter_window_constructor_t(unsigned int seed) + : seed(seed) + { + always_delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + return {InitialDelay, seed}; + } +}; + +template +struct exponential_backon_jitter_window_constructor_t +{ + struct delay_t + { + static constexpr unsigned int a = 16807; + static constexpr unsigned int c = 0; + static constexpr unsigned int m = 1u << 31; + + unsigned int max_delay; + unsigned int& seed; + + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int next(unsigned int min, unsigned int max) + { + return (seed = (a * seed + c) % m) % (max + 1 - min) + min; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + int prev_delay = max_delay >> 1; + always_delay_or_prevent_hoisting(next(prev_delay, max_delay)); + max_delay = prev_delay; + } + }; + + unsigned int seed; + unsigned int max_delay = InitialDelay; + + _CCCL_DEVICE _CCCL_FORCEINLINE exponential_backon_jitter_window_constructor_t(unsigned int seed) + : seed(seed) + { + always_delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + 
max_delay >>= 1; + return {max_delay, seed}; + } +}; + +template +struct exponential_backon_jitter_constructor_t +{ + struct delay_t + { + static constexpr unsigned int a = 16807; + static constexpr unsigned int c = 0; + static constexpr unsigned int m = 1u << 31; + + unsigned int max_delay; + unsigned int& seed; + + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int next(unsigned int min, unsigned int max) + { + return (seed = (a * seed + c) % m) % (max + 1 - min) + min; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + always_delay_or_prevent_hoisting(next(0, max_delay)); + max_delay >>= 1; + } + }; + + unsigned int seed; + unsigned int max_delay = InitialDelay; + + _CCCL_DEVICE _CCCL_FORCEINLINE exponential_backon_jitter_constructor_t(unsigned int seed) + : seed(seed) + { + always_delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + max_delay >>= 1; + return {max_delay, seed}; + } +}; + +template +struct exponential_backon_constructor_t +{ + struct delay_t + { + unsigned int delay; + + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()() + { + always_delay_or_prevent_hoisting(delay); + delay >>= 1; + } + }; + + unsigned int max_delay = InitialDelay; + + _CCCL_DEVICE _CCCL_FORCEINLINE exponential_backon_constructor_t(unsigned int /* seed */) + { + always_delay(); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE delay_t operator()() + { + max_delay >>= 1; + return {max_delay}; + } +}; + +using default_no_delay_constructor_t = no_delay_constructor_t<450>; +using default_no_delay_t = default_no_delay_constructor_t::delay_t; + +template +using default_delay_constructor_t = + ::cuda::std::_If::PRIMITIVE, fixed_delay_constructor_t<350, 450>, default_no_delay_constructor_t>; + +template +using default_delay_t = typename default_delay_constructor_t::delay_t; + +template +using default_reduce_by_key_delay_constructor_t = + ::cuda::std::_If<(Traits::PRIMITIVE) && (sizeof(ValueT) + sizeof(KeyT) < 16), + reduce_by_key_delay_constructor_t<350, 450>, + 
default_delay_constructor_t>>; + +/** + * @brief Alias template for a ScanTileState specialized for a given value type, `T`, and memory order `Order`. + * + * @tparam T The ScanTileState's value type + * @tparam Order The memory order to be implemented by the ScanTileState + */ +template +struct tile_state_with_memory_order +{ + ScanTileStateT& tile_state; + using T = typename ScanTileStateT::StatusValueT; + using StatusWord = typename ScanTileStateT::StatusWord; + + /** + * Update the specified tile's inclusive value and corresponding status + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void SetInclusive(int tile_idx, T tile_inclusive) + { + tile_state.template SetInclusive(tile_idx, tile_inclusive); + } + + /** + * Update the specified tile's partial value and corresponding status + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void SetPartial(int tile_idx, T tile_partial) + { + tile_state.template SetPartial(tile_idx, tile_partial); + } + + /** + * Wait for the corresponding tile to become non-invalid + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void WaitForValid(int tile_idx, StatusWord& status, T& value, DelayT delay = {}) + { + tile_state.template WaitForValid(tile_idx, status, value, delay); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE T LoadValid(int tile_idx) + { + return tile_state.template LoadValid(tile_idx); + } +}; +} // namespace detail + +/** + * Tile status interface. + */ +template ::PRIMITIVE> +struct ScanTileState; + +/** + * Tile status interface specialized for scan status and value types + * that can be combined into one machine word that can be + * read/written coherently in a single access. 
+ */ +template +struct ScanTileState +{ + using StatusValueT = T; + + // Status word type + using StatusWord = ::cuda::std::_If< + sizeof(T) == 8, + unsigned long long, + ::cuda::std::_If>>; + + // Unit word type + using TxnWord = ::cuda::std::_If>; + + // Device word type + struct TileDescriptor + { + StatusWord status; + T value; + }; + + // Constants + enum + { + TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS, + }; + + // Device storage + TxnWord* d_tile_descriptors; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ScanTileState() + : d_tile_descriptors(nullptr) + {} + + /** + * @brief Initializer + * + * @param[in] num_tiles + * Number of tiles + * + * @param[in] d_temp_storage + * Device-accessible allocation of temporary storage. + * When nullptr, the required allocation size is written to \p temp_storage_bytes and no work is + * done. + * + * @param[in] temp_storage_bytes + * Size in bytes of \t d_temp_storage allocation + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t + Init(int /*num_tiles*/, void* d_temp_storage, size_t /*temp_storage_bytes*/) + { + d_tile_descriptors = reinterpret_cast(d_temp_storage); + return cudaSuccess; + } + + /** + * @brief Compute device memory needed for tile status + * + * @param[in] num_tiles + * Number of tiles + * + * @param[out] temp_storage_bytes + * Size in bytes of \t d_temp_storage allocation + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE static cudaError_t AllocationSize(int num_tiles, size_t& temp_storage_bytes) + { + // bytes needed for tile status descriptors + temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TxnWord); + return cudaSuccess; + } + + /** + * Initialize (from device) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeStatus(int num_tiles) + { + int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; + + TxnWord val = TxnWord(); + TileDescriptor* descriptor = reinterpret_cast(&val); + + if (tile_idx < num_tiles) + { + // Not-yet-set + descriptor->status = 
StatusWord(SCAN_TILE_INVALID); + d_tile_descriptors[TILE_STATUS_PADDING + tile_idx] = val; + } + + if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING)) + { + // Padding + descriptor->status = StatusWord(SCAN_TILE_OOB); + d_tile_descriptors[threadIdx.x] = val; + } + } + +private: + template + _CCCL_DEVICE _CCCL_FORCEINLINE typename ::cuda::std::enable_if<(Order == MemoryOrder::relaxed), void>::type + StoreStatus(TxnWord* ptr, TxnWord alias) + { + detail::store_relaxed(ptr, alias); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE typename ::cuda::std::enable_if<(Order == MemoryOrder::acquire_release), void>::type + StoreStatus(TxnWord* ptr, TxnWord alias) + { + detail::store_release(ptr, alias); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE typename ::cuda::std::enable_if<(Order == MemoryOrder::relaxed), TxnWord>::type + LoadStatus(TxnWord* ptr) + { + return detail::load_relaxed(ptr); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE typename ::cuda::std::enable_if<(Order == MemoryOrder::acquire_release), TxnWord>::type + LoadStatus(TxnWord* ptr) + { + // For pre-volta we hoist the memory barrier to outside the loop, i.e., after reading a valid state + NV_IF_TARGET(NV_PROVIDES_SM_70, (return detail::load_acquire(ptr);), (return detail::load_relaxed(ptr);)); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE typename ::cuda::std::enable_if<(Order == MemoryOrder::relaxed), void>::type + ThreadfenceForLoadAcqPreVolta() + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE typename ::cuda::std::enable_if<(Order == MemoryOrder::acquire_release), void>::type + ThreadfenceForLoadAcqPreVolta() + { + NV_IF_TARGET(NV_PROVIDES_SM_70, (), (__threadfence();)); + } + +public: + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SetInclusive(int tile_idx, T tile_inclusive) + { + TileDescriptor tile_descriptor; + tile_descriptor.status = SCAN_TILE_INCLUSIVE; + tile_descriptor.value = tile_inclusive; + + TxnWord alias; + *reinterpret_cast(&alias) = tile_descriptor; + + 
StoreStatus(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SetPartial(int tile_idx, T tile_partial) + { + TileDescriptor tile_descriptor; + tile_descriptor.status = SCAN_TILE_PARTIAL; + tile_descriptor.value = tile_partial; + + TxnWord alias; + *reinterpret_cast(&alias) = tile_descriptor; + + StoreStatus(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias); + } + + /** + * Wait for the corresponding tile to become non-invalid + */ + template , MemoryOrder Order = MemoryOrder::relaxed> + _CCCL_DEVICE _CCCL_FORCEINLINE void + WaitForValid(int tile_idx, StatusWord& status, T& value, DelayT delay_or_prevent_hoisting = {}) + { + TileDescriptor tile_descriptor; + + { + TxnWord alias = LoadStatus(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx); + tile_descriptor = reinterpret_cast(alias); + } + + while (__any_sync(0xffffffff, (tile_descriptor.status == SCAN_TILE_INVALID))) + { + delay_or_prevent_hoisting(); + TxnWord alias = LoadStatus(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx); + tile_descriptor = reinterpret_cast(alias); + } + + // For pre-Volta and load acquire we emit relaxed loads in LoadStatus and hoist the threadfence here + ThreadfenceForLoadAcqPreVolta(); + + status = tile_descriptor.status; + value = tile_descriptor.value; + } + + /** + * Loads and returns the tile's value. The returned value is undefined if either (a) the tile's status is invalid or + * (b) there is no memory fence between reading a non-invalid status and the call to LoadValid. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE T LoadValid(int tile_idx) + { + TxnWord alias = d_tile_descriptors[TILE_STATUS_PADDING + tile_idx]; + TileDescriptor tile_descriptor = reinterpret_cast(alias); + return tile_descriptor.value; + } +}; + +/** + * Tile status interface specialized for scan status and value types that + * cannot be combined into one machine word. 
+ */ +template +struct ScanTileState +{ + using StatusValueT = T; + + // Status word type + using StatusWord = unsigned int; + + // Constants + enum + { + TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS, + }; + + // Device storage + StatusWord* d_tile_status; + T* d_tile_partial; + T* d_tile_inclusive; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ScanTileState() + : d_tile_status(nullptr) + , d_tile_partial(nullptr) + , d_tile_inclusive(nullptr) + {} + + /** + * @brief Initializer + * + * @param[in] num_tiles + * Number of tiles + * + * @param[in] d_temp_storage + * Device-accessible allocation of temporary storage. + * When nullptr, the required allocation size is written to \p temp_storage_bytes and no work is + * done. + * + * @param[in] temp_storage_bytes + * Size in bytes of \t d_temp_storage allocation + */ + /// Initializer + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t Init(int num_tiles, void* d_temp_storage, size_t temp_storage_bytes) + { + cudaError_t error = cudaSuccess; + do + { + void* allocations[3] = {}; + size_t allocation_sizes[3]; + + // bytes needed for tile status descriptors + allocation_sizes[0] = (num_tiles + TILE_STATUS_PADDING) * sizeof(StatusWord); + + // bytes needed for partials + allocation_sizes[1] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized); + + // bytes needed for inclusives + allocation_sizes[2] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized); + + // Compute allocation pointers into the single storage blob + error = CubDebug(AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes)); + + if (cudaSuccess != error) + { + break; + } + + // Alias the offsets + d_tile_status = reinterpret_cast(allocations[0]); + d_tile_partial = reinterpret_cast(allocations[1]); + d_tile_inclusive = reinterpret_cast(allocations[2]); + } while (0); + + return error; + } + + /** + * @brief Compute device memory needed for tile status + * + * @param[in] num_tiles + * Number of tiles + * + * 
@param[out] temp_storage_bytes + * Size in bytes of \t d_temp_storage allocation + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE static cudaError_t AllocationSize(int num_tiles, size_t& temp_storage_bytes) + { + // Specify storage allocation requirements + size_t allocation_sizes[3]; + + // bytes needed for tile status descriptors + allocation_sizes[0] = (num_tiles + TILE_STATUS_PADDING) * sizeof(StatusWord); + + // bytes needed for partials + allocation_sizes[1] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized); + + // bytes needed for inclusives + allocation_sizes[2] = (num_tiles + TILE_STATUS_PADDING) * sizeof(Uninitialized); + + // Set the necessary size of the blob + void* allocations[3] = {}; + return CubDebug(AliasTemporaries(nullptr, temp_storage_bytes, allocations, allocation_sizes)); + } + + /** + * Initialize (from device) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeStatus(int num_tiles) + { + int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; + if (tile_idx < num_tiles) + { + // Not-yet-set + d_tile_status[TILE_STATUS_PADDING + tile_idx] = StatusWord(SCAN_TILE_INVALID); + } + + if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING)) + { + // Padding + d_tile_status[threadIdx.x] = StatusWord(SCAN_TILE_OOB); + } + } + + /** + * Update the specified tile's inclusive value and corresponding status + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SetInclusive(int tile_idx, T tile_inclusive) + { + // Update tile inclusive value + ThreadStore(d_tile_inclusive + TILE_STATUS_PADDING + tile_idx, tile_inclusive); + detail::store_release(d_tile_status + TILE_STATUS_PADDING + tile_idx, StatusWord(SCAN_TILE_INCLUSIVE)); + } + + /** + * Update the specified tile's partial value and corresponding status + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SetPartial(int tile_idx, T tile_partial) + { + // Update tile partial value + ThreadStore(d_tile_partial + TILE_STATUS_PADDING + tile_idx, tile_partial); + 
detail::store_release(d_tile_status + TILE_STATUS_PADDING + tile_idx, StatusWord(SCAN_TILE_PARTIAL)); + } + + /** + * Wait for the corresponding tile to become non-invalid + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void WaitForValid(int tile_idx, StatusWord& status, T& value, DelayT delay = {}) + { + do + { + delay(); + status = detail::load_relaxed(d_tile_status + TILE_STATUS_PADDING + tile_idx); + __threadfence(); + } while (__any_sync(0xffffffff, (status == SCAN_TILE_INVALID))); + + if (status == StatusWord(SCAN_TILE_PARTIAL)) + { + value = ThreadLoad(d_tile_partial + TILE_STATUS_PADDING + tile_idx); + } + else if (status == StatusWord(SCAN_TILE_INCLUSIVE)) + { + value = ThreadLoad(d_tile_inclusive + TILE_STATUS_PADDING + tile_idx); + } + } + + /** + * Loads and returns the tile's value. The returned value is undefined if either (a) the tile's status is invalid or + * (b) there is no memory fence between reading a non-invalid status and the call to LoadValid. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE T LoadValid(int tile_idx) + { + return d_tile_inclusive[TILE_STATUS_PADDING + tile_idx]; + } +}; + +/****************************************************************************** + * ReduceByKey tile status interface types for block-cooperative scans + ******************************************************************************/ + +/** + * Tile status interface for reduction by key. + * + */ +template ::PRIMITIVE) && (sizeof(ValueT) + sizeof(KeyT) < 16)> +struct ReduceByKeyScanTileState; + +/** + * Tile status interface for reduction by key, specialized for scan status and value types that + * cannot be combined into one machine word. 
+ */ +template +struct ReduceByKeyScanTileState : ScanTileState> +{ + using SuperClass = ScanTileState>; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ReduceByKeyScanTileState() + : SuperClass() + {} +}; + +/** + * Tile status interface for reduction by key, specialized for scan status and value types that + * can be combined into one machine word that can be read/written coherently in a single access. + */ +template +struct ReduceByKeyScanTileState +{ + using KeyValuePairT = KeyValuePair; + + // Constants + enum + { + PAIR_SIZE = static_cast(sizeof(ValueT) + sizeof(KeyT)), + TXN_WORD_SIZE = 1 << Log2::VALUE, + STATUS_WORD_SIZE = TXN_WORD_SIZE - PAIR_SIZE, + + TILE_STATUS_PADDING = CUB_PTX_WARP_THREADS, + }; + + // Status word type + using StatusWord = ::cuda::std::_If< + STATUS_WORD_SIZE == 8, + unsigned long long, + ::cuda::std:: + _If>>; + + // Status word type + using TxnWord = ::cuda::std:: + _If>; + + // Device word type (for when sizeof(ValueT) == sizeof(KeyT)) + struct TileDescriptorBigStatus + { + KeyT key; + ValueT value; + StatusWord status; + }; + + // Device word type (for when sizeof(ValueT) != sizeof(KeyT)) + struct TileDescriptorLittleStatus + { + ValueT value; + StatusWord status; + KeyT key; + }; + + // Device word type + using TileDescriptor = + ::cuda::std::_If; + + // Device storage + TxnWord* d_tile_descriptors; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ReduceByKeyScanTileState() + : d_tile_descriptors(nullptr) + {} + + /** + * @brief Initializer + * + * @param[in] num_tiles + * Number of tiles + * + * @param[in] d_temp_storage + * Device-accessible allocation of temporary storage. When nullptr, the required allocation size + * is written to \p temp_storage_bytes and no work is done. 
+ * + * @param[in] temp_storage_bytes + * Size in bytes of \t d_temp_storage allocation + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE cudaError_t + Init(int /*num_tiles*/, void* d_temp_storage, size_t /*temp_storage_bytes*/) + { + d_tile_descriptors = reinterpret_cast(d_temp_storage); + return cudaSuccess; + } + + /** + * @brief Compute device memory needed for tile status + * + * @param[in] num_tiles + * Number of tiles + * + * @param[out] temp_storage_bytes + * Size in bytes of \t d_temp_storage allocation + */ + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE static cudaError_t AllocationSize(int num_tiles, size_t& temp_storage_bytes) + { + // bytes needed for tile status descriptors + temp_storage_bytes = (num_tiles + TILE_STATUS_PADDING) * sizeof(TxnWord); + return cudaSuccess; + } + + /** + * Initialize (from device) + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void InitializeStatus(int num_tiles) + { + int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; + TxnWord val = TxnWord(); + TileDescriptor* descriptor = reinterpret_cast(&val); + + if (tile_idx < num_tiles) + { + // Not-yet-set + descriptor->status = StatusWord(SCAN_TILE_INVALID); + d_tile_descriptors[TILE_STATUS_PADDING + tile_idx] = val; + } + + if ((blockIdx.x == 0) && (threadIdx.x < TILE_STATUS_PADDING)) + { + // Padding + descriptor->status = StatusWord(SCAN_TILE_OOB); + d_tile_descriptors[threadIdx.x] = val; + } + } + + /** + * Update the specified tile's inclusive value and corresponding status + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void SetInclusive(int tile_idx, KeyValuePairT tile_inclusive) + { + TileDescriptor tile_descriptor; + tile_descriptor.status = SCAN_TILE_INCLUSIVE; + tile_descriptor.value = tile_inclusive.value; + tile_descriptor.key = tile_inclusive.key; + + TxnWord alias; + *reinterpret_cast(&alias) = tile_descriptor; + + detail::store_relaxed(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void SetPartial(int tile_idx, KeyValuePairT tile_partial) 
+ { + TileDescriptor tile_descriptor; + tile_descriptor.status = SCAN_TILE_PARTIAL; + tile_descriptor.value = tile_partial.value; + tile_descriptor.key = tile_partial.key; + + TxnWord alias; + *reinterpret_cast(&alias) = tile_descriptor; + + detail::store_relaxed(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx, alias); + } + + /** + * Wait for the corresponding tile to become non-invalid + */ + template ::delay_t> + _CCCL_DEVICE _CCCL_FORCEINLINE void + WaitForValid(int tile_idx, StatusWord& status, KeyValuePairT& value, DelayT delay_or_prevent_hoisting = {}) + { + // TxnWord alias = ThreadLoad(d_tile_descriptors + TILE_STATUS_PADDING + + // tile_idx); TileDescriptor tile_descriptor = reinterpret_cast(alias); + // + // while (tile_descriptor.status == SCAN_TILE_INVALID) + // { + // __threadfence_block(); // prevent hoisting loads from loop + // + // alias = ThreadLoad(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx); + // tile_descriptor = reinterpret_cast(alias); + // } + // + // status = tile_descriptor.status; + // value.value = tile_descriptor.value; + // value.key = tile_descriptor.key; + + TileDescriptor tile_descriptor; + + do + { + delay_or_prevent_hoisting(); + TxnWord alias = detail::load_relaxed(d_tile_descriptors + TILE_STATUS_PADDING + tile_idx); + tile_descriptor = reinterpret_cast(alias); + + } while (__any_sync(0xffffffff, (tile_descriptor.status == SCAN_TILE_INVALID))); + + status = tile_descriptor.status; + value.value = tile_descriptor.value; + value.key = tile_descriptor.key; + } +}; + +/****************************************************************************** + * Prefix call-back operator for coupling local block scan within a + * block-cooperative scan + ******************************************************************************/ + +/** + * Stateful block-scan prefix functor. 
Provides the running prefix for
 * the current tile by using the call-back warp to wait on
 * aggregates/prefixes from predecessor tiles to become available.
 *
 * @tparam DelayConstructorT
 *   Implementation detail, do not specify directly, requirements on the
 *   content of this type are subject to breaking change.
 */
template <typename T,
          typename ScanOpT,
          typename ScanTileStateT,
          int LEGACY_PTX_ARCH        = 0,
          typename DelayConstructorT = detail::default_delay_constructor_t<T>>
struct TilePrefixCallbackOp
{
  // Parameterized warp reduce
  using WarpReduceT = WarpReduce<T, CUB_PTX_WARP_THREADS>;

  // Temporary storage type
  struct _TempStorage
  {
    typename WarpReduceT::TempStorage warp_reduce;
    T exclusive_prefix;
    T inclusive_prefix;
    T block_aggregate;
  };

  // Alias wrapper allowing temporary storage to be unioned
  struct TempStorage : Uninitialized<_TempStorage>
  {};

  // Type of status word
  using StatusWord = typename ScanTileStateT::StatusWord;

  // Fields
  _TempStorage& temp_storage; ///< Reference to a warp-reduction instance
  ScanTileStateT& tile_status; ///< Interface to tile status
  ScanOpT scan_op; ///< Binary scan operator
  int tile_idx; ///< The current tile index
  T exclusive_prefix; ///< Exclusive prefix for the tile
  T inclusive_prefix; ///< Inclusive prefix for the tile

  // Constructs prefix functor for a given tile index.
  // Precondition: thread blocks processing all of the predecessor tiles were scheduled.
  _CCCL_DEVICE _CCCL_FORCEINLINE
  TilePrefixCallbackOp(ScanTileStateT& tile_status, TempStorage& temp_storage, ScanOpT scan_op, int tile_idx)
      : temp_storage(temp_storage.Alias())
      , tile_status(tile_status)
      , scan_op(scan_op)
      , tile_idx(tile_idx)
  {}

  // Computes the tile index and constructs prefix functor with it.
  // Precondition: thread block per tile assignment.
  _CCCL_DEVICE _CCCL_FORCEINLINE
  TilePrefixCallbackOp(ScanTileStateT& tile_status, TempStorage& temp_storage, ScanOpT scan_op)
      : TilePrefixCallbackOp(tile_status, temp_storage, scan_op, blockIdx.x)
  {}

  /**
   * @brief Block until all predecessors within the warp-wide window have non-invalid status
   *
   * @param predecessor_idx
   *   Preceding tile index to inspect
   *
   * @param[out] predecessor_status
   *   Preceding tile status
   *
   * @param[out] window_aggregate
   *   Relevant partial reduction from this window of preceding tiles
   */
  template <class DelayT = typename DelayConstructorT::delay_t>
  _CCCL_DEVICE _CCCL_FORCEINLINE void
  ProcessWindow(int predecessor_idx, StatusWord& predecessor_status, T& window_aggregate, DelayT delay = {})
  {
    T value;
    tile_status.WaitForValid(predecessor_idx, predecessor_status, value, delay);

    // Perform a segmented reduction to get the prefix for the current window.
    // Use the swizzled scan operator because we are now scanning *down* towards thread0.
    int tail_flag = (predecessor_status == StatusWord(SCAN_TILE_INCLUSIVE));
    window_aggregate =
      WarpReduceT(temp_storage.warp_reduce).TailSegmentedReduce(value, tail_flag, SwizzleScanOp<ScanOpT>(scan_op));
  }

  // BlockScan prefix callback functor (called by the first warp)
  _CCCL_DEVICE _CCCL_FORCEINLINE T operator()(T block_aggregate)
  {
    // Update our status with our tile-aggregate
    if (threadIdx.x == 0)
    {
      detail::uninitialized_copy_single(&temp_storage.block_aggregate, block_aggregate);

      tile_status.SetPartial(tile_idx, block_aggregate);
    }

    int predecessor_idx = tile_idx - threadIdx.x - 1;
    StatusWord predecessor_status;
    T window_aggregate;

    // Wait for the warp-wide window of predecessor tiles to become valid
    DelayConstructorT construct_delay(tile_idx);
    ProcessWindow(predecessor_idx, predecessor_status, window_aggregate, construct_delay());

    // The exclusive tile prefix starts out as the current window aggregate
    exclusive_prefix = window_aggregate;

    // Keep sliding the window
back until we come across a tile whose inclusive prefix is known + while (__all_sync(0xffffffff, (predecessor_status != StatusWord(SCAN_TILE_INCLUSIVE)))) + { + predecessor_idx -= CUB_PTX_WARP_THREADS; + + // Update exclusive tile prefix with the window prefix + ProcessWindow(predecessor_idx, predecessor_status, window_aggregate, construct_delay()); + exclusive_prefix = scan_op(window_aggregate, exclusive_prefix); + } + + // Compute the inclusive tile prefix and update the status for this tile + if (threadIdx.x == 0) + { + inclusive_prefix = scan_op(exclusive_prefix, block_aggregate); + tile_status.SetInclusive(tile_idx, inclusive_prefix); + + detail::uninitialized_copy_single(&temp_storage.exclusive_prefix, exclusive_prefix); + + detail::uninitialized_copy_single(&temp_storage.inclusive_prefix, inclusive_prefix); + } + + // Return exclusive_prefix + return exclusive_prefix; + } + + // Get the exclusive prefix stored in temporary storage + _CCCL_DEVICE _CCCL_FORCEINLINE T GetExclusivePrefix() + { + return temp_storage.exclusive_prefix; + } + + // Get the inclusive prefix stored in temporary storage + _CCCL_DEVICE _CCCL_FORCEINLINE T GetInclusivePrefix() + { + return temp_storage.inclusive_prefix; + } + + // Get the block aggregate stored in temporary storage + _CCCL_DEVICE _CCCL_FORCEINLINE T GetBlockAggregate() + { + return temp_storage.block_aggregate; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE int GetTileIdx() const + { + return tile_idx; + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_adjacent_difference.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_adjacent_difference.cuh new file mode 100644 index 0000000000000000000000000000000000000000..38636571e807a58c1992b9bff488b6d3e3fce3fe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_adjacent_difference.cuh @@ -0,0 +1,965 @@ 
+/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! The cub::BlockAdjacentDifference class provides collective methods for computing the differences of adjacent +//! elements partitioned across a CUDA thread block. 
+ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! BlockAdjacentDifference provides :ref:`collective ` methods for computing the +//! differences of adjacent elements partitioned across a CUDA thread block. +//! +//! Overview +//! ++++++++++++++++ +//! +//! BlockAdjacentDifference calculates the differences of adjacent elements in the elements partitioned across a CUDA +//! thread block. Because the binary operation could be noncommutative, there are two sets of methods. +//! Methods named SubtractLeft subtract left element ``i - 1`` of input sequence from current element ``i``. +//! Methods named SubtractRight subtract the right element ``i + 1`` from the current one ``i``: +//! +//! .. code-block:: c++ +//! +//! int values[4]; // [1, 2, 3, 4] +//! //... +//! int subtract_left_result[4]; <-- [ 1, 1, 1, 1 ] +//! int subtract_right_result[4]; <-- [ -1, -1, -1, 4 ] +//! +//! - For SubtractLeft, if the left element is out of bounds, the input value is assigned to ``output[0]`` +//! without modification. +//! - For SubtractRight, if the right element is out of bounds, the input value is assigned to the current output value +//! without modification. +//! - The block/example_block_reduce_dyn_smem.cu example under the examples/block folder illustrates usage of +//! dynamically shared memory with BlockReduce and how to re-purpose the same memory region. +//! This example can be easily adapted to the storage required by BlockAdjacentDifference. +//! +//! A Simple Example +//! ++++++++++++++++ +//! +//! The code snippet below illustrates how to use BlockAdjacentDifference to +//! compute the left difference between adjacent elements. +//! +//! .. code-block:: c++ +//! +//! 
#include +//! // or equivalently +//! +//! struct CustomDifference +//! { +//! template +//! __host__ DataType operator()(DataType &lhs, DataType &rhs) +//! { +//! return lhs - rhs; +//! } +//! }; +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize BlockAdjacentDifference for a 1D block of +//! // 128 threads of type int +//! using BlockAdjacentDifferenceT = +//! cub::BlockAdjacentDifference; +//! +//! // Allocate shared memory for BlockAdjacentDifference +//! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_data[4]; +//! ... +//! +//! // Collectively compute adjacent_difference +//! int result[4]; +//! +//! BlockAdjacentDifferenceT(temp_storage).SubtractLeft( +//! thread_data, +//! result, +//! CustomDifference()); +//! +//! Suppose the set of input `thread_data` across the block of threads is +//! ``{ [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4], ... }``. +//! The corresponding output ``result`` in those threads will be +//! ``{ [4,-2,-1,0], [0,0,0,0], [1,1,0,0], [0,1,-3,3], ... }``. +//! +//! 
@endrst +template +class BlockAdjacentDifference +{ +private: + /// The thread block size in threads + static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; + + /// Shared memory storage layout type (last element from each thread's input) + struct _TempStorage + { + T first_items[BLOCK_THREADS]; + T last_items[BLOCK_THREADS]; + }; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /// Specialization for when FlagOp has third index param + template ::value> + struct ApplyOp + { + // Apply flag operator + static _CCCL_DEVICE _CCCL_FORCEINLINE T FlagT(FlagOp flag_op, const T& a, const T& b, int idx) + { + return flag_op(b, a, idx); + } + }; + + /// Specialization for when FlagOp does not have a third index param + template + struct ApplyOp + { + // Apply flag operator + static _CCCL_DEVICE _CCCL_FORCEINLINE T FlagT(FlagOp flag_op, const T& a, const T& b, int /*idx*/) + { + return flag_op(b, a); + } + }; + + /// Templated unrolling of item comparison (inductive case) + struct Iterate + { + /** + * Head flags + * + * @param[out] flags Calling thread's discontinuity head_flags + * @param[in] input Calling thread's input items + * @param[out] preds Calling thread's predecessor items + * @param[in] flag_op Binary boolean flag predicate + */ + template + static _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeads( + int linear_tid, + FlagT (&flags)[ITEMS_PER_THREAD], + T (&input)[ITEMS_PER_THREAD], + T (&preds)[ITEMS_PER_THREAD], + FlagOp flag_op) + { +#pragma unroll + for (int i = 1; i < ITEMS_PER_THREAD; ++i) + { + preds[i] = input[i - 1]; + flags[i] = ApplyOp::FlagT(flag_op, preds[i], input[i], (linear_tid * ITEMS_PER_THREAD) + i); + } + } + + /** + * Tail flags + * + * @param[out] flags Calling thread's discontinuity head_flags + * @param[in] input Calling thread's input items + * @param[in] flag_op Binary boolean flag 
predicate + */ + template + static _CCCL_DEVICE _CCCL_FORCEINLINE void + FlagTails(int linear_tid, FlagT (&flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], FlagOp flag_op) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD - 1; ++i) + { + flags[i] = ApplyOp::FlagT(flag_op, input[i], input[i + 1], (linear_tid * ITEMS_PER_THREAD) + i + 1); + } + } + }; + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + +public: + /// @smemstorage{BlockAdjacentDifference} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage + _CCCL_DEVICE _CCCL_FORCEINLINE BlockAdjacentDifference() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @brief Collective constructor using the specified memory allocation as temporary storage + //! @param[in] temp_storage Reference to memory allocation having layout type TempStorage + _CCCL_DEVICE _CCCL_FORCEINLINE BlockAdjacentDifference(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Read left operations + //! @{ + + //! @rst + //! Subtracts the left element of each adjacent pair of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates how to use BlockAdjacentDifference to compute the left difference between + //! adjacent elements. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! 
__global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block + //! // of 128 threads of type int + //! using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute adjacent_difference + //! BlockAdjacentDifferenceT(temp_storage).SubtractLeft( + //! thread_data, + //! thread_data, + //! CustomDifference()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4], ... }``. + //! The corresponding output ``result`` in those threads will be + //! ``{ [4,-2,-1,0], [0,0,0,0], [1,1,0,0], [0,1,-3,3], ... }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + SubtractLeft(T (&input)[ITEMS_PER_THREAD], OutputType (&output)[ITEMS_PER_THREAD], DifferenceOpT difference_op) + { + // Share last item + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + +#pragma unroll + for (int item = ITEMS_PER_THREAD - 1; item > 0; item--) + { + output[item] = difference_op(input[item], input[item - 1]); + } + + if (linear_tid == 0) + { + output[0] = input[0]; + } + else + { + output[0] = difference_op(input[0], temp_storage.last_items[linear_tid - 1]); + } + } + + //! @rst + //! Subtracts the left element of each adjacent pair of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! 
The code snippet below illustrates how to use BlockAdjacentDifference to compute the left difference between + //! adjacent elements. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block of + //! // 128 threads of type int + //! using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // The last item in the previous tile: + //! int tile_predecessor_item = ...; + //! + //! // Collectively compute adjacent_difference + //! BlockAdjacentDifferenceT(temp_storage).SubtractLeft( + //! thread_data, + //! thread_data, + //! CustomDifference(), + //! tile_predecessor_item); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4], ... }``. + //! and that `tile_predecessor_item` is `3`. The corresponding output + //! ``result`` in those threads will be + //! ``{ [1,-2,-1,0], [0,0,0,0], [1,1,0,0], [0,1,-3,3], ... }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + //! + //! @param[in] tile_predecessor_item + //! @rst + //! *thread*\ :sub:`0` only item which is going to be subtracted from the first tile item + //! (*input*\ :sub:`0` from *thread*\ :sub:`0`). + //! 
@endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SubtractLeft( + T (&input)[ITEMS_PER_THREAD], + OutputT (&output)[ITEMS_PER_THREAD], + DifferenceOpT difference_op, + T tile_predecessor_item) + { + // Share last item + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + +#pragma unroll + for (int item = ITEMS_PER_THREAD - 1; item > 0; item--) + { + output[item] = difference_op(input[item], input[item - 1]); + } + + // Set flag for first thread-item + if (linear_tid == 0) + { + output[0] = difference_op(input[0], tile_predecessor_item); + } + else + { + output[0] = difference_op(input[0], temp_storage.last_items[linear_tid - 1]); + } + } + + //! @rst + //! Subtracts the left element of each adjacent pair of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates how to use BlockAdjacentDifference to compute the left difference between + //! adjacent elements. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block of + //! // 128 threads of type int + //! using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! int valid_items = 9; + //! + //! // Collectively compute adjacent_difference + //! BlockAdjacentDifferenceT(temp_storage).SubtractLeftPartialTile( + //! thread_data, + //! thread_data, + //! CustomDifference(), + //! 
valid_items); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4], ... }``. + //! The corresponding output ``result`` in those threads will be + //! ``{ [4,-2,-1,0], [0,0,0,0], [1,3,3,3], [3,4,1,4], ... }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + //! + //! @param[in] valid_items + //! Number of valid items in thread block + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SubtractLeftPartialTile( + T (&input)[ITEMS_PER_THREAD], OutputType (&output)[ITEMS_PER_THREAD], DifferenceOpT difference_op, int valid_items) + { + // Share last item + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + if ((linear_tid + 1) * ITEMS_PER_THREAD <= valid_items) + { +#pragma unroll + for (int item = ITEMS_PER_THREAD - 1; item > 0; item--) + { + output[item] = difference_op(input[item], input[item - 1]); + } + } + else + { +#pragma unroll + for (int item = ITEMS_PER_THREAD - 1; item > 0; item--) + { + const int idx = linear_tid * ITEMS_PER_THREAD + item; + + if (idx < valid_items) + { + output[item] = difference_op(input[item], input[item - 1]); + } + else + { + output[item] = input[item]; + } + } + } + + if (linear_tid == 0 || valid_items <= linear_tid * ITEMS_PER_THREAD) + { + output[0] = input[0]; + } + else + { + output[0] = difference_op(input[0], temp_storage.last_items[linear_tid - 1]); + } + } + + //! @rst + //! Subtracts the left element of each adjacent pair of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates how to use BlockAdjacentDifference to compute the left difference between + //! adjacent elements. 
+ //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block of + //! // 128 threads of type int + //! using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! int valid_items = 9; + //! int tile_predecessor_item = 4; + //! + //! // Collectively compute adjacent_difference + //! BlockAdjacentDifferenceT(temp_storage).SubtractLeftPartialTile( + //! thread_data, + //! thread_data, + //! CustomDifference(), + //! valid_items, + //! tile_predecessor_item); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4], ... }``. + //! The corresponding output ``result`` in those threads will be + //! ``{ [0,-2,-1,0], [0,0,0,0], [1,3,3,3], [3,4,1,4], ... }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + //! + //! @param[in] valid_items + //! Number of valid items in thread block + //! + //! @param[in] tile_predecessor_item + //! @rst + //! *thread*\ :sub:`0` only item which is going to be subtracted from the first tile item + //! (*input*\ :sub:`0` from *thread*\ :sub:`0`). + //! 
@endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SubtractLeftPartialTile( + T (&input)[ITEMS_PER_THREAD], + OutputType (&output)[ITEMS_PER_THREAD], + DifferenceOpT difference_op, + int valid_items, + T tile_predecessor_item) + { + // Share last item + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + if ((linear_tid + 1) * ITEMS_PER_THREAD <= valid_items) + { +#pragma unroll + for (int item = ITEMS_PER_THREAD - 1; item > 0; item--) + { + output[item] = difference_op(input[item], input[item - 1]); + } + } + else + { +#pragma unroll + for (int item = ITEMS_PER_THREAD - 1; item > 0; item--) + { + const int idx = linear_tid * ITEMS_PER_THREAD + item; + + if (idx < valid_items) + { + output[item] = difference_op(input[item], input[item - 1]); + } + else + { + output[item] = input[item]; + } + } + } + + if (valid_items <= linear_tid * ITEMS_PER_THREAD) + { + output[0] = input[0]; + } + else if (linear_tid == 0) + { + output[0] = difference_op(input[0], tile_predecessor_item); + } + else + { + output[0] = difference_op(input[0], temp_storage.last_items[linear_tid - 1]); + } + } + + //! @} end member group + //! @name Read right operations + //! @{ + //! + //! @rst + //! + //! Subtracts the right element of each adjacent pair of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates how to use BlockAdjacentDifference to compute the right difference between + //! adjacent elements. + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block of + //! // 128 threads of type int + //! 
using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute adjacent_difference + //! BlockAdjacentDifferenceT(temp_storage).SubtractRight( + //! thread_data, + //! thread_data, + //! CustomDifference()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ ...3], [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4] }``. + //! The corresponding output ``result`` in those threads will be + //! ``{ ...-1, [2,1,0,0], [0,0,0,-1], [-1,0,0,0], [-1,3,-3,4] }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + SubtractRight(T (&input)[ITEMS_PER_THREAD], OutputT (&output)[ITEMS_PER_THREAD], DifferenceOpT difference_op) + { + // Share first item + temp_storage.first_items[linear_tid] = input[0]; + + __syncthreads(); + +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD - 1; item++) + { + output[item] = difference_op(input[item], input[item + 1]); + } + + if (linear_tid == BLOCK_THREADS - 1) + { + output[ITEMS_PER_THREAD - 1] = input[ITEMS_PER_THREAD - 1]; + } + else + { + output[ITEMS_PER_THREAD - 1] = + difference_op(input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1]); + } + } + + //! @rst + //! Subtracts the right element of each adjacent pair of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! 
The code snippet below illustrates how to use BlockAdjacentDifference to compute the right difference between + //! adjacent elements. + //! + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block of + //! // 128 threads of type int + //! using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // The first item in the next tile: + //! int tile_successor_item = ...; + //! + //! // Collectively compute adjacent_difference + //! BlockAdjacentDifferenceT(temp_storage).SubtractRight( + //! thread_data, + //! thread_data, + //! CustomDifference(), + //! tile_successor_item); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ ...3], [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4] }``, + //! and that ``tile_successor_item`` is ``3``. The corresponding output ``result`` + //! in those threads will be + //! ``{ ...-1, [2,1,0,0], [0,0,0,-1], [-1,0,0,0], [-1,3,-3,1] }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + //! + //! @param[in] tile_successor_item + //! @rst + //! *thread*\ :sub:`BLOCK_THREADS` only item which is going to be subtracted from the last tile item + //! 
(*input*\ :sub:`ITEMS_PER_THREAD` from *thread*\ :sub:`BLOCK_THREADS`). + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SubtractRight( + T (&input)[ITEMS_PER_THREAD], + OutputT (&output)[ITEMS_PER_THREAD], + DifferenceOpT difference_op, + T tile_successor_item) + { + // Share first item + temp_storage.first_items[linear_tid] = input[0]; + + __syncthreads(); + + // Set flag for last thread-item + T successor_item = (linear_tid == BLOCK_THREADS - 1) + ? tile_successor_item // Last thread + : temp_storage.first_items[linear_tid + 1]; + +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD - 1; item++) + { + output[item] = difference_op(input[item], input[item + 1]); + } + + output[ITEMS_PER_THREAD - 1] = difference_op(input[ITEMS_PER_THREAD - 1], successor_item); + } + + //! @rst + //! Subtracts the right element of each adjacent pair in range of elements partitioned across a CUDA thread block. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates how to use BlockAdjacentDifference to compute the right difference between + //! adjacent elements. + //! + //! + //! .. code-block:: c++ + //! + //! #include + //! // or equivalently + //! + //! struct CustomDifference + //! { + //! template + //! __host__ DataType operator()(DataType &lhs, DataType &rhs) + //! { + //! return lhs - rhs; + //! } + //! }; + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockAdjacentDifference for a 1D block of + //! // 128 threads of type int + //! using BlockAdjacentDifferenceT = + //! cub::BlockAdjacentDifference; + //! + //! // Allocate shared memory for BlockAdjacentDifference + //! __shared__ typename BlockAdjacentDifferenceT::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute adjacent_difference + //! 
BlockAdjacentDifferenceT(temp_storage).SubtractRightPartialTile( + //! thread_data, + //! thread_data, + //! CustomDifference(), + //! valid_items); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ ...3], [4,2,1,1], [1,1,1,1], [2,3,3,3], [3,4,1,4] }``. + //! and that ``valid_items`` is ``507``. The corresponding output ``result`` in + //! those threads will be + //! ``{ ...-1, [2,1,0,0], [0,0,0,-1], [-1,0,3,3], [3,4,1,4] }``. + //! @endrst + //! + //! @param[out] output + //! Calling thread's adjacent difference result + //! + //! @param[in] input + //! Calling thread's input items (may be aliased to `output`) + //! + //! @param[in] difference_op + //! Binary difference operator + //! + //! @param[in] valid_items + //! Number of valid items in thread block + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SubtractRightPartialTile( + T (&input)[ITEMS_PER_THREAD], OutputT (&output)[ITEMS_PER_THREAD], DifferenceOpT difference_op, int valid_items) + { + // Share first item + temp_storage.first_items[linear_tid] = input[0]; + + __syncthreads(); + + if ((linear_tid + 1) * ITEMS_PER_THREAD < valid_items) + { +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD - 1; item++) + { + output[item] = difference_op(input[item], input[item + 1]); + } + + output[ITEMS_PER_THREAD - 1] = + difference_op(input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1]); + } + else + { +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; item++) + { + const int idx = linear_tid * ITEMS_PER_THREAD + item; + + // Right element of input[valid_items - 1] is out of bounds. + // According to the API it's copied into output array + // without modification. 
+ if (idx < valid_items - 1) + { + output[item] = difference_op(input[item], input[item + 1]); + } + else + { + output[item] = input[item]; + } + } + } + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_discontinuity.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_discontinuity.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e4998f32510c4f8a36eb4ee7b648db48e6e559b7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_discontinuity.cuh @@ -0,0 +1,1219 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * The cub::BlockDiscontinuity class provides [collective](../index.html#sec0) methods for + * flagging discontinuities within an ordered set of items partitioned across a CUDA thread block. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The BlockDiscontinuity class provides :ref:`collective ` methods for +//! flagging discontinuities within an ordered set of items partitioned across a CUDA thread +//! block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - A set of "head flags" (or "tail flags") is often used to indicate corresponding items +//! that differ from their predecessors (or successors). For example, head flags are convenient +//! for demarcating disjoint data segments as part of a segmented scan or reduction. +//! - @blocked +//! +//! Performance Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - @granularity +//! - Incurs zero bank conflicts for most types +//! +//! A Simple Example +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! 
@blockcollective{BlockDiscontinuity} +//! +//! The code snippet below illustrates the head flagging of 512 integer items that +//! are partitioned in a :ref:`blocked arrangement ` across 128 threads +//! where each thread owns 4 consecutive items. +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int +//! using BlockDiscontinuity = cub::BlockDiscontinuity; +//! +//! // Allocate shared memory for BlockDiscontinuity +//! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_data[4]; +//! ... +//! +//! // Collectively compute head flags for discontinuities in the segment +//! int head_flags[4]; +//! BlockDiscontinuity(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality()); +//! +//! Suppose the set of input ``thread_data`` across the block of threads is +//! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }``. +//! The corresponding output ``head_flags`` in those threads will be +//! ``{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``. +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``examples/block/example_block_reduce_dyn_smem.cu`` example illustrates usage of +//! dynamically shared memory with BlockReduce and how to re-purpose the same memory region. +//! This example can be easily adapted to the storage required by BlockDiscontinuity. +//! @endrst +//! +//! @tparam T +//! The data type to be flagged. +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! 
**[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused +template +class BlockDiscontinuity +{ +private: + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + /// Shared memory storage layout type (last element from each thread's input) + struct _TempStorage + { + T first_items[BLOCK_THREADS]; + T last_items[BLOCK_THREADS]; + }; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /// Specialization for when FlagOp has third index param + template ::value> + struct ApplyOp + { + // Apply flag operator + static _CCCL_DEVICE _CCCL_FORCEINLINE bool FlagT(FlagOp flag_op, const T& a, const T& b, int idx) + { + return flag_op(a, b, idx); + } + }; + + /// Specialization for when FlagOp does not have a third index param + template + struct ApplyOp + { + // Apply flag operator + static _CCCL_DEVICE _CCCL_FORCEINLINE bool FlagT(FlagOp flag_op, const T& a, const T& b, int /*idx*/) + { + return flag_op(a, b); + } + }; + + /// Templated unrolling of item comparison (inductive case) + struct Iterate + { + /** + * @brief Head flags + * + * @param[out] flags + * Calling thread's discontinuity head_flags + * + * @param[in] input + * Calling thread's input items + * + * @param[out] preds + * Calling thread's predecessor items + * + * @param[in] flag_op + * Binary boolean flag predicate + */ + template + static _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeads( + int linear_tid, + FlagT (&flags)[ITEMS_PER_THREAD], + T (&input)[ITEMS_PER_THREAD], + T (&preds)[ITEMS_PER_THREAD], + FlagOp flag_op) + { +#pragma unroll + for (int i = 1; i < ITEMS_PER_THREAD; ++i) + { + preds[i] = input[i - 1]; + flags[i] = ApplyOp::FlagT(flag_op, preds[i], input[i], (linear_tid * ITEMS_PER_THREAD) + i); + } + } + + /** + * @brief 
Tail flags + * + * @param[out] flags + * Calling thread's discontinuity head_flags + * + * @param[in] input + * Calling thread's input items + * + * @param[in] flag_op + * Binary boolean flag predicate + */ + template + static _CCCL_DEVICE _CCCL_FORCEINLINE void + FlagTails(int linear_tid, FlagT (&flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], FlagOp flag_op) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD - 1; ++i) + { + flags[i] = ApplyOp::FlagT(flag_op, input[i], input[i + 1], (linear_tid * ITEMS_PER_THREAD) + i + 1); + } + } + }; + + /****************************************************************************** + * Thread fields + ******************************************************************************/ + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + +public: + /// @smemstorage{BlockDiscontinuity} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + /** + * @brief Collective constructor using a private static allocation of shared memory as temporary + * storage. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockDiscontinuity() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. + * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockDiscontinuity(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Head flag operations + //! 
@{ + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + + /** + * @param[out] head_flags + * Calling thread's discontinuity head_flags + * + * @param[in] input + * Calling thread's input items + * + * @param[out] preds + * Calling thread's predecessor items + * + * @param[in] flag_op + * Binary boolean flag predicate + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeads( + FlagT (&head_flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], T (&preds)[ITEMS_PER_THREAD], FlagOp flag_op) + { + // Share last item + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + if (linear_tid == 0) + { + // Set flag for first thread-item (preds[0] is undefined) + head_flags[0] = 1; + } + else + { + preds[0] = temp_storage.last_items[linear_tid - 1]; + head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); + } + + // Set head_flags for remaining items + Iterate::FlagHeads(linear_tid, head_flags, input, preds, flag_op); + } + + /** + * @param[out] head_flags + * Calling thread's discontinuity head_flags + * + * @param[in] input + * Calling thread's input items + * + * @param[out] preds + * Calling thread's predecessor items + * + * @param[in] flag_op + * Binary boolean flag predicate + * + * @param[in] tile_predecessor_item + * [thread0 only] Item with which to compare the first tile item + * (input0 from thread0). + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeads( + FlagT (&head_flags)[ITEMS_PER_THREAD], + T (&input)[ITEMS_PER_THREAD], + T (&preds)[ITEMS_PER_THREAD], + FlagOp flag_op, + T tile_predecessor_item) + { + // Share last item + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + // Set flag for first thread-item + preds[0] = (linear_tid == 0) ? 
tile_predecessor_item : // First thread + temp_storage.last_items[linear_tid - 1]; + + head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); + + // Set head_flags for remaining items + Iterate::FlagHeads(linear_tid, head_flags, input, preds, flag_op); + } + +#endif // _CCCL_DOXYGEN_INVOKED + + //! @rst + //! Sets head flags indicating discontinuities between items partitioned across the thread + //! block, for which the first item has no reference and is always flagged. + //! + //! - The flag ``head_flags[i]`` is set for item ``input[i]`` when ``flag_op(previous-item, input[i])`` returns + //! ``true`` (where ``previous-item`` is either the preceding item in the same thread or the last item in + //! the previous thread). + //! - For *thread*\ :sub:`0`, item ``input[0]`` is always flagged. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the head-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute head flags for discontinuities in the segment + //! int head_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! 
``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }``. + //! The corresponding output ``head_flags`` in those threads will be + //! ``{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. + //! `b_index` is the rank of b in the aggregate tile of data. + //! + //! @param[out] head_flags + //! Calling thread's discontinuity head_flags + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! Binary boolean flag predicate + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + FlagHeads(FlagT (&head_flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], FlagOp flag_op) + { + T preds[ITEMS_PER_THREAD]; + FlagHeads(head_flags, input, preds, flag_op); + } + + //! @rst + //! Sets head flags indicating discontinuities between items partitioned across the thread block. + //! + //! - The flag ``head_flags[i]`` is set for item ``input[i]`` when ``flag_op(previous-item, input[i])`` + //! returns ``true`` (where ``previous-item`` is either the preceding item in the same thread or the last item + //! in the previous thread). + //! - For *thread*\ :sub:`0`, item ``input[0]`` is compared against ``tile_predecessor_item``. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the head-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. 
+ //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Have thread0 obtain the predecessor item for the entire tile + //! int tile_predecessor_item; + //! if (threadIdx.x == 0) tile_predecessor_item == ... + //! + //! // Collectively compute head flags for discontinuities in the segment + //! int head_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagHeads( + //! head_flags, thread_data, cub::Inequality(), tile_predecessor_item); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }``, + //! and that ``tile_predecessor_item`` is ``0``. The corresponding output ``head_flags`` in those + //! threads will be ``{ [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, + //! and returning `true` if a discontinuity exists between `a` and `b`, + //! otherwise `false`. `b_index` is the rank of b in the aggregate tile of data. + //! + //! @param[out] head_flags + //! Calling thread's discontinuity `head_flags` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! 
@param[in] flag_op + //! Binary boolean flag predicate + //! + //! @param[in] tile_predecessor_item + //! @rst + //! *thread*\ :sub:`0` only item with which to compare the first tile item (``input[0]`` from *thread*\ :sub:`0`). + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeads( + FlagT (&head_flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], FlagOp flag_op, T tile_predecessor_item) + { + T preds[ITEMS_PER_THREAD]; + FlagHeads(head_flags, input, preds, flag_op, tile_predecessor_item); + } + + //! @} end member group + //! @name Tail flag operations + //! @{ + + //! @rst + //! Sets tail flags indicating discontinuities between items partitioned across the thread + //! block, for which the last item has no reference and is always flagged. + //! + //! - The flag ``tail_flags[i]`` is set for item ``input[i]`` when + //! ``flag_op(input[i], next-item)`` + //! returns ``true`` (where `next-item` is either the next item + //! in the same thread or the first item in the next thread). + //! - For *thread*\ :sub:`BLOCK_THREADS - 1`, item ``input[ITEMS_PER_THREAD - 1]`` is always flagged. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the tail-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! 
// Collectively compute tail flags for discontinuities in the segment + //! int tail_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagTails(tail_flags, thread_data, cub::Inequality()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }``. + //! The corresponding output ``tail_flags`` in those threads will be + //! ``{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. `b_index` is the + //! rank of `b` in the aggregate tile of data. + //! + //! @param[out] tail_flags + //! Calling thread's discontinuity tail_flags + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! Binary boolean flag predicate + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + FlagTails(FlagT (&tail_flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], FlagOp flag_op) + { + // Share first item + temp_storage.first_items[linear_tid] = input[0]; + + __syncthreads(); + + // Set flag for last thread-item + tail_flags[ITEMS_PER_THREAD - 1] = + (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread + ApplyOp::FlagT( + flag_op, + input[ITEMS_PER_THREAD - 1], + temp_storage.first_items[linear_tid + 1], + (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); + + // Set tail_flags for remaining items + Iterate::FlagTails(linear_tid, tail_flags, input, flag_op); + } + + //! @rst + //! 
Sets tail flags indicating discontinuities between items partitioned across the thread block. + //! + //! - The flag ``tail_flags[i]`` is set for item ``input[i]`` when ``flag_op(input[i], next-item)`` + //! returns ``true`` (where ``next-item`` is either the next item in the same thread or the first item in + //! the next thread). + //! - For *thread*\ :sub:`BLOCK_THREADS - 1`, item ``input[ITEMS_PER_THREAD - 1]`` is compared against + //! ``tile_successor_item``. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the tail-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Have thread127 obtain the successor item for the entire tile + //! int tile_successor_item; + //! if (threadIdx.x == 127) tile_successor_item == ... + //! + //! // Collectively compute tail flags for discontinuities in the segment + //! int tail_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagTails( + //! tail_flags, thread_data, cub::Inequality(), tile_successor_item); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }`` + //! and that ``tile_successor_item`` is ``125``. The corresponding output ``tail_flags`` in those + //! 
threads will be ``{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. `b_index` is the + //! rank of `b` in the aggregate tile of data. + //! + //! @param[out] tail_flags + //! Calling thread's discontinuity tail_flags + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! Binary boolean flag predicate + //! + //! @param[in] tile_successor_item + //! @rst + //! *thread*\ :sub:`BLOCK_THREADS - 1` only item with which to + //! compare the last tile item (``input[ITEMS_PER_THREAD - 1]`` from + //! *thread*\ :sub:`BLOCK_THREADS - 1`). + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + FlagTails(FlagT (&tail_flags)[ITEMS_PER_THREAD], T (&input)[ITEMS_PER_THREAD], FlagOp flag_op, T tile_successor_item) + { + // Share first item + temp_storage.first_items[linear_tid] = input[0]; + + __syncthreads(); + + // Set flag for last thread-item + T successor_item = (linear_tid == BLOCK_THREADS - 1) ? tile_successor_item : // Last thread + temp_storage.first_items[linear_tid + 1]; + + tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( + flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); + + // Set tail_flags for remaining items + Iterate::FlagTails(linear_tid, tail_flags, input, flag_op); + } + + //! @} end member group + //! @name Head & tail flag operations + //! @{ + + //! @rst + //! 
Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. + //! + //! - The flag ``head_flags[i]`` is set for item ``input[i]`` when ``flag_op(previous-item, input[i])`` returns + //! ``true`` (where ``previous-item`` is either the preceding item in the same thread or the last item in + //! the previous thread). + //! - For *thread*\ :sub:`0`, item ``input[0]`` is always flagged. + //! - The flag ``tail_flags[i]`` is set for item ``input[i]`` when ``flag_op(input[i], next-item)`` + //! returns ``true`` (where next-item is either the next item in the same thread or the first item in + //! the next thread). + //! - For *thread*\ :sub:`BLOCK_THREADS - 1`, item ``input[ITEMS_PER_THREAD - 1]`` is always flagged. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the head- and tail-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute head and flags for discontinuities in the segment + //! int head_flags[4]; + //! int tail_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagHeadsAndTails( + //! head_flags, tail_flags, thread_data, cub::Inequality()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! 
``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }`` + //! and that the tile_successor_item is ``125``. The corresponding output ``head_flags`` + //! in those threads will be ``{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``. + //! and the corresponding output ``tail_flags`` in those threads will be + //! ``{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. `b_index` is the + //! rank of `b` in the aggregate tile of data. + //! + //! @param[out] head_flags + //! Calling thread's discontinuity head_flags + //! + //! @param[out] tail_flags + //! Calling thread's discontinuity tail_flags + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! 
Binary boolean flag predicate + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeadsAndTails( + FlagT (&head_flags)[ITEMS_PER_THREAD], + FlagT (&tail_flags)[ITEMS_PER_THREAD], + T (&input)[ITEMS_PER_THREAD], + FlagOp flag_op) + { + // Share first and last items + temp_storage.first_items[linear_tid] = input[0]; + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + T preds[ITEMS_PER_THREAD]; + + // Set flag for first thread-item + if (linear_tid == 0) + { + head_flags[0] = 1; + } + else + { + preds[0] = temp_storage.last_items[linear_tid - 1]; + head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); + } + + // Set flag for last thread-item + tail_flags[ITEMS_PER_THREAD - 1] = + (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread + ApplyOp::FlagT( + flag_op, + input[ITEMS_PER_THREAD - 1], + temp_storage.first_items[linear_tid + 1], + (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); + + // Set head_flags for remaining items + Iterate::FlagHeads(linear_tid, head_flags, input, preds, flag_op); + + // Set tail_flags for remaining items + Iterate::FlagTails(linear_tid, tail_flags, input, flag_op); + } + + //! @rst + //! Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. + //! + //! - The flag ``head_flags[i]`` is set for item ``input[i]`` when + //! ``flag_op(previous-item, input[i])`` returns ``true`` (where ``previous-item`` is either the preceding item + //! in the same thread or the last item in the previous thread). + //! - For *thread*\ :sub:`0`, item ``input[0]`` is always flagged. + //! - The flag ``tail_flags[i]`` is set for item ``input[i]`` when ``flag_op(input[i], next-item)`` returns ``true`` + //! (where ``next-item`` is either the next item in the same thread or the first item in the next thread). + //! - For *thread*\ :sub:`BLOCK_THREADS - 1`, item ``input[ITEMS_PER_THREAD - 1]`` is compared + //! 
against ``tile_predecessor_item``. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the head- and tail-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Have thread127 obtain the successor item for the entire tile + //! int tile_successor_item; + //! if (threadIdx.x == 127) tile_successor_item == ... + //! + //! // Collectively compute head and flags for discontinuities in the segment + //! int head_flags[4]; + //! int tail_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagHeadsAndTails( + //! head_flags, tail_flags, tile_successor_item, thread_data, cub::Inequality()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }`` + //! and that the tile_successor_item is ``125``. The corresponding output ``head_flags`` + //! in those threads will be ``{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``. + //! and the corresponding output ``tail_flags`` in those threads will be + //! ``{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! 
**[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. `b_index` is the + //! rank of b in the aggregate tile of data. + //! + //! @param[out] head_flags + //! Calling thread's discontinuity head_flags + //! + //! @param[out] tail_flags + //! Calling thread's discontinuity tail_flags + //! + //! @param[in] tile_successor_item + //! @rst + //! *thread*\ :sub:`BLOCK_THREADS - 1` only item with which to compare + //! the last tile item (``input[ITEMS_PER_THREAD - 1]`` from + //! *thread*\ :sub:`BLOCK_THREADS - 1`). + //! @endrst + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! Binary boolean flag predicate + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeadsAndTails( + FlagT (&head_flags)[ITEMS_PER_THREAD], + FlagT (&tail_flags)[ITEMS_PER_THREAD], + T tile_successor_item, + T (&input)[ITEMS_PER_THREAD], + FlagOp flag_op) + { + // Share first and last items + temp_storage.first_items[linear_tid] = input[0]; + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + T preds[ITEMS_PER_THREAD]; + + // Set flag for first thread-item + if (linear_tid == 0) + { + head_flags[0] = 1; + } + else + { + preds[0] = temp_storage.last_items[linear_tid - 1]; + head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); + } + + // Set flag for last thread-item + T successor_item = (linear_tid == BLOCK_THREADS - 1) ? 
tile_successor_item : // Last thread + temp_storage.first_items[linear_tid + 1]; + + tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( + flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); + + // Set head_flags for remaining items + Iterate::FlagHeads(linear_tid, head_flags, input, preds, flag_op); + + // Set tail_flags for remaining items + Iterate::FlagTails(linear_tid, tail_flags, input, flag_op); + } + + //! @rst + //! Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. + //! + //! - The flag ``head_flags[i]`` is set for item ``input[i]`` when ``flag_op(previous-item, input[i])`` + //! returns ``true`` (where ``previous-item`` is either the preceding item in the same thread or the last item + //! in the previous thread). + //! - For *thread*\ :sub:`0`, item ``input[0]`` is compared against ``tile_predecessor_item``. + //! - The flag ``tail_flags[i]`` is set for item ``input[i]`` when + //! ``flag_op(input[i], next-item)`` returns ``true`` (where ``next-item`` is either the next item + //! in the same thread or the first item in the next thread). + //! - For *thread*\ :sub:`BLOCK_THREADS - 1`, item + //! ``input[ITEMS_PER_THREAD - 1]`` is always flagged. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the head- and tail-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! 
__shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Have thread0 obtain the predecessor item for the entire tile + //! int tile_predecessor_item; + //! if (threadIdx.x == 0) tile_predecessor_item == ... + //! + //! // Have thread127 obtain the successor item for the entire tile + //! int tile_successor_item; + //! if (threadIdx.x == 127) tile_successor_item == ... + //! + //! // Collectively compute head and flags for discontinuities in the segment + //! int head_flags[4]; + //! int tail_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagHeadsAndTails( + //! head_flags, tile_predecessor_item, tail_flags, tile_successor_item, + //! thread_data, cub::Inequality()); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }``, + //! that the ``tile_predecessor_item`` is ``0``, and that the ``tile_successor_item`` is ``125``. + //! The corresponding output ``head_flags`` in those threads will be + //! ``{ [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``, and the corresponding output ``tail_flags`` + //! in those threads will be ``{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. `b_index` is the rank + //! of b in the aggregate tile of data. + //! + //! @param[out] head_flags + //! 
Calling thread's discontinuity head_flags + //! + //! @param[in] tile_predecessor_item + //! @rst + //! *thread*\ :sub:`0` only item with which to compare the first tile item (``input[0]`` from *thread*\ :sub:`0`). + //! @endrst + //! + //! @param[out] tail_flags + //! Calling thread's discontinuity tail_flags + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! Binary boolean flag predicate + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeadsAndTails( + FlagT (&head_flags)[ITEMS_PER_THREAD], + T tile_predecessor_item, + FlagT (&tail_flags)[ITEMS_PER_THREAD], + T (&input)[ITEMS_PER_THREAD], + FlagOp flag_op) + { + // Share first and last items + temp_storage.first_items[linear_tid] = input[0]; + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + T preds[ITEMS_PER_THREAD]; + + // Set flag for first thread-item + preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread + temp_storage.last_items[linear_tid - 1]; + + head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); + + // Set flag for last thread-item + tail_flags[ITEMS_PER_THREAD - 1] = + (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread + ApplyOp::FlagT( + flag_op, + input[ITEMS_PER_THREAD - 1], + temp_storage.first_items[linear_tid + 1], + (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); + + // Set head_flags for remaining items + Iterate::FlagHeads(linear_tid, head_flags, input, preds, flag_op); + + // Set tail_flags for remaining items + Iterate::FlagTails(linear_tid, tail_flags, input, flag_op); + } + + //! @rst + //! Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. + //! + //! - The flag ``head_flags[i]`` is set for item ``input[i]`` when ``flag_op(previous-item, input[i])`` + //! returns ``true`` (where ``previous-item`` is either the preceding item in the same thread or the last item in + //! 
the previous thread). + //! - For *thread*\ :sub:`0`, item ``input[0]`` is compared against ``tile_predecessor_item``. + //! - The flag ``tail_flags[i]`` is set for item ``input[i]`` when ``flag_op(input[i], next-item)`` + //! returns ``true`` (where ``next-item`` is either the next item in the same thread or the first item in + //! the next thread). + //! - For *thread*\ :sub:`BLOCK_THREADS - 1`, item ``input[ITEMS_PER_THREAD - 1]`` is compared + //! against ``tile_successor_item``. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the head- and tail-flagging of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockDiscontinuity for a 1D block of 128 threads of type int + //! using BlockDiscontinuity = cub::BlockDiscontinuity; + //! + //! // Allocate shared memory for BlockDiscontinuity + //! __shared__ typename BlockDiscontinuity::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Have thread0 obtain the predecessor item for the entire tile + //! int tile_predecessor_item; + //! if (threadIdx.x == 0) tile_predecessor_item == ... + //! + //! // Have thread127 obtain the successor item for the entire tile + //! int tile_successor_item; + //! if (threadIdx.x == 127) tile_successor_item == ... + //! + //! // Collectively compute head and flags for discontinuities in the segment + //! int head_flags[4]; + //! int tail_flags[4]; + //! BlockDiscontinuity(temp_storage).FlagHeadsAndTails( + //! head_flags, tile_predecessor_item, tail_flags, tile_successor_item, + //! thread_data, cub::Inequality()); + //! + //! 
Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }``, + //! that the ``tile_predecessor_item`` is ``0``, and that the + //! ``tile_successor_item`` is ``125``. The corresponding output ``head_flags`` + //! in those threads will be ``{ [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }``. + //! and the corresponding output ``tail_flags`` in those threads will be + //! ``{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }``. + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam FlagT + //! **[inferred]** The flag type (must be an integer type) + //! + //! @tparam FlagOp + //! **[inferred]** Binary predicate functor type having member + //! `T operator()(const T &a, const T &b)` or member + //! `T operator()(const T &a, const T &b, unsigned int b_index)`, and returning `true` + //! if a discontinuity exists between `a` and `b`, otherwise `false`. `b_index` is the rank + //! of `b` in the aggregate tile of data. + //! + //! @param[out] head_flags + //! Calling thread's discontinuity head_flags + //! + //! @param[in] tile_predecessor_item + //! @rst + //! *thread*\ :sub:`0` only item with which to compare the first tile item (``input[0]`` from *thread*\ :sub:`0`). + //! @endrst + //! + //! @param[out] tail_flags + //! Calling thread's discontinuity tail_flags + //! + //! @param[in] tile_successor_item + //! @rst + //! *thread*\ :sub:`BLOCK_THREADS - 1` only item with which to compare the last tile item + //! (``input[ITEMS_PER_THREAD - 1]`` from *thread*\ :sub:`BLOCK_THREADS - 1`). + //! @endrst + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[in] flag_op + //! 
Binary boolean flag predicate + template + _CCCL_DEVICE _CCCL_FORCEINLINE void FlagHeadsAndTails( + FlagT (&head_flags)[ITEMS_PER_THREAD], + T tile_predecessor_item, + FlagT (&tail_flags)[ITEMS_PER_THREAD], + T tile_successor_item, + T (&input)[ITEMS_PER_THREAD], + FlagOp flag_op) + { + // Share first and last items + temp_storage.first_items[linear_tid] = input[0]; + temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + + T preds[ITEMS_PER_THREAD]; + + // Set flag for first thread-item + preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread + temp_storage.last_items[linear_tid - 1]; + + head_flags[0] = ApplyOp::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); + + // Set flag for last thread-item + T successor_item = (linear_tid == BLOCK_THREADS - 1) ? tile_successor_item : // Last thread + temp_storage.first_items[linear_tid + 1]; + + tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp::FlagT( + flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); + + // Set head_flags for remaining items + Iterate::FlagHeads(linear_tid, head_flags, input, preds, flag_op); + + // Set tail_flags for remaining items + Iterate::FlagTails(linear_tid, tail_flags, input, flag_op); + } + + //! @} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_exchange.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_exchange.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d1ae91c223d185bd89ca4bdbc7ecdf3c1b65da6d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_exchange.cuh @@ -0,0 +1,1300 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2024, NVIDIA CORPORATION. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! The cub::BlockExchange class provides :ref:`collective ` methods for +//! rearranging data partitioned across a CUDA thread block. 
+ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The BlockExchange class provides :ref:`collective ` methods for rearranging data partitioned +//! across a CUDA thread block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - It is commonplace for blocks of threads to rearrange data items between threads. For example, the +//! device-accessible memory subsystem prefers access patterns where data items are "striped" across threads (where +//! consecutive threads access consecutive items), yet most block-wide operations prefer a "blocked" partitioning of +//! items across threads (where consecutive items belong to a single thread). +//! - BlockExchange supports the following types of data exchanges: +//! +//! - Transposing between :ref:`blocked ` and :ref:`striped ` +//! arrangements +//! - Transposing between :ref:`blocked ` and +//! :ref:`warp-striped ` arrangements +//! - Scattering ranked items to a :ref:`blocked arrangement ` +//! - Scattering ranked items to a :ref:`striped arrangement ` +//! +//! - @rowmajor +//! +//! A Simple Example +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! @blockcollective{BlockExchange} +//! +//! The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement of 512 integer items +//! partitioned across 128 threads where each thread owns 4 items. +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(int *d_data, ...) +//! { +//! // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each +//! using BlockExchange = cub::BlockExchange; +//! +//! 
// Allocate shared memory for BlockExchange +//! __shared__ typename BlockExchange::TempStorage temp_storage; +//! +//! // Load a tile of data striped across threads +//! int thread_data[4]; +//! cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data); +//! +//! // Collectively exchange data into a blocked arrangement across threads +//! BlockExchange(temp_storage).StripedToBlocked(thread_data); +//! +//! Suppose the set of striped input ``thread_data`` across the block of threads is ``{ [0,128,256,384], +//! [1,129,257,385], ..., [127,255,383,511] }``. The corresponding output ``thread_data`` in those threads will be +//! ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }``. +//! +//! Performance Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - Proper device-specific padding ensures zero bank conflicts for most types. +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared memory with +//! BlockReduce and how to re-purpose the same memory region. This example can be easily adapted to the storage required +//! by BlockExchange. +//! @endrst +//! +//! @tparam T +//! The data type to be exchanged +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam ITEMS_PER_THREAD +//! The number of items partitioned onto each thread. +//! +//! @tparam WARP_TIME_SLICING +//! **[optional]** When `true`, only use enough shared memory for a single warp's worth of +//! tile data, time-slicing the block-wide exchange over multiple synchronized rounds. Yields a smaller memory footprint +//! at the expense of decreased parallelism. (Default: false) +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! 
**[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! [optional] Unused. +template +class BlockExchange +{ + static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; ///< The thread block size in threads + static constexpr int WARP_THREADS = CUB_WARP_THREADS(0); + static constexpr int WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS; // TODO(bgruber): use ceil_div in + // C++14 + static constexpr int LOG_SMEM_BANKS = CUB_LOG_SMEM_BANKS(0); + + static constexpr int TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD; + static constexpr int TIME_SLICES = WARP_TIME_SLICING ? WARPS : 1; + static constexpr int TIME_SLICED_THREADS = WARP_TIME_SLICING ? CUB_MIN(BLOCK_THREADS, WARP_THREADS) : BLOCK_THREADS; + static constexpr int TIME_SLICED_ITEMS = TIME_SLICED_THREADS * ITEMS_PER_THREAD; + static constexpr int WARP_TIME_SLICED_THREADS = CUB_MIN(BLOCK_THREADS, WARP_THREADS); + static constexpr int WARP_TIME_SLICED_ITEMS = WARP_TIME_SLICED_THREADS * ITEMS_PER_THREAD; + + // Insert padding to avoid bank conflicts during raking when items per thread is a power of two and > 4 (otherwise + // we can typically use 128b loads) + static constexpr bool INSERT_PADDING = ITEMS_PER_THREAD > 4 && PowerOfTwo::VALUE; + static constexpr int PADDING_ITEMS = INSERT_PADDING ? (TIME_SLICED_ITEMS >> LOG_SMEM_BANKS) : 0; + + /// Shared memory storage layout type + struct alignas(16) _TempStorage + { + T buff[TIME_SLICED_ITEMS + PADDING_ITEMS]; + }; + +public: + /// @smemstorage{BlockExchange} + using TempStorage = Uninitialized<_TempStorage>; + +private: + _TempStorage& temp_storage; + + // TODO(bgruber): can we use signed int here? Only these variables are unsigned: + unsigned int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z); + unsigned int lane_id = ::cuda::ptx::get_sreg_laneid(); + unsigned int warp_id = WARPS == 1 ? 
0 : linear_tid / WARP_THREADS; + unsigned int warp_offset = warp_id * WARP_TIME_SLICED_ITEMS; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + //! @brief Transposes data items from **blocked** arrangement to **striped** arrangement. Specialized for no + //! timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + template + _CCCL_DEVICE _CCCL_FORCEINLINE void BlockedToStriped( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = linear_tid * ITEMS_PER_THREAD + i; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * BLOCK_THREADS + linear_tid; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @brief Transposes data items from **blocked** arrangement to **striped** arrangement. Specialized for + //! warp-timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void BlockedToStriped( + const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD], Int2Type /*time_slicing*/) + { + T temp_items[ITEMS_PER_THREAD]; + +#pragma unroll + for (int slice = 0; slice < TIME_SLICES; slice++) + { + const int slice_offset = slice * TIME_SLICED_ITEMS; + const int slice_oob = slice_offset + TIME_SLICED_ITEMS; + + __syncthreads(); + + if (warp_id == slice) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = lane_id * ITEMS_PER_THREAD + i; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + // Read a strip of items + const int strip_offset = i * BLOCK_THREADS; + const int strip_oob = strip_offset + BLOCK_THREADS; + + if (slice_offset < strip_oob && slice_oob > strip_offset) + { + int item_offset = strip_offset + linear_tid - slice_offset; + if (item_offset >= 0 && item_offset < TIME_SLICED_ITEMS) + { + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + temp_items[i] = temp_storage.buff[item_offset]; + } + } + } + } + +// Copy +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + output_items[i] = temp_items[i]; + } + } + + //! @brief Transposes data items from **blocked** arrangement to **warp-striped** arrangement. Specialized for no + //! timeslicing + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void BlockedToWarpStriped( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = warp_offset + i + (lane_id * ITEMS_PER_THREAD); + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncwarp(0xffffffff); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = warp_offset + (i * WARP_TIME_SLICED_THREADS) + lane_id; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @brief Transposes data items from **blocked** arrangement to **warp-striped** arrangement. Specialized for + //! warp-timeslicing + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void BlockedToWarpStriped( + const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD], Int2Type /*time_slicing*/) + { + if (warp_id == 0) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i + lane_id * ITEMS_PER_THREAD; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncwarp(0xffffffff); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * WARP_TIME_SLICED_THREADS + lane_id; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + +#pragma unroll + for (int slice = 1; slice < TIME_SLICES; ++slice) + { + __syncthreads(); + + if (warp_id == slice) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i + lane_id * ITEMS_PER_THREAD; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncwarp(0xffffffff); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * WARP_TIME_SLICED_THREADS + lane_id; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + } + } + + //! @brief Transposes data items from **striped** arrangement to **blocked** arrangement. Specialized for no + //! timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void StripedToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * BLOCK_THREADS + linear_tid; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncthreads(); + +// No timeslicing +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = linear_tid * ITEMS_PER_THREAD + i; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @brief Transposes data items from **striped** arrangement to **blocked** arrangement. Specialized for + //! warp-timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void StripedToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD], Int2Type /*time_slicing*/) + { + // Warp time-slicing + T temp_items[ITEMS_PER_THREAD]; + +#pragma unroll + for (int slice = 0; slice < TIME_SLICES; slice++) + { + const int slice_offset = slice * TIME_SLICED_ITEMS; + const int slice_oob = slice_offset + TIME_SLICED_ITEMS; + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + // Write a strip of items + const int strip_offset = i * BLOCK_THREADS; + const int strip_oob = strip_offset + BLOCK_THREADS; + + if (slice_offset < strip_oob && slice_oob > strip_offset) + { + int item_offset = strip_offset + linear_tid - slice_offset; + if (item_offset >= 0 && item_offset < TIME_SLICED_ITEMS) + { + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + } + } + + __syncthreads(); + + if (warp_id == slice) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = lane_id * ITEMS_PER_THREAD + i; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + temp_items[i] = temp_storage.buff[item_offset]; + } + } + } + +// Copy +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + output_items[i] = temp_items[i]; + } + } + + //! @brief Transposes data items from **warp-striped** arrangement to **blocked** arrangement. Specialized for no + //! timeslicing + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void WarpStripedToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = warp_offset + (i * WARP_TIME_SLICED_THREADS) + lane_id; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncwarp(0xffffffff); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = warp_offset + i + (lane_id * ITEMS_PER_THREAD); + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(output_items + i, temp_storage.buff[item_offset]); + } + } + + //! @brief Transposes data items from **warp-striped** arrangement to **blocked** arrangement. Specialized for + //! warp-timeslicing + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void WarpStripedToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD], Int2Type /*time_slicing*/) + { +#pragma unroll + for (int slice = 0; slice < TIME_SLICES; ++slice) + { + __syncthreads(); + + if (warp_id == slice) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * WARP_TIME_SLICED_THREADS + lane_id; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncwarp(0xffffffff); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i + lane_id * ITEMS_PER_THREAD; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + } + } + + //! @brief Exchanges data items annotated by rank into **blocked** arrangement. Specialized for no timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[in] ranks + //! 
Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = ranks[i]; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = linear_tid * ITEMS_PER_THREAD + i; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @brief Exchanges data items annotated by rank into **blocked** arrangement. Specialized for warp-timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[in] ranks + //! 
Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT ranks[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { + T temp_items[ITEMS_PER_THREAD]; + +#pragma unroll + for (int slice = 0; slice < TIME_SLICES; slice++) + { + __syncthreads(); + + const int slice_offset = TIME_SLICED_ITEMS * slice; + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = ranks[i] - slice_offset; + if (item_offset >= 0 && item_offset < WARP_TIME_SLICED_ITEMS) + { + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + } + + __syncthreads(); + + if (warp_id == slice) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = lane_id * ITEMS_PER_THREAD + i; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + temp_items[i] = temp_storage.buff[item_offset]; + } + } + } + +// Copy +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + output_items[i] = temp_items[i]; + } + } + + //! @brief Exchanges data items annotated by rank into **striped** arrangement. Specialized for no timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[in] ranks + //! 
Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = ranks[i]; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * BLOCK_THREADS + linear_tid; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @brief Exchanges data items annotated by rank into **striped** arrangement. Specialized for warp-timeslicing. + //! + //! @param[in] input_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[out] output_items + //! Items to exchange, converting between **blocked** and **striped** arrangements. + //! + //! @param[in] ranks + //! 
Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD], + Int2Type /*time_slicing*/) + { + T temp_items[ITEMS_PER_THREAD]; + +#pragma unroll + for (int slice = 0; slice < TIME_SLICES; slice++) + { + const int slice_offset = slice * TIME_SLICED_ITEMS; + const int slice_oob = slice_offset + TIME_SLICED_ITEMS; + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = ranks[i] - slice_offset; + if (item_offset >= 0 && item_offset < WARP_TIME_SLICED_ITEMS) + { + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + detail::uninitialized_copy_single(temp_storage.buff + item_offset, input_items[i]); + } + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + // Read a strip of items + const int strip_offset = i * BLOCK_THREADS; + const int strip_oob = strip_offset + BLOCK_THREADS; + + if (slice_offset < strip_oob && slice_oob > strip_offset) + { + int item_offset = strip_offset + linear_tid - slice_offset; + if (item_offset >= 0 && item_offset < TIME_SLICED_ITEMS) + { + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset += item_offset >> LOG_SMEM_BANKS; + } + temp_items[i] = temp_storage.buff[item_offset]; + } + } + } + } + +// Copy +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + output_items[i] = temp_items[i]; + } + } + +public: + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE BlockExchange() + : temp_storage(PrivateStorage()) + {} + + //! @brief Collective constructor using the specified memory allocation as temporary storage. + //! 
@param[in] temp_storage Reference to memory allocation having layout type TempStorage + _CCCL_DEVICE _CCCL_FORCEINLINE BlockExchange(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + {} + + //! @} end member group + //! @name Structured exchanges + //! @{ + + //! @rst + //! Transposes data items from **striped** arrangement to **blocked** arrangement. + //! + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the conversion from a "striped" to a "blocked" arrangement + //! of 512 integer items partitioned across 128 threads where each thread owns 4 items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each + //! using BlockExchange = cub::BlockExchange; + //! + //! // Allocate shared memory for BlockExchange + //! __shared__ typename BlockExchange::TempStorage temp_storage; + //! + //! // Load a tile of ordered data into a striped arrangement across block threads + //! int thread_data[4]; + //! cub::LoadDirectStriped<128>(threadIdx.x, d_data, thread_data); + //! + //! // Collectively exchange data into a blocked arrangement across threads + //! BlockExchange(temp_storage).StripedToBlocked(thread_data, thread_data); + //! + //! Suppose the set of striped input ``thread_data`` across the block of threads is ``{ [0,128,256,384], + //! [1,129,257,385], ..., [127,255,383,511] }`` after loading from device-accessible memory. The corresponding output + //! ``thread_data`` in those threads will be ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }``. + //! @endrst + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE void + StripedToBlocked(const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + StripedToBlocked(input_items, output_items, Int2Type()); + } + + //! @rst + //! Transposes data items from **blocked** arrangement to **striped** arrangement. + //! + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the conversion from a "blocked" to a "striped" arrangement + //! of 512 integer items partitioned across 128 threads where each thread owns 4 items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each + //! using BlockExchange = cub::BlockExchange; + //! + //! // Allocate shared memory for BlockExchange + //! __shared__ typename BlockExchange::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively exchange data into a striped arrangement across threads + //! BlockExchange(temp_storage).BlockedToStriped(thread_data, thread_data); + //! + //! // Store data striped across block threads into an ordered tile + //! cub::StoreDirectStriped(threadIdx.x, d_data, thread_data); + //! + //! Suppose the set of blocked input ``thread_data`` across the block of threads is ``{ [0,1,2,3], [4,5,6,7], + //! [8,9,10,11], ..., [508,509,510,511] }``. The corresponding output ``thread_data`` in those threads will be + //! ``{ [0,128,256,384], [1,129,257,385], ..., [127,255,383,511] }`` in preparation for storing to device-accessible + //! memory. + //! @endrst + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! 
Items from exchange, converting between **striped** and **blocked** arrangements. + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + BlockedToStriped(const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + BlockedToStriped(input_items, output_items, Int2Type()); + } + + //! @rst + //! Transposes data items from **warp-striped** arrangement to **blocked** arrangement. + //! + //! - @smemreuse + //! + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the conversion from a "warp-striped" to a "blocked" + //! arrangement of 512 integer items partitioned across 128 threads where each thread owns 4 + //! items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each + //! using BlockExchange = cub::BlockExchange; + //! + //! // Allocate shared memory for BlockExchange + //! __shared__ typename BlockExchange::TempStorage temp_storage; + //! + //! // Load a tile of ordered data into a warp-striped arrangement across warp threads + //! int thread_data[4]; + //! cub::LoadSWarptriped(threadIdx.x, d_data, thread_data); + //! + //! // Collectively exchange data into a blocked arrangement across threads + //! BlockExchange(temp_storage).WarpStripedToBlocked(thread_data); + //! + //! Suppose the set of warp-striped input ``thread_data`` across the block of threads is ``{ [0,32,64,96], + //! [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }`` after loading from device-accessible memory. (The first 128 + //! items are striped across the first warp of 32 threads, the second 128 items are striped across the second warp, + //! etc.) The corresponding output ``thread_data`` in those threads will be ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], + //! ..., [508,509,510,511] }``. + //! @endrst + //! + //! @param[in] input_items + //! 
Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + WarpStripedToBlocked(const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + WarpStripedToBlocked(input_items, output_items, Int2Type()); + } + + //! @rst + //! Transposes data items from **blocked** arrangement to **warp-striped** arrangement. + //! + //! - @smemreuse + //! + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the conversion from a "blocked" to a "warp-striped" + //! arrangement of 512 integer items partitioned across 128 threads where each thread owns 4 + //! items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! // Specialize BlockExchange for a 1D block of 128 threads owning 4 integer items each + //! using BlockExchange = cub::BlockExchange; + //! + //! // Allocate shared memory for BlockExchange + //! __shared__ typename BlockExchange::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively exchange data into a warp-striped arrangement across threads + //! BlockExchange(temp_storage).BlockedToWarpStriped(thread_data, thread_data); + //! + //! // Store data striped across warp threads into an ordered tile + //! cub::StoreDirectStriped(threadIdx.x, d_data, thread_data); + //! + //! Suppose the set of blocked input ``thread_data`` across the block of threads is ``{ [0,1,2,3], [4,5,6,7], + //! [8,9,10,11], ..., [508,509,510,511] }``. The corresponding output ``thread_data`` in those threads will be + //! ``{ [0,32,64,96], [1,33,65,97], [2,34,66,98], ..., [415,447,479,511] }`` in preparation for storing to + //! 
device-accessible memory. (The first 128 items are striped across the first warp of 32 threads, the second 128 + //! items are striped across the second warp, etc.) + //! @endrst + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + BlockedToWarpStriped(const T (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + BlockedToWarpStriped(input_items, output_items, Int2Type()); + } + + //! @} end member group + //! @name Scatter exchanges + //! @{ + + //! @rst + //! Exchanges data items annotated by rank into **blocked** arrangement. + //! + //! - @smemreuse + //! @endrst + //! + //! @tparam OffsetT + //! **[inferred]** Signed integer type for local offsets + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[in] ranks + //! Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToBlocked( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + ScatterToBlocked(input_items, output_items, ranks, Int2Type()); + } + + //! @rst + //! Exchanges data items annotated by rank into **striped** arrangement. + //! + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam OffsetT + //! **[inferred]** Signed integer type for local offsets + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. + //! + //! 
@param[in] ranks + //! Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + ScatterToStriped(input_items, output_items, ranks, Int2Type()); + } + + //! @rst + //! Exchanges data items annotated by rank into **striped** arrangement. Items with rank -1 are not exchanged. + //! + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam OffsetT + //! **[inferred]** Signed integer type for local offsets + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[in] ranks + //! Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStripedGuarded( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD]) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = ranks[i]; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + if (ranks[i] >= 0) + { + temp_storage.buff[item_offset] = input_items[i]; + } + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * BLOCK_THREADS + linear_tid; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @rst + //! Exchanges valid data items annotated by rank into **striped** arrangement. + //! + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam OffsetT + //! **[inferred]** Signed integer type for local offsets + //! + //! @tparam ValidFlag + //! 
**[inferred]** FlagT type denoting which items are valid + //! + //! @param[in] input_items + //! Items to exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[out] output_items + //! Items from exchange, converting between **striped** and **blocked** arrangements. + //! + //! @param[in] ranks + //! Corresponding scatter ranks + //! + //! @param[in] is_valid + //! Corresponding flag denoting item validity + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStripedFlagged( + const T (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD], + ValidFlag (&is_valid)[ITEMS_PER_THREAD]) + { +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = ranks[i]; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + if (is_valid[i]) + { + temp_storage.buff[item_offset] = input_items[i]; + } + } + + __syncthreads(); + +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + int item_offset = i * BLOCK_THREADS + linear_tid; + _CCCL_IF_CONSTEXPR (INSERT_PADDING) + { + item_offset = (item_offset >> LOG_SMEM_BANKS) + item_offset; + } + output_items[i] = temp_storage.buff[item_offset]; + } + } + + //! @} end member group + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. + _CCCL_DEVICE _CCCL_FORCEINLINE void StripedToBlocked(T (&items)[ITEMS_PER_THREAD]) + { + StripedToBlocked(items, items); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. + _CCCL_DEVICE _CCCL_FORCEINLINE void BlockedToStriped(T (&items)[ITEMS_PER_THREAD]) + { + BlockedToStriped(items, items); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. 
+ _CCCL_DEVICE _CCCL_FORCEINLINE void WarpStripedToBlocked(T (&items)[ITEMS_PER_THREAD]) + { + WarpStripedToBlocked(items, items); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. + _CCCL_DEVICE _CCCL_FORCEINLINE void BlockedToWarpStriped(T (&items)[ITEMS_PER_THREAD]) + { + BlockedToWarpStriped(items, items); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. + /// + /// @param[in] ranks + /// Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToBlocked(T (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + ScatterToBlocked(items, items, ranks); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. + /// @param[in] ranks + /// Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped(T (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + ScatterToStriped(items, items, ranks); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. + /// @param[in] ranks + /// Corresponding scatter ranks + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ScatterToStripedGuarded(T (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + ScatterToStripedGuarded(items, items, ranks); + } + + /// @param[in-out] items + /// Items to exchange, converting between **striped** and **blocked** arrangements. 
+ /// @param[in] ranks + /// Corresponding scatter ranks + /// @param[in] is_valid + /// Corresponding flag denoting item validity + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStripedFlagged( + T (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD], ValidFlag (&is_valid)[ITEMS_PER_THREAD]) + { + ScatterToStriped(items, items, ranks, is_valid); + } + +#endif // _CCCL_DOXYGEN_INVOKED +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_histogram.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_histogram.cuh new file mode 100644 index 0000000000000000000000000000000000000000..41abbd588b3dde8d60d943cb1a0a42948bfe34a1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_histogram.cuh @@ -0,0 +1,423 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * The cub::BlockHistogram class provides [collective](../index.html#sec0) methods for + * constructing block-wide histograms from data samples partitioned across a CUDA thread block. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @brief BlockHistogramAlgorithm enumerates alternative algorithms for the parallel construction of +//! block-wide histograms. +enum BlockHistogramAlgorithm +{ + + //! @rst + //! + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! Sorting followed by differentiation. Execution is comprised of two phases: + //! + //! #. Sort the data using efficient radix sort + //! #. Look for "runs" of same-valued keys by detecting discontinuities; the run-lengths are histogram bin counts. + //! + //! Performance Considerations + //! 
++++++++++++++++++++++++++ + //! + //! Delivers consistent throughput regardless of sample bin distribution. + //! + //! @endrst + BLOCK_HISTO_SORT, + + //! @rst + //! + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! Use atomic addition to update byte counts directly + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! Performance is strongly tied to the hardware implementation of atomic + //! addition, and may be significantly degraded for non uniformly-random + //! input distributions where many concurrent updates are likely to be + //! made to the same bin counter. + //! + //! @endrst + BLOCK_HISTO_ATOMIC, +}; + +//! @rst +//! The BlockHistogram class provides :ref:`collective ` methods for +//! constructing block-wide histograms from data samples partitioned across a CUDA thread block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - A `histogram `_ counts the number of observations that fall into +//! each of the disjoint categories (known as *bins*). +//! - The ``T`` type must be implicitly castable to an integer type. +//! - BlockHistogram expects each integral ``input[i]`` value to satisfy +//! ``0 <= input[i] < BINS``. Values outside of this range result in undefined behavior. +//! - BlockHistogram can be optionally specialized to use different algorithms: +//! +//! #. :cpp:enumerator:`cub::BLOCK_HISTO_SORT`: Sorting followed by differentiation. +//! #. :cpp:enumerator:`cub::BLOCK_HISTO_ATOMIC`: Use atomic addition to update byte counts directly. +//! +//! A Simple Example +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! @blockcollective{BlockHistogram} +//! +//! The code snippet below illustrates a 256-bin histogram of 512 integer samples that +//! are partitioned across 128 threads where each thread owns 4 samples. +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(...) +//! { +//! 
// Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each +//! using BlockHistogram = cub::BlockHistogram; +//! +//! // Allocate shared memory for BlockHistogram +//! __shared__ typename BlockHistogram::TempStorage temp_storage; +//! +//! // Allocate shared memory for block-wide histogram bin counts +//! __shared__ unsigned int smem_histogram[256]; +//! +//! // Obtain input samples per thread +//! unsigned char data[4]; +//! ... +//! +//! // Compute the block-wide histogram +//! BlockHistogram(temp_storage).Histogram(data, smem_histogram); +//! +//! Performance and Usage Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - @granularity +//! - All input values must fall between ``[0, BINS)``, or behavior is undefined. +//! - The histogram output can be constructed in shared or device-accessible memory +//! - See ``cub::BlockHistogramAlgorithm`` for performance details regarding algorithmic alternatives +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared memory with +//! BlockReduce and how to re-purpose the same memory region. This example can be easily adapted to the storage +//! required by BlockHistogram. +//! @endrst +//! +//! @tparam T +//! The sample type being histogrammed (must be castable to an integer bin identifier) +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam ITEMS_PER_THREAD +//! The number of items per thread +//! +//! @tparam BINS +//! The number bins within the histogram +//! +//! @tparam ALGORITHM +//! **[optional]** cub::BlockHistogramAlgorithm enumerator specifying the underlying algorithm to use +//! (default: cub::BLOCK_HISTO_SORT) +//! +//! @tparam BLOCK_DIM_Y +//! 
**[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. +template +class BlockHistogram +{ +private: + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + /// Internal specialization. + using InternalBlockHistogram = + ::cuda::std::_If, + detail::BlockHistogramAtomic>; + + /// Shared memory storage layout type for BlockHistogram + using _TempStorage = typename InternalBlockHistogram::TempStorage; + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + +public: + /// @smemstorage{BlockHistogram} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE BlockHistogram() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. + * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockHistogram(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Histogram operations + //! @{ + + //! @rst + //! Initialize the shared histogram counters to zero. + //! + //! Snippet + //! +++++++ + //! + //! 
The code snippet below illustrates a the initialization and update of a + //! histogram of 512 integer samples that are partitioned across 128 threads + //! where each thread owns 4 samples. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each + //! using BlockHistogram = cub::BlockHistogram; + //! + //! // Allocate shared memory for BlockHistogram + //! __shared__ typename BlockHistogram::TempStorage temp_storage; + //! + //! // Allocate shared memory for block-wide histogram bin counts + //! __shared__ unsigned int smem_histogram[256]; + //! + //! // Obtain input samples per thread + //! unsigned char thread_samples[4]; + //! ... + //! + //! // Initialize the block-wide histogram + //! BlockHistogram(temp_storage).InitHistogram(smem_histogram); + //! + //! // Update the block-wide histogram + //! BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram); + //! + //! @endrst + //! + //! @tparam CounterT + //! **[inferred]** Histogram counter type + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitHistogram(CounterT histogram[BINS]) + { + // Initialize histogram bin counts to zeros + int histo_offset = 0; + +#pragma unroll + for (; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) + { + histogram[histo_offset + linear_tid] = 0; + } + // Finish up with guarded initialization if necessary + if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) + { + histogram[histo_offset + linear_tid] = 0; + } + } + + //! @rst + //! Constructs a block-wide histogram in shared/device-accessible memory. + //! Each thread contributes an array of input elements. + //! + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a 256-bin histogram of 512 integer samples that + //! 
are partitioned across 128 threads where each thread owns 4 samples. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each + //! using BlockHistogram = cub::BlockHistogram; + //! + //! // Allocate shared memory for BlockHistogram + //! __shared__ typename BlockHistogram::TempStorage temp_storage; + //! + //! // Allocate shared memory for block-wide histogram bin counts + //! __shared__ unsigned int smem_histogram[256]; + //! + //! // Obtain input samples per thread + //! unsigned char thread_samples[4]; + //! ... + //! + //! // Compute the block-wide histogram + //! BlockHistogram(temp_storage).Histogram(thread_samples, smem_histogram); + //! + //! @endrst + //! + //! @tparam CounterT + //! **[inferred]** Histogram counter type + //! + //! @param[in] items + //! Calling thread's input values to histogram + //! + //! @param[out] histogram + //! Reference to shared/device-accessible memory histogram + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Histogram(T (&items)[ITEMS_PER_THREAD], CounterT histogram[BINS]) + { + // Initialize histogram bin counts to zeros + InitHistogram(histogram); + + __syncthreads(); + + // Composite the histogram + InternalBlockHistogram(temp_storage).Composite(items, histogram); + } + + //! @rst + //! Updates an existing block-wide histogram in shared/device-accessible memory. + //! Each thread composites an array of input elements. + //! + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a the initialization and update of a + //! histogram of 512 integer samples that are partitioned across 128 threads + //! where each thread owns 4 samples. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! 
// Specialize a 256-bin BlockHistogram type for a 1D block of 128 threads having 4 character samples each + //! using BlockHistogram = cub::BlockHistogram; + //! + //! // Allocate shared memory for BlockHistogram + //! __shared__ typename BlockHistogram::TempStorage temp_storage; + //! + //! // Allocate shared memory for block-wide histogram bin counts + //! __shared__ unsigned int smem_histogram[256]; + //! + //! // Obtain input samples per thread + //! unsigned char thread_samples[4]; + //! ... + //! + //! // Initialize the block-wide histogram + //! BlockHistogram(temp_storage).InitHistogram(smem_histogram); + //! + //! // Update the block-wide histogram + //! BlockHistogram(temp_storage).Composite(thread_samples, smem_histogram); + //! + //! @endrst + //! + //! @tparam CounterT + //! **[inferred]** Histogram counter type + //! + //! @param[in] items + //! Calling thread's input values to histogram + //! + //! @param[out] histogram + //! Reference to shared/device-accessible memory histogram + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Composite(T (&items)[ITEMS_PER_THREAD], CounterT histogram[BINS]) + { + InternalBlockHistogram(temp_storage).Composite(items, histogram); + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_load.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_load.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c1e9b95ac56eb22df2d3a3276b458c103ab6b093 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_load.cuh @@ -0,0 +1,1243 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! block_load.cuh Operations for reading linear tiles of data into the CUDA thread block. 
+ +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @name Blocked arrangement I/O (direct) +//! @{ + +//! @rst +//! Load a linear segment of items into a blocked arrangement across the thread block. +//! +//! @blocked +//! @endrst +//! +//! @tparam T +//! **[inferred]** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **[inferred]** The random-access iterator type for input iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D +//! thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base input iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +LoadDirectBlocked(int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) +{ +// Load directly in thread-blocked order +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = block_src_it[linear_tid * ITEMS_PER_THREAD + i]; + } +} + +//! @rst +//! Load a linear segment of items into a blocked arrangement across the thread block, guarded by range. +//! +//! @blocked +//! +//! @endrst +//! +//! @tparam T +//! **[inferred]** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **[inferred]** The random-access iterator type for input iterator. +//! +//! @param[in] linear_tid +//! 
A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D +//! thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +//! +//! @param[in] block_items_end +//! First out-of-bounds index when loading from block_src_it +template +_CCCL_DEVICE _CCCL_FORCEINLINE void LoadDirectBlocked( + int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) +{ +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + const auto src_pos = linear_tid * ITEMS_PER_THREAD + i; + if (src_pos < block_items_end) + { + dst_items[i] = block_src_it[src_pos]; + } + } +} + +//! @rst +//! Load a linear segment of items into a blocked arrangement across the thread block, guarded +//! by range, with a fall-back assignment of out-of-bound elements. +//! +//! @blocked +//! +//! @endrst +//! +//! @tparam T +//! **[inferred]** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **[inferred]** The random-access iterator type for input \iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D +//! thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base input iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +//! +//! @param[in] block_items_end +//! First out-of-bounds index when loading from block_src_it +//! +//! @param[in] oob_default +//! 
Default value to assign out-of-bound items +template +_CCCL_DEVICE _CCCL_FORCEINLINE void LoadDirectBlocked( + int linear_tid, + RandomAccessIterator block_src_it, + T (&dst_items)[ITEMS_PER_THREAD], + int block_items_end, + DefaultT oob_default) +{ +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = oob_default; + } + + LoadDirectBlocked(linear_tid, block_src_it, dst_items, block_items_end); +} + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + +//! @brief Internal implementation for load vectorization +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D +//! thread blocks) +//! +//! @param[in] block_src_ptr +//! Input pointer for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +InternalLoadDirectBlockedVectorized(int linear_tid, const T* block_src_ptr, T (&dst_items)[ITEMS_PER_THREAD]) +{ + // Find biggest memory access word that T is a whole multiple of + using device_word_t = typename UnitWord::DeviceWord; + _CCCL_DIAG_PUSH +# if _CCCL_COMPILER(CLANG, >=, 10) + _CCCL_DIAG_SUPPRESS_CLANG("-Wsizeof-array-div") +# endif // _CCCL_COMPILER(CLANG, >=, 10) + constexpr int total_words = static_cast(sizeof(dst_items) / sizeof(device_word_t)); + _CCCL_DIAG_POP + constexpr int vector_size = (total_words % 4 == 0) ? 4 : (total_words % 2 == 0) ? 
2 : 1; + constexpr int vectors_per_thread = total_words / vector_size; + using vector_t = typename CubVector::Type; + + // Load into an array of vectors in thread-blocked order + vector_t vec_items[vectors_per_thread]; + const vector_t* vec_ptr = reinterpret_cast(block_src_ptr) + linear_tid * vectors_per_thread; +# pragma unroll + for (int i = 0; i < vectors_per_thread; i++) + { + vec_items[i] = ThreadLoad(vec_ptr + i); + } + +// Copy to destination +# pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = *(reinterpret_cast(vec_items) + i); + } +} + +#endif // _CCCL_DOXYGEN_INVOKED + +//! @rst +//! Load a linear segment of items into a blocked arrangement across the thread block. +//! +//! @blocked +//! +//! The input offset (``block_ptr + block_offset``) must be quad-item aligned +//! +//! The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT: +//! +//! - ``ITEMS_PER_THREAD`` is odd +//! - The data type ``T`` is not a built-in primitive or CUDA vector type +//! (e.g., ``short``, ``int2``, ``double``, ``float2``, etc.) +//! +//! @endrst +//! +//! @tparam T +//! **[inferred]** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + +//! linear_tid` for 2D thread blocks) +//! +//! @param[in] block_src_ptr +//! The thread block's base pointer for loading from +//! +//! @param[out] dst_items +//! destination to load data into +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +LoadDirectBlockedVectorized(int linear_tid, T* block_src_ptr, T (&dst_items)[ITEMS_PER_THREAD]) +{ + InternalLoadDirectBlockedVectorized(linear_tid, block_src_ptr, dst_items); +} + +//! @} end member group +//! @name Striped arrangement I/O (direct) +//! @{ + +//! @rst +//! 
Load a linear segment of items into a striped arrangement across the thread block. +//! +//! @striped +//! +//! @endrst +//! +//! @tparam BLOCK_THREADS +//! The thread block size in threads +//! +//! @tparam T +//! **[inferred]** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **[inferred]** The random-access iterator type for input iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D +//! thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +LoadDirectStriped(int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) +{ +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = block_src_it[linear_tid + i * BLOCK_THREADS]; + } +} + +namespace detail +{ +template +_CCCL_DEVICE _CCCL_FORCEINLINE void load_transform_direct_striped( + int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], TransformOpT transform_op) +{ +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = transform_op(block_src_it[linear_tid + i * BLOCK_THREADS]); + } +} +} // namespace detail + +//! @rst +//! Load a linear segment of items into a striped arrangement across the thread block, guarded by range +//! +//! @striped +//! +//! @endrst +//! +//! @tparam BLOCK_THREADS +//! The thread block size in threads +//! +//! @tparam T +//! **inferred** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **inferred** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **inferred** The random-access iterator type for input iterator. +//! 
+//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., (threadIdx.y * blockDim.x) + +//! linear_tid for 2D thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +//! +//! @param[in] block_items_end +//! Number of valid items to load +template +_CCCL_DEVICE _CCCL_FORCEINLINE void LoadDirectStriped( + int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) +{ +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + const auto src_pos = linear_tid + i * BLOCK_THREADS; + if (src_pos < block_items_end) + { + dst_items[i] = block_src_it[src_pos]; + } + } +} + +//! @rst +//! Load a linear segment of items into a striped arrangement across the thread block, guarded +//! by range, with a fall-back assignment of out-of-bound elements. +//! +//! @striped +//! +//! @endrst +//! +//! @tparam BLOCK_THREADS +//! The thread block size in threads +//! +//! @tparam T +//! **inferred** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **inferred** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **inferred** The random-access iterator type for input \iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + +//! linear_tid` for 2D thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +//! +//! @param[in] block_items_end +//! Number of valid items to load +//! +//! @param[in] oob_default +//! 
Default value to assign out-of-bound items +template +_CCCL_DEVICE _CCCL_FORCEINLINE void LoadDirectStriped( + int linear_tid, + RandomAccessIterator block_src_it, + T (&dst_items)[ITEMS_PER_THREAD], + int block_items_end, + DefaultT oob_default) +{ +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = oob_default; + } + + LoadDirectStriped(linear_tid, block_src_it, dst_items, block_items_end); +} + +//! @} end member group +//! @name Warp-striped arrangement I/O (direct) +//! @{ + +//! @rst +//! Load a linear segment of items into a warp-striped arrangement across the thread block. +//! +//! @warpstriped +//! +//! Usage Considerations +//! ++++++++++++++++++++ +//! +//! The number of threads in the thread block must be a multiple of the architecture's warp size. +//! +//! @endrst +//! +//! @tparam T +//! **inferred** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **inferred** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **inferred** The random-access iterator type for input iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + +//! linear_tid` for 2D thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +LoadDirectWarpStriped(int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) +{ + const int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); + const int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; + const int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; + +// Load directly in warp-striped order +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + new (&dst_items[i]) T(block_src_it[warp_offset + tid + (i * CUB_PTX_WARP_THREADS)]); + } +} + +//! @rst +//! 
Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range +//! +//! @warpstriped +//! +//! Usage Considerations +//! ++++++++++++++++++++ +//! +//! The number of threads in the thread block must be a multiple of the architecture's warp size. +//! +//! @endrst +//! +//! @tparam T +//! **inferred** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **inferred** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **inferred** The random-access iterator type for input \iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + +//! linear_tid` for 2D thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +//! +//! @param[in] block_items_end +//! Number of valid items to load +template +_CCCL_DEVICE _CCCL_FORCEINLINE void LoadDirectWarpStriped( + int linear_tid, RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) +{ + const int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); + const int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; + const int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; + +// Load directly in warp-striped order +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + const auto src_pos = warp_offset + tid + (i * CUB_PTX_WARP_THREADS); + if (src_pos < block_items_end) + { + new (&dst_items[i]) T(block_src_it[src_pos]); + } + } +} + +//! @rst +//! Load a linear segment of items into a warp-striped arrangement across the thread block, +//! guarded by range, with a fall-back assignment of out-of-bound elements. +//! +//! @warpstriped +//! +//! @endrst +//! +//! Usage Considerations +//! ++++++++++++++++++++ +//! +//! 
The number of threads in the thread block must be a multiple of the architecture's warp size. +//! +//! @tparam T +//! **inferred** The data type to load. +//! +//! @tparam ITEMS_PER_THREAD +//! **inferred** The number of consecutive items partitioned onto each thread. +//! +//! @tparam RandomAccessIterator +//! **inferred** The random-access iterator type for input \iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread (e.g., `(threadIdx.y * blockDim.x) + +//! linear_tid` for 2D thread blocks) +//! +//! @param[in] block_src_it +//! The thread block's base iterator for loading from +//! +//! @param[out] dst_items +//! Destination to load data into +//! +//! @param[in] block_items_end +//! Number of valid items to load +//! +//! @param[in] oob_default +//! Default value to assign out-of-bound items +template +_CCCL_DEVICE _CCCL_FORCEINLINE void LoadDirectWarpStriped( + int linear_tid, + RandomAccessIterator block_src_it, + T (&dst_items)[ITEMS_PER_THREAD], + int block_items_end, + DefaultT oob_default) +{ +// Load directly in warp-striped order +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; i++) + { + dst_items[i] = oob_default; + } + + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items, block_items_end); +} + +//! @} end member group + +//! @brief cub::BlockLoadAlgorithm enumerates alternative algorithms for cub::BlockLoad to read a linear segment of data +//! from memory into a blocked arrangement across a CUDA thread block. +enum BlockLoadAlgorithm +{ + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is read directly from memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! The utilization of memory transactions (coalescing) decreases as the access stride between threads increases + //! (i.e., the number items per thread). + //! @endrst + BLOCK_LOAD_DIRECT, + + //! @rst + //! Overview + //! 
++++++++++++++++++++++++++ + //! + //! A :ref:`striped arrangement ` of data is read directly from memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! The utilization of memory transactions (coalescing) doesn't depend on the number of items per thread. + //! + //! @endrst + BLOCK_LOAD_STRIPED, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is read from memory using CUDA's built-in + //! vectorized loads as a coalescing optimization. For example, ``ld.global.v4.s32`` instructions will be generated + //! when ``T = int`` and ``ITEMS_PER_THREAD % 4 == 0``. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) remains high until the the access stride between threads + //! (i.e., the number items per thread) exceeds the maximum vector load width (typically 4 items or 64B, whichever + //! is lower). + //! - The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT: + //! + //! - ``ITEMS_PER_THREAD`` is odd + //! - The ``RandomAccessIterator`` is not a simple pointer type + //! - The block input offset is not quadword-aligned + //! - The data type ``T`` is not a built-in primitive or CUDA vector type + //! (e.g., ``short``, ``int2``, ``double``, ``float2``, etc.) + //! + //! @endrst + BLOCK_LOAD_VECTORIZE, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`striped arrangement ` of data is read efficiently from memory and then locally + //! transposed into a :ref:`blocked arrangement `. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) remains high regardless of items loaded per thread. + //! - The local reordering incurs slightly longer latencies and throughput than the direct cub::BLOCK_LOAD_DIRECT and + //! 
cub::BLOCK_LOAD_VECTORIZE alternatives. + //! + //! @endrst + BLOCK_LOAD_TRANSPOSE, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`warp-striped arrangement ` of data is read efficiently from memory and then + //! locally transposed into a :ref:`blocked arrangement `. + //! + //! Usage Considerations + //! ++++++++++++++++++++++++++ + //! + //! - BLOCK_THREADS must be a multiple of WARP_THREADS + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) remains high regardless of items loaded per thread. + //! - The local reordering incurs slightly larger latencies than the direct cub::BLOCK_LOAD_DIRECT and + //! cub::BLOCK_LOAD_VECTORIZE alternatives. + //! - Provisions more shared storage, but incurs smaller latencies than the + //! BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED alternative. + //! + //! @endrst + BLOCK_LOAD_WARP_TRANSPOSE, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! Like ``BLOCK_LOAD_WARP_TRANSPOSE``, a :ref:`warp-striped arrangement ` of data is read + //! directly from memory and then is locally transposed into a :ref:`blocked arrangement `. + //! To reduce the shared memory requirement, only one warp's worth of shared memory is provisioned and is subsequently + //! time-sliced among warps. + //! + //! Usage Considerations + //! ++++++++++++++++++++++++++ + //! + //! - BLOCK_THREADS must be a multiple of WARP_THREADS + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) remains high regardless of items loaded per thread. + //! - Provisions less shared memory temporary storage, but incurs larger latencies than the BLOCK_LOAD_WARP_TRANSPOSE + //! alternative. + //! + //! @endrst + BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, +}; + +//! @rst +//! The BlockLoad class provides :ref:`collective ` data movement methods for loading a linear +//! 
segment of items from memory into a :ref:`blocked arrangement ` across a CUDA thread +//! block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - The BlockLoad class provides a single data movement abstraction that can be specialized to implement different +//! cub::BlockLoadAlgorithm strategies. This facilitates different performance policies for different architectures, +//! data types, granularity sizes, etc. +//! - BlockLoad can be optionally specialized by different data movement strategies: +//! +//! #. :cpp:enumerator:`cub::BLOCK_LOAD_DIRECT`: +//! A :ref:`blocked arrangement ` of data is read directly from memory. +//! #. :cpp:enumerator:`cub::BLOCK_LOAD_STRIPED`: +//! A :ref:`striped arrangement ` of data is read directly from memory. +//! #. :cpp:enumerator:`cub::BLOCK_LOAD_VECTORIZE`: +//! A :ref:`blocked arrangement ` of data is read directly from memory +//! using CUDA's built-in vectorized loads as a coalescing optimization. +//! #. :cpp:enumerator:`cub::BLOCK_LOAD_TRANSPOSE`: +//! A :ref:`striped arrangement ` of data is read directly from memory and is then +//! locally transposed into a :ref:`blocked arrangement `. +//! #. :cpp:enumerator:`cub::BLOCK_LOAD_WARP_TRANSPOSE`: +//! A :ref:`warp-striped arrangement ` of data is read directly from memory and is then +//! locally transposed into a :ref:`blocked arrangement `. +//! #. :cpp:enumerator:`cub::BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED`: +//! A :ref:`warp-striped arrangement ` of data is read directly from memory and is then +//! locally transposed into a :ref:`blocked arrangement ` one warp at a time. +//! +//! - @rowmajor +//! +//! A Simple Example +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! @blockcollective{BlockLoad} +//! +//! The code snippet below illustrates the loading of a linear segment of 512 integers into a "blocked" arrangement +//! across 128 threads where each thread owns 4 consecutive items. The load is specialized for +//! 
``BLOCK_LOAD_WARP_TRANSPOSE``, meaning memory references are efficiently coalesced using a warp-striped access +//! pattern (after which items are locally reordered among threads). +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(int *d_data, ...) +//! { +//! // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each +//! using BlockLoad = cub::BlockLoad; +//! +//! // Allocate shared memory for BlockLoad +//! __shared__ typename BlockLoad::TempStorage temp_storage; +//! +//! // Load a segment of consecutive items that are blocked across threads +//! int thread_data[4]; +//! BlockLoad(temp_storage).Load(d_data, thread_data); +//! +//! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, ...``. The set of ``thread_data`` across the block of threads in +//! those threads will be ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }``. +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared memory with +//! BlockReduce and how to re-purpose the same memory region. This example can be easily adapted to the storage required +//! by BlockLoad. +//! +//! @endrst +//! +//! @tparam T +// The data type to read into (which must be convertible from the input iterator's value type). +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam ITEMS_PER_THREAD +//! The number of consecutive items partitioned onto each thread. +//! +//! @tparam ALGORITHM +//! **[optional]** cub::BlockLoadAlgorithm tuning policy. default: ``cub::BLOCK_LOAD_DIRECT``. +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! 
@tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. +template +class BlockLoad +{ + static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; // total threads in the block + + template + struct LoadInternal; // helper to dispatch the load algorithm + + template + struct LoadInternal + { + using TempStorage = NullType; + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + LoadDirectBlocked(linear_tid, block_src_it, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + LoadDirectBlocked(linear_tid, block_src_it, dst_items, block_items_end); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + LoadDirectBlocked(linear_tid, block_src_it, dst_items, block_items_end, oob_default); + } + }; + + template + struct LoadInternal + { + using TempStorage = NullType; + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + LoadDirectStriped(linear_tid, block_src_it, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + LoadDirectStriped(linear_tid, block_src_it, dst_items, block_items_end); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + 
LoadDirectStriped(linear_tid, block_src_it, dst_items, block_items_end, oob_default); + } + }; + + template + struct LoadInternal + { + using TempStorage = NullType; + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + // attempts vectorization (pointer) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(const T* block_ptr, T (&dst_items)[ITEMS_PER_THREAD]) + { + InternalLoadDirectBlockedVectorized(linear_tid, block_ptr, dst_items); + } + + // any other iterator, no vectorization + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + LoadDirectBlocked(linear_tid, block_src_it, dst_items); + } + + // attempts vectorization (cache modified iterator) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(CacheModifiedInputIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + InternalLoadDirectBlockedVectorized(linear_tid, block_src_it.ptr, dst_items); + } + + // skips vectorization + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + LoadDirectBlocked(linear_tid, block_src_it, dst_items, block_items_end); + } + + // skips vectorization + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + LoadDirectBlocked(linear_tid, block_src_it, dst_items, block_items_end, oob_default); + } + }; + + template + struct LoadInternal + { + using BlockExchange = BlockExchange; + using _TempStorage = typename BlockExchange::TempStorage; + using TempStorage = Uninitialized<_TempStorage>; + + _TempStorage& temp_storage; + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + 
{} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + LoadDirectStriped(linear_tid, block_src_it, dst_items); + BlockExchange(temp_storage).StripedToBlocked(dst_items, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + LoadDirectStriped(linear_tid, block_src_it, dst_items, block_items_end); + BlockExchange(temp_storage).StripedToBlocked(dst_items, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + LoadDirectStriped(linear_tid, block_src_it, dst_items, block_items_end, oob_default); + BlockExchange(temp_storage).StripedToBlocked(dst_items, dst_items); + } + }; + + template + struct LoadInternal + { + static constexpr int WARP_THREADS = CUB_WARP_THREADS(0); + static_assert(BLOCK_THREADS % WARP_THREADS == 0, "BLOCK_THREADS must be a multiple of WARP_THREADS"); + + using BlockExchange = BlockExchange; + using _TempStorage = typename BlockExchange::TempStorage; + using TempStorage = Uninitialized<_TempStorage>; + + _TempStorage& temp_storage; + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items); + BlockExchange(temp_storage).WarpStripedToBlocked(dst_items, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items, block_items_end); + 
BlockExchange(temp_storage).WarpStripedToBlocked(dst_items, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items, block_items_end, oob_default); + BlockExchange(temp_storage).WarpStripedToBlocked(dst_items, dst_items); + } + }; + + template + struct LoadInternal + { + static constexpr int WARP_THREADS = CUB_WARP_THREADS(0); + static_assert(BLOCK_THREADS % WARP_THREADS == 0, "BLOCK_THREADS must be a multiple of WARP_THREADS"); + + using BlockExchange = BlockExchange; + using _TempStorage = typename BlockExchange::TempStorage; + using TempStorage = Uninitialized<_TempStorage>; + + _TempStorage& temp_storage; + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items); + BlockExchange(temp_storage).WarpStripedToBlocked(dst_items, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items, block_items_end); + BlockExchange(temp_storage).WarpStripedToBlocked(dst_items, dst_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + LoadDirectWarpStriped(linear_tid, block_src_it, dst_items, block_items_end, oob_default); + BlockExchange(temp_storage).WarpStripedToBlocked(dst_items, dst_items); + } + }; + + using InternalLoad = LoadInternal; // load implementation to use + using 
_TempStorage = typename InternalLoad::TempStorage; + + // Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + _TempStorage& temp_storage; + int linear_tid; + +public: + /// @smemstorage{BlockLoad} + using TempStorage = Uninitialized<_TempStorage>; + + //! @name Collective constructors + //! @{ + + /// @brief Collective constructor using a private static allocation of shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE BlockLoad() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /// @brief Collective constructor using the specified memory allocation as temporary storage. + /// @param[in] temp_storage Reference to memory allocation having layout type TempStorage + _CCCL_DEVICE _CCCL_FORCEINLINE BlockLoad(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Data movement + //! @{ + + //! @rst + //! Load a linear segment of items from memory. + //! + //! - @blocked + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the loading of a linear segment of 512 integers into a "blocked" arrangement + //! across 128 threads where each thread owns 4 consecutive items. The load is specialized for + //! ``BLOCK_LOAD_WARP_TRANSPOSE``, meaning memory references are efficiently coalesced using a warp-striped access + //! pattern (after which items are locally reordered among threads). + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each + //! using BlockLoad = cub::BlockLoad; + //! + //! // Allocate shared memory for BlockLoad + //! 
__shared__ typename BlockLoad::TempStorage temp_storage; + //! + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! BlockLoad(temp_storage).Load(d_data, thread_data); + //! + //! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, ...``. The set of ``thread_data`` across the block of threads + //! in those threads will be ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }``. + //! + //! @endrst + //! + //! @param[in] block_src_it + //! The thread block's base iterator for loading from + //! + //! @param[out] dst_items + //! Destination to load data into + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD]) + { + InternalLoad(temp_storage, linear_tid).Load(block_src_it, dst_items); + } + + //! @rst + //! + //! Load a linear segment of items from memory, guarded by range. + //! + //! - @blocked + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the guarded loading of a linear segment of 512 integers into a "blocked" + //! arrangement across 128 threads where each thread owns 4 consecutive items. The load is specialized for + //! ``BLOCK_LOAD_WARP_TRANSPOSE``, meaning memory references are efficiently coalesced using a warp-striped access + //! pattern (after which items are locally reordered among threads). + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, int block_items_end, ...) + //! { + //! // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each + //! using BlockLoad = cub::BlockLoad; + //! + //! // Allocate shared memory for BlockLoad + //! __shared__ typename BlockLoad::TempStorage temp_storage; + //! + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! BlockLoad(temp_storage).Load(d_data, thread_data, block_items_end); + //! + //! 
Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, 6...`` and ``block_items_end`` is ``5``. The set of + //! ``thread_data`` across the block of threads in those threads will be ``{ [0,1,2,3], [4,?,?,?], ..., [?,?,?,?] }``, + //! with only the first two threads being unmasked to load portions of valid data (and other items remaining + //! unassigned). + //! + //! @endrst + //! + //! @param[in] block_src_it + //! The thread block's base iterator for loading from + //! + //! @param[out] dst_items + //! Destination to load data into + //! + //! @param[in] block_items_end + //! Number of valid items to load + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end) + { + InternalLoad(temp_storage, linear_tid).Load(block_src_it, dst_items, block_items_end); + } + + //! @rst + //! Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements + //! + //! - @blocked + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the guarded loading of a linear segment of 512 integers into a "blocked" + //! arrangement across 128 threads where each thread owns 4 consecutive items. The load is specialized for + //! ``BLOCK_LOAD_WARP_TRANSPOSE``, meaning memory references are efficiently coalesced using a warp-striped access + //! pattern (after which items are locally reordered among threads). + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, int block_items_end, ...) + //! { + //! // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each + //! using BlockLoad = cub::BlockLoad; + //! + //! // Allocate shared memory for BlockLoad + //! __shared__ typename BlockLoad::TempStorage temp_storage; + //! + //! // Load a segment of consecutive items that are blocked across threads + //! 
int thread_data[4]; + //! BlockLoad(temp_storage).Load(d_data, thread_data, block_items_end, -1); + //! + //! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, 6...``, ``block_items_end`` is ``5``, and the out-of-bounds + //! default is ``-1``. The set of ``thread_data`` across the block of threads in those threads will be + //! ``{ [0,1,2,3], [4,-1,-1,-1], ..., [-1,-1,-1,-1] }``, with only the first two threads being unmasked to load + //! portions of valid data (and other items are assigned ``-1``) + //! + //! @endrst + //! + //! @param[in] block_src_it + //! The thread block's base iterator for loading from + //! + //! @param[out] dst_items + //! Destination to load data into + //! + //! @param[in] block_items_end + //! Number of valid items to load + //! + //! @param[in] oob_default + //! Default value to assign out-of-bound items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(RandomAccessIterator block_src_it, T (&dst_items)[ITEMS_PER_THREAD], int block_items_end, DefaultT oob_default) + { + InternalLoad(temp_storage, linear_tid).Load(block_src_it, dst_items, block_items_end, oob_default); + } + + //! @} end member group +}; + +template > +struct BlockLoadType +{ + using type = cub::BlockLoad; +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_merge_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_merge_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..3ade5eb16091d34c49e0289884ead53c30b2bd86 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_merge_sort.cuh @@ -0,0 +1,771 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include +#include +#include + +CUB_NAMESPACE_BEGIN + +// This implements the DiagonalIntersection algorithm from Merge-Path. Additional details can be found in: +// * S. Odeh, O. Green, Z. Mwassi, O. Shmueli, Y. Birk, "Merge Path - Parallel Merging Made Simple", Multithreaded +// Architectures and Applications (MTAAP) Workshop, IEEE 26th International Parallel & Distributed Processing +// Symposium (IPDPS), 2012 +// * S. Odeh, O. Green, Y. Birk, "Merge Path - A Visually Intuitive Approach to Parallel Merging", 2014, URL: +// https://arxiv.org/abs/1406.2628 +template +_CCCL_DEVICE _CCCL_FORCEINLINE OffsetT +MergePath(KeyIt1 keys1, KeyIt2 keys2, OffsetT keys1_count, OffsetT keys2_count, OffsetT diag, BinaryPred binary_pred) +{ + OffsetT keys1_begin = diag < keys2_count ? 
0 : diag - keys2_count; + OffsetT keys1_end = (::cuda::std::min)(diag, keys1_count); + + while (keys1_begin < keys1_end) + { + const OffsetT mid = cub::MidPoint(keys1_begin, keys1_end); + // pull copies of the keys before calling binary_pred so proxy references are unwrapped + const detail::value_t key1 = keys1[mid]; + const detail::value_t key2 = keys2[diag - 1 - mid]; + if (binary_pred(key2, key1)) + { + keys1_end = mid; + } + else + { + keys1_begin = mid + 1; + } + } + return keys1_begin; +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void SerialMerge( + KeyIt keys_shared, + int keys1_beg, + int keys2_beg, + int keys1_count, + int keys2_count, + KeyT (&output)[ITEMS_PER_THREAD], + int (&indices)[ITEMS_PER_THREAD], + CompareOp compare_op) +{ + const int keys1_end = keys1_beg + keys1_count; + const int keys2_end = keys2_beg + keys2_count; + + KeyT key1 = keys_shared[keys1_beg]; + KeyT key2 = keys_shared[keys2_beg]; + +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + const bool p = (keys2_beg < keys2_end) && ((keys1_beg >= keys1_end) || compare_op(key2, key1)); + output[item] = p ? key2 : key1; + indices[item] = p ? keys2_beg++ : keys1_beg++; + if (p) + { + key2 = keys_shared[keys2_beg]; + } + else + { + key1 = keys_shared[keys1_beg]; + } + } +} + +/** + * @brief Generalized merge sort algorithm + * + * This class is used to reduce code duplication. Warp and Block merge sort + * differ only in how they compute thread index and how they synchronize + * threads. Since synchronization might require access to custom data + * (like member mask), CRTP is used. + * + * @par + * The code snippet below illustrates the way this class can be used. 
+ * @par + * @code + * #include // or equivalently + * + * constexpr int BLOCK_THREADS = 256; + * constexpr int ITEMS_PER_THREAD = 9; + * + * class BlockMergeSort : public BlockMergeSortStrategy + * { + * using BlockMergeSortStrategyT = + * BlockMergeSortStrategy; + * public: + * __device__ __forceinline__ explicit BlockMergeSort( + * typename BlockMergeSortStrategyT::TempStorage &temp_storage) + * : BlockMergeSortStrategyT(temp_storage, threadIdx.x) + * {} + * + * __device__ __forceinline__ void SyncImplementation() const + * { + * __syncthreads(); + * } + * }; + * @endcode + * + * @tparam KeyT + * KeyT type + * + * @tparam ValueT + * ValueT type. cub::NullType indicates a keys-only sort + * + * @tparam SynchronizationPolicy + * Provides a way of synchronizing threads. Should be derived from + * `BlockMergeSortStrategy`. + */ +template +class BlockMergeSortStrategy +{ + static_assert(PowerOfTwo::VALUE, "NUM_THREADS must be a power of two"); + +private: + static constexpr int ITEMS_PER_TILE = ITEMS_PER_THREAD * NUM_THREADS; + + // Whether or not there are values to be trucked along with keys + static constexpr bool KEYS_ONLY = ::cuda::std::is_same::value; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + /// Shared memory type required by this thread block + union _TempStorage + { + KeyT keys_shared[ITEMS_PER_TILE + 1]; + ValueT items_shared[ITEMS_PER_TILE + 1]; + }; // union TempStorage +#endif // _CCCL_DOXYGEN_INVOKED + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + const unsigned int linear_tid; + +public: + /// \smemstorage{BlockMergeSort} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + BlockMergeSortStrategy() = delete; + explicit _CCCL_DEVICE _CCCL_FORCEINLINE BlockMergeSortStrategy(unsigned int linear_tid) + : temp_storage(PrivateStorage()) + , 
linear_tid(linear_tid) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE BlockMergeSortStrategy(TempStorage& temp_storage, unsigned int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int get_linear_tid() const + { + return linear_tid; + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * Sort is not guaranteed to be stable. That is, suppose that i and j are + * equivalent: neither one is less than the other. It is not guaranteed + * that the relative order of these two elements will be preserved by sort. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. + * + * @param[in,out] keys + * Keys to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Sort(KeyT (&keys)[ITEMS_PER_THREAD], CompareOp compare_op) + { + ValueT items[ITEMS_PER_THREAD]; + Sort(keys, items, compare_op, ITEMS_PER_TILE, keys[0]); + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * - Sort is not guaranteed to be stable. That is, suppose that `i` and `j` + * are equivalent: neither one is less than the other. It is not guaranteed + * that the relative order of these two elements will be preserved by sort. + * - The value of `oob_default` is assigned to all elements that are out of + * `valid_items` boundaries. It's expected that `oob_default` is ordered + * after any value in the `valid_items` boundaries. The algorithm always + * sorts a fixed amount of elements, which is equal to + * `ITEMS_PER_THREAD * BLOCK_THREADS`. 
If there is a value that is ordered + * after `oob_default`, it won't be placed within `valid_items` boundaries. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. + * + * @param[in,out] keys + * Keys to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * @param[in] valid_items + * Number of valid items to sort + * + * @param[in] oob_default + * Default value to assign out-of-bound items + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Sort(KeyT (&keys)[ITEMS_PER_THREAD], CompareOp compare_op, int valid_items, KeyT oob_default) + { + ValueT items[ITEMS_PER_THREAD]; + Sort(keys, items, compare_op, valid_items, oob_default); + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using a merge sorting method. + * + * @par + * Sort is not guaranteed to be stable. That is, suppose that `i` and `j` are + * equivalent: neither one is less than the other. It is not guaranteed + * that the relative order of these two elements will be preserved by sort. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. 
+ * + * @param[in,out] keys + * Keys to sort + * + * @param[in,out] items + * Values to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Sort(KeyT (&keys)[ITEMS_PER_THREAD], ValueT (&items)[ITEMS_PER_THREAD], CompareOp compare_op) + { + Sort(keys, items, compare_op, ITEMS_PER_TILE, keys[0]); + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * - Sort is not guaranteed to be stable. That is, suppose that `i` and `j` + * are equivalent: neither one is less than the other. It is not guaranteed + * that the relative order of these two elements will be preserved by sort. + * - The value of `oob_default` is assigned to all elements that are out of + * `valid_items` boundaries. It's expected that `oob_default` is ordered + * after any value in the `valid_items` boundaries. The algorithm always + * sorts a fixed amount of elements, which is equal to + * `ITEMS_PER_THREAD * BLOCK_THREADS`. If there is a value that is ordered + * after `oob_default`, it won't be placed within `valid_items` boundaries. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)` + * `CompareOp` is a model of [Strict Weak Ordering]. 
+ * + * @tparam IS_LAST_TILE + * True if `valid_items` isn't equal to the `ITEMS_PER_TILE` + * + * @param[in,out] keys + * Keys to sort + * + * @param[in,out] items + * Values to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * @param[in] valid_items + * Number of valid items to sort + * + * @param[in] oob_default + * Default value to assign out-of-bound items + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Sort(KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&items)[ITEMS_PER_THREAD], + CompareOp compare_op, + int valid_items, + KeyT oob_default) + { + if (IS_LAST_TILE) + { + // if last tile, find valid max_key + // and fill the remaining keys with it + // + KeyT max_key = oob_default; + +#pragma unroll + for (int item = 1; item < ITEMS_PER_THREAD; ++item) + { + if (ITEMS_PER_THREAD * linear_tid + item < valid_items) + { + max_key = compare_op(max_key, keys[item]) ? 
keys[item] : max_key; + } + else + { + keys[item] = max_key; + } + } + } + + // if first element of thread is in input range, stable sort items + // + if (!IS_LAST_TILE || ITEMS_PER_THREAD * linear_tid < valid_items) + { + StableOddEvenSort(keys, items, compare_op); + } + + // each thread has sorted keys + // merge sort keys in shared memory + // + for (int target_merged_threads_number = 2; target_merged_threads_number <= NUM_THREADS; + target_merged_threads_number *= 2) + { + int merged_threads_number = target_merged_threads_number / 2; + int mask = target_merged_threads_number - 1; + + Sync(); + +// store keys in shmem +// +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + int idx = ITEMS_PER_THREAD * linear_tid + item; + temp_storage.keys_shared[idx] = keys[item]; + } + + Sync(); + + int indices[ITEMS_PER_THREAD]; + + int first_thread_idx_in_thread_group_being_merged = ~mask & linear_tid; + int start = ITEMS_PER_THREAD * first_thread_idx_in_thread_group_being_merged; + int size = ITEMS_PER_THREAD * merged_threads_number; + + int thread_idx_in_thread_group_being_merged = mask & linear_tid; + + int diag = (::cuda::std::min)(valid_items, ITEMS_PER_THREAD * thread_idx_in_thread_group_being_merged); + + int keys1_beg = (::cuda::std::min)(valid_items, start); + int keys1_end = (::cuda::std::min)(valid_items, keys1_beg + size); + int keys2_beg = keys1_end; + int keys2_end = (::cuda::std::min)(valid_items, keys2_beg + size); + + int keys1_count = keys1_end - keys1_beg; + int keys2_count = keys2_end - keys2_beg; + + int partition_diag = MergePath( + &temp_storage.keys_shared[keys1_beg], + &temp_storage.keys_shared[keys2_beg], + keys1_count, + keys2_count, + diag, + compare_op); + + int keys1_beg_loc = keys1_beg + partition_diag; + int keys1_end_loc = keys1_end; + int keys2_beg_loc = keys2_beg + diag - partition_diag; + int keys2_end_loc = keys2_end; + int keys1_count_loc = keys1_end_loc - keys1_beg_loc; + int keys2_count_loc = keys2_end_loc - 
keys2_beg_loc; + SerialMerge( + &temp_storage.keys_shared[0], + keys1_beg_loc, + keys2_beg_loc, + keys1_count_loc, + keys2_count_loc, + keys, + indices, + compare_op); + + if (!KEYS_ONLY) + { + Sync(); + +// store keys in shmem +// +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + int idx = ITEMS_PER_THREAD * linear_tid + item; + temp_storage.items_shared[idx] = items[item]; + } + + Sync(); + +// gather items from shmem +// +#pragma unroll + for (int item = 0; item < ITEMS_PER_THREAD; ++item) + { + items[item] = temp_storage.items_shared[indices[item]]; + } + } + } + } // func block_merge_sort + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * StableSort is stable: it preserves the relative ordering of equivalent + * elements. That is, if `x` and `y` are elements such that `x` precedes `y`, + * and if the two elements are equivalent (neither `x < y` nor `y < x`) then + * a postcondition of StableSort is that `x` still precedes `y`. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. + * + * @param[in,out] keys + * Keys to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void StableSort(KeyT (&keys)[ITEMS_PER_THREAD], CompareOp compare_op) + { + Sort(keys, compare_op); + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * StableSort is stable: it preserves the relative ordering of equivalent + * elements. 
That is, if `x` and `y` are elements such that `x` precedes `y`, + * and if the two elements are equivalent (neither `x < y` nor `y < x`) then + * a postcondition of StableSort is that `x` still precedes `y`. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. + * + * @param[in,out] keys + * Keys to sort + * + * @param[in,out] items + * Values to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + StableSort(KeyT (&keys)[ITEMS_PER_THREAD], ValueT (&items)[ITEMS_PER_THREAD], CompareOp compare_op) + { + Sort(keys, items, compare_op); + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * - StableSort is stable: it preserves the relative ordering of equivalent + * elements. That is, if `x` and `y` are elements such that `x` precedes + * `y`, and if the two elements are equivalent (neither `x < y` nor `y < x`) + * then a postcondition of StableSort is that `x` still precedes `y`. + * - The value of `oob_default` is assigned to all elements that are out of + * `valid_items` boundaries. It's expected that `oob_default` is ordered + * after any value in the `valid_items` boundaries. The algorithm always + * sorts a fixed amount of elements, which is equal to + * `ITEMS_PER_THREAD * BLOCK_THREADS`. + * If there is a value that is ordered after `oob_default`, it won't be + * placed within `valid_items` boundaries. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. 
+ * + * @param[in,out] keys + * Keys to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * @param[in] valid_items + * Number of valid items to sort + * + * @param[in] oob_default + * Default value to assign out-of-bound items + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + StableSort(KeyT (&keys)[ITEMS_PER_THREAD], CompareOp compare_op, int valid_items, KeyT oob_default) + { + Sort(keys, compare_op, valid_items, oob_default); + } + + /** + * @brief Sorts items partitioned across a CUDA thread block using + * a merge sorting method. + * + * @par + * - StableSort is stable: it preserves the relative ordering of equivalent + * elements. That is, if `x` and `y` are elements such that `x` precedes + * `y`, and if the two elements are equivalent (neither `x < y` nor `y < x`) + * then a postcondition of StableSort is that `x` still precedes `y`. + * - The value of `oob_default` is assigned to all elements that are out of + * `valid_items` boundaries. It's expected that `oob_default` is ordered + * after any value in the `valid_items` boundaries. The algorithm always + * sorts a fixed amount of elements, which is equal to + * `ITEMS_PER_THREAD * BLOCK_THREADS`. If there is a value that is ordered + * after `oob_default`, it won't be placed within `valid_items` boundaries. + * + * @tparam CompareOp + * functor type having member `bool operator()(KeyT lhs, KeyT rhs)`. + * `CompareOp` is a model of [Strict Weak Ordering]. 
+ * + * @tparam IS_LAST_TILE + * True if `valid_items` isn't equal to the `ITEMS_PER_TILE` + * + * @param[in,out] keys + * Keys to sort + * + * @param[in,out] items + * Values to sort + * + * @param[in] compare_op + * Comparison function object which returns true if the first argument is + * ordered before the second + * + * @param[in] valid_items + * Number of valid items to sort + * + * @param[in] oob_default + * Default value to assign out-of-bound items + * + * [Strict Weak Ordering]: https://en.cppreference.com/w/cpp/concepts/strict_weak_order + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void StableSort( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&items)[ITEMS_PER_THREAD], + CompareOp compare_op, + int valid_items, + KeyT oob_default) + { + Sort(keys, items, compare_op, valid_items, oob_default); + } + +private: + _CCCL_DEVICE _CCCL_FORCEINLINE void Sync() const + { + static_cast(this)->SyncImplementation(); + } +}; + +/** + * @brief The BlockMergeSort class provides methods for sorting items + * partitioned across a CUDA thread block using a merge sorting method. + * + * @tparam KeyT + * KeyT type + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam ITEMS_PER_THREAD + * The number of items per thread + * + * @tparam ValueT + * **[optional]** ValueT type (default: `cub::NullType`, which indicates + * a keys-only sort) + * + * @tparam BLOCK_DIM_Y + * **[optional]** The thread block length in threads along the Y dimension + * (default: 1) + * + * @tparam BLOCK_DIM_Z + * **[optional]** The thread block length in threads along the Z dimension + * (default: 1) + * + * @par Overview + * BlockMergeSort arranges items into ascending order using a comparison + * functor with less-than semantics. Merge sort can handle arbitrary types + * and comparison functors, but is slower than BlockRadixSort when sorting + * arithmetic types into ascending/descending order. 
+ * + * @par A Simple Example + * @blockcollective{BlockMergeSort} + * @par + * The code snippet below illustrates a sort of 512 integer keys that are + * partitioned across 128 threads * where each thread owns 4 consecutive items. + * @par + * @code + * #include // or equivalently + * + * struct CustomLess + * { + * template + * __device__ bool operator()(const DataType &lhs, const DataType &rhs) + * { + * return lhs < rhs; + * } + * }; + * + * __global__ void ExampleKernel(...) + * { + * // Specialize BlockMergeSort for a 1D block of 128 threads owning 4 integer items each + * using BlockMergeSort = cub::BlockMergeSort; + * + * // Allocate shared memory for BlockMergeSort + * __shared__ typename BlockMergeSort::TempStorage temp_storage_shuffle; + * + * // Obtain a segment of consecutive items that are blocked across threads + * int thread_keys[4]; + * ... + * + * BlockMergeSort(temp_storage_shuffle).Sort(thread_keys, CustomLess()); + * ... + * } + * @endcode + * @par + * Suppose the set of input `thread_keys` across the block of threads is + * `{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }`. + * The corresponding output `thread_keys` in those threads will be + * `{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }`. + * + * @par Re-using dynamically allocating shared memory + * The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of + * dynamically shared memory with BlockReduce and how to re-purpose + * the same memory region. + * + * This example can be easily adapted to the storage required by BlockMergeSort. 
+ */ +template +class BlockMergeSort + : public BlockMergeSortStrategy< + KeyT, + ValueT, + BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + ITEMS_PER_THREAD, + BlockMergeSort> +{ +private: + // The thread block size in threads + static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; + static constexpr int ITEMS_PER_TILE = ITEMS_PER_THREAD * BLOCK_THREADS; + + using BlockMergeSortStrategyT = BlockMergeSortStrategy; + +public: + _CCCL_DEVICE _CCCL_FORCEINLINE BlockMergeSort() + : BlockMergeSortStrategyT(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE explicit BlockMergeSort(typename BlockMergeSortStrategyT::TempStorage& temp_storage) + : BlockMergeSortStrategyT(temp_storage, RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + +private: + _CCCL_DEVICE _CCCL_FORCEINLINE void SyncImplementation() const + { + __syncthreads(); + } + + friend BlockMergeSortStrategyT; +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_radix_rank.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_radix_rank.cuh new file mode 100644 index 0000000000000000000000000000000000000000..1155de4ae21aaf6ee61b25dc939a76dd198cbd62 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_radix_rank.cuh @@ -0,0 +1,1217 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! cub::BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @brief Radix ranking algorithm, the algorithm used to implement stable ranking of the +//! keys from a single tile. 
Note that different ranking algorithms require different +//! initial arrangements of keys to function properly. +enum RadixRankAlgorithm +{ + //! Ranking using the BlockRadixRank algorithm with `MEMOIZE_OUTER_SCAN == false`. + //! It uses thread-private histograms, and thus uses more shared memory. + //! Requires blocked arrangement of keys. Does not support count callbacks. + RADIX_RANK_BASIC, + + //! Ranking using the BlockRadixRank algorithm with `MEMOIZE_OUTER_SCAN == true`. + //! Similar to RADIX_RANK BASIC, it requires blocked arrangement of keys and does not support count callbacks. + RADIX_RANK_MEMOIZE, + + //! Ranking using the BlockRadixRankMatch algorithm. It uses warp-private histograms and matching for ranking + //! the keys in a single warp. Therefore, it uses less shared memory compared to RADIX_RANK_BASIC. + //! It requires warp-striped key arrangement and supports count callbacks. + RADIX_RANK_MATCH, + + //! Ranking using the BlockRadixRankMatchEarlyCounts algorithm with `MATCH_ALGORITHM == WARP_MATCH_ANY`. + //! An alternative implementation of match-based ranking that computes bin counts early. + //! Because of this, it works better with onesweep sorting, which requires bin counts for decoupled look-back. + //! Assumes warp-striped key arrangement and supports count callbacks. + RADIX_RANK_MATCH_EARLY_COUNTS_ANY, + + //! Ranking using the BlockRadixRankEarlyCounts algorithm with `MATCH_ALGORITHM == WARP_MATCH_ATOMIC_OR`. + //! It uses extra space in shared memory to generate warp match masks using `atomicOr()`. + //! This is faster when there are few matches, but can lead to slowdowns if the number of matching keys among + //! warp lanes is high. Assumes warp-striped key arrangement and supports count callbacks. 
+ RADIX_RANK_MATCH_EARLY_COUNTS_ATOMIC_OR +}; + +/** Empty callback implementation */ +template +struct BlockRadixRankEmptyCallback +{ + _CCCL_DEVICE _CCCL_FORCEINLINE void operator()(int (&bins)[BINS_PER_THREAD]) {} +}; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document +namespace detail +{ + +template +struct warp_in_block_matcher_t +{ + static _CCCL_DEVICE ::cuda::std::uint32_t match_any(::cuda::std::uint32_t label, ::cuda::std::uint32_t warp_id) + { + if (warp_id == static_cast<::cuda::std::uint32_t>(PartialWarpId)) + { + return MatchAny(label); + } + + return MatchAny(label); + } +}; + +template +struct warp_in_block_matcher_t +{ + static _CCCL_DEVICE ::cuda::std::uint32_t match_any(::cuda::std::uint32_t label, ::cuda::std::uint32_t warp_id) + { + return MatchAny(label); + } +}; + +} // namespace detail +#endif // _CCCL_DOXYGEN_INVOKED + +//! @rst +//! BlockRadixRank provides operations for ranking unsigned integer types within a CUDA thread block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - Keys must be in a form suitable for radix ranking (i.e., unsigned bits). +//! - @blocked +//! +//! Performance Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - @granularity +//! +//! .. code-block:: c++ +//! +//! #include +//! +//! __global__ void ExampleKernel(...) +//! { +//! constexpr int block_threads = 2; +//! constexpr int radix_bits = 5; +//! +//! // Specialize BlockRadixRank for a 1D block of 2 threads +//! // Specialize BlockRadixRank for a 1D block of 2 threads +//! using block_radix_rank = cub::BlockRadixRank; +//! using storage_t = typename block_radix_rank::TempStorage; +//! +//! // Allocate shared memory for BlockRadixSort +//! __shared__ storage_t temp_storage; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int keys[2]; +//! int ranks[2]; +//! ... +//! +//! cub::BFEDigitExtractor extractor(0, radix_bits); +//! 
block_radix_rank(temp_storage).RankKeys(keys, ranks, extractor); +//! +//! ... +//! +//! Suppose the set of input ``keys`` across the block of threads is ``{ [16,10], [9,11] }``. +//! The corresponding output ``ranks`` in those threads will be ``{ [3,1], [0,2] }``. +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared memory with +//! BlockReduce and how to re-purpose the same memory region. +//! This example can be easily adapted to the storage required by BlockRadixRank. +//! +//! @endrst +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam RADIX_BITS +//! The number of radix bits per digit place +//! +//! @tparam IS_DESCENDING +//! Whether or not the sorted-order is high-to-low +//! +//! @tparam MEMOIZE_OUTER_SCAN +//! **[optional]** Whether or not to buffer outer raking scan +//! partials to incur fewer shared memory reads at the expense of higher register pressure +//! (default: true for architectures SM35 and newer, false otherwise). +//! See `BlockScanAlgorithm::BLOCK_SCAN_RAKING_MEMOIZE` for more details. +//! +//! @tparam INNER_SCAN_ALGORITHM +//! **[optional]** The cub::BlockScanAlgorithm algorithm to use (default: cub::BLOCK_SCAN_WARP_SCANS) +//! +//! @tparam SMEM_CONFIG +//! **[optional]** Shared memory bank mode (default: `cudaSharedMemBankSizeFourByte`) +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. 
+template +class BlockRadixRank +{ +private: + // Integer type for digit counters (to be packed into words of type PackedCounters) + using DigitCounter = unsigned short; + + // Integer type for packing DigitCounters into columns of shared memory banks + using PackedCounter = + ::cuda::std::_If; + + static constexpr DigitCounter max_tile_size = ::cuda::std::numeric_limits::max(); + + enum + { + // The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + + RADIX_DIGITS = 1 << RADIX_BITS, + + LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(0), + WARP_THREADS = 1 << LOG_WARP_THREADS, + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + + BYTES_PER_COUNTER = sizeof(DigitCounter), + LOG_BYTES_PER_COUNTER = Log2::VALUE, + + PACKING_RATIO = static_cast(sizeof(PackedCounter) / sizeof(DigitCounter)), + LOG_PACKING_RATIO = Log2::VALUE, + + // Always at least one lane + LOG_COUNTER_LANES = CUB_MAX((int(RADIX_BITS) - int(LOG_PACKING_RATIO)), 0), + COUNTER_LANES = 1 << LOG_COUNTER_LANES, + + // The number of packed counters per thread (plus one for padding) + PADDED_COUNTER_LANES = COUNTER_LANES + 1, + RAKING_SEGMENT = PADDED_COUNTER_LANES, + }; + +public: + enum + { + /// Number of bin-starting offsets tracked per thread + BINS_TRACKED_PER_THREAD = CUB_MAX(1, (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS), + }; + +private: + /// BlockScan type + using BlockScan = BlockScan; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + struct __align__(16) _TempStorage + { + union Aliasable + { + DigitCounter digit_counters[PADDED_COUNTER_LANES][BLOCK_THREADS][PACKING_RATIO]; + PackedCounter raking_grid[BLOCK_THREADS][RAKING_SEGMENT]; + + } aliasable; + + // Storage for scanning local ranks + typename BlockScan::TempStorage block_scan; + }; +#endif // !_CCCL_DOXYGEN_INVOKED + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + + /// Copy of raking segment, promoted to registers + 
PackedCounter cached_segment[RAKING_SEGMENT]; + + /** + * Internal storage allocator + */ + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /** + * Performs upsweep raking reduction, returning the aggregate + */ + _CCCL_DEVICE _CCCL_FORCEINLINE PackedCounter Upsweep() + { + PackedCounter* smem_raking_ptr = temp_storage.aliasable.raking_grid[linear_tid]; + PackedCounter* raking_ptr; + + if (MEMOIZE_OUTER_SCAN) + { +// Copy data into registers +#pragma unroll + for (int i = 0; i < RAKING_SEGMENT; i++) + { + cached_segment[i] = smem_raking_ptr[i]; + } + raking_ptr = cached_segment; + } + else + { + raking_ptr = smem_raking_ptr; + } + + return cub::internal::ThreadReduce(raking_ptr, ::cuda::std::plus<>{}); + } + + /// Performs exclusive downsweep raking scan + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveDownsweep(PackedCounter raking_partial) + { + PackedCounter* smem_raking_ptr = temp_storage.aliasable.raking_grid[linear_tid]; + + PackedCounter* raking_ptr = (MEMOIZE_OUTER_SCAN) ? 
cached_segment : smem_raking_ptr; + + // Exclusive raking downsweep scan + internal::ThreadScanExclusive(raking_ptr, raking_ptr, ::cuda::std::plus<>{}, raking_partial); + + if (MEMOIZE_OUTER_SCAN) + { +// Copy data back to smem +#pragma unroll + for (int i = 0; i < RAKING_SEGMENT; i++) + { + smem_raking_ptr[i] = cached_segment[i]; + } + } + } + + /** + * Reset shared memory digit counters + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ResetCounters() + { +// Reset shared memory digit counters +#pragma unroll + for (int LANE = 0; LANE < PADDED_COUNTER_LANES; LANE++) + { + *((PackedCounter*) temp_storage.aliasable.digit_counters[LANE][linear_tid]) = 0; + } + } + + /** + * Block-scan prefix callback + */ + struct PrefixCallBack + { + _CCCL_DEVICE _CCCL_FORCEINLINE PackedCounter operator()(PackedCounter block_aggregate) + { + PackedCounter block_prefix = 0; + +// Propagate totals in packed fields +#pragma unroll + for (int PACKED = 1; PACKED < PACKING_RATIO; PACKED++) + { + block_prefix += block_aggregate << (sizeof(DigitCounter) * 8 * PACKED); + } + + return block_prefix; + } + }; + + /** + * Scan shared memory digit counters. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void ScanCounters() + { + // Upsweep scan + PackedCounter raking_partial = Upsweep(); + + // Compute exclusive sum + PackedCounter exclusive_partial; + PrefixCallBack prefix_call_back; + BlockScan(temp_storage.block_scan).ExclusiveSum(raking_partial, exclusive_partial, prefix_call_back); + + // Downsweep scan with exclusive partial + ExclusiveDownsweep(exclusive_partial); + } + +public: + /// @smemstorage{BlockScan} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage. 
+ _CCCL_DEVICE _CCCL_FORCEINLINE BlockRadixRank() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. + * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRadixRank(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Raking + //! @{ + + /** + * @brief Rank keys. + * + * @param[in] keys + * Keys for this tile + * + * @param[out] ranks + * For each key, the local rank within the tile + * + * @param[in] digit_extractor + * The digit extractor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD], DigitExtractorT digit_extractor) + { + static_assert(BLOCK_THREADS * KEYS_PER_THREAD <= max_tile_size, + "DigitCounter type is too small to hold this number of keys"); + + DigitCounter thread_prefixes[KEYS_PER_THREAD]; // For each key, the count of previous keys in this tile having the + // same digit + DigitCounter* digit_counters[KEYS_PER_THREAD]; // For each key, the byte-offset of its corresponding digit counter + // in smem + + // Reset shared memory digit counters + ResetCounters(); + +#pragma unroll + for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM) + { + // Get digit + ::cuda::std::uint32_t digit = digit_extractor.Digit(keys[ITEM]); + + // Get sub-counter + ::cuda::std::uint32_t sub_counter = digit >> LOG_COUNTER_LANES; + + // Get counter lane + ::cuda::std::uint32_t counter_lane = digit & (COUNTER_LANES - 1); + + if (IS_DESCENDING) + { + sub_counter = PACKING_RATIO - 1 - sub_counter; + counter_lane = COUNTER_LANES - 1 - counter_lane; + } + + // Pointer to smem digit counter + digit_counters[ITEM] = 
&temp_storage.aliasable.digit_counters[counter_lane][linear_tid][sub_counter]; + + // Load thread-exclusive prefix + thread_prefixes[ITEM] = *digit_counters[ITEM]; + + // Store inclusive prefix + *digit_counters[ITEM] = thread_prefixes[ITEM] + 1; + } + + __syncthreads(); + + // Scan shared memory counters + ScanCounters(); + + __syncthreads(); + +// Extract the local ranks of each key +#pragma unroll + for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM) + { + // Add in thread block exclusive prefix + ranks[ITEM] = thread_prefixes[ITEM] + *digit_counters[ITEM]; + } + } + + /** + * @brief Rank keys. For the lower @p RADIX_DIGITS threads, digit counts for each digit are + * provided for the corresponding thread. + * + * @param[in] keys + * Keys for this tile + * + * @param[out] ranks + * For each key, the local rank within the tile (out parameter) + * + * @param[in] digit_extractor + * The digit extractor + * + * @param[out] exclusive_digit_prefix + * The exclusive prefix sum for the digits + * [(threadIdx.x * BINS_TRACKED_PER_THREAD) + * ... + * (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1] + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + DigitExtractorT digit_extractor, + int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD]) + { + static_assert(BLOCK_THREADS * KEYS_PER_THREAD <= max_tile_size, + "DigitCounter type is too small to hold this number of keys"); + + // Rank keys + RankKeys(keys, ranks, digit_extractor); + +// Get the inclusive and exclusive digit totals corresponding to the calling thread. +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + if (IS_DESCENDING) + { + bin_idx = RADIX_DIGITS - bin_idx - 1; + } + + // Obtain ex/inclusive digit counts. 
(Unfortunately these all reside in the + // first counter column, resulting in unavoidable bank conflicts.) + unsigned int counter_lane = (bin_idx & (COUNTER_LANES - 1)); + unsigned int sub_counter = bin_idx >> (LOG_COUNTER_LANES); + + exclusive_digit_prefix[track] = temp_storage.aliasable.digit_counters[counter_lane][0][sub_counter]; + } + } + } + + //! @} +}; + +/** + * Radix-rank using match.any + */ +template +class BlockRadixRankMatch +{ +private: + using RankT = int32_t; + using DigitCounterT = int32_t; + + enum + { + // The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + + RADIX_DIGITS = 1 << RADIX_BITS, + + LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(0), + WARP_THREADS = 1 << LOG_WARP_THREADS, + PARTIAL_WARP_THREADS = BLOCK_THREADS % WARP_THREADS, + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + + PADDED_WARPS = ((WARPS & 0x1) == 0) ? WARPS + 1 : WARPS, + + COUNTERS = PADDED_WARPS * RADIX_DIGITS, + RAKING_SEGMENT = (COUNTERS + BLOCK_THREADS - 1) / BLOCK_THREADS, + PADDED_RAKING_SEGMENT = ((RAKING_SEGMENT & 0x1) == 0) ? RAKING_SEGMENT + 1 : RAKING_SEGMENT, + }; + +public: + enum + { + /// Number of bin-starting offsets tracked per thread + BINS_TRACKED_PER_THREAD = CUB_MAX(1, (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS), + }; + +private: + /// BlockScan type + using BlockScanT = BlockScan; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + struct __align__(16) _TempStorage + { + typename BlockScanT::TempStorage block_scan; + + union __align__(16) Aliasable + { + volatile DigitCounterT warp_digit_counters[RADIX_DIGITS][PADDED_WARPS]; + DigitCounterT raking_grid[BLOCK_THREADS][PADDED_RAKING_SEGMENT]; + } + aliasable; + }; +#endif // !_CCCL_DOXYGEN_INVOKED + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + +public: + /// @smemstorage{BlockRadixRankMatch} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! 
@name Collective constructors + //! @{ + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. + * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRadixRankMatch(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Raking + //! @{ + + /** + * @brief Computes the count of keys for each digit value, and calls the + * callback with the array of key counts. + * + * @tparam CountsCallback The callback type. It should implement an instance + * overload of operator()(int (&bins)[BINS_TRACKED_PER_THREAD]), where bins + * is an array of key counts for each digit value distributed in block + * distribution among the threads of the thread block. Key counts can be + * used, to update other data structures in global or shared + * memory. Depending on the implementation of the ranking algoirhtm + * (see BlockRadixRankMatchEarlyCounts), key counts may become available + * early, therefore, they are returned through a callback rather than a + * separate output parameter of RankKeys(). + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void CallBack(CountsCallback callback) + { + int bins[BINS_TRACKED_PER_THREAD]; +// Get count for each digit +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track; + constexpr int TILE_ITEMS = KEYS_PER_THREAD * BLOCK_THREADS; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + if (IS_DESCENDING) + { + bin_idx = RADIX_DIGITS - bin_idx - 1; + bins[track] = (bin_idx > 0 ? temp_storage.aliasable.warp_digit_counters[bin_idx - 1][0] : TILE_ITEMS) + - temp_storage.aliasable.warp_digit_counters[bin_idx][0]; + } + else + { + bins[track] = + (bin_idx < RADIX_DIGITS - 1 ? 
temp_storage.aliasable.warp_digit_counters[bin_idx + 1][0] : TILE_ITEMS) + - temp_storage.aliasable.warp_digit_counters[bin_idx][0]; + } + } + } + callback(bins); + } + + /** + * @brief Rank keys. + * + * @param[in] keys + * Keys for this tile + * + * @param[out] ranks + * For each key, the local rank within the tile + * + * @param[in] digit_extractor + * The digit extractor + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + DigitExtractorT digit_extractor, + CountsCallback callback) + { + // Initialize shared digit counters + +#pragma unroll + for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM) + { + temp_storage.aliasable.raking_grid[linear_tid][ITEM] = 0; + } + + __syncthreads(); + + // Each warp will strip-mine its section of input, one strip at a time + + volatile DigitCounterT* digit_counters[KEYS_PER_THREAD]; + uint32_t warp_id = linear_tid >> LOG_WARP_THREADS; + uint32_t lane_mask_lt = ::cuda::ptx::get_sreg_lanemask_lt(); + +#pragma unroll + for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM) + { + // My digit + ::cuda::std::uint32_t digit = digit_extractor.Digit(keys[ITEM]); + + if (IS_DESCENDING) + { + digit = RADIX_DIGITS - digit - 1; + } + + // Mask of peers who have same digit as me + uint32_t peer_mask = + detail::warp_in_block_matcher_t::match_any(digit, warp_id); + + // Pointer to smem digit counter for this key + digit_counters[ITEM] = &temp_storage.aliasable.warp_digit_counters[digit][warp_id]; + + // Number of occurrences in previous strips + DigitCounterT warp_digit_prefix = *digit_counters[ITEM]; + + // Warp-sync + __syncwarp(0xFFFFFFFF); + + // Number of peers having same digit as me + int32_t digit_count = __popc(peer_mask); + + // Number of lower-ranked peers having same digit seen so far + int32_t peer_digit_prefix = __popc(peer_mask & lane_mask_lt); + + if (peer_digit_prefix == 0) + { + // First thread for each digit updates the shared warp counter 
+ *digit_counters[ITEM] = DigitCounterT(warp_digit_prefix + digit_count); + } + + // Warp-sync + __syncwarp(0xFFFFFFFF); + + // Number of prior keys having same digit + ranks[ITEM] = warp_digit_prefix + DigitCounterT(peer_digit_prefix); + } + + __syncthreads(); + + // Scan warp counters + + DigitCounterT scan_counters[PADDED_RAKING_SEGMENT]; + +#pragma unroll + for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM) + { + scan_counters[ITEM] = temp_storage.aliasable.raking_grid[linear_tid][ITEM]; + } + + BlockScanT(temp_storage.block_scan).ExclusiveSum(scan_counters, scan_counters); + +#pragma unroll + for (int ITEM = 0; ITEM < PADDED_RAKING_SEGMENT; ++ITEM) + { + temp_storage.aliasable.raking_grid[linear_tid][ITEM] = scan_counters[ITEM]; + } + + __syncthreads(); + if (!::cuda::std::is_same>::value) + { + CallBack(callback); + } + +// Seed ranks with counter values from previous warps +#pragma unroll + for (int ITEM = 0; ITEM < KEYS_PER_THREAD; ++ITEM) + { + ranks[ITEM] += *digit_counters[ITEM]; + } + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD], DigitExtractorT digit_extractor) + { + RankKeys(keys, ranks, digit_extractor, BlockRadixRankEmptyCallback()); + } + + /** + * @brief Rank keys. For the lower @p RADIX_DIGITS threads, digit counts for each digit are + * provided for the corresponding thread. + * + * @param[in] keys + * Keys for this tile + * + * @param[out] ranks + * For each key, the local rank within the tile (out parameter) + * + * @param[in] digit_extractor + * The digit extractor + * + * @param[out] exclusive_digit_prefix + * The exclusive prefix sum for the digits + * [(threadIdx.x * BINS_TRACKED_PER_THREAD) + * ... 
+ * (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1] + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void RankKeys( + UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + DigitExtractorT digit_extractor, + int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD], + CountsCallback callback) + { + RankKeys(keys, ranks, digit_extractor, callback); + +// Get exclusive count for each digit +#pragma unroll + for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) + { + int bin_idx = (linear_tid * BINS_TRACKED_PER_THREAD) + track; + + if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) + { + if (IS_DESCENDING) + { + bin_idx = RADIX_DIGITS - bin_idx - 1; + } + + exclusive_digit_prefix[track] = temp_storage.aliasable.warp_digit_counters[bin_idx][0]; + } + } + } + + /** + * @param[in] keys + * Keys for this tile + * + * @param[out] ranks + * For each key, the local rank within the tile (out parameter) + * + * @param[out] exclusive_digit_prefix + * The exclusive prefix sum for the digits + * [(threadIdx.x * BINS_TRACKED_PER_THREAD) + * ... + * (threadIdx.x * BINS_TRACKED_PER_THREAD) + BINS_TRACKED_PER_THREAD - 1] + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + DigitExtractorT digit_extractor, + int (&exclusive_digit_prefix)[BINS_TRACKED_PER_THREAD]) + { + RankKeys( + keys, ranks, digit_extractor, exclusive_digit_prefix, BlockRadixRankEmptyCallback()); + } + + //! @} +}; + +enum WarpMatchAlgorithm +{ + WARP_MATCH_ANY, + WARP_MATCH_ATOMIC_OR +}; + +/** + * Radix-rank using matching which computes the counts of keys for each digit + * value early, at the expense of doing more work. This may be useful e.g. for + * decoupled look-back, where it reduces the time other thread blocks need to + * wait for digit counts to become available. 
+ */ +template +struct BlockRadixRankMatchEarlyCounts +{ + // constants + enum + { + BLOCK_THREADS = BLOCK_DIM_X, + RADIX_DIGITS = 1 << RADIX_BITS, + BINS_PER_THREAD = (RADIX_DIGITS + BLOCK_THREADS - 1) / BLOCK_THREADS, + BINS_TRACKED_PER_THREAD = BINS_PER_THREAD, + FULL_BINS = BINS_PER_THREAD * BLOCK_THREADS == RADIX_DIGITS, + WARP_THREADS = CUB_PTX_WARP_THREADS, + PARTIAL_WARP_THREADS = BLOCK_THREADS % WARP_THREADS, + BLOCK_WARPS = BLOCK_THREADS / WARP_THREADS, + PARTIAL_WARP_ID = BLOCK_WARPS - 1, + WARP_MASK = ~0, + NUM_MATCH_MASKS = MATCH_ALGORITHM == WARP_MATCH_ATOMIC_OR ? BLOCK_WARPS : 0, + // Guard against declaring zero-sized array: + MATCH_MASKS_ALLOC_SIZE = NUM_MATCH_MASKS < 1 ? 1 : NUM_MATCH_MASKS, + }; + + // types + using BlockScan = cub::BlockScan; + + struct TempStorage + { + union + { + int warp_offsets[BLOCK_WARPS][RADIX_DIGITS]; + int warp_histograms[BLOCK_WARPS][RADIX_DIGITS][NUM_PARTS]; + }; + + int match_masks[MATCH_MASKS_ALLOC_SIZE][RADIX_DIGITS]; + + typename BlockScan::TempStorage prefix_tmp; + }; + + TempStorage& temp_storage; + + // internal ranking implementation + template + struct BlockRadixRankMatchInternal + { + TempStorage& s; + DigitExtractorT digit_extractor; + CountsCallback callback; + int warp; + int lane; + + _CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t Digit(UnsignedBits key) + { + ::cuda::std::uint32_t digit = digit_extractor.Digit(key); + return IS_DESCENDING ? RADIX_DIGITS - 1 - digit : digit; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE int ThreadBin(int u) + { + int bin = threadIdx.x * BINS_PER_THREAD + u; + return IS_DESCENDING ? 
RADIX_DIGITS - 1 - bin : bin; + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ComputeHistogramsWarp(UnsignedBits (&keys)[KEYS_PER_THREAD]) + { + // int* warp_offsets = &s.warp_offsets[warp][0]; + int(&warp_histograms)[RADIX_DIGITS][NUM_PARTS] = s.warp_histograms[warp]; +// compute warp-private histograms +#pragma unroll + for (int bin = lane; bin < RADIX_DIGITS; bin += WARP_THREADS) + { +#pragma unroll + for (int part = 0; part < NUM_PARTS; ++part) + { + warp_histograms[bin][part] = 0; + } + } + if (MATCH_ALGORITHM == WARP_MATCH_ATOMIC_OR) + { + int* match_masks = &s.match_masks[warp][0]; +#pragma unroll + for (int bin = lane; bin < RADIX_DIGITS; bin += WARP_THREADS) + { + match_masks[bin] = 0; + } + } + __syncwarp(WARP_MASK); + + // compute private per-part histograms + int part = lane % NUM_PARTS; +#pragma unroll + for (int u = 0; u < KEYS_PER_THREAD; ++u) + { + atomicAdd(&warp_histograms[Digit(keys[u])][part], 1); + } + + // sum different parts; + // no extra work is necessary if NUM_PARTS == 1 + if (NUM_PARTS > 1) + { + __syncwarp(WARP_MASK); + // TODO: handle RADIX_DIGITS % WARP_THREADS != 0 if it becomes necessary + constexpr int WARP_BINS_PER_THREAD = RADIX_DIGITS / WARP_THREADS; + int bins[WARP_BINS_PER_THREAD]; +#pragma unroll + for (int u = 0; u < WARP_BINS_PER_THREAD; ++u) + { + int bin = lane + u * WARP_THREADS; + bins[u] = cub::ThreadReduce(warp_histograms[bin], ::cuda::std::plus<>{}); + } + __syncthreads(); + + // store the resulting histogram in shared memory + int* warp_offsets = &s.warp_offsets[warp][0]; +#pragma unroll + for (int u = 0; u < WARP_BINS_PER_THREAD; ++u) + { + int bin = lane + u * WARP_THREADS; + warp_offsets[bin] = bins[u]; + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ComputeOffsetsWarpUpsweep(int (&bins)[BINS_PER_THREAD]) + { +// sum up warp-private histograms +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + bins[u] = 0; + int bin = ThreadBin(u); + if (FULL_BINS || (bin >= 0 && bin < RADIX_DIGITS)) + { +#pragma 
unroll + for (int j_warp = 0; j_warp < BLOCK_WARPS; ++j_warp) + { + int warp_offset = s.warp_offsets[j_warp][bin]; + s.warp_offsets[j_warp][bin] = bins[u]; + bins[u] += warp_offset; + } + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ComputeOffsetsWarpDownsweep(int (&offsets)[BINS_PER_THREAD]) + { +#pragma unroll + for (int u = 0; u < BINS_PER_THREAD; ++u) + { + int bin = ThreadBin(u); + if (FULL_BINS || (bin >= 0 && bin < RADIX_DIGITS)) + { + int digit_offset = offsets[u]; +#pragma unroll + for (int j_warp = 0; j_warp < BLOCK_WARPS; ++j_warp) + { + s.warp_offsets[j_warp][bin] += digit_offset; + } + } + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void ComputeRanksItem( + UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD], Int2Type) + { + // compute key ranks + int lane_mask = 1 << lane; + int* warp_offsets = &s.warp_offsets[warp][0]; + int* match_masks = &s.match_masks[warp][0]; +#pragma unroll + for (int u = 0; u < KEYS_PER_THREAD; ++u) + { + ::cuda::std::uint32_t bin = Digit(keys[u]); + int* p_match_mask = &match_masks[bin]; + atomicOr(p_match_mask, lane_mask); + __syncwarp(WARP_MASK); + int bin_mask = *p_match_mask; + int leader = (WARP_THREADS - 1) - __clz(bin_mask); + int warp_offset = 0; + int popc = __popc(bin_mask & ::cuda::ptx::get_sreg_lanemask_le()); + if (lane == leader) + { + // atomic is a bit faster + warp_offset = atomicAdd(&warp_offsets[bin], popc); + } + warp_offset = __shfl_sync(WARP_MASK, warp_offset, leader); + if (lane == leader) + { + *p_match_mask = 0; + } + __syncwarp(WARP_MASK); + ranks[u] = warp_offset + popc - 1; + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + ComputeRanksItem(UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD], Int2Type) + { + // compute key ranks + int* warp_offsets = &s.warp_offsets[warp][0]; +#pragma unroll + for (int u = 0; u < KEYS_PER_THREAD; ++u) + { + ::cuda::std::uint32_t bin = Digit(keys[u]); + int bin_mask = + detail::warp_in_block_matcher_t::match_any(bin, warp); + int 
leader = (WARP_THREADS - 1) - __clz(bin_mask); + int warp_offset = 0; + int popc = __popc(bin_mask & ::cuda::ptx::get_sreg_lanemask_le()); + if (lane == leader) + { + // atomic is a bit faster + warp_offset = atomicAdd(&warp_offsets[bin], popc); + } + warp_offset = __shfl_sync(WARP_MASK, warp_offset, leader); + ranks[u] = warp_offset + popc - 1; + } + } + + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + int (&exclusive_digit_prefix)[BINS_PER_THREAD]) + { + ComputeHistogramsWarp(keys); + + __syncthreads(); + int bins[BINS_PER_THREAD]; + ComputeOffsetsWarpUpsweep(bins); + callback(bins); + + BlockScan(s.prefix_tmp).ExclusiveSum(bins, exclusive_digit_prefix); + + ComputeOffsetsWarpDownsweep(exclusive_digit_prefix); + __syncthreads(); + ComputeRanksItem(keys, ranks, Int2Type()); + } + + _CCCL_DEVICE _CCCL_FORCEINLINE + BlockRadixRankMatchInternal(TempStorage& temp_storage, DigitExtractorT digit_extractor, CountsCallback callback) + : s(temp_storage) + , digit_extractor(digit_extractor) + , callback(callback) + , warp(threadIdx.x / WARP_THREADS) + , lane(::cuda::ptx::get_sreg_laneid()) + {} + }; + + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRadixRankMatchEarlyCounts(TempStorage& temp_storage) + : temp_storage(temp_storage) + {} + + /** + * @brief Rank keys. For the lower @p RADIX_DIGITS threads, digit counts for each digit are + * provided for the corresponding thread. 
+ */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void RankKeys( + UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + DigitExtractorT digit_extractor, + int (&exclusive_digit_prefix)[BINS_PER_THREAD], + CountsCallback callback) + { + BlockRadixRankMatchInternal internal( + temp_storage, digit_extractor, callback); + internal.RankKeys(keys, ranks, exclusive_digit_prefix); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], + int (&ranks)[KEYS_PER_THREAD], + DigitExtractorT digit_extractor, + int (&exclusive_digit_prefix)[BINS_PER_THREAD]) + { + using CountsCallback = BlockRadixRankEmptyCallback; + BlockRadixRankMatchInternal internal( + temp_storage, digit_extractor, CountsCallback()); + internal.RankKeys(keys, ranks, exclusive_digit_prefix); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(UnsignedBits (&keys)[KEYS_PER_THREAD], int (&ranks)[KEYS_PER_THREAD], DigitExtractorT digit_extractor) + { + int exclusive_digit_prefix[BINS_PER_THREAD]; + RankKeys(keys, ranks, digit_extractor, exclusive_digit_prefix); + } +}; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document +namespace detail +{ + +// `BlockRadixRank` doesn't conform to the typical pattern, not exposing the algorithm +// template parameter. Other algorithms don't provide the same template parameters, not allowing +// multi-dimensional thread block specializations. 
+// +// TODO(senior-zero) for 3.0: +// - Put existing implementations into the detail namespace +// - Support multi-dimensional thread blocks in the rest of implementations +// - Repurpose BlockRadixRank as an entry name with the algorithm template parameter +template +using block_radix_rank_t = ::cuda::std::_If< + RankAlgorithm == RADIX_RANK_BASIC, + BlockRadixRank, + ::cuda::std::_If< + RankAlgorithm == RADIX_RANK_MEMOIZE, + BlockRadixRank, + ::cuda::std::_If< + RankAlgorithm == RADIX_RANK_MATCH, + BlockRadixRankMatch, + ::cuda::std::_If< + RankAlgorithm == RADIX_RANK_MATCH_EARLY_COUNTS_ANY, + BlockRadixRankMatchEarlyCounts, + BlockRadixRankMatchEarlyCounts>>>>; + +} // namespace detail +#endif // _CCCL_DOXYGEN_INVOKED + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_radix_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_radix_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..080053348d7afd15e1804dd7bd3465c9cebc407a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_radix_sort.cuh @@ -0,0 +1,2185 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * The cub::BlockRadixSort class provides [collective](../index.html#sec0) methods for radix + * sorting of items partitioned across a CUDA thread block. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! BlockRadixSort class provides :ref:`collective ` methods for sorting +//! items partitioned across a CUDA thread block using a radix sorting method. +//! +//! .. image:: ../../img/sorting_logo.png +//! :align: center +//! +//! Overview +//! -------------------------------------------------- +//! +//! The `radix sorting method `_ arranges +//! 
items into ascending order. It relies upon a positional representation for +//! keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, +//! characters, etc.) specified from least-significant to most-significant. For a +//! given input sequence of keys and a set of rules specifying a total ordering +//! of the symbolic alphabet, the radix sorting method produces a lexicographic +//! ordering of those keys. +//! +//! @rowmajor +//! +//! Supported Types +//! -------------------------------------------------- +//! +//! BlockRadixSort can sort all of the built-in C++ numeric primitive types +//! (``unsigned char``, ``int``, ``double``, etc.) as well as CUDA's ``__half`` +//! half-precision floating-point type. User-defined types are supported as long +//! as decomposer object is provided. +//! +//! Floating-Point Special Cases +//! -------------------------------------------------- +//! +//! - Positive and negative zeros are considered equivalent, and will be treated +//! as such in the output. +//! - No special handling is implemented for NaN values; these are sorted +//! according to their bit representations after any transformations. +//! +//! Bitwise Key Transformations +//! -------------------------------------------------- +//! +//! Although the direct radix sorting method can only be applied to unsigned +//! integral types, BlockRadixSort is able to sort signed and floating-point +//! types via simple bit-wise transformations that ensure lexicographic key +//! ordering. +//! +//! These transformations must be considered when restricting the +//! ``[begin_bit, end_bit)`` range, as the bitwise transformations will occur +//! before the bit-range truncation. +//! +//! Any transformations applied to the keys prior to sorting are reversed +//! while writing to the final output buffer. +//! +//! Type Specific Bitwise Transformations +//! -------------------------------------------------- +//! +//! 
To convert the input values into a radix-sortable bitwise representation, +//! the following transformations take place prior to sorting: +//! +//! * For unsigned integral values, the keys are used directly. +//! * For signed integral values, the sign bit is inverted. +//! * For positive floating point values, the sign bit is inverted. +//! * For negative floating point values, the full key is inverted. +//! +//! No Descending Sort Transformations +//! -------------------------------------------------- +//! +//! Unlike ``DeviceRadixSort``, ``BlockRadixSort`` does not invert the input key bits +//! when performing a descending sort. Instead, it has special logic to reverse +//! the order of the keys while sorting. +//! +//! Stability +//! -------------------------------------------------- +//! +//! BlockRadixSort is stable. For floating-point types -0.0 and +0.0 +//! are considered equal and appear in the result in the same order as they +//! appear in the input. +//! +//! +//! Performance Considerations +//! -------------------------------------------------- +//! +//! * @granularity +//! +//! A Simple Example +//! -------------------------------------------------- +//! +//! @blockcollective{BlockRadixSort} +//! +//! The code snippet below illustrates a sort of 512 integer keys that +//! are partitioned in a [blocked arrangement](../index.html#sec5sec3) across 128 threads +//! where each thread owns 4 consecutive items. +//! +//! .. tab-set-code:: +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void kernel(...) +//! { +//! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer items each +//! using BlockRadixSort = cub::BlockRadixSort; +//! +//! // Allocate shared memory for BlockRadixSort +//! __shared__ typename BlockRadixSort::TempStorage temp_storage; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_keys[4]; +//! ... +//! +//! 
// Collectively sort the keys +//! BlockRadixSort(temp_storage).Sort(thread_keys); +//! +//! ... +//! +//! .. code-block:: python +//! +//! import cuda.cooperative.experimental as cudax +//! from pynvjitlink import patch +//! patch.patch_numba_linker(lto=True) +//! +//! # Specialize radix sort for a 1D block of 128 threads owning 4 integer items each +//! block_radix_sort = cudax.block.radix_sort_keys(numba.int32, 128, 4) +//! temp_storage_bytes = block_radix_sort.temp_storage_bytes +//! +//! @cuda.jit(link=block_radix_sort.files) +//! def kernel(): +//! Allocate shared memory for radix sort +//! temp_storage = cuda.shared.array(shape=temp_storage_bytes, dtype='uint8') +//! +//! # Obtain a segment of consecutive items that are blocked across threads +//! thread_keys = cuda.local.array(shape=items_per_thread, dtype=numba.int32) +//! # ... +//! +//! // Collectively sort the keys +//! block_radix_sort(temp_storage, thread_keys) +//! # ... +//! +//! Suppose the set of input ``thread_keys`` across the block of threads is +//! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. +//! The corresponding output ``thread_keys`` in those threads will be +//! ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }``. +//! +//! Re-using dynamically allocating shared memory +//! -------------------------------------------------- +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared memory with +//! BlockReduce and how to re-purpose the same memory region. +//! +//! This example can be easily adapted to the storage required by BlockRadixSort. +//! @endrst +//! +//! @tparam KeyT +//! KeyT type +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam ITEMS_PER_THREAD +//! The number of items per thread +//! +//! @tparam ValueT +//! **[optional]** ValueT type (default: cub::NullType, which indicates a keys-only sort) +//! +//! 
@tparam RADIX_BITS +//! **[optional]** The number of radix bits per digit place (default: 4 bits) +//! +//! @tparam MEMOIZE_OUTER_SCAN +//! **[optional]** Whether or not to buffer outer raking scan partials to incur fewer shared memory +//! reads at the expense of higher register pressure (default: true for architectures SM35 and +//! newer, false otherwise). +//! +//! @tparam INNER_SCAN_ALGORITHM +//! **[optional]** The cub::BlockScanAlgorithm algorithm to use +//! (default: cub::BLOCK_SCAN_WARP_SCANS) +//! +//! @tparam SMEM_CONFIG +//! **[optional]*8 Shared memory bank mode (default: `cudaSharedMemBankSizeFourByte`) +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused +template +class BlockRadixSort +{ +private: + /****************************************************************************** + * Constants and type definitions + ******************************************************************************/ + + enum + { + // The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + + // Whether or not there are values to be trucked along with keys + KEYS_ONLY = ::cuda::std::is_same::value, + }; + + // KeyT traits and unsigned bits type + using traits = detail::radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + /// Ascending BlockRadixRank utility type + using AscendingBlockRadixRank = + BlockRadixRank; + + /// Descending BlockRadixRank utility type + using DescendingBlockRadixRank = + BlockRadixRank; + + /// Digit extractor type + using fundamental_digit_extractor_t = BFEDigitExtractor; + + /// BlockExchange utility type for keys + using BlockExchangeKeys = BlockExchange; + 
+ /// BlockExchange utility type for values + using BlockExchangeValues = BlockExchange; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + /// Shared memory storage layout type + union _TempStorage + { + typename AscendingBlockRadixRank::TempStorage asending_ranking_storage; + typename DescendingBlockRadixRank::TempStorage descending_ranking_storage; + typename BlockExchangeKeys::TempStorage exchange_keys; + typename BlockExchangeValues::TempStorage exchange_values; + }; +#endif // _CCCL_DOXYGEN_INVOKED + + /****************************************************************************** + * Thread fields + ******************************************************************************/ + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + + /****************************************************************************** + * Utility methods + ******************************************************************************/ + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /// Rank keys (specialized for ascending sort) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(bit_ordered_type (&unsigned_keys)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + DigitExtractorT digit_extractor, + Int2Type /*is_descending*/) + { + AscendingBlockRadixRank(temp_storage.asending_ranking_storage).RankKeys(unsigned_keys, ranks, digit_extractor); + } + + /// Rank keys (specialized for descending sort) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + RankKeys(bit_ordered_type (&unsigned_keys)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + DigitExtractorT digit_extractor, + Int2Type /*is_descending*/) + { + DescendingBlockRadixRank(temp_storage.descending_ranking_storage).RankKeys(unsigned_keys, ranks, digit_extractor); + } + + /// ExchangeValues (specialized for key-value 
sort, to-blocked arrangement) + _CCCL_DEVICE _CCCL_FORCEINLINE void ExchangeValues( + ValueT (&values)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + Int2Type /*is_keys_only*/, + Int2Type /*is_blocked*/) + { + __syncthreads(); + + // Exchange values through shared memory in blocked arrangement + BlockExchangeValues(temp_storage.exchange_values).ScatterToBlocked(values, ranks); + } + + /// ExchangeValues (specialized for key-value sort, to-striped arrangement) + _CCCL_DEVICE _CCCL_FORCEINLINE void ExchangeValues( + ValueT (&values)[ITEMS_PER_THREAD], + int (&ranks)[ITEMS_PER_THREAD], + Int2Type /*is_keys_only*/, + Int2Type /*is_blocked*/) + { + __syncthreads(); + + // Exchange values through shared memory in blocked arrangement + BlockExchangeValues(temp_storage.exchange_values).ScatterToStriped(values, ranks); + } + + /// ExchangeValues (specialized for keys-only sort) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExchangeValues( + ValueT (& /*values*/)[ITEMS_PER_THREAD], + int (& /*ranks*/)[ITEMS_PER_THREAD], + Int2Type /*is_keys_only*/, + Int2Type /*is_blocked*/) + {} + + /** + * @brief Sort blocked arrangement + * + * @param keys + * Keys to sort + * + * @param values + * Values to sort + * + * @param begin_bit + * The beginning (least-significant) bit index needed for key comparison + * + * @param end_bit + * The past-the-end (most-significant) bit index needed for key comparison + * + * @param is_descending + * Tag whether is a descending-order sort + * + * @param is_keys_only + * Tag whether is keys-only sort + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SortBlocked( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + int begin_bit, + int end_bit, + Int2Type is_descending, + Int2Type is_keys_only, + DecomposerT decomposer = {}) + { + bit_ordered_type(&unsigned_keys)[ITEMS_PER_THREAD] = reinterpret_cast(keys); + +#pragma unroll + for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) + { + unsigned_keys[KEY] = 
bit_ordered_conversion::to_bit_ordered(decomposer, unsigned_keys[KEY]); + } + + // Radix sorting passes + while (true) + { + int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit); + auto digit_extractor = + traits::template digit_extractor(begin_bit, pass_bits, decomposer); + + // Rank the blocked keys + int ranks[ITEMS_PER_THREAD]; + RankKeys(unsigned_keys, ranks, digit_extractor, is_descending); + begin_bit += RADIX_BITS; + + __syncthreads(); + + // Exchange keys through shared memory in blocked arrangement + BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks); + + // Exchange values through shared memory in blocked arrangement + ExchangeValues(values, ranks, is_keys_only, Int2Type()); + + // Quit if done + if (begin_bit >= end_bit) + { + break; + } + + __syncthreads(); + } + +// Untwiddle bits if necessary +#pragma unroll + for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) + { + unsigned_keys[KEY] = bit_ordered_conversion::from_bit_ordered(decomposer, unsigned_keys[KEY]); + } + } + +public: +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + + /** + * @brief Sort blocked -> striped arrangement + * + * @param keys + * Keys to sort + * + * @param values + * Values to sort + * + * @param begin_bit + * The beginning (least-significant) bit index needed for key comparison + * + * @param end_bit + * The past-the-end (most-significant) bit index needed for key comparison + * + * @param is_descending + * Tag whether is a descending-order sort + * + * @param is_keys_only + * Tag whether is keys-only sort + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void SortBlockedToStriped( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + int begin_bit, + int end_bit, + Int2Type is_descending, + Int2Type is_keys_only, + DecomposerT decomposer = {}) + { + bit_ordered_type(&unsigned_keys)[ITEMS_PER_THREAD] = reinterpret_cast(keys); + +# pragma unroll + for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) + { + unsigned_keys[KEY] = 
bit_ordered_conversion::to_bit_ordered(decomposer, unsigned_keys[KEY]); + } + + // Radix sorting passes + while (true) + { + int pass_bits = CUB_MIN(RADIX_BITS, end_bit - begin_bit); + auto digit_extractor = + traits::template digit_extractor(begin_bit, pass_bits, decomposer); + + // Rank the blocked keys + int ranks[ITEMS_PER_THREAD]; + RankKeys(unsigned_keys, ranks, digit_extractor, is_descending); + begin_bit += RADIX_BITS; + + __syncthreads(); + + // Check if this is the last pass + if (begin_bit >= end_bit) + { + // Last pass exchanges keys through shared memory in striped arrangement + BlockExchangeKeys(temp_storage.exchange_keys).ScatterToStriped(keys, ranks); + + // Last pass exchanges through shared memory in striped arrangement + ExchangeValues(values, ranks, is_keys_only, Int2Type()); + + // Quit + break; + } + + // Exchange keys through shared memory in blocked arrangement + BlockExchangeKeys(temp_storage.exchange_keys).ScatterToBlocked(keys, ranks); + + // Exchange values through shared memory in blocked arrangement + ExchangeValues(values, ranks, is_keys_only, Int2Type()); + + __syncthreads(); + } + +// Untwiddle bits if necessary +# pragma unroll + for (int KEY = 0; KEY < ITEMS_PER_THREAD; KEY++) + { + unsigned_keys[KEY] = bit_ordered_conversion::from_bit_ordered(decomposer, unsigned_keys[KEY]); + } + } + +#endif // _CCCL_DOXYGEN_INVOKED + + /// @smemstorage{BlockRadixSort} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRadixSort() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. 
+ * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRadixSort(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Sorting (blocked arrangements) + //! @{ + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys. + //! + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive keys. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! ... + //! + //! // Collectively sort the keys + //! BlockRadixSort(temp_storage).Sort(thread_keys); + //! + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. + //! The corresponding output ``thread_keys`` in those threads will be + //! ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }``. + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! 
**[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void + Sort(KeyT (&keys)[ITEMS_PER_THREAD], int begin_bit = 0, int end_bit = sizeof(KeyT) * 8) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 2 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 1 key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-bits + //! :end-before: example-end keys-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! 
Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + Sort(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer, int begin_bit, int end_bit) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive keys. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! 
:start-after: example-begin keys + //! :end-before: example-end keys + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + Sort(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + Sort(keys, decomposer, 0, detail::radix::traits_t::default_end_bit(decomposer)); + } + + //! @rst + //! Performs an ascending block-wide radix sort across a :ref:`blocked arrangement ` + //! of keys and values. + //! + //! - BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive pairs. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! 
__global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! int thread_values[4]; + //! ... + //! + //! // Collectively sort the keys and values among block threads + //! BlockRadixSort(temp_storage).Sort(thread_keys, thread_values); + //! + //! @endcode + //! @par + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. The + //! corresponding output ``thread_keys`` in those threads will be + //! ``{ [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [508,509,510,511] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void + Sort(KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8) + { + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values. + //! + //! * BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! 
can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 2 keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 1 pair. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-bits + //! :end-before: example-end pairs-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! 
@param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + Sort(KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + DecomposerT decomposer, + int begin_bit, + int end_bit) + { + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values. + //! + //! * BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive pairs. 
+ //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs + //! :end-before: example-end pairs + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + Sort(KeyT (&keys)[ITEMS_PER_THREAD], ValueT (&values)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + Sort(keys, values, decomposer, 0, detail::radix::traits_t::default_end_bit(decomposer)); + } + + //! @rst + //! Performs a descending block-wide radix sort over a :ref:`blocked arrangement ` + //! of keys. + //! + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys that + //! are partitioned in a [blocked arrangement](../index.html#sec5sec3) across 128 threads + //! where each thread owns 4 consecutive keys. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each + //! 
using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! ... + //! + //! // Collectively sort the keys + //! BlockRadixSort(temp_storage).Sort(thread_keys); + //! + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. + //! The corresponding output ``thread_keys`` in those threads will be + //! ``{ [511,510,509,508], [11,10,9,8], [7,6,5,4], ..., [3,2,1,0] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void + SortDescending(KeyT (&keys)[ITEMS_PER_THREAD], int begin_bit = 0, int end_bit = sizeof(KeyT) * 8) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! 
:start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 2 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 1 key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-descending-bits + //! :end-before: example-end keys-descending-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescending(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer, int begin_bit, int end_bit) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys. + //! 
+ //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive keys. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-descending + //! :end-before: example-end keys-descending + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescending(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlocked( + keys, + values, + 0, + detail::radix::traits_t::default_end_bit(decomposer), + Int2Type(), + Int2Type(), + decomposer); + } + + //! @rst + //! Performs a descending block-wide radix sort across a :ref:`blocked arrangement ` + //! of keys and values. + //! + //! - BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive pairs. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! int thread_values[4]; + //! ... + //! + //! // Collectively sort the keys and values among block threads + //! BlockRadixSort(temp_storage).Sort(thread_keys, thread_values); + //! + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! 
``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. The + //! corresponding output ``thread_keys`` in those threads will be + //! ``{ [511,510,509,508], [11,10,9,8], [7,6,5,4], ..., [3,2,1,0] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void SortDescending( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8) + { + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values. + //! + //! * BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! 
:end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 2 pairs that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 1 pair. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-descending-bits + //! :end-before: example-end pairs-descending-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescending(KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + DecomposerT decomposer, + int begin_bit, + int end_bit) + { + SortBlocked(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! 
:ref:`blocked arrangement ` of keys and values. + //! + //! * BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive pairs. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-descending + //! :end-before: example-end pairs-descending + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! 
@param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescending(KeyT (&keys)[ITEMS_PER_THREAD], ValueT (&values)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + SortBlocked( + keys, + values, + 0, + detail::radix::traits_t::default_end_bit(decomposer), + Int2Type(), + Int2Type(), + decomposer); + } + + //! @} end member group + //! @name Sorting (blocked arrangement -> striped arrangement) + //! @{ + + //! @rst + //! Performs an ascending radix sort across a :ref:`blocked arrangement ` of keys, + //! leaving them in a :ref:`striped arrangement `. + //! + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys that + //! are initially partitioned in a :ref:`blocked arrangement ` across 128 + //! threads where each thread owns 4 consecutive keys. The final partitioning is striped. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! ... + //! + //! // Collectively sort the keys + //! BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys); + //! + //! 
Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. + //! The corresponding output ``thread_keys`` in those threads will be + //! ``{ [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void + SortBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], int begin_bit = 0, int end_bit = sizeof(KeyT) * 8) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 4 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 2 consecutive keys. The final partitioning is striped. + //! + //! .. 
literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-striped-bits + //! :end-before: example-end keys-striped-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer, int begin_bit, int end_bit) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. 
To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive keys. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-striped + //! :end-before: example-end keys-striped + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlockedToStriped( + keys, + values, + 0, + detail::radix::traits_t::default_end_bit(decomposer), + Int2Type(), + Int2Type(), + decomposer); + } + + //! @rst + //! Performs an ascending radix sort across a :ref:`blocked arrangement ` of keys and + //! values, leaving them in a :ref:`striped arrangement `. + //! + //! - BlockRadixSort can only accommodate one associated tile of values. To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys and values that + //! are initially partitioned in a [blocked arrangement](../index.html#sec5sec3) across 128 + //! threads where each thread owns 4 consecutive pairs. The final partitioning is striped. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! int thread_values[4]; + //! ... + //! + //! // Collectively sort the keys and values among block threads + //! 
BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values); + //! + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. + //! The corresponding output ``thread_keys`` in those threads will be + //! ``{ [0,128,256,384], [1,129,257,385], [2,130,258,386], ..., [127,255,383,511] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void SortBlockedToStriped( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8) + { + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 4 pairs that + //! 
are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 2 consecutive pairs. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-striped-bits + //! :end-before: example-end pairs-striped-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + DecomposerT decomposer, + int begin_bit, + int end_bit) + { + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs an ascending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values, leaving them in a + //! 
:ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 pairs that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive pairs. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-striped + //! :end-before: example-end pairs-striped + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], ValueT (&values)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + SortBlockedToStriped( + keys, + values, + 0, + detail::radix::traits_t::default_end_bit(decomposer), + Int2Type(), + Int2Type(), + decomposer); + } + + //! @rst + //! Performs a descending radix sort across a :ref:`blocked arrangement ` + //! of keys, leaving them in a :ref:`striped arrangement `. + //! + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys that + //! are initially partitioned in a :ref:`blocked arrangement ` across 128 + //! threads where each thread owns 4 consecutive keys. The final partitioning is striped. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! ... + //! + //! // Collectively sort the keys + //! BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys); + //! + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. + //! The corresponding output ``thread_keys`` in those threads will be + //! ``{ [511,383,255,127], [386,258,130,2], [385,257,128,1], ..., [384,256,128,0] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in] begin_bit + //! 
**[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! **[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void + SortDescendingBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], int begin_bit = 0, int end_bit = sizeof(KeyT) * 8) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 4 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 2 consecutive keys. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-striped-descending-bits + //! :end-before: example-end keys-striped-descending-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! 
``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescendingBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer, int begin_bit, int end_bit) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! 
:language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive keys. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin keys-striped-descending + //! :end-before: example-end keys-striped-descending + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescendingBlockedToStriped(KeyT (&keys)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + NullType values[ITEMS_PER_THREAD]; + + SortBlockedToStriped( + keys, + values, + 0, + detail::radix::traits_t::default_end_bit(decomposer), + Int2Type(), + Int2Type(), + decomposer); + } + + //! @rst + //! Performs a descending radix sort across a :ref:`blocked arrangement ` + //! of keys and values, leaving them in a :ref:`striped arrangement ` + //! + //! - BlockRadixSort can only accommodate one associated tile of values. 
To "truck along" + //! more than one tile of values, simply perform a key-value sort of the keys paired + //! with a temporary value array that enumerates the key indices. The reordered indices + //! can then be used as a gather-vector for exchanging other associated tile data through + //! shared memory. + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sort of 512 integer keys and values that + //! are initially partitioned in a :ref:`blocked arrangement ` across 128 + //! threads where each thread owns 4 consecutive pairs. The final partitioning is striped. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer keys and values each + //! using BlockRadixSort = cub::BlockRadixSort; + //! + //! // Allocate shared memory for BlockRadixSort + //! __shared__ typename BlockRadixSort::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_keys[4]; + //! int thread_values[4]; + //! ... + //! + //! // Collectively sort the keys and values among block threads + //! BlockRadixSort(temp_storage).SortBlockedToStriped(thread_keys, thread_values); + //! + //! Suppose the set of input ``thread_keys`` across the block of threads is + //! ``{ [0,511,1,510], [2,509,3,508], [4,507,5,506], ..., [254,257,255,256] }``. + //! The corresponding output ``thread_keys`` in those threads will be + //! ``{ [511,383,255,127], [386,258,130,2], [385,257,128,1], ..., [384,256,128,0] }``. + //! + //! @endrst + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param[in] begin_bit + //! **[optional]** The beginning (least-significant) bit index needed for key comparison + //! + //! @param[in] end_bit + //! 
**[optional]** The past-the-end (most-significant) bit index needed for key comparison + _CCCL_DEVICE _CCCL_FORCEINLINE void SortDescendingBlockedToStriped( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + int begin_bit = 0, + int end_bit = sizeof(KeyT) * 8) + { + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type()); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 4 keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 2 consecutive pairs. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-striped-descending-bits + //! :end-before: example-end pairs-striped-descending-bits + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! 
The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. + //! + //! @param[in] begin_bit + //! The least-significant bit index (inclusive) needed for + //! key comparison + //! + //! @param[in] end_bit + //! The most-significant bit index (exclusive) needed for key + //! comparison (e.g., `(sizeof(float) + sizeof(long long int)) * 8`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescendingBlockedToStriped( + KeyT (&keys)[ITEMS_PER_THREAD], + ValueT (&values)[ITEMS_PER_THREAD], + DecomposerT decomposer, + int begin_bit, + int end_bit) + { + SortBlockedToStriped(keys, values, begin_bit, end_bit, Int2Type(), Int2Type(), decomposer); + } + + //! @rst + //! Performs a descending block-wide radix sort over a + //! :ref:`blocked arrangement ` of keys and values, leaving them in a + //! :ref:`striped arrangement `. + //! + //! * @granularity + //! * @smemreuse + //! + //! Snippet + //! ========================================================================== + //! + //! Let's consider a user-defined ``custom_t`` type below. To sort an array of + //! ``custom_t`` objects, we have to tell CUB about relevant members of the + //! ``custom_t`` type. We do this by providing a decomposer that returns a + //! tuple of references to relevant members of the key. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! 
:start-after: example-begin custom-type + //! :end-before: example-end custom-type + //! + //! The code snippet below illustrates a sort of 6 keys and values that + //! are partitioned in a :ref:`blocked arrangement ` across 2 threads + //! where each thread owns 3 consecutive pairs. The final partitioning is striped. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_radix_sort_custom.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin pairs-striped-descending + //! :end-before: example-end pairs-striped-descending + //! + //! @endrst + //! + //! @tparam DecomposerT + //! **[inferred]** Type of a callable object responsible for decomposing a + //! ``KeyT`` into a tuple of references to its constituent arithmetic types: + //! ``::cuda::std::tuple operator()(KeyT &key)``. + //! The leftmost element of the tuple is considered the most significant. + //! The call operator must not modify members of the key. + //! + //! @param[in,out] keys + //! Keys to sort + //! + //! @param[in,out] values + //! Values to sort + //! + //! @param decomposer + //! Callable object responsible for decomposing a ``KeyT`` into a tuple of + //! references to its constituent arithmetic types. The leftmost element of + //! the tuple is considered the most significant. The call operator must not + //! modify members of the key. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE // + typename ::cuda::std::enable_if< // + !::cuda::std::is_convertible::value>::type + SortDescendingBlockedToStriped( + KeyT (&keys)[ITEMS_PER_THREAD], ValueT (&values)[ITEMS_PER_THREAD], DecomposerT decomposer) + { + SortBlockedToStriped( + keys, + values, + 0, + detail::radix::traits_t::default_end_bit(decomposer), + Int2Type(), + Int2Type(), + decomposer); + } + + //@} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_raking_layout.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_raking_layout.cuh new file mode 100644 index 0000000000000000000000000000000000000000..4d675b626b8fc9252d432e999533532a966926f0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_raking_layout.cuh @@ -0,0 +1,152 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::BlockRakingLayout provides a conflict-free shared memory layout abstraction for warp-raking + * across thread block data. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! BlockRakingLayout provides a conflict-free shared memory layout abstraction for 1D raking across thread block data. +//! +//! Overview +//! ++++++++++++++++++++++++++ +//! +//! This type facilitates a shared memory usage pattern where a block of CUDA +//! threads places elements into shared memory and then reduces the active +//! parallelism to one "raking" warp of threads for serially aggregating consecutive +//! sequences of shared items. Padding is inserted to eliminate bank conflicts +//! (for most data types). +//! +//! @endrst +//! +//! @tparam T +//! The data type to be exchanged. +//! +//! @tparam BLOCK_THREADS +//! 
The thread block size in threads. +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. +template +struct BlockRakingLayout +{ + //--------------------------------------------------------------------- + // Constants and type definitions + //--------------------------------------------------------------------- + + enum + { + /// The total number of elements that need to be cooperatively reduced + SHARED_ELEMENTS = BLOCK_THREADS, + + /// Maximum number of warp-synchronous raking threads + MAX_RAKING_THREADS = CUB_MIN(BLOCK_THREADS, CUB_WARP_THREADS(0)), + + /// Number of raking elements per warp-synchronous raking thread (rounded up) + SEGMENT_LENGTH = (SHARED_ELEMENTS + MAX_RAKING_THREADS - 1) / MAX_RAKING_THREADS, + + /// Never use a raking thread that will have no valid data (e.g., when BLOCK_THREADS is 62 and SEGMENT_LENGTH is 2, + /// we should only use 31 raking threads) + RAKING_THREADS = (SHARED_ELEMENTS + SEGMENT_LENGTH - 1) / SEGMENT_LENGTH, + + /// Whether we will have bank conflicts (technically we should find out if the GCD is > 1) + HAS_CONFLICTS = (CUB_SMEM_BANKS(0) % SEGMENT_LENGTH == 0), + + /// Degree of bank conflicts (e.g., 4-way) + CONFLICT_DEGREE = (HAS_CONFLICTS) ? 
(MAX_RAKING_THREADS * SEGMENT_LENGTH) / CUB_SMEM_BANKS(0) : 1, + + /// Pad each segment length with one element if segment length is not relatively prime to warp size and can't be + /// optimized as a vector load + USE_SEGMENT_PADDING = ((SEGMENT_LENGTH & 1) == 0) && (SEGMENT_LENGTH > 2), + + /// Total number of elements in the raking grid + GRID_ELEMENTS = RAKING_THREADS * (SEGMENT_LENGTH + USE_SEGMENT_PADDING), + + /// Whether or not we need bounds checking during raking (the number of reduction elements is not a multiple of the + /// number of raking threads) + UNGUARDED = (SHARED_ELEMENTS % RAKING_THREADS == 0), + }; + + /** + * @brief Shared memory storage type + */ + struct __align__(16) _TempStorage + { + T buff[BlockRakingLayout::GRID_ELEMENTS]; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + /** + * @brief Returns the location for the calling thread to place data into the grid + */ + static _CCCL_DEVICE _CCCL_FORCEINLINE T* PlacementPtr(TempStorage& temp_storage, unsigned int linear_tid) + { + // Offset for partial + unsigned int offset = linear_tid; + + // Add in one padding element for every segment + if (USE_SEGMENT_PADDING > 0) + { + offset += offset / SEGMENT_LENGTH; + } + + // Incorporating a block of padding partials every shared memory segment + return temp_storage.Alias().buff + offset; + } + + /** + * @brief Returns the location for the calling thread to begin sequential raking + */ + static _CCCL_DEVICE _CCCL_FORCEINLINE T* RakingPtr(TempStorage& temp_storage, unsigned int linear_tid) + { + return temp_storage.Alias().buff + (linear_tid * (SEGMENT_LENGTH + USE_SEGMENT_PADDING)); + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_reduce.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_reduce.cuh new file mode 100644 index 
0000000000000000000000000000000000000000..6cf578963fc2fcec0ea1e8957d23201009284a09 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_reduce.cuh @@ -0,0 +1,626 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +//! @file +//! The cub::BlockReduce class provides :ref:`collective ` methods for computing a parallel +//! reduction of items partitioned across a CUDA thread block. + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Algorithmic variants + ******************************************************************************/ + +//! BlockReduceAlgorithm enumerates alternative algorithms for parallel reduction across a CUDA thread block. +enum BlockReduceAlgorithm +{ + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! An efficient "raking" reduction algorithm that only supports commutative + //! reduction operators (true for most operations, e.g., addition). + //! + //! Execution is comprised of three phases: + //! #. Upsweep sequential reduction in registers (if threads contribute more + //! than one input each). Threads in warps other than the first warp place + //! their partial reductions into shared memory. + //! #. Upsweep sequential reduction in shared memory. Threads within the first + //! warp continue to accumulate by raking across segments of shared partial reductions + //! #. A warp-synchronous Kogge-Stone style reduction within the raking warp. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - This variant performs less communication than BLOCK_REDUCE_RAKING_NON_COMMUTATIVE + //! and is preferable when the reduction operator is commutative. This variant + //! 
applies fewer reduction operators than BLOCK_REDUCE_WARP_REDUCTIONS, and can provide higher overall + //! throughput across the GPU when suitably occupied. However, turn-around latency may be + //! higher than to BLOCK_REDUCE_WARP_REDUCTIONS and thus less-desirable + //! when the GPU is under-occupied. + //! + //! @endrst + BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! An efficient "raking" reduction algorithm that supports commutative + //! (e.g., addition) and non-commutative (e.g., string concatenation) reduction + //! operators. @blocked. + //! + //! Execution is comprised of three phases: + //! #. Upsweep sequential reduction in registers (if threads contribute more + //! than one input each). Each thread then places the partial reduction + //! of its item(s) into shared memory. + //! #. Upsweep sequential reduction in shared memory. Threads within a + //! single warp rake across segments of shared partial reductions. + //! #. A warp-synchronous Kogge-Stone style reduction within the raking warp. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - This variant performs more communication than BLOCK_REDUCE_RAKING + //! and is only preferable when the reduction operator is non-commutative. This variant + //! applies fewer reduction operators than BLOCK_REDUCE_WARP_REDUCTIONS, and can provide higher overall + //! throughput across the GPU when suitably occupied. However, turn-around latency may be + //! higher than to BLOCK_REDUCE_WARP_REDUCTIONS and thus less-desirable + //! when the GPU is under-occupied. + //! + //! @endrst + BLOCK_REDUCE_RAKING, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A quick "tiled warp-reductions" reduction algorithm that supports commutative + //! (e.g., addition) and non-commutative (e.g., string concatenation) reduction + //! operators. + //! + //! Execution is comprised of four phases: + //! #. 
Upsweep sequential reduction in registers (if threads contribute more + //! than one input each). Each thread then places the partial reduction + //! of its item(s) into shared memory. + //! #. Compute a shallow, but inefficient warp-synchronous Kogge-Stone style + //! reduction within each warp. + //! #. A propagation phase where the warp reduction outputs in each warp are + //! updated with the aggregate from each preceding warp. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - This variant applies more reduction operators than BLOCK_REDUCE_RAKING + //! or BLOCK_REDUCE_RAKING_NON_COMMUTATIVE, which may result in lower overall + //! throughput across the GPU. However turn-around latency may be lower and + //! thus useful when the GPU is under-occupied. + //! + //! @endrst + BLOCK_REDUCE_WARP_REDUCTIONS, +}; + +//! @rst +//! The BlockReduce class provides :ref:`collective ` methods for computing a parallel reduction +//! of items partitioned across a CUDA thread block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - A `reduction `_ (or *fold*) uses a binary combining +//! operator to compute a single aggregate from a list of input elements. +//! - @rowmajor +//! - BlockReduce can be optionally specialized by algorithm to accommodate different latency/throughput +//! workload profiles: +//! +//! #. :cpp:enumerator:`cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY`: +//! An efficient "raking" reduction algorithm that only supports commutative reduction operators. +//! #. :cpp:enumerator:`cub::BLOCK_REDUCE_RAKING`: +//! An efficient "raking" reduction algorithm that supports commutative and non-commutative reduction operators. +//! #. :cpp:enumerator:`cub::BLOCK_REDUCE_WARP_REDUCTIONS`: +//! A quick "tiled warp-reductions" reduction algorithm that supports commutative and non-commutative +//! reduction operators. +//! +//! Performance Considerations +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! 
+//! - @granularity +//! - Very efficient (only one synchronization barrier). +//! - Incurs zero bank conflicts for most types +//! - Computation is slightly more efficient (i.e., having lower instruction overhead) for: +//! - Summation (vs. generic reduction) +//! - ``BLOCK_THREADS`` is a multiple of the architecture's warp size +//! - Every thread has a valid input (i.e., full vs. partial-tiles) +//! - See cub::BlockReduceAlgorithm for performance details regarding algorithmic alternatives +//! +//! A Simple Example +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! @blockcollective{BlockReduce} +//! +//! The code snippet below illustrates a sum reduction of 512 integer items that +//! are partitioned in a :ref:`blocked arrangement ` across 128 threads +//! where each thread owns 4 consecutive items. +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialize BlockReduce for a 1D block of 128 threads of type int +//! using BlockReduce = cub::BlockReduce; +//! +//! // Allocate shared memory for BlockReduce +//! __shared__ typename BlockReduce::TempStorage temp_storage; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_data[4]; +//! ... +//! +//! // Compute the block-wide sum for thread0 +//! int aggregate = BlockReduce(temp_storage).Sum(thread_data); +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared memory with +//! BlockReduce and how to re-purpose the same memory region. +//! +//! @endrst +//! +//! @tparam T +//! Data type being reduced +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam ALGORITHM +//! **[optional]** cub::BlockReduceAlgorithm enumerator specifying the underlying algorithm to use +//! 
(default: cub::BLOCK_REDUCE_WARP_REDUCTIONS) +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. +template +class BlockReduce +{ +private: + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + using WarpReductions = detail::BlockReduceWarpReductions; + using RakingCommutativeOnly = detail::BlockReduceRakingCommutativeOnly; + using Raking = detail::BlockReduceRaking; + + /// Internal specialization type + using InternalBlockReduce = + ::cuda::std::_If>; // BlockReduceRaking + + /// Shared memory storage layout type for BlockReduce + using _TempStorage = typename InternalBlockReduce::TempStorage; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + +public: + /// @smemstorage{BlockReduce} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE BlockReduce() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. 
+ * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockReduce(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Generic reductions + //! @{ + + //! @rst + //! Computes a block-wide reduction for thread\ :sub:`0` using the specified binary reduction functor. + //! Each thread contributes one input element. + //! + //! - The return value is undefined in threads other than thread\ :sub:`0`. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a max reduction of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockReduce for a 1D block of 128 threads of type int + //! using BlockReduce = cub::BlockReduce; + //! + //! // Allocate shared memory for BlockReduce + //! __shared__ typename BlockReduce::TempStorage temp_storage; + //! + //! // Each thread obtains an input item + //! int thread_data; + //! ... + //! + //! // Compute the block-wide max for thread0 + //! int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cuda::maximum<>{}); + //! + //! @endrst + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] reduction_op + //! Binary reduction functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp reduction_op) + { + return InternalBlockReduce(temp_storage).template Reduce(input, BLOCK_THREADS, reduction_op); + } + + //! @rst + //! Computes a block-wide reduction for thread\ :sub:`0` using the specified binary reduction functor. + //! 
Each thread contributes an array of consecutive input elements. + //! + //! - The return value is undefined in threads other than thread\ :sub:`0`. + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a max reduction of 512 integer items that are partitioned in a + //! :ref:`blocked arrangement ` across 128 threads where each thread owns + //! 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockReduce for a 1D block of 128 threads of type int + //! using BlockReduce = cub::BlockReduce; + //! + //! // Allocate shared memory for BlockReduce + //! __shared__ typename BlockReduce::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Compute the block-wide max for thread0 + //! int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cuda::maximum<>{}); + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] inputs + //! Calling thread's input segment + //! + //! @param[in] reduction_op + //! Binary reduction functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T (&inputs)[ITEMS_PER_THREAD], ReductionOp reduction_op) + { + // Reduce partials + T partial = cub::ThreadReduce(inputs, reduction_op); + return Reduce(partial, reduction_op); + } + + //! @rst + //! Computes a block-wide reduction for thread\ :sub:`0` using the specified binary reduction functor. + //! The first ``num_valid`` threads each contribute one input element. + //! + //! - The return value is undefined in threads other than thread0. + //! - @rowmajor + //! 
- @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a max reduction of a partially-full tile of integer items + //! that are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int num_valid, ...) + //! { + //! // Specialize BlockReduce for a 1D block of 128 threads of type int + //! using BlockReduce = cub::BlockReduce; + //! + //! // Allocate shared memory for BlockReduce + //! __shared__ typename BlockReduce::TempStorage temp_storage; + //! + //! // Each thread obtains an input item + //! int thread_data; + //! if (threadIdx.x < num_valid) thread_data = ... + //! + //! // Compute the block-wide max for thread0 + //! int aggregate = BlockReduce(temp_storage).Reduce(thread_data, cuda::maximum<>{}, num_valid); + //! + //! @endrst + //! + //! @tparam ReductionOp + //! **[inferred]** Binary reduction functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] reduction_op + //! Binary reduction functor + //! + //! @param[in] num_valid + //! Number of threads containing valid elements (may be less than BLOCK_THREADS) + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, ReductionOp reduction_op, int num_valid) + { + // Determine if we skip bounds checking + if (num_valid >= BLOCK_THREADS) + { + return InternalBlockReduce(temp_storage).template Reduce(input, num_valid, reduction_op); + } + else + { + return InternalBlockReduce(temp_storage).template Reduce(input, num_valid, reduction_op); + } + } + + //! @} end member group + //! @name Summation reductions + //! @{ + + //! @rst + //! Computes a block-wide reduction for thread\ :sub:`0` using addition (+) as the reduction operator. + //! Each thread contributes one input element. + //! + //! - The return value is undefined in threads other than thread\ :sub:`0`. + //! - @rowmajor + //! 
- @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sum reduction of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockReduce for a 1D block of 128 threads of type int + //! using BlockReduce = cub::BlockReduce; + //! + //! // Allocate shared memory for BlockReduce + //! __shared__ typename BlockReduce::TempStorage temp_storage; + //! + //! // Each thread obtains an input item + //! int thread_data; + //! ... + //! + //! // Compute the block-wide sum for thread0 + //! int aggregate = BlockReduce(temp_storage).Sum(thread_data); + //! + //! @endrst + //! + //! @param[in] input + //! Calling thread's input + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input) + { + return InternalBlockReduce(temp_storage).template Sum(input, BLOCK_THREADS); + } + + //! @rst + //! Computes a block-wide reduction for thread0 using addition (+) as the reduction operator. + //! Each thread contributes an array of consecutive input elements. + //! + //! - The return value is undefined in threads other than thread\ :sub:`0`. + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sum reduction of 512 integer items that are partitioned in a + //! :ref:`blocked arrangement ` across 128 threads where each thread owns + //! 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockReduce for a 1D block of 128 threads of type int + //! using BlockReduce = cub::BlockReduce; + //! + //! // Allocate shared memory for BlockReduce + //! __shared__ typename BlockReduce::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! 
+ //! // Compute the block-wide sum for thread0 + //! int aggregate = BlockReduce(temp_storage).Sum(thread_data); + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @param[in] inputs + //! Calling thread's input segment + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T (&inputs)[ITEMS_PER_THREAD]) + { + // Reduce partials + T partial = cub::ThreadReduce(inputs, ::cuda::std::plus<>{}); + return Sum(partial); + } + + //! @rst + //! Computes a block-wide reduction for thread\ :sub:`0` using addition (+) as the reduction operator. + //! The first ``num_valid`` threads each contribute one input element. + //! + //! - The return value is undefined in threads other than thread\ :sub:`0`. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a sum reduction of a partially-full tile of integer items + //! that are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int num_valid, ...) + //! { + //! // Specialize BlockReduce for a 1D block of 128 threads of type int + //! using BlockReduce = cub::BlockReduce; + //! + //! // Allocate shared memory for BlockReduce + //! __shared__ typename BlockReduce::TempStorage temp_storage; + //! + //! // Each thread obtains an input item (up to num_items) + //! int thread_data; + //! if (threadIdx.x < num_valid) + //! thread_data = ... + //! + //! // Compute the block-wide sum for thread0 + //! int aggregate = BlockReduce(temp_storage).Sum(thread_data, num_valid); + //! + //! @endrst + //! + //! @param[in] input + //! Calling thread's input + //! + //! @param[in] num_valid + //! 
Number of threads containing valid elements (may be less than BLOCK_THREADS) + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input, int num_valid) + { + // Determine if we skip bounds checking + if (num_valid >= BLOCK_THREADS) + { + return InternalBlockReduce(temp_storage).template Sum(input, num_valid); + } + else + { + return InternalBlockReduce(temp_storage).template Sum(input, num_valid); + } + } + + //! @} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_run_length_decode.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_run_length_decode.cuh new file mode 100644 index 0000000000000000000000000000000000000000..467d9141dc3aba693b8cae66fe1d646064a17e37 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_run_length_decode.cuh @@ -0,0 +1,435 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The BlockRunLengthDecode class supports decoding a run-length encoded array of items. That +//! is, given the two arrays ``run_value[N]`` and ``run_lengths[N]``, ``run_value[i]`` is repeated ``run_lengths[i]`` +//! many times in the output array. Due to the nature of the run-length decoding algorithm +//! ("decompression"), the output size of the run-length decoded array is runtime-dependent and +//! potentially without any upper bound. To address this, BlockRunLengthDecode allows retrieving a +//! "window" from the run-length decoded array. The window's offset can be specified and +//! BLOCK_THREADS * DECODED_ITEMS_PER_THREAD (i.e., referred to as window_size) decoded items from +//! 
the specified window will be returned. +//! +//! .. note:: +//! +//! Trailing runs of length 0 are supported (i.e., they may only appear at the end of the run_lengths array). +//! A run of length zero may not be followed by a run length that is not zero. +//! +//! .. code-block:: c++ +//! +//! __global__ void ExampleKernel(...) +//! { +//! // Specialising BlockRunLengthDecode to run-length decode items of type uint64_t +//! using RunItemT = uint64_t; +//! // Type large enough to index into the run-length decoded array +//! using RunLengthT = uint32_t; +//! +//! // Specialising BlockRunLengthDecode for a 1D block of 128 threads +//! constexpr int BLOCK_DIM_X = 128; +//! // Specialising BlockRunLengthDecode to have each thread contribute 2 run-length encoded runs +//! constexpr int RUNS_PER_THREAD = 2; +//! // Specialising BlockRunLengthDecode to have each thread hold 4 run-length decoded items +//! constexpr int DECODED_ITEMS_PER_THREAD = 4; +//! +//! // Specialize BlockRadixSort for a 1D block of 128 threads owning 4 integer items each +//! using BlockRunLengthDecodeT = +//! cub::BlockRunLengthDecode; +//! +//! // Allocate shared memory for BlockRunLengthDecode +//! __shared__ typename BlockRunLengthDecodeT::TempStorage temp_storage; +//! +//! // The run-length encoded items and how often they shall be repeated in the run-length decoded output +//! RunItemT run_values[RUNS_PER_THREAD]; +//! RunLengthT run_lengths[RUNS_PER_THREAD]; +//! ... +//! +//! // Initialize the BlockRunLengthDecode with the runs that we want to run-length decode +//! uint32_t total_decoded_size = 0; +//! BlockRunLengthDecodeT block_rld(temp_storage, run_values, run_lengths, total_decoded_size); +//! +//! // Run-length decode ("decompress") the runs into a window buffer of limited size. This is repeated until all +//! runs +//! // have been decoded. +//! uint32_t decoded_window_offset = 0U; +//! while (decoded_window_offset < total_decoded_size) +//! { +//! 
RunLengthT relative_offsets[DECODED_ITEMS_PER_THREAD]; +//! RunItemT decoded_items[DECODED_ITEMS_PER_THREAD]; +//! +//! // The number of decoded items that are valid within this window (aka pass) of run-length decoding +//! uint32_t num_valid_items = total_decoded_size - decoded_window_offset; +//! block_rld.RunLengthDecode(decoded_items, relative_offsets, decoded_window_offset); +//! +//! decoded_window_offset += BLOCK_DIM_X * DECODED_ITEMS_PER_THREAD; +//! +//! ... +//! } +//! } +//! +//! Suppose the set of input ``run_values`` across the block of threads is +//! ``{ [0, 1], [2, 3], [4, 5], [6, 7], ..., [254, 255] }`` and +//! ``run_lengths`` is ``{ [1, 2], [3, 4], [5, 1], [2, 3], ..., [5, 1] }``. +//! The corresponding output ``decoded_items`` in those threads will be +//! ``{ [0, 1, 1, 2], [2, 2, 3, 3], [3, 3, 4, 4], [4, 4, 4, 5], ..., [169, 169, 170, 171] }`` +//! and ``relative_offsets`` will be +//! ``{ [0, 0, 1, 0], [1, 2, 0, 1], [2, 3, 0, 1], [2, 3, 4, 0], ..., [3, 4, 0, 0] }`` during the +//! first iteration of the while loop. +//! +//! @endrst +//! +//! @tparam ItemT +//! The data type of the items being run-length decoded +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam RUNS_PER_THREAD +//! The number of consecutive runs that each thread contributes +//! +//! @tparam DECODED_ITEMS_PER_THREAD +//! The maximum number of decoded items that each thread holds +//! +//! @tparam DecodedOffsetT +//! Type used to index into the block's decoded items (large enough to hold the sum over all the +//! runs' lengths) +//! +//! @tparam BLOCK_DIM_Y +//! The thread block length in threads along the Y dimension +//! +//! @tparam BLOCK_DIM_Z +//! 
The thread block length in threads along the Z dimension +template +class BlockRunLengthDecode +{ + //--------------------------------------------------------------------- + // CONFIGS & TYPE ALIASES + //--------------------------------------------------------------------- + +private: + /// The thread block size in threads + static constexpr int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z; + + /// The number of runs that the block decodes (out-of-bounds items may be padded with run lengths of '0') + static constexpr int BLOCK_RUNS = BLOCK_THREADS * RUNS_PER_THREAD; + + /// BlockScan used to determine the beginning of each run (i.e., prefix sum over the runs' length) + using RunOffsetScanT = BlockScan; + + /// Type used to index into the block's runs + using RunOffsetT = uint32_t; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + /// Shared memory type required by this thread block + union _TempStorage + { + typename RunOffsetScanT::TempStorage offset_scan; + struct + { + ItemT run_values[BLOCK_RUNS]; + DecodedOffsetT run_offsets[BLOCK_RUNS]; + } runs; + }; // union TempStorage +#endif // _CCCL_DOXYGEN_INVOKED + + /// Internal storage allocator (used when the user does not provide pre-allocated shared memory) + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + uint32_t linear_tid; + +public: + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // CONSTRUCTOR + //--------------------------------------------------------------------- + + //! @brief Constructor specialised for user-provided temporary storage, initializing using the runs' lengths. + //! The algorithm's temporary storage may not be repurposed between the constructor call and subsequent + //! `RunLengthDecode` calls. 
+ template + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRunLengthDecode( + TempStorage& temp_storage, + ItemT (&run_values)[RUNS_PER_THREAD], + RunLengthT (&run_lengths)[RUNS_PER_THREAD], + TotalDecodedSizeT& total_decoded_size) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + { + InitWithRunLengths(run_values, run_lengths, total_decoded_size); + } + + //! @brief Constructor specialised for user-provided temporary storage, initializing using the runs' offsets. + //! The algorithm's temporary storage may not be repurposed between the constructor call and subsequent + //! `RunLengthDecode` calls. + template + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRunLengthDecode( + TempStorage& temp_storage, ItemT (&run_values)[RUNS_PER_THREAD], UserRunOffsetT (&run_offsets)[RUNS_PER_THREAD]) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + { + InitWithRunOffsets(run_values, run_offsets); + } + + /** + * \brief Constructor specialised for static temporary storage, initializing using the runs' lengths. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE BlockRunLengthDecode( + ItemT (&run_values)[RUNS_PER_THREAD], + RunLengthT (&run_lengths)[RUNS_PER_THREAD], + TotalDecodedSizeT& total_decoded_size) + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + { + InitWithRunLengths(run_values, run_lengths, total_decoded_size); + } + + /** + * \brief Constructor specialised for static temporary storage, initializing using the runs' offsets. 
+ */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE + BlockRunLengthDecode(ItemT (&run_values)[RUNS_PER_THREAD], UserRunOffsetT (&run_offsets)[RUNS_PER_THREAD]) + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + { + InitWithRunOffsets(run_values, run_offsets); + } + +private: + /** + * @brief Returns the offset of the first value within @p input which compares greater than + * @p val. This version takes @p MAX_NUM_ITEMS, an upper bound of the array size, which will + * be used to determine the number of binary search iterations at compile time. + * + * @param[in] input + * Input sequence + * + * @param[in] num_items + * Input sequence length + * + * @param[in] val + * Search key + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE OffsetT StaticUpperBound(InputIteratorT input, OffsetT num_items, T val) + { + OffsetT lower_bound = 0; + OffsetT upper_bound = num_items; +#pragma unroll + for (int i = 0; i <= Log2::VALUE; i++) + { + OffsetT mid = cub::MidPoint(lower_bound, upper_bound); + mid = (::cuda::std::min)(mid, num_items - 1); + + if (val < input[mid]) + { + upper_bound = mid; + } + else + { + lower_bound = mid + 1; + } + } + + return lower_bound; + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InitWithRunOffsets(ItemT (&run_values)[RUNS_PER_THREAD], RunOffsetT (&run_offsets)[RUNS_PER_THREAD]) + { + // Keep the runs' items and the offsets of each run's beginning in the temporary storage + RunOffsetT thread_dst_offset = static_cast(linear_tid) * static_cast(RUNS_PER_THREAD); +#pragma unroll + for (int i = 0; i < RUNS_PER_THREAD; i++) + { + temp_storage.runs.run_values[thread_dst_offset] = run_values[i]; + temp_storage.runs.run_offsets[thread_dst_offset] = run_offsets[i]; + thread_dst_offset++; + } + + // Ensure run offsets and run values have been written to shared memory + __syncthreads(); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InitWithRunLengths( + ItemT (&run_values)[RUNS_PER_THREAD], + 
RunLengthT (&run_lengths)[RUNS_PER_THREAD], + TotalDecodedSizeT& total_decoded_size) + { + // Compute the offset for the beginning of each run + DecodedOffsetT run_offsets[RUNS_PER_THREAD]; +#pragma unroll + for (int i = 0; i < RUNS_PER_THREAD; i++) + { + run_offsets[i] = static_cast(run_lengths[i]); + } + DecodedOffsetT decoded_size_aggregate; + RunOffsetScanT(this->temp_storage.offset_scan).ExclusiveSum(run_offsets, run_offsets, decoded_size_aggregate); + total_decoded_size = static_cast(decoded_size_aggregate); + + // Ensure the prefix scan's temporary storage can be reused (may be superfluous, but depends on scan implementation) + __syncthreads(); + + InitWithRunOffsets(run_values, run_offsets); + } + +public: + /** + * \brief Run-length decodes the runs previously passed via a call to Init(...) and returns the run-length decoded + * items in a blocked arrangement to \p decoded_items. If the number of run-length decoded items exceeds the + * run-length decode buffer (i.e., `DECODED_ITEMS_PER_THREAD * BLOCK_THREADS`), only the items that fit within + * the buffer are returned. Subsequent calls to `RunLengthDecode` adjusting \p from_decoded_offset can be + * used to retrieve the remaining run-length decoded items. Calling __syncthreads() between any two calls to + * `RunLengthDecode` is not required. + * \p item_offsets can be used to retrieve each run-length decoded item's relative index within its run. E.g., the + * run-length encoded array of `3, 1, 4` with the respective run lengths of `2, 1, 3` would yield the run-length + * decoded array of `3, 3, 1, 4, 4, 4` with the relative offsets of `0, 1, 0, 0, 1, 2`. 
+ * \smemreuse + * + * \param[out] decoded_items The run-length decoded items to be returned in a blocked arrangement + * \param[out] item_offsets The run-length decoded items' relative offset within the run they belong to + * \param[in] from_decoded_offset If invoked with from_decoded_offset that is larger than total_decoded_size results + * in undefined behavior. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void RunLengthDecode( + ItemT (&decoded_items)[DECODED_ITEMS_PER_THREAD], + RelativeOffsetT (&item_offsets)[DECODED_ITEMS_PER_THREAD], + DecodedOffsetT from_decoded_offset = 0) + { + // The (global) offset of the first item decoded by this thread + DecodedOffsetT thread_decoded_offset = from_decoded_offset + linear_tid * DECODED_ITEMS_PER_THREAD; + + // The run that the first decoded item of this thread belongs to + // If this thread's is already beyond the total decoded size, it will be assigned to the + // last run + RunOffsetT assigned_run = + StaticUpperBound(temp_storage.runs.run_offsets, BLOCK_RUNS, thread_decoded_offset) + - static_cast(1U); + + DecodedOffsetT assigned_run_begin = temp_storage.runs.run_offsets[assigned_run]; + + // If this thread is getting assigned the last run, we make sure it will not fetch any other run after this + DecodedOffsetT assigned_run_end = + (assigned_run == BLOCK_RUNS - 1) + ? 
thread_decoded_offset + DECODED_ITEMS_PER_THREAD + : temp_storage.runs.run_offsets[assigned_run + 1]; + + ItemT val = temp_storage.runs.run_values[assigned_run]; + +#pragma unroll + for (DecodedOffsetT i = 0; i < DECODED_ITEMS_PER_THREAD; i++) + { + decoded_items[i] = val; + item_offsets[i] = thread_decoded_offset - assigned_run_begin; + + // A thread only needs to fetch the next run if this was not the last loop iteration + const bool is_final_loop_iteration = (i + 1 >= DECODED_ITEMS_PER_THREAD); + if (!is_final_loop_iteration && (thread_decoded_offset == assigned_run_end - 1)) + { + // We make sure that a thread is not re-entering this conditional when being assigned to the last run already by + // extending the last run's length to all the thread's item + assigned_run++; + assigned_run_begin = temp_storage.runs.run_offsets[assigned_run]; + + // If this thread is getting assigned the last run, we make sure it will not fetch any other run after this + assigned_run_end = (assigned_run == BLOCK_RUNS - 1) + ? thread_decoded_offset + DECODED_ITEMS_PER_THREAD + : temp_storage.runs.run_offsets[assigned_run + 1]; + val = temp_storage.runs.run_values[assigned_run]; + } + thread_decoded_offset++; + } + } + + /** + * \brief Run-length decodes the runs previously passed via a call to Init(...) and returns the run-length decoded + * items in a blocked arrangement to `decoded_items`. If the number of run-length decoded items exceeds the + * run-length decode buffer (i.e., `DECODED_ITEMS_PER_THREAD * BLOCK_THREADS`), only the items that fit within + * the buffer are returned. Subsequent calls to `RunLengthDecode` adjusting `from_decoded_offset` can be + * used to retrieve the remaining run-length decoded items. Calling __syncthreads() between any two calls to + * `RunLengthDecode` is not required. 
+ * + * \param[out] decoded_items The run-length decoded items to be returned in a blocked arrangement + * \param[in] from_decoded_offset If invoked with from_decoded_offset that is larger than total_decoded_size results + * in undefined behavior. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void + RunLengthDecode(ItemT (&decoded_items)[DECODED_ITEMS_PER_THREAD], DecodedOffsetT from_decoded_offset = 0) + { + DecodedOffsetT item_offsets[DECODED_ITEMS_PER_THREAD]; + RunLengthDecode(decoded_items, item_offsets, from_decoded_offset); + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_scan.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_scan.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c25bd2d258d9bc8a93c02064c94a12d21fd663db --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_scan.cuh @@ -0,0 +1,2588 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! The cub::BlockScan class provides :ref:`collective ` methods for computing a parallel prefix +//! sum/scan of items partitioned across a CUDA thread block. + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +/****************************************************************************** + * Algorithmic variants + ******************************************************************************/ + +//! @brief BlockScanAlgorithm enumerates alternative algorithms for cub::BlockScan to compute a +//! parallel prefix scan across a CUDA thread block. +enum BlockScanAlgorithm +{ + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! An efficient "raking reduce-then-scan" prefix scan algorithm. Execution is comprised of five phases: + //! + //! #. 
Upsweep sequential reduction in registers (if threads contribute more than one input each). + //! Each thread then places the partial reduction of its item(s) into shared memory. + //! #. Upsweep sequential reduction in shared memory. + //! Threads within a single warp rake across segments of shared partial reductions. + //! #. A warp-synchronous Kogge-Stone style exclusive scan within the raking warp. + //! #. Downsweep sequential exclusive scan in shared memory. + //! Threads within a single warp rake across segments of shared partial reductions, + //! seeded with the warp-scan output. + //! #. Downsweep sequential scan in registers (if threads contribute more than one input), + //! seeded with the raking scan output. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - Although this variant may suffer longer turnaround latencies when the + //! GPU is under-occupied, it can often provide higher overall throughput + //! across the GPU when suitably occupied. + //! + //! @endrst + BLOCK_SCAN_RAKING, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! Similar to cub::BLOCK_SCAN_RAKING, but with fewer shared memory reads at the expense of higher + //! register pressure. Raking threads preserve their "upsweep" segment of values in registers while performing + //! warp-synchronous scan, allowing the "downsweep" not to re-read them from shared memory. + //! + //! @endrst + BLOCK_SCAN_RAKING_MEMOIZE, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A quick "tiled warpscans" prefix scan algorithm. Execution is comprised of four phases: + //! #. Upsweep sequential reduction in registers (if threads contribute more than one input each). + //! Each thread then places the partial reduction of its item(s) into shared memory. + //! #. Compute a shallow, but inefficient warp-synchronous Kogge-Stone style scan within each warp. + //! #. 
A propagation phase where the warp scan outputs in each warp are updated with the aggregate + //! from each preceding warp. + //! #. Downsweep sequential scan in registers (if threads contribute more than one input), + //! seeded with the raking scan output. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - Although this variant may suffer lower overall throughput across the + //! GPU because due to a heavy reliance on inefficient warpscans, it can + //! often provide lower turnaround latencies when the GPU is under-occupied. + //! + //! @endrst + BLOCK_SCAN_WARP_SCANS, +}; + +//! @rst +//! The BlockScan class provides :ref:`collective ` methods for computing a parallel prefix +//! sum/scan of items partitioned across a CUDA thread block. +//! +//! Overview +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! - Given a list of input elements and a binary reduction operator, a +//! `prefix scan `_ produces an output list where each element is computed +//! to be the reduction of the elements occurring earlier in the input list. *Prefix sum* connotes a prefix scan with +//! the addition operator. The term *inclusive indicates* that the *i*\ :sup:`th` output reduction incorporates +//! the *i*\ :sup:`th` input. The term *exclusive* indicates the *i*\ :sup:`th` input is not incorporated into +//! the *i*\ :sup:`th` output reduction. +//! - @rowmajor +//! - BlockScan can be optionally specialized by algorithm to accommodate different workload profiles: +//! +//! #. :cpp:enumerator:`cub::BLOCK_SCAN_RAKING`: +//! An efficient (high throughput) "raking reduce-then-scan" prefix scan algorithm. +//! #. :cpp:enumerator:`cub::BLOCK_SCAN_RAKING_MEMOIZE`: +//! Similar to cub::BLOCK_SCAN_RAKING, but having higher throughput at the expense of additional +//! register pressure for intermediate storage. +//! #. :cpp:enumerator:`cub::BLOCK_SCAN_WARP_SCANS`: +//! A quick (low latency) "tiled warpscans" prefix scan algorithm. +//! +//! 
Performance Considerations
//! +++++++++++++++++++++++++++++++++++++++++++++
//!
//! - @granularity
//! - Uses special instructions when applicable (e.g., warp ``SHFL``)
//! - Uses synchronization-free communication between warp lanes when applicable
//! - Invokes a minimal number of minimal block-wide synchronization barriers (only
//!   one or two depending on algorithm selection)
//! - Incurs zero bank conflicts for most types
//! - Computation is slightly more efficient (i.e., having lower instruction overhead) for:
//!
//!   - Prefix sum variants (vs. generic scan)
//!   - @blocksize
//!
//! - See cub::BlockScanAlgorithm for performance details regarding algorithmic alternatives
//!
//! A Simple Example
//! +++++++++++++++++++++++++++++++++++++++++++++
//!
//! @blockcollective{BlockScan}
//!
//! The code snippet below illustrates an exclusive prefix sum of 512 integer items that
//! are partitioned in a :ref:`blocked arrangement <flexible-data-arrangement>` across 128 threads
//! where each thread owns 4 consecutive items.
//!
//! .. code-block:: c++
//!
//!    #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
//!
//!    __global__ void ExampleKernel(...)
//!    {
//!        // Specialize BlockScan for a 1D block of 128 threads of type int
//!        using BlockScan = cub::BlockScan<int, 128>;
//!
//!        // Allocate shared memory for BlockScan
//!        __shared__ typename BlockScan::TempStorage temp_storage;
//!
//!        // Obtain a segment of consecutive items that are blocked across threads
//!        int thread_data[4];
//!        ...
//!
//!        // Collectively compute the block-wide exclusive prefix sum
//!        BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data);
//!
//! Suppose the set of input ``thread_data`` across the block of threads is
//! ``{[1,1,1,1], [1,1,1,1], ..., [1,1,1,1]}``.
//! The corresponding output ``thread_data`` in those threads will be
//! ``{[0,1,2,3], [4,5,6,7], ..., [508,509,510,511]}``.
//!
//! Re-using dynamically allocated shared memory
//! +++++++++++++++++++++++++++++++++++++++++++++
//!
//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of dynamically shared
//! memory with BlockReduce and how to re-purpose the same memory region.
//! This example can be easily adapted to the storage required by BlockScan.
//!
//! @endrst
//!
//! @tparam T
//!   Data type being scanned
//!
//! @tparam BLOCK_DIM_X
//!   The thread block length in threads along the X dimension
//!
//! @tparam ALGORITHM
//!   **[optional]** cub::BlockScanAlgorithm enumerator specifying the underlying algorithm to use
//!   (default: cub::BLOCK_SCAN_RAKING)
//!
//! @tparam BLOCK_DIM_Y
//!   **[optional]** The thread block length in threads along the Y dimension (default: 1)
//!
//! @tparam BLOCK_DIM_Z
//!   **[optional]** The thread block length in threads along the Z dimension (default: 1)
//!
//! @tparam LEGACY_PTX_ARCH
//!   **[optional]** Unused.
template <typename T,
          int BLOCK_DIM_X,
          BlockScanAlgorithm ALGORITHM = BLOCK_SCAN_RAKING,
          int BLOCK_DIM_Y              = 1,
          int BLOCK_DIM_Z              = 1,
          int LEGACY_PTX_ARCH          = 0>
class BlockScan
{
private:
  /// Constants
  enum
  {
    /// The thread block size in threads
    BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z,
  };

  /**
   * Ensure the template parameterization meets the requirements of the
   * specified algorithm. Currently, the BLOCK_SCAN_WARP_SCANS policy
   * cannot be used with thread block sizes not a multiple of the
   * architectural warp size; fall back to raking in that case.
   */
  static constexpr BlockScanAlgorithm SAFE_ALGORITHM =
    ((ALGORITHM == BLOCK_SCAN_WARP_SCANS) && (BLOCK_THREADS % CUB_WARP_THREADS(0) != 0))
      ? BLOCK_SCAN_RAKING
      : ALGORITHM;

  // NOTE(review): the extraction stripped these template argument lists; restored per upstream
  // CUB -- confirm against the vendored copy.
  using WarpScans = detail::BlockScanWarpScans<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, LEGACY_PTX_ARCH>;
  using Raking    = detail::BlockScanRaking<T,
                                            BLOCK_DIM_X,
                                            BLOCK_DIM_Y,
                                            BLOCK_DIM_Z,
                                            (SAFE_ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE),
                                            LEGACY_PTX_ARCH>;

  /// Define the delegate type for the desired algorithm
  using InternalBlockScan = ::cuda::std::_If<SAFE_ALGORITHM == BLOCK_SCAN_WARP_SCANS, WarpScans, Raking>;

  /// Shared memory storage layout type for BlockScan
  using _TempStorage = typename InternalBlockScan::TempStorage;

  /// Shared storage reference
  _TempStorage& temp_storage;

  /// Linear thread-id
  unsigned int linear_tid;

  /// Internal storage allocator (statically allocated shared memory, used when the caller does
  /// not supply a TempStorage)
  _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage()
  {
    __shared__ _TempStorage private_storage;
    return private_storage;
  }

public:
  /// @smemstorage{BlockScan}
  struct TempStorage : Uninitialized<_TempStorage>
  {};

  //! @name Collective constructors
  //! @{

  //! @brief Collective constructor using a private static allocation of shared memory as temporary storage.
  _CCCL_DEVICE _CCCL_FORCEINLINE BlockScan()
      : temp_storage(PrivateStorage())
      , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
  {}

  /**
   * @brief Collective constructor using the specified memory allocation as temporary storage.
   *
   * @param[in] temp_storage
   *   Reference to memory allocation having layout type TempStorage
   */
  _CCCL_DEVICE _CCCL_FORCEINLINE BlockScan(TempStorage& temp_storage)
      : temp_storage(temp_storage.Alias())
      , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z))
  {}

  //! @} end member group
  //! @name Exclusive prefix sum operations
  //! @{

  //! @rst
  //! Computes an exclusive block-wide prefix scan using addition (+) as the scan operator.
  //! Each thread contributes one input element. The value of 0 is applied as the initial value,
  //! and is assigned to ``output`` in *thread*\ :sub:`0`.
  //!
  //! - @identityzero
  //! - @rowmajor
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates an exclusive prefix sum of 128 integer items that
  //!
are partitioned across 128 threads.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
  //!
  //!    __global__ void ExampleKernel(...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads of type int
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!        // Obtain input item for each thread
  //!        int thread_data;
  //!        ...
  //!
  //!        // Collectively compute the block-wide exclusive prefix sum
  //!        BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data);
  //!
  //! Suppose the set of input ``thread_data`` across the block of threads is ``1, 1, ..., 1``.
  //! The corresponding output ``thread_data`` in those threads will be ``0, 1, ..., 127``.
  //!
  //! @endrst
  //!
  //! @param[in] input
  //!   Calling thread's input item
  //!
  //! @param[out] output
  //!   Calling thread's output item (may be aliased to `input`)
  _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum(T input, T& output)
  {
    // A value-initialized T serves as the scan identity (0 for arithmetic types).
    T initial_value{};

    ExclusiveScan(input, output, initial_value, ::cuda::std::plus<>{});
  }

  //! @rst
  //! Computes an exclusive block-wide prefix scan using addition (+) as the scan operator.
  //! Each thread contributes one input element.
  //! The value of 0 is applied as the initial value, and is assigned to ``output`` in
  //! *thread*\ :sub:`0`. Also provides every thread with the block-wide ``block_aggregate``
  //! of all inputs.
  //!
  //! - @identityzero
  //! - @rowmajor
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates an exclusive prefix sum of 128 integer items that
  //! are partitioned across 128 threads.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
  //!
  //!    __global__ void ExampleKernel(...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads of type int
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!        // Obtain input item for each thread
  //!        int thread_data;
  //!        ...
  //!
  //!        // Collectively compute the block-wide exclusive prefix sum
  //!        int block_aggregate;
  //!        BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate);
  //!
  //! Suppose the set of input ``thread_data`` across the block of threads is ``1, 1, ..., 1``.
  //! The corresponding output ``thread_data`` in those threads will be ``0, 1, ..., 127``.
  //! Furthermore the value ``128`` will be stored in ``block_aggregate`` for all threads.
  //!
  //! @endrst
  //!
  //! @param[in] input
  //!   Calling thread's input item
  //!
  //! @param[out] output
  //!   Calling thread's output item (may be aliased to `input`)
  //!
  //! @param[out] block_aggregate
  //!   block-wide aggregate reduction of input items
  _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum(T input, T& output, T& block_aggregate)
  {
    T initial_value{};

    ExclusiveScan(input, output, initial_value, ::cuda::std::plus<>{}, block_aggregate);
  }

  //! @rst
  //! Computes an exclusive block-wide prefix scan using addition (+) as the scan operator.
  //! Each thread contributes one input element. Instead of using 0 as the block-wide prefix, the
  //! call-back functor ``block_prefix_callback_op`` is invoked by the first warp in the block,
  //! and the value returned by *lane*\ :sub:`0` in that warp is used as the "seed" value that
  //! logically prefixes the thread block's scan inputs.
  //!
  //! - @identityzero
  //! - The ``block_prefix_callback_op`` functor must implement a member function
  //!   ``T operator()(T block_aggregate)``. The functor's input parameter ``block_aggregate``
  //!   is the same value also returned by the scan operation.
The functor will be invoked by the first warp of threads in the block,
  //!   however only the return value from *lane*\ :sub:`0` is applied as the block-wide prefix.
  //!   Can be stateful.
  //! - @rowmajor
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates a single thread block that progressively
  //! computes an exclusive prefix sum over multiple "tiles" of input using a
  //! prefix functor to maintain a running total between block-wide scans. Each tile consists
  //! of 128 integer items that are partitioned across 128 threads.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
  //!
  //!    // A stateful callback functor that maintains a running prefix to be applied
  //!    // during consecutive scan operations.
  //!    struct BlockPrefixCallbackOp
  //!    {
  //!        // Running prefix
  //!        int running_total;
  //!
  //!        // Constructor
  //!        // (the callback is entered from device code, so it must be __device__; the
  //!        // vendored text marked it __host__, which a kernel could not call)
  //!        __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
  //!
  //!        // Callback operator to be entered by the first warp of threads in the block.
  //!        // Thread-0 is responsible for returning a value for seeding the block-wide scan.
  //!        __device__ int operator()(int block_aggregate)
  //!        {
  //!            int old_prefix = running_total;
  //!            running_total += block_aggregate;
  //!            return old_prefix;
  //!        }
  //!    };
  //!
  //!    __global__ void ExampleKernel(int *d_data, int num_items, ...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!        // Initialize running total
  //!        BlockPrefixCallbackOp prefix_op(0);
  //!
  //!        // Have the block iterate over segments of items
  //!        for (int block_offset = 0; block_offset < num_items; block_offset += 128)
  //!        {
  //!            // Load a segment of consecutive items that are blocked across threads
  //!            int thread_data = d_data[block_offset];
  //!
  //!            // Collectively compute the block-wide exclusive prefix sum
  //!            BlockScan(temp_storage).ExclusiveSum(
  //!                thread_data, thread_data, prefix_op);
  //!            __syncthreads();
  //!
  //!            // Store scanned items to output segment
  //!            d_data[block_offset] = thread_data;
  //!        }
  //!
  //! Suppose the input ``d_data`` is ``1, 1, 1, 1, 1, 1, 1, 1, ...``.
  //! The corresponding output for the first segment will be ``0, 1, ..., 127``.
  //! The output for the second segment will be ``128, 129, ..., 255``.
  //!
  //! @endrst
  //!
  //! @tparam BlockPrefixCallbackOp
  //!   **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)`
  //!
  //! @param[in] input
  //!   Calling thread's input item
  //!
  //! @param[out] output
  //!   Calling thread's output item (may be aliased to `input`)
  //!
  //! @param[in,out] block_prefix_callback_op
  //!   @rst
  //!   *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied
  //!   to the logical input sequence.
  //!   @endrst
  template <typename BlockPrefixCallbackOp>
  _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum(T input, T& output, BlockPrefixCallbackOp& block_prefix_callback_op)
  {
    // Delegate to the generic exclusive scan with plus<> as the reduction operator.
    ExclusiveScan(input, output, ::cuda::std::plus<>{}, block_prefix_callback_op);
  }

  //! @} end member group
  //! @name Exclusive prefix sum operations (multiple data per thread)
  //! @{

  //! @rst
  //! Computes an exclusive block-wide prefix scan using addition (+) as the scan operator.
  //! Each thread contributes an array of consecutive input elements.
  //! The value of 0 is applied as the initial value, and is assigned to ``output[0]`` in
  //! *thread*\ :sub:`0`.
  //!
  //! - @identityzero
  //! - @blocked
  //! - @granularity
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates an exclusive prefix sum of 512 integer items that
  //! are partitioned in a :ref:`blocked arrangement <flexible-data-arrangement>` across 128 threads
where each thread owns 4 consecutive items.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
  //!
  //!    __global__ void ExampleKernel(...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads of type int
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!        // Obtain a segment of consecutive items that are blocked across threads
  //!        int thread_data[4];
  //!        ...
  //!
  //!        // Collectively compute the block-wide exclusive prefix sum
  //!        BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data);
  //!
  //! Suppose the set of input ``thread_data`` across the block of threads is
  //! ``{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }``.
  //! The corresponding output ``thread_data`` in those threads will be
  //! ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }``.
  //!
  //! @endrst
  //!
  //! @tparam ITEMS_PER_THREAD
  //!   **[inferred]** The number of consecutive items partitioned onto each thread.
  //!
  //! @param[in] input
  //!   Calling thread's input items
  //!
  //! @param[out] output
  //!   Calling thread's output items (may be aliased to `input`)
  template <int ITEMS_PER_THREAD>
  _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD])
  {
    // A value-initialized T serves as the scan identity (0 for arithmetic types).
    T initial_value{};

    ExclusiveScan(input, output, initial_value, ::cuda::std::plus<>{});
  }

  //! @rst
  //! Computes an exclusive block-wide prefix scan using addition (+) as the scan operator.
  //! Each thread contributes an array of consecutive input elements.
  //! The value of 0 is applied as the initial value, and is assigned to ``output[0]`` in
  //! *thread*\ :sub:`0`. Also provides every thread with the block-wide ``block_aggregate``
  //! of all inputs.
  //!
  //! - @identityzero
  //! - @blocked
  //! - @granularity
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates an exclusive prefix sum of 512 integer items that are
  //! partitioned in a :ref:`blocked arrangement <flexible-data-arrangement>` across 128 threads
  //! where each thread owns 4 consecutive items.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh>
  //!
  //!    __global__ void ExampleKernel(...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads of type int
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!        // Obtain a segment of consecutive items that are blocked across threads
  //!        int thread_data[4];
  //!        ...
  //!
  //!        // Collectively compute the block-wide exclusive prefix sum
  //!        int block_aggregate;
  //!        BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate);
  //!
  //! Suppose the set of input ``thread_data`` across the block of threads is
  //! ``{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }``.
  //! The corresponding output ``thread_data`` in those threads will be
  //! ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }``.
  //! Furthermore the value ``512`` will be stored in ``block_aggregate`` for all threads.
  //!
  //! @endrst
  //!
  //! @tparam ITEMS_PER_THREAD
  //!   **[inferred]** The number of consecutive items partitioned onto each thread.
  //!
  //! @param[in] input
  //!   Calling thread's input items
  //!
  //! @param[out] output
  //!   Calling thread's output items (may be aliased to `input`)
  //!
  //! @param[out] block_aggregate
  //!   block-wide aggregate reduction of input items
  template <int ITEMS_PER_THREAD>
  _CCCL_DEVICE _CCCL_FORCEINLINE void
  ExclusiveSum(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], T& block_aggregate)
  {
    T initial_value{};

    ExclusiveScan(input, output, initial_value, ::cuda::std::plus<>{}, block_aggregate);
  }

  //! @rst
  //!
Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. + //! Each thread contributes an array of consecutive input elements. + //! Instead of using 0 as the block-wide prefix, the call-back functor ``block_prefix_callback_op`` is invoked by + //! the first warp in the block, and the value returned by *lane*\ :sub:`0` in that warp is used as the "seed" + //! value that logically prefixes the thread block's scan inputs. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - @identityzero + //! - The ``block_prefix_callback_op`` functor must implement a member function ``T operator()(T block_aggregate)``. + //! The functor's input parameter ``block_aggregate`` is the same value also returned + //! by the scan operation. The functor will be invoked by the first warp of threads in + //! the block, however only the return value from *lane*\ :sub:`0` is applied as the block-wide prefix. + //! Can be stateful. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a single thread block that progressively + //! computes an exclusive prefix sum over multiple "tiles" of input using a + //! prefix functor to maintain a running total between block-wide scans. Each tile consists + //! of 512 integer items that are partitioned in a :ref:`blocked arrangement ` + //! across 128 threads where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // A stateful callback functor that maintains a running prefix to be applied + //! // during consecutive scan operations. + //! struct BlockPrefixCallbackOp + //! { + //! // Running prefix + //! int running_total; + //! + //! // Constructor + //! __host__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} + //! + //! 
  //!        // Callback operator to be entered by the first warp of threads in the block.
  //!        // Thread-0 is responsible for returning a value for seeding the block-wide scan.
  //!        __device__ int operator()(int block_aggregate)
  //!        {
  //!            int old_prefix = running_total;
  //!            running_total += block_aggregate;
  //!            return old_prefix;
  //!        }
  //!    };
  //!
  //!    __global__ void ExampleKernel(int *d_data, int num_items, ...)
  //!    {
  //!        // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread
  //!        using BlockLoad = cub::BlockLoad<int, 128, 4, BLOCK_LOAD_TRANSPOSE>;
  //!        using BlockStore = cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE>;
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan
  //!        __shared__ union {
  //!            typename BlockLoad::TempStorage load;
  //!            typename BlockScan::TempStorage scan;
  //!            typename BlockStore::TempStorage store;
  //!        } temp_storage;
  //!
  //!        // Initialize running total
  //!        BlockPrefixCallbackOp prefix_op(0);
  //!
  //!        // Have the block iterate over segments of items
  //!        for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4)
  //!        {
  //!            // Load a segment of consecutive items that are blocked across threads
  //!            int thread_data[4];
  //!            BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data);
  //!            __syncthreads();
  //!
  //!            // Collectively compute the block-wide exclusive prefix sum
  //!            BlockScan(temp_storage.scan).ExclusiveSum(
  //!                thread_data, thread_data, prefix_op);
  //!            __syncthreads();
  //!
  //!            // Store scanned items to output segment
  //!            BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data);
  //!            __syncthreads();
  //!        }
  //!
  //! Suppose the input ``d_data`` is ``1, 1, 1, 1, 1, 1, 1, 1, ...``.
  //! The corresponding output for the first segment will be ``0, 1, 2, 3, ..., 510, 511``.
  //! The output for the second segment will be ``512, 513, 514, 515, ..., 1022, 1023``.
+ //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member + //! `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied to + //! the logical input sequence. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveSum( + T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], BlockPrefixCallbackOp& block_prefix_callback_op) + { + ExclusiveScan(input, output, ::cuda::std::plus<>{}, block_prefix_callback_op); + } + + //! @} end member group // Exclusive prefix sums + //! @name Exclusive prefix scan operations + //! @{ + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. + //! + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an exclusive prefix max scan of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain input item for each thread + //! int thread_data; + //! ... + //! + //! // Collectively compute the block-wide exclusive prefix max scan + //! 
BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is ``0, -1, 2, -3, ..., 126, -127``. + //! The corresponding output ``thread_data`` in those threads will be ``INT_MIN, 0, 0, 2, ..., 124, 126``. + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] initial_value + //! @rst + //! Initial value to seed the exclusive scan (and is assigned to `output[0]` in *thread*\ :sub:`0`) + //! @endrst + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& output, T initial_value, ScanOp scan_op) + { + InternalBlockScan(temp_storage).ExclusiveScan(input, output, initial_value, scan_op); + } + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an exclusive prefix max scan of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain input item for each thread + //! 
int thread_data; + //! ... + //! + //! // Collectively compute the block-wide exclusive prefix max scan + //! int block_aggregate; + //! BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cuda::maximum<>{}, + //! block_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is ``0, -1, 2, -3, ..., 126, -127``. + //! The corresponding output ``thread_data`` in those threads will be ``INT_MIN, 0, 0, 2, ..., 124, 126``. + //! Furthermore the value ``126`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member ``T operator()(const T &a, const T &b)`` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to ``input``) + //! + //! @param[in] initial_value + //! @rst + //! Initial value to seed the exclusive scan (and is assigned to ``output[0]`` in *thread*\ :sub:`0`) + //! @endrst + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& output, T initial_value, ScanOp scan_op, T& block_aggregate) + { + InternalBlockScan(temp_storage).ExclusiveScan(input, output, initial_value, scan_op, block_aggregate); + } + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. The call-back functor ``block_prefix_callback_op`` is invoked by + //! the first warp in the block, and the value returned by *lane*\ :sub:`0` in that warp is used as + //! the "seed" value that logically prefixes the thread block's scan inputs. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! 
  //! - The ``block_prefix_callback_op`` functor must implement a member function ``T operator()(T block_aggregate)``.
  //!   The functor's input parameter ``block_aggregate`` is the same value also returned by the scan operation.
  //!   The functor will be invoked by the first warp of threads in the block, however only the return value from
  //!   *lane*\ :sub:`0` is applied as the block-wide prefix. Can be stateful.
  //! - Supports non-commutative scan operators.
  //! - @rowmajor
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates a single thread block that progressively
  //! computes an exclusive prefix max scan over multiple "tiles" of input using a
  //! prefix functor to maintain a running total between block-wide scans.
  //! Each tile consists of 128 integer items that are partitioned across 128 threads.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/block/block_scan.cuh>  // or equivalently <cub/cub.cuh>
  //!
  //!    // A stateful callback functor that maintains a running prefix to be applied
  //!    // during consecutive scan operations.
  //!    struct BlockPrefixCallbackOp
  //!    {
  //!        // Running prefix
  //!        int running_total;
  //!
  //!        // Constructor (device-callable: the functor is invoked from inside the kernel)
  //!        __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
  //!
  //!        // Callback operator to be entered by the first warp of threads in the block.
  //!        // Thread-0 is responsible for returning a value for seeding the block-wide scan.
  //!        __device__ int operator()(int block_aggregate)
  //!        {
  //!            int old_prefix = running_total;
  //!            running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix;
  //!            return old_prefix;
  //!        }
  //!    };
  //!
  //!    __global__ void ExampleKernel(int *d_data, int num_items, ...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!
// Initialize running total + //! BlockPrefixCallbackOp prefix_op(INT_MIN); + //! + //! // Have the block iterate over segments of items + //! for (int block_offset = 0; block_offset < num_items; block_offset += 128) + //! { + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data = d_data[block_offset]; + //! + //! // Collectively compute the block-wide exclusive prefix max scan + //! BlockScan(temp_storage).ExclusiveScan( + //! thread_data, thread_data, INT_MIN, cuda::maximum<>{}, prefix_op); + //! __syncthreads(); + //! + //! // Store scanned items to output segment + //! d_data[block_offset] = thread_data; + //! } + //! + //! Suppose the input ``d_data`` is ``0, -1, 2, -3, 4, -5, ...``. + //! The corresponding output for the first segment will be ``INT_MIN, 0, 0, 2, ..., 124, 126``. + //! The output for the second segment will be ``126, 128, 128, 130, ..., 252, 254``. + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied to + //! the logical input sequence. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& output, ScanOp scan_op, BlockPrefixCallbackOp& block_prefix_callback_op) + { + InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op, block_prefix_callback_op); + } + + //! @} end member group // Inclusive prefix sums + //! 
  //! @name Exclusive prefix scan operations (multiple data per thread)
  //! @{

  //! @rst
  //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor.
  //! Each thread contributes an array of consecutive input elements.
  //!
  //! - Supports non-commutative scan operators.
  //! - @blocked
  //! - @granularity
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates an exclusive prefix max scan of 512 integer
  //! items that are partitioned in a :ref:`blocked arrangement <flexible-data-arrangement>`
  //! across 128 threads where each thread owns 4 consecutive items.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/block/block_scan.cuh>  // or equivalently <cub/cub.cuh>
  //!
  //!    __global__ void ExampleKernel(...)
  //!    {
  //!        // Specialize BlockScan for a 1D block of 128 threads of type int
  //!        using BlockScan = cub::BlockScan<int, 128>;
  //!
  //!        // Allocate shared memory for BlockScan
  //!        __shared__ typename BlockScan::TempStorage temp_storage;
  //!
  //!        // Obtain a segment of consecutive items that are blocked across threads
  //!        int thread_data[4];
  //!        ...
  //!
  //!        // Collectively compute the block-wide exclusive prefix max scan
  //!        BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cuda::maximum<>{});
  //!
  //! Suppose the set of input ``thread_data`` across the block of threads is
  //! ``{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }``.
  //! The corresponding output ``thread_data`` in those threads will be
  //! ``{ [INT_MIN,0,0,2], [2,4,4,6], ..., [506,508,508,510] }``.
  //!
  //! @endrst
  //!
  //! @tparam ITEMS_PER_THREAD
  //!   **[inferred]** The number of consecutive items partitioned onto each thread.
  //!
  //! @tparam ScanOp
  //!   **[inferred]** Binary scan functor type having member
  //!   `T operator()(const T &a, const T &b)`
  //!
  //! @param[in] input
  //!   Calling thread's input items
  //!
  //! @param[out] output
  //!
Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] initial_value + //! @rst + //! Initial value to seed the exclusive scan (and is assigned to `output[0]` in *thread*\ :sub:`0`) + //! @endrst + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], T initial_value, ScanOp scan_op) + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op); + + // Exclusive scan in registers with prefix as seed + internal::ThreadScanExclusive(input, output, scan_op, thread_prefix); + } + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an exclusive prefix max scan of 512 integer items that are partitioned in + //! a :ref:`blocked arrangement ` across 128 threads where each thread owns + //! 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute the block-wide exclusive prefix max scan + //! 
int block_aggregate; + //! BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cuda::maximum<>{}, + //! block_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }``. + //! The corresponding output ``thread_data`` in those threads will be + //! ``{ [INT_MIN,0,0,2], [2,4,4,6], ..., [506,508,508,510] }``. + //! Furthermore the value ``510`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] initial_value + //! @rst + //! Initial value to seed the exclusive scan (and is assigned to `output[0]` in *thread*\ :sub:`0`) + //! @endrst + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan( + T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], T initial_value, ScanOp scan_op, T& block_aggregate) + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op, block_aggregate); + + // Exclusive scan in registers with prefix as seed + internal::ThreadScanExclusive(input, output, scan_op, thread_prefix); + } + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. + //! 
  //! The call-back functor ``block_prefix_callback_op`` is invoked by the first warp in the block, and the value
  //! returned by *lane*\ :sub:`0` in that warp is used as the "seed" value that logically prefixes the thread
  //! block's scan inputs. Also provides every thread with the block-wide ``block_aggregate`` of all inputs.
  //!
  //! - The ``block_prefix_callback_op`` functor must implement a member function
  //!   ``T operator()(T block_aggregate)``. The functor's input parameter ``block_aggregate``
  //!   is the same value also returned by the scan operation. The functor will be invoked by the
  //!   first warp of threads in the block, however only the return value from
  //!   *lane*\ :sub:`0` is applied as the block-wide prefix. Can be stateful.
  //! - Supports non-commutative scan operators.
  //! - @blocked
  //! - @granularity
  //! - @smemreuse
  //!
  //! Snippet
  //! +++++++
  //!
  //! The code snippet below illustrates a single thread block that progressively
  //! computes an exclusive prefix max scan over multiple "tiles" of input using a
  //! prefix functor to maintain a running total between block-wide scans. Each tile consists
  //! of 128 integer items that are partitioned across 128 threads.
  //!
  //! .. code-block:: c++
  //!
  //!    #include <cub/block/block_scan.cuh>  // or equivalently <cub/cub.cuh>
  //!
  //!    // A stateful callback functor that maintains a running prefix to be applied
  //!    // during consecutive scan operations.
  //!    struct BlockPrefixCallbackOp
  //!    {
  //!        // Running prefix
  //!        int running_total;
  //!
  //!        // Constructor (device-callable: the functor is invoked from inside the kernel)
  //!        __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
  //!
  //!        // Callback operator to be entered by the first warp of threads in the block.
  //!        // Thread-0 is responsible for returning a value for seeding the block-wide scan.
  //!        __device__ int operator()(int block_aggregate)
  //!        {
  //!            int old_prefix = running_total;
  //!            running_total = (block_aggregate > old_prefix) ?
block_aggregate : old_prefix; + //! return old_prefix; + //! } + //! }; + //! + //! __global__ void ExampleKernel(int *d_data, int num_items, ...) + //! { + //! // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread + //! using BlockLoad = cub::BlockLoad ; + //! using BlockStore = cub::BlockStore ; + //! using BlockScan = cub::BlockScan ; + //! + //! // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan + //! __shared__ union { + //! typename BlockLoad::TempStorage load; + //! typename BlockScan::TempStorage scan; + //! typename BlockStore::TempStorage store; + //! } temp_storage; + //! + //! // Initialize running total + //! BlockPrefixCallbackOp prefix_op(0); + //! + //! // Have the block iterate over segments of items + //! for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) + //! { + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); + //! __syncthreads(); + //! + //! // Collectively compute the block-wide exclusive prefix max scan + //! BlockScan(temp_storage.scan).ExclusiveScan( + //! thread_data, thread_data, INT_MIN, cuda::maximum<>{}, prefix_op); + //! __syncthreads(); + //! + //! // Store scanned items to output segment + //! BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); + //! __syncthreads(); + //! } + //! + //! Suppose the input ``d_data`` is ``0, -1, 2, -3, 4, -5, ...``. + //! The corresponding output for the first segment will be + //! ``INT_MIN, 0, 0, 2, 2, 4, ..., 508, 510``. + //! The output for the second segment will be + //! ``510, 512, 512, 514, 514, 516, ..., 1020, 1022``. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! 
**[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied to + //! the logical input sequence. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan( + T (&input)[ITEMS_PER_THREAD], + T (&output)[ITEMS_PER_THREAD], + ScanOp scan_op, + BlockPrefixCallbackOp& block_prefix_callback_op) + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_prefix_callback_op); + + // Exclusive scan in registers with prefix as seed + internal::ThreadScanExclusive(input, output, scan_op, thread_prefix); + } + + //! @} end member group +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document no-initial-value scans + + //! @name Exclusive prefix scan operations (no initial value, single datum per thread) + //! @{ + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. + //! With no initial value, the output computed for *thread*\ :sub:`0` is undefined. + //! + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! 
Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& output, ScanOp scan_op) + { + InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op); + } + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. Also provides every thread with the block-wide + //! ``block_aggregate`` of all inputs. With no initial value, the output computed for + //! *thread*\ :sub:`0` is undefined. + //! + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& output, ScanOp scan_op, T& block_aggregate) + { + InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op, block_aggregate); + } + + //! @} end member group + //! @name Exclusive prefix scan operations (no initial value, multiple data per thread) + //! @{ + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. With no initial value, the + //! output computed for *thread*\ :sub:`0` is undefined. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! 
**[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], ScanOp scan_op) + { + // Reduce consecutive thread items in registers + T thread_partial = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_partial, thread_partial, scan_op); + + // Exclusive scan in registers with prefix + internal::ThreadScanExclusive(input, output, scan_op, thread_partial, (linear_tid != 0)); + } + + //! @rst + //! Computes an exclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. Also provides every thread + //! with the block-wide ``block_aggregate`` of all inputs. + //! With no initial value, the output computed for *thread*\ :sub:`0` is undefined. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! 
block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], ScanOp scan_op, T& block_aggregate) + { + // Reduce consecutive thread items in registers + T thread_partial = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_partial, thread_partial, scan_op, block_aggregate); + + // Exclusive scan in registers with prefix + internal::ThreadScanExclusive(input, output, scan_op, thread_partial, (linear_tid != 0)); + } + + //! @} end member group +#endif // _CCCL_DOXYGEN_INVOKED // Do not document no-initial-value scans + + //! @name Inclusive prefix sum operations + //! @{ + + //! @rst + //! Computes an inclusive block-wide prefix scan using addition (+) + //! as the scan operator. Each thread contributes one input element. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix sum of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain input item for each thread + //! int thread_data; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix sum + //! BlockScan(temp_storage).InclusiveSum(thread_data, thread_data); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is ``1, 1, ..., 1``. + //! The corresponding output ``thread_data`` in those threads will be ``1, 2, ..., 128``. + //! + //! @endrst + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! 
Calling thread's output item (may be aliased to `input`) + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum(T input, T& output) + { + InclusiveScan(input, output, ::cuda::std::plus<>{}); + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. + //! Each thread contributes one input element. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix sum of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain input item for each thread + //! int thread_data; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix sum + //! int block_aggregate; + //! BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, block_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is ``1, 1, ..., 1``. + //! The corresponding output ``thread_data`` in those threads will be ``1, 2, ..., 128``. + //! Furthermore the value ``128`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[out] block_aggregate + //! 
block-wide aggregate reduction of input items + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum(T input, T& output, T& block_aggregate) + { + InclusiveScan(input, output, ::cuda::std::plus<>{}, block_aggregate); + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. + //! Each thread contributes one input element. Instead of using 0 as the block-wide prefix, the call-back functor + //! ``block_prefix_callback_op`` is invoked by the first warp in the block, and the value returned by + //! *lane*\ :sub:`0` in that warp is used as the "seed" value that logically prefixes the thread block's + //! scan inputs. Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - The ``block_prefix_callback_op`` functor must implement a member function + //! ``T operator()(T block_aggregate)``. The functor's input parameter + //! ``block_aggregate`` is the same value also returned by the scan operation. + //! The functor will be invoked by the first warp of threads in the block, + //! however only the return value from *lane*\ :sub:`0` is applied + //! as the block-wide prefix. Can be stateful. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a single thread block that progressively + //! computes an inclusive prefix sum over multiple "tiles" of input using a + //! prefix functor to maintain a running total between block-wide scans. + //! Each tile consists of 128 integer items that are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // A stateful callback functor that maintains a running prefix to be applied + //! // during consecutive scan operations. + //! struct BlockPrefixCallbackOp + //! { + //! // Running prefix + //! int running_total; + //! + //! // Constructor + //! 
__device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} + //! + //! // Callback operator to be entered by the first warp of threads in the block. + //! // Thread-0 is responsible for returning a value for seeding the block-wide scan. + //! __device__ int operator()(int block_aggregate) + //! { + //! int old_prefix = running_total; + //! running_total += block_aggregate; + //! return old_prefix; + //! } + //! }; + //! + //! __global__ void ExampleKernel(int *d_data, int num_items, ...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Initialize running total + //! BlockPrefixCallbackOp prefix_op(0); + //! + //! // Have the block iterate over segments of items + //! for (int block_offset = 0; block_offset < num_items; block_offset += 128) + //! { + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data = d_data[block_offset]; + //! + //! // Collectively compute the block-wide inclusive prefix sum + //! BlockScan(temp_storage).InclusiveSum( + //! thread_data, thread_data, prefix_op); + //! __syncthreads(); + //! + //! // Store scanned items to output segment + //! d_data[block_offset] = thread_data; + //! } + //! + //! Suppose the input ``d_data`` is ``1, 1, 1, 1, 1, 1, 1, 1, ...``. + //! The corresponding output for the first segment will be ``1, 2, ..., 128``. + //! The output for the second segment will be ``129, 130, ..., 256``. + //! + //! @endrst + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! 
*warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied + //! to the logical input sequence. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum(T input, T& output, BlockPrefixCallbackOp& block_prefix_callback_op) + { + InclusiveScan(input, output, ::cuda::std::plus<>{}, block_prefix_callback_op); + } + + //! @} end member group + //! @name Inclusive prefix sum operations (multiple data per thread) + //! @{ + + //! @rst + //! Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. + //! Each thread contributes an array of consecutive input elements. + //! + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix sum of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix sum + //! BlockScan(temp_storage).InclusiveSum(thread_data, thread_data); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }``. The corresponding output + //! ``thread_data`` in those threads will be ``{ [1,2,3,4], [5,6,7,8], ..., [509,510,511,512] }``. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! 
@param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD]) + { + if (ITEMS_PER_THREAD == 1) + { + InclusiveSum(input[0], output[0]); + } + else + { + // Reduce consecutive thread items in registers + ::cuda::std::plus<> scan_op; + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveSum(thread_prefix, thread_prefix); + + // Inclusive scan in registers with prefix as seed + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); + } + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. + //! Each thread contributes an array of consecutive input elements. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix sum of 512 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix sum + //! int block_aggregate; + //! BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, block_aggregate); + //! + //! 
Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }``. The + //! corresponding output ``thread_data`` in those threads will be + //! ``{ [1,2,3,4], [5,6,7,8], ..., [509,510,511,512] }``. + //! Furthermore the value ``512`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[out] block_aggregate + //! block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveSum(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], T& block_aggregate) + { + if (ITEMS_PER_THREAD == 1) + { + InclusiveSum(input[0], output[0], block_aggregate); + } + else + { + // Reduce consecutive thread items in registers + ::cuda::std::plus<> scan_op; + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveSum(thread_prefix, thread_prefix, block_aggregate); + + // Inclusive scan in registers with prefix as seed + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); + } + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. + //! Each thread contributes an array of consecutive input elements. + //! Instead of using 0 as the block-wide prefix, the call-back functor ``block_prefix_callback_op`` is invoked by + //! the first warp in the block, and the value returned by *lane*\ :sub:`0` in that warp is used as the "seed" + //! value that logically prefixes the thread block's scan inputs. Also provides every thread with the + //! block-wide ``block_aggregate`` of all inputs. + //! + //! 
- The ``block_prefix_callback_op`` functor must implement a member function + //! ``T operator()(T block_aggregate)``. The functor's input parameter + //! ``block_aggregate`` is the same value also returned by the scan operation. + //! The functor will be invoked by the first warp of threads in the block, + //! however only the return value from *lane*\ :sub:`0` is applied + //! as the block-wide prefix. Can be stateful. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a single thread block that progressively + //! computes an inclusive prefix sum over multiple "tiles" of input using a + //! prefix functor to maintain a running total between block-wide scans. Each tile consists + //! of 512 integer items that are partitioned in a :ref:`blocked arrangement ` + //! across 128 threads where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // A stateful callback functor that maintains a running prefix to be applied + //! // during consecutive scan operations. + //! struct BlockPrefixCallbackOp + //! { + //! // Running prefix + //! int running_total; + //! + //! // Constructor + //! __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} + //! + //! // Callback operator to be entered by the first warp of threads in the block. + //! // Thread-0 is responsible for returning a value for seeding the block-wide scan. + //! __device__ int operator()(int block_aggregate) + //! { + //! int old_prefix = running_total; + //! running_total += block_aggregate; + //! return old_prefix; + //! } + //! }; + //! + //! __global__ void ExampleKernel(int *d_data, int num_items, ...) + //! { + //! // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread + //! using BlockLoad = cub::BlockLoad ; + //! using BlockStore = cub::BlockStore ; + //! 
using BlockScan = cub::BlockScan ; + //! + //! // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan + //! __shared__ union { + //! typename BlockLoad::TempStorage load; + //! typename BlockScan::TempStorage scan; + //! typename BlockStore::TempStorage store; + //! } temp_storage; + //! + //! // Initialize running total + //! BlockPrefixCallbackOp prefix_op(0); + //! + //! // Have the block iterate over segments of items + //! for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) + //! { + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); + //! __syncthreads(); + //! + //! // Collectively compute the block-wide inclusive prefix sum + //! BlockScan(temp_storage.scan).InclusiveSum( + //! thread_data, thread_data, prefix_op); + //! __syncthreads(); + //! + //! // Store scanned items to output segment + //! BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); + //! __syncthreads(); + //! } + //! + //! Suppose the input ``d_data`` is ``1, 1, 1, 1, 1, 1, 1, 1, ...``. + //! The corresponding output for the first segment will be + //! ``1, 2, 3, 4, ..., 511, 512``. The output for the second segment will be + //! ``513, 514, 515, 516, ..., 1023, 1024``. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied to the + //! logical input sequence. + //! 
@endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveSum( + T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], BlockPrefixCallbackOp& block_prefix_callback_op) + { + if (ITEMS_PER_THREAD == 1) + { + InclusiveSum(input[0], output[0], block_prefix_callback_op); + } + else + { + // Reduce consecutive thread items in registers + ::cuda::std::plus<> scan_op; + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveSum(thread_prefix, thread_prefix, block_prefix_callback_op); + + // Inclusive scan in registers with prefix as seed + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix); + } + } + + //! @} end member group + //! @name Inclusive prefix scan operations + //! @{ + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. + //! + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix max scan of 128 integer items that + //! are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain input item for each thread + //! int thread_data; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix max scan + //! BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``0, -1, 2, -3, ..., 126, -127``. The corresponding output ``thread_data`` + //! 
in those threads will be ``0, 0, 2, 2, ..., 126, 126``. + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& output, ScanOp scan_op) + { + InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op); + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. Also provides every thread with the block-wide + //! ``block_aggregate`` of all inputs. + //! + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix max scan of 128 + //! integer items that are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain input item for each thread + //! int thread_data; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix max scan + //! int block_aggregate; + //! BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cuda::maximum<>{}, block_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``0, -1, 2, -3, ..., 126, -127``. The corresponding output ``thread_data`` + //! in those threads will be ``0, 0, 2, 2, ..., 126, 126``. 
Furthermore the value + //! ``126`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! Block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& output, ScanOp scan_op, T& block_aggregate) + { + InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op, block_aggregate); + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes one input element. The call-back functor ``block_prefix_callback_op`` + //! is invoked by the first warp in the block, and the value returned by *lane*\ :sub:`0` in that warp is used as + //! the "seed" value that logically prefixes the thread block's scan inputs. + //! Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - The ``block_prefix_callback_op`` functor must implement a member function + //! ``T operator()(T block_aggregate)``. The functor's input parameter + //! ``block_aggregate`` is the same value also returned by the scan operation. + //! The functor will be invoked by the first warp of threads in the block, + //! however only the return value from *lane*\ :sub:`0` is applied + //! as the block-wide prefix. Can be stateful. + //! - Supports non-commutative scan operators. + //! - @rowmajor + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a single thread block that progressively + //! computes an inclusive prefix max scan over multiple "tiles" of input using a + //! 
prefix functor to maintain a running total between block-wide scans. Each tile consists + //! of 128 integer items that are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // A stateful callback functor that maintains a running prefix to be applied + //! // during consecutive scan operations. + //! struct BlockPrefixCallbackOp + //! { + //! // Running prefix + //! int running_total; + //! + //! // Constructor + //! __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} + //! + //! // Callback operator to be entered by the first warp of threads in the block. + //! // Thread-0 is responsible for returning a value for seeding the block-wide scan. + //! __device__ int operator()(int block_aggregate) + //! { + //! int old_prefix = running_total; + //! running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; + //! return old_prefix; + //! } + //! }; + //! + //! __global__ void ExampleKernel(int *d_data, int num_items, ...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Initialize running total + //! BlockPrefixCallbackOp prefix_op(INT_MIN); + //! + //! // Have the block iterate over segments of items + //! for (int block_offset = 0; block_offset < num_items; block_offset += 128) + //! { + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data = d_data[block_offset]; + //! + //! // Collectively compute the block-wide inclusive prefix max scan + //! BlockScan(temp_storage).InclusiveScan( + //! thread_data, thread_data, cuda::maximum<>{}, prefix_op); + //! __syncthreads(); + //! + //! // Store scanned items to output segment + //! d_data[block_offset] = thread_data; + //! } + //! + //! 
Suppose the input ``d_data`` is ``0, -1, 2, -3, 4, -5, ...``. + //! The corresponding output for the first segment will be + //! ``0, 0, 2, 2, ..., 126, 126``. The output for the second segment + //! will be ``128, 128, 130, 130, ..., 254, 254``. + //! + //! @endrst + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input item + //! + //! @param[out] output + //! Calling thread's output item (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied to + //! the logical input sequence. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T input, T& output, ScanOp scan_op, BlockPrefixCallbackOp& block_prefix_callback_op) + { + InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op, block_prefix_callback_op); + } + + //! @} end member group + //! @name Inclusive prefix scan operations (multiple data per thread) + //! @{ + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix max scan of 512 integer items that + //! are partitioned in a [blocked arrangement](../index.html#sec5sec3) across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! 
__global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix max scan + //! BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cuda::maximum<>{}); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }``. + //! The corresponding output ``thread_data`` in those threads will be + //! ``{ [0,0,2,2], [4,4,6,6], ..., [508,508,510,510] }``. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], ScanOp scan_op) + { + if (ITEMS_PER_THREAD == 1) + { + InclusiveScan(input[0], output[0], scan_op); + } + else + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, scan_op); + + // Inclusive scan in registers with prefix as seed (first thread does not seed) + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); + } + } + + //! @rst + //! 
Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix max scan of 128 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 64 threads + //! where each thread owns 2 consecutive items. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_scan_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin inclusive-scan-array-init-value + //! :end-before: example-end inclusive-scan-array-init-value + //! + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] initial_value + //! Initial value to seed the inclusive scan (uniform across block) + //! + //! @param[in] scan_op + //! Binary scan functor + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], T initial_value, ScanOp scan_op) + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op); + + // Exclusive scan in registers with prefix as seed + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix); + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! 
Each thread contributes an array of consecutive input elements. Also provides every thread + //! with the block-wide ``block_aggregate`` of all inputs. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix max scan of 512 integer items that + //! are partitioned in a [blocked arrangement](../index.html#sec5sec3) across 128 threads + //! where each thread owns 4 consecutive items. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(...) + //! { + //! // Specialize BlockScan for a 1D block of 128 threads of type int + //! using BlockScan = cub::BlockScan; + //! + //! // Allocate shared memory for BlockScan + //! __shared__ typename BlockScan::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Collectively compute the block-wide inclusive prefix max scan + //! int block_aggregate; + //! BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cuda::maximum<>{}, block_aggregate); + //! + //! Suppose the set of input ``thread_data`` across the block of threads is + //! ``{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }``. + //! The corresponding output ``thread_data`` in those threads will be + //! ``{ [0,0,2,2], [4,4,6,6], ..., [508,508,510,510] }``. + //! Furthermore the value ``510`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! 
Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! Block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], ScanOp scan_op, T& block_aggregate) + { + if (ITEMS_PER_THREAD == 1) + { + InclusiveScan(input[0], output[0], scan_op, block_aggregate); + } + else + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan (with no initial value) + ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_aggregate); + + // Inclusive scan in registers with prefix as seed (first thread does not seed) + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); + } + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. Also provides every thread + //! with the block-wide ``block_aggregate`` of all inputs. + //! + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates an inclusive prefix max scan of 128 integer items that + //! are partitioned in a :ref:`blocked arrangement ` across 64 threads + //! where each thread owns 2 consecutive items. + //! + //! .. literalinclude:: ../../../cub/test/catch2_test_block_scan_api.cu + //! :language: c++ + //! :dedent: + //! :start-after: example-begin inclusive-scan-array-aggregate-init-value + //! :end-before: example-end inclusive-scan-array-aggregate-init-value + //! + //! The value ``126`` will be stored in ``block_aggregate`` for all threads. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! 
**[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] initial_value + //! Initial value to seed the inclusive scan (uniform across block). It is not taken + //! into account for block_aggregate. + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[out] block_aggregate + //! Block-wide aggregate reduction of input items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan( + T (&input)[ITEMS_PER_THREAD], T (&output)[ITEMS_PER_THREAD], T initial_value, ScanOp scan_op, T& block_aggregate) + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op, block_aggregate); + + // Exclusive scan in registers with prefix as seed + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix); + } + + //! @rst + //! Computes an inclusive block-wide prefix scan using the specified binary ``scan_op`` functor. + //! Each thread contributes an array of consecutive input elements. + //! The call-back functor ``block_prefix_callback_op`` is invoked by the first warp in the block, + //! and the value returned by *lane*\ :sub:`0` in that warp is used as the "seed" value that logically prefixes the + //! thread block's scan inputs. Also provides every thread with the block-wide ``block_aggregate`` of all inputs. + //! + //! - The ``block_prefix_callback_op`` functor must implement a member function ``T operator()(T block_aggregate)``. + //! The functor's input parameter ``block_aggregate`` is the same value also returned by the scan operation. + //! 
The functor will be invoked by the first warp of threads in the block, however only the return value + //! from *lane*\ :sub:`0` is applied as the block-wide prefix. Can be stateful. + //! - Supports non-commutative scan operators. + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates a single thread block that progressively + //! computes an inclusive prefix max scan over multiple "tiles" of input using a + //! prefix functor to maintain a running total between block-wide scans. Each tile consists + //! of 128 integer items that are partitioned across 128 threads. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! // A stateful callback functor that maintains a running prefix to be applied + //! // during consecutive scan operations. + //! struct BlockPrefixCallbackOp + //! { + //! // Running prefix + //! int running_total; + //! + //! // Constructor + //! __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} + //! + //! // Callback operator to be entered by the first warp of threads in the block. + //! // Thread-0 is responsible for returning a value for seeding the block-wide scan. + //! __device__ int operator()(int block_aggregate) + //! { + //! int old_prefix = running_total; + //! running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; + //! return old_prefix; + //! } + //! }; + //! + //! __global__ void ExampleKernel(int *d_data, int num_items, ...) + //! { + //! // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread + //! using BlockLoad = cub::BlockLoad ; + //! using BlockStore = cub::BlockStore ; + //! using BlockScan = cub::BlockScan ; + //! + //! // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan + //! __shared__ union { + //! typename BlockLoad::TempStorage load; + //! typename BlockScan::TempStorage scan; + //! 
typename BlockStore::TempStorage store; + //! } temp_storage; + //! + //! // Initialize running total + //! BlockPrefixCallbackOp prefix_op(0); + //! + //! // Have the block iterate over segments of items + //! for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) + //! { + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); + //! __syncthreads(); + //! + //! // Collectively compute the block-wide inclusive prefix max scan + //! BlockScan(temp_storage.scan).InclusiveScan( + //! thread_data, thread_data, cuda::maximum<>{}, prefix_op); + //! __syncthreads(); + //! + //! // Store scanned items to output segment + //! BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); + //! __syncthreads(); + //! } + //! + //! Suppose the input ``d_data`` is ``0, -1, 2, -3, 4, -5, ...``. + //! The corresponding output for the first segment will be + //! ``0, 0, 2, 2, 4, 4, ..., 510, 510``. The output for the second + //! segment will be ``512, 512, 514, 514, 516, 516, ..., 1022, 1022``. + //! + //! @endrst + //! + //! @tparam ITEMS_PER_THREAD + //! **[inferred]** The number of consecutive items partitioned onto each thread. + //! + //! @tparam ScanOp + //! **[inferred]** Binary scan functor type having member `T operator()(const T &a, const T &b)` + //! + //! @tparam BlockPrefixCallbackOp + //! **[inferred]** Call-back functor type having member `T operator()(T block_aggregate)` + //! + //! @param[in] input + //! Calling thread's input items + //! + //! @param[out] output + //! Calling thread's output items (may be aliased to `input`) + //! + //! @param[in] scan_op + //! Binary scan functor + //! + //! @param[in,out] block_prefix_callback_op + //! @rst + //! *warp*\ :sub:`0` only call-back functor for specifying a block-wide prefix to be applied to + //! the logical input sequence. + //! 
@endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan( + T (&input)[ITEMS_PER_THREAD], + T (&output)[ITEMS_PER_THREAD], + ScanOp scan_op, + BlockPrefixCallbackOp& block_prefix_callback_op) + { + if (ITEMS_PER_THREAD == 1) + { + InclusiveScan(input[0], output[0], scan_op, block_prefix_callback_op); + } + else + { + // Reduce consecutive thread items in registers + T thread_prefix = cub::ThreadReduce(input, scan_op); + + // Exclusive thread block-scan + ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_prefix_callback_op); + + // Inclusive scan in registers with prefix as seed + internal::ThreadScanInclusive(input, output, scan_op, thread_prefix); + } + } + + //! @} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_shuffle.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_shuffle.cuh new file mode 100644 index 0000000000000000000000000000000000000000..93d8715c63b03d4168c53fa11ce22b6b2d6e52de --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_shuffle.cuh @@ -0,0 +1,348 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! The cub::BlockShuffle class provides :ref:`collective ` methods for shuffling data +//! partitioned across a CUDA thread block. + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! The BlockShuffle class provides :ref:`collective ` +//! methods for shuffling data partitioned across a CUDA thread block. +//! +//! Overview +//! ++++++++++++++++ +//! +//! It is commonplace for blocks of threads to rearrange data items between threads. +//! The BlockShuffle abstraction allows threads to efficiently shift items either +//! (a) up to their successor or +//! (b) down to their predecessor +//! +//! @endrst +//! 
+//! @tparam T +//! The data type to be exchanged. +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused +template +class BlockShuffle +{ +private: + enum + { + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + + LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(0), + WARP_THREADS = 1 << LOG_WARP_THREADS, + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + }; + + /// Shared memory storage layout type (last element from each thread's input) + using _TempStorage = T[BLOCK_THREADS]; + +public: + /// \smemstorage{BlockShuffle} + struct TempStorage : Uninitialized<_TempStorage> + {}; + +private: + /// Shared storage reference + _TempStorage& temp_storage; + + /// Linear thread-id + unsigned int linear_tid; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + +public: + //! @name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE BlockShuffle() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation + * as temporary storage. + * + * @param[in] temp_storage + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockShuffle(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! 
@name Shuffle movement + //! @{ + + //! @rst + //! + //! Each *thread*\ :sub:`i` obtains the ``input`` provided by *thread*\ :sub:`i + distance`. + //! The offset ``distance`` may be negative. + //! + //! - @smemreuse + //! + //! @endrst + //! + //! @param[in] input + //! @rst + //! The input item from the calling thread (*thread*\ :sub:`i`) + //! @endrst + //! + //! @param[out] output + //! @rst + //! The ``input`` item from the successor (or predecessor) thread + //! *thread*\ :sub:`i + distance` (may be aliased to ``input``). + //! This value is only updated for for *thread*\ :sub:`i` when + //! ``0 <= (i + distance) < BLOCK_THREADS - 1`` + //! @endrst + //! + //! @param[in] distance + //! Offset distance (may be negative) + _CCCL_DEVICE _CCCL_FORCEINLINE void Offset(T input, T& output, int distance = 1) + { + temp_storage[linear_tid] = input; + + __syncthreads(); + + const int offset_tid = static_cast(linear_tid) + distance; + if ((offset_tid >= 0) && (offset_tid < BLOCK_THREADS)) + { + output = temp_storage[static_cast(offset_tid)]; + } + } + + //! @rst + //! Each *thread*\ :sub:`i` obtains the ``input`` provided by *thread*\ :sub:`i + distance`. + //! + //! - @smemreuse + //! + //! @endrst + //! + //! @param[in] input + //! The calling thread's input item + //! + //! @param[out] output + //! @rst + //! The ``input`` item from thread + //! *thread*\ :sub:`(i + distance>) % BLOCK_THREADS` (may be aliased to ``input``). + //! This value is not updated for *thread*\ :sub:`BLOCK_THREADS - 1`. + //! @endrst + //! + //! @param[in] distance + //! Offset distance (`0 < distance < `BLOCK_THREADS`) + _CCCL_DEVICE _CCCL_FORCEINLINE void Rotate(T input, T& output, unsigned int distance = 1) + { + temp_storage[linear_tid] = input; + + __syncthreads(); + + unsigned int offset = linear_tid + distance; + if (offset >= BLOCK_THREADS) + { + offset -= BLOCK_THREADS; + } + + output = temp_storage[offset]; + } + + //! @rst + //! 
The thread block rotates its :ref:`blocked arrangement ` of + //! ``input`` items, shifting it up by one item. + //! + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! @endrst + //! + //! @param[in] input + //! The calling thread's input items + //! + //! @param[out] prev + //! @rst + //! The corresponding predecessor items (may be aliased to ``input``). + //! The item ``prev[0]`` is not updated for *thread*\ :sub:`0`. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Up(T (&input)[ITEMS_PER_THREAD], T (&prev)[ITEMS_PER_THREAD]) + { + temp_storage[linear_tid] = input[ITEMS_PER_THREAD - 1]; + + __syncthreads(); + +#pragma unroll + for (int ITEM = ITEMS_PER_THREAD - 1; ITEM > 0; --ITEM) + { + prev[ITEM] = input[ITEM - 1]; + } + + if (linear_tid > 0) + { + prev[0] = temp_storage[linear_tid - 1]; + } + } + + //! @rst + //! The thread block rotates its :ref:`blocked arrangement ` + //! of ``input`` items, shifting it up by one item. All threads receive the ``input`` provided by + //! *thread*\ :sub:`BLOCK_THREADS - 1`. + //! + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! @endrst + //! + //! @param[in] input + //! The calling thread's input items + //! + //! @param[out] prev + //! @rst + //! The corresponding predecessor items (may be aliased to ``input``). + //! The item ``prev[0]`` is not updated for *thread*\ :sub:`0`. + //! @endrst + //! + //! @param[out] block_suffix + //! @rst + //! The item ``input[ITEMS_PER_THREAD - 1]`` from *thread*\ :sub:`BLOCK_THREADS - 1`, provided to all threads + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Up(T (&input)[ITEMS_PER_THREAD], T (&prev)[ITEMS_PER_THREAD], T& block_suffix) + { + Up(input, prev); + block_suffix = temp_storage[BLOCK_THREADS - 1]; + } + + //! @rst + //! The thread block rotates its :ref:`blocked arrangement ` + //! of ``input`` items, shifting it down by one item. + //! + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! 
@endrst + //! + //! @param[in] input + //! The calling thread's input items + //! + //! @param[out] prev + //! @rst + //! The corresponding predecessor items (may be aliased to ``input``). + //! The value ``prev[0]`` is not updated for *thread*\ :sub:`BLOCK_THREADS - 1`. + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Down(T (&input)[ITEMS_PER_THREAD], T (&prev)[ITEMS_PER_THREAD]) + { + temp_storage[linear_tid] = input[0]; + + __syncthreads(); + +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD - 1; ITEM++) + { + prev[ITEM] = input[ITEM + 1]; + } + + if (linear_tid < BLOCK_THREADS - 1) + { + prev[ITEMS_PER_THREAD - 1] = temp_storage[linear_tid + 1]; + } + } + + //! @rst + //! The thread block rotates its :ref:`blocked arrangement ` of input items, + //! shifting it down by one item. All threads receive ``input[0]`` provided by *thread*\ :sub:`0`. + //! + //! - @blocked + //! - @granularity + //! - @smemreuse + //! + //! @endrst + //! + //! @param[in] input + //! The calling thread's input items + //! + //! @param[out] prev + //! @rst + //! The corresponding predecessor items (may be aliased to ``input``). + //! The value ``prev[0]`` is not updated for *thread*\ :sub:`BLOCK_THREADS - 1`. + //! @endrst + //! + //! @param[out] block_prefix + //! @rst + //! The item ``input[0]`` from *thread*\ :sub:`0`, provided to all threads + //! @endrst + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Down(T (&input)[ITEMS_PER_THREAD], T (&prev)[ITEMS_PER_THREAD], T& block_prefix) + { + Down(input, prev); + block_prefix = temp_storage[0]; + } + + //! 
@} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_store.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_store.cuh new file mode 100644 index 0000000000000000000000000000000000000000..e207a1d76c19ab4a2b0faa7903c1edf7eb158d83 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/block_store.cuh @@ -0,0 +1,1240 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! Operations for writing linear segments of data from the CUDA thread block + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +CUB_NAMESPACE_BEGIN + +//! @name Blocked arrangement I/O (direct) +//! @{ + +//! @rst +//! Store a blocked arrangement of items across a thread block into a linear segment of items +//! +//! @blocked +//! +//! @endrst +//! +//! @tparam T +//! **[inferred]** The data type to store. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @tparam OutputIteratorT +//! **[inferred]** The random-access iterator type for output @iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread +//! (e.g., ``(threadIdx.y * blockDim.x) + linear_tid`` for 2D thread blocks) +//! +//! @param[in] block_itr +//! The thread block's base output iterator for storing to +//! +//! @param[in] items +//! 
Data to store +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +StoreDirectBlocked(int linear_tid, OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) +{ + OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); + +// Store directly in thread-blocked order +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + thread_itr[ITEM] = items[ITEM]; + } +} + +//! @rst +//! Store a blocked arrangement of items across a +//! thread block into a linear segment of items, guarded by range +//! +//! @blocked +//! +//! @endrst +//! +//! @tparam T +//! **[inferred]** The data type to store. +//! +//! @tparam ITEMS_PER_THREAD +//! **[inferred]** The number of consecutive items partitioned onto each thread. +//! +//! @tparam OutputIteratorT +//! **[inferred]** The random-access iterator type for output @iterator. +//! +//! @param[in] linear_tid +//! A suitable 1D thread-identifier for the calling thread +//! (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D thread blocks) +//! +//! @param[in] block_itr +//! The thread block's base output iterator for storing to +//! +//! @param[in] items +//! Data to store +//! +//! @param[in] valid_items +//! Number of valid items to write +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +StoreDirectBlocked(int linear_tid, OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) +{ + OutputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); + +// Store directly in thread-blocked order +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + if (ITEM + (linear_tid * ITEMS_PER_THREAD) < valid_items) + { + thread_itr[ITEM] = items[ITEM]; + } + } +} + +//! @rst +//! Store a blocked arrangement of items across a +//! thread block into a linear segment of items. +//! +//! @blocked +//! +//! The output offset (``block_ptr + block_offset``) must be quad-item aligned, +//! which is the default starting offset returned by ``cudaMalloc()`` +//! +//! 
The following conditions will prevent vectorization and storing will
//! fall back to cub::BLOCK_STORE_DIRECT:
//!
//!   - ``ITEMS_PER_THREAD`` is odd
//!   - The data type ``T`` is not a built-in primitive or CUDA vector type
//!     (e.g., ``short``, ``int2``, ``double``, ``float2``, etc.)
//!
//! @endrst
//!
//! @tparam T
//!   **[inferred]** The data type to store.
//!
//! @tparam ITEMS_PER_THREAD
//!   **[inferred]** The number of consecutive items partitioned onto each thread.
//!
//! @param[in] linear_tid
//!   A suitable 1D thread-identifier for the calling thread
//!   (e.g., ``(threadIdx.y * blockDim.x) + linear_tid`` for 2D thread blocks)
//!
//! @param[in] block_ptr
//!   Input pointer for storing from
//!
//! @param[in] items
//!   Data to store
// NOTE(review): template/cast/alias argument lists restored per upstream CUB;
// they were lost to angle-bracket stripping in this vendored copy.
template <typename T, int ITEMS_PER_THREAD>
_CCCL_DEVICE _CCCL_FORCEINLINE void
StoreDirectBlockedVectorized(int linear_tid, T* block_ptr, T (&items)[ITEMS_PER_THREAD])
{
  enum
  {
    // Maximum CUDA vector size is 4 elements
    MAX_VEC_SIZE = CUB_MIN(4, ITEMS_PER_THREAD),

    // Vector size must be a power of two and an even divisor of the items per thread
    VEC_SIZE =
      ((((MAX_VEC_SIZE - 1) & MAX_VEC_SIZE) == 0) && ((ITEMS_PER_THREAD % MAX_VEC_SIZE) == 0)) ? MAX_VEC_SIZE : 1,

    VECTORS_PER_THREAD = ITEMS_PER_THREAD / VEC_SIZE,
  };

  // Vector type
  using Vector = typename CubVector<T, VEC_SIZE>::Type;

  // Alias global pointer
  Vector* block_ptr_vectors = reinterpret_cast<Vector*>(const_cast<T*>(block_ptr));

  // Alias pointers (use "raw" array here which should get optimized away to prevent conservative PTXAS lmem spilling)
  Vector raw_vector[VECTORS_PER_THREAD];
  T* raw_items = reinterpret_cast<T*>(raw_vector);

// Copy items into the vector-aliased staging array
#pragma unroll
  for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
  {
    raw_items[ITEM] = items[ITEM];
  }

  // Direct-store using vector types
  StoreDirectBlocked(linear_tid, block_ptr_vectors, raw_vector);
}

//! @} end member group
//! @name Striped arrangement I/O (direct)
//! @{

//! @rst
//!
Store a striped arrangement of data across the thread block into a
//! linear segment of items.
//!
//! @striped
//!
//! @endrst
//!
//! @tparam BLOCK_THREADS
//!   The thread block size in threads
//!
//! @tparam T
//!   **[inferred]** The data type to store.
//!
//! @tparam ITEMS_PER_THREAD
//!   **[inferred]** The number of consecutive items partitioned onto each thread.
//!
//! @tparam OutputIteratorT
//!   **[inferred]** The random-access iterator type for output @iterator.
//!
//! @param[in] linear_tid
//!   A suitable 1D thread-identifier for the calling thread
//!   (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D thread blocks)
//!
//! @param[in] block_itr
//!   The thread block's base output iterator for storing to
//!
//! @param[in] items
//!   Data to store
// NOTE(review): template parameter list restored per upstream CUB; it was
// lost to angle-bracket stripping in this vendored copy.
template <int BLOCK_THREADS, typename T, int ITEMS_PER_THREAD, typename OutputIteratorT>
_CCCL_DEVICE _CCCL_FORCEINLINE void
StoreDirectStriped(int linear_tid, OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
{
  OutputIteratorT thread_itr = block_itr + linear_tid;

// Store directly in striped order: item ITEM goes to slot
// linear_tid + ITEM * BLOCK_THREADS
#pragma unroll
  for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
  {
    thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM];
  }
}

//! @rst
//! Store a striped arrangement of data across the thread block into
//! a linear segment of items, guarded by range
//!
//! @striped
//!
//! @endrst
//!
//! @tparam BLOCK_THREADS
//!   The thread block size in threads
//!
//! @tparam T
//!   **[inferred]** The data type to store.
//!
//! @tparam ITEMS_PER_THREAD
//!   **[inferred]** The number of consecutive items partitioned onto each thread.
//!
//! @tparam OutputIteratorT
//!   **[inferred]** The random-access iterator type for output @iterator.
//!
//! @param[in] linear_tid
//!   A suitable 1D thread-identifier for the calling thread
//!   (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D thread blocks)
//!
//! @param[in] block_itr
//!   The thread block's base output iterator for storing to
//!
//! @param[in] items
//!
Data to store
//!
//! @param[in] valid_items
//!   Number of valid items to write
// NOTE(review): template parameter list restored per upstream CUB; it was
// lost to angle-bracket stripping in this vendored copy.
template <int BLOCK_THREADS, typename T, int ITEMS_PER_THREAD, typename OutputIteratorT>
_CCCL_DEVICE _CCCL_FORCEINLINE void
StoreDirectStriped(int linear_tid, OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items)
{
  OutputIteratorT thread_itr = block_itr + linear_tid;

// Store directly in striped order, skipping slots at or beyond valid_items
#pragma unroll
  for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
  {
    if ((ITEM * BLOCK_THREADS) + linear_tid < valid_items)
    {
      thread_itr[(ITEM * BLOCK_THREADS)] = items[ITEM];
    }
  }
}

//! @} end member group
//! @name Warp-striped arrangement I/O (direct)
//! @{

//! @rst
//! Store a warp-striped arrangement of data across the
//! thread block into a linear segment of items.
//!
//! @warpstriped
//!
//! Usage Considerations
//! ++++++++++++++++++++
//!
//! The number of threads in the thread block must be a multiple of the architecture's warp size.
//!
//! @endrst
//!
//! @tparam T
//!   **[inferred]** The data type to store.
//!
//! @tparam ITEMS_PER_THREAD
//!   **[inferred]** The number of consecutive items partitioned onto each thread.
//!
//! @tparam OutputIteratorT
//!   **[inferred]** The random-access iterator type for output @iterator.
//!
//! @param[in] linear_tid
//!   A suitable 1D thread-identifier for the calling thread
//!   (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D thread blocks)
//!
//! @param[in] block_itr
//!   The thread block's base output iterator for storing to
//!
//! @param[in] items
//!
Data to store
// NOTE(review): template parameter list restored per upstream CUB; also the
// original doc read "Data to load" — these items are stored, not loaded.
template <typename T, int ITEMS_PER_THREAD, typename OutputIteratorT>
_CCCL_DEVICE _CCCL_FORCEINLINE void
StoreDirectWarpStriped(int linear_tid, OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD])
{
  // Decompose the linear id into (lane, warp); each warp owns a contiguous
  // region of WARP_THREADS * ITEMS_PER_THREAD output slots.
  int tid         = linear_tid & (CUB_PTX_WARP_THREADS - 1);
  int wid         = linear_tid >> CUB_PTX_LOG_WARP_THREADS;
  int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD;

  OutputIteratorT thread_itr = block_itr + warp_offset + tid;

// Store directly in warp-striped order
#pragma unroll
  for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++)
  {
    thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM];
  }
}

//! @rst
//! Store a warp-striped arrangement of data across the thread block into a
//! linear segment of items, guarded by range
//!
//! @warpstriped
//!
//! Usage Considerations
//! ++++++++++++++++++++
//!
//! The number of threads in the thread block must be a multiple of the architecture's warp size.
//!
//! @endrst
//!
//! @tparam T
//!   **[inferred]** The data type to store.
//!
//! @tparam ITEMS_PER_THREAD
//!   **[inferred]** The number of consecutive items partitioned onto each thread.
//!
//! @tparam OutputIteratorT
//!   **[inferred]** The random-access iterator type for output @iterator.
//!
//! @param[in] linear_tid
//!   A suitable 1D thread-identifier for the calling thread
//!   (e.g., `(threadIdx.y * blockDim.x) + linear_tid` for 2D thread blocks)
//!
//! @param[in] block_itr
//!   The thread block's base output iterator for storing to
//!
//! @param[in] items
//!   Data to store
//!
//! @param[in] valid_items
//!
Number of valid items to write +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +StoreDirectWarpStriped(int linear_tid, OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) +{ + int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); + int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; + int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; + + OutputIteratorT thread_itr = block_itr + warp_offset + tid; + +// Store directly in warp-striped order +#pragma unroll + for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) + { + if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items) + { + thread_itr[(ITEM * CUB_PTX_WARP_THREADS)] = items[ITEM]; + } + } +} + +//! @} end member group + +//----------------------------------------------------------------------------- +// Generic BlockStore abstraction +//----------------------------------------------------------------------------- + +//! cub::BlockStoreAlgorithm enumerates alternative algorithms for cub::BlockStore to write a +//! blocked arrangement of items across a CUDA thread block to a linear segment of memory. +enum BlockStoreAlgorithm +{ + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is written directly to memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) decreases as the + //! access stride between threads increases (i.e., the number items per thread). + //! + //! @endrst + BLOCK_STORE_DIRECT, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`striped arrangement ` of data is written directly to memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! The utilization of memory transactions (coalescing) remains high regardless + //! of items written per thread. + //! + //! @endrst + BLOCK_STORE_STRIPED, + + //! @rst + //! Overview + //! 
++++++++++++++++++++++++++
  //!
  //! A :ref:`blocked arrangement <flexible-data-arrangement>` of data is written directly
  //! to memory using CUDA's built-in vectorized stores as a coalescing optimization.
  //! For example, ``st.global.v4.s32`` instructions will be generated
  //! when ``T = int`` and ``ITEMS_PER_THREAD % 4 == 0``.
  //!
  //! Performance Considerations
  //! ++++++++++++++++++++++++++
  //!
  //! - The utilization of memory transactions (coalescing) remains high until the
  //!   access stride between threads (i.e., the number items per thread) exceeds the
  //!   maximum vector store width (typically 4 items or 64B, whichever is lower).
  //! - The following conditions will prevent vectorization and writing will fall back to cub::BLOCK_STORE_DIRECT:
  //!
  //!   - ``ITEMS_PER_THREAD`` is odd
  //!   - The ``OutputIteratorT`` is not a simple pointer type
  //!   - The block output offset is not quadword-aligned
  //!   - The data type ``T`` is not a built-in primitive or CUDA vector type
  //!     (e.g., ``short``, ``int2``, ``double``, ``float2``, etc.)
  //!
  //! @endrst
  BLOCK_STORE_VECTORIZE,

  //! @rst
  //! Overview
  //! ++++++++++++++++++++++++++
  //!
  //! A :ref:`blocked arrangement <flexible-data-arrangement>` is locally
  //! transposed and then efficiently written to memory as a :ref:`striped arrangement <flexible-data-arrangement>`.
  //!
  //! Performance Considerations
  //! ++++++++++++++++++++++++++
  //!
  //! - The utilization of memory transactions (coalescing) remains high regardless
  //!   of items written per thread.
  //! - The local reordering incurs slightly longer latencies and throughput than the
  //!   direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
  //!
  //! @endrst
  BLOCK_STORE_TRANSPOSE,

  //! @rst
  //! Overview
  //! ++++++++++++++++++++++++++
  //!
  //! A :ref:`blocked arrangement <flexible-data-arrangement>` is locally
  //! transposed and then efficiently written to memory as a
  //! :ref:`warp-striped arrangement <flexible-data-arrangement>`.
  //!
  //! Usage Considerations
  //!
++++++++++++++++++++++++++
  //!
  //! - BLOCK_THREADS must be a multiple of WARP_THREADS
  //!
  //! Performance Considerations
  //! ++++++++++++++++++++++++++
  //!
  //! - The utilization of memory transactions (coalescing) remains high regardless
  //!   of items written per thread.
  //! - The local reordering incurs slightly longer latencies and throughput than the
  //!   direct cub::BLOCK_STORE_DIRECT and cub::BLOCK_STORE_VECTORIZE alternatives.
  //!
  //! @endrst
  BLOCK_STORE_WARP_TRANSPOSE,

  //! @rst
  //! Overview
  //! ++++++++++++++++++++++++++
  //!
  //! A :ref:`blocked arrangement <flexible-data-arrangement>` is locally
  //! transposed and then efficiently written to memory as a
  //! :ref:`warp-striped arrangement <flexible-data-arrangement>`.
  //! To reduce the shared memory requirement, only one warp's worth of shared
  //! memory is provisioned and is subsequently time-sliced among warps.
  //!
  //! Usage Considerations
  //! ++++++++++++++++++++++++++
  //!
  //! - BLOCK_THREADS must be a multiple of WARP_THREADS
  //!
  //! Performance Considerations
  //! ++++++++++++++++++++++++++
  //!
  //! - The utilization of memory transactions (coalescing) remains high regardless
  //!   of items written per thread.
  //! - Provisions less shared memory temporary storage, but incurs larger
  //!   latencies than the BLOCK_STORE_WARP_TRANSPOSE alternative.
  //!
  //! @endrst
  BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED,
};

//! @rst
//! The BlockStore class provides :ref:`collective <collective-primitives>` data movement
//! methods for writing a :ref:`blocked arrangement <flexible-data-arrangement>` of items
//! partitioned across a CUDA thread block to a linear segment of memory.
//!
//! Overview
//! +++++++++++++++++++++++++++++++++++++++++++++
//!
//! - The BlockStore class provides a single data movement abstraction that can be specialized
//!   to implement different cub::BlockStoreAlgorithm strategies. This facilitates different
//!   performance policies for different architectures, data types, granularity sizes, etc.
//!
- BlockStore can be optionally specialized by different data movement strategies:
//!
//!   #. :cpp:enumerator:`cub::BLOCK_STORE_DIRECT`:
//!      A :ref:`blocked arrangement <flexible-data-arrangement>` of data is written directly to memory.
//!   #. :cpp:enumerator:`cub::BLOCK_STORE_STRIPED`:
//!      A :ref:`striped arrangement <flexible-data-arrangement>` of data is written directly to memory.
//!   #. :cpp:enumerator:`cub::BLOCK_STORE_VECTORIZE`:
//!      A :ref:`blocked arrangement <flexible-data-arrangement>` of data is written directly to memory
//!      using CUDA's built-in vectorized stores as a coalescing optimization.
//!   #. :cpp:enumerator:`cub::BLOCK_STORE_TRANSPOSE`:
//!      A :ref:`blocked arrangement <flexible-data-arrangement>` is locally transposed into
//!      a :ref:`striped arrangement <flexible-data-arrangement>` which is then written to memory.
//!   #. :cpp:enumerator:`cub::BLOCK_STORE_WARP_TRANSPOSE`:
//!      A :ref:`blocked arrangement <flexible-data-arrangement>` is locally transposed into
//!      a :ref:`warp-striped arrangement <flexible-data-arrangement>` which is then written to memory.
//!   #. :cpp:enumerator:`cub::BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED`:
//!      A :ref:`blocked arrangement <flexible-data-arrangement>` is locally transposed into
//!      a :ref:`warp-striped arrangement <flexible-data-arrangement>` which is then written to memory.
//!      To reduce the shared memory requirement, only one warp's worth of shared memory is provisioned and is
//!      subsequently time-sliced among warps.
//!
//! - @rowmajor
//!
//! A Simple Example
//! +++++++++++++++++++++++++++++++++++++++++++++
//!
//! @blockcollective{BlockStore}
//!
//! The code snippet below illustrates the storing of a "blocked" arrangement
//! of 512 integers across 128 threads (where each thread owns 4 consecutive items)
//! into a linear segment of memory. The store is specialized for ``BLOCK_STORE_WARP_TRANSPOSE``,
//! meaning items are locally reordered among threads so that memory references will be
//! efficiently coalesced using a warp-striped access pattern.
//!
//! .. code-block:: c++
//!
//!    #include <cub/cub.cuh> // or equivalently <cub/block/block_store.cuh>
//!
//!    __global__ void ExampleKernel(int *d_data, ...)
//!    {
//!
// Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each +//! using BlockStore = cub::BlockStore; +//! +//! // Allocate shared memory for BlockStore +//! __shared__ typename BlockStore::TempStorage temp_storage; +//! +//! // Obtain a segment of consecutive items that are blocked across threads +//! int thread_data[4]; +//! ... +//! +//! // Store items to linear memory +//! BlockStore(temp_storage).Store(d_data, thread_data); +//! +//! Suppose the set of ``thread_data`` across the block of threads is +//! ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }``. +//! The output ``d_data`` will be ``0, 1, 2, 3, 4, 5, ...``. +//! +//! Re-using dynamically allocating shared memory +//! +++++++++++++++++++++++++++++++++++++++++++++ +//! +//! The ``block/example_block_reduce_dyn_smem.cu`` example illustrates usage of +//! dynamically shared memory with BlockReduce and how to re-purpose the same memory region. +//! This example can be easily adapted to the storage required by BlockStore. +//! +//! @endrst +//! +//! @tparam T +//! The type of data to be written. +//! +//! @tparam BLOCK_DIM_X +//! The thread block length in threads along the X dimension +//! +//! @tparam ITEMS_PER_THREAD +//! The number of consecutive items partitioned onto each thread. +//! +//! @tparam ALGORITHM +//! **[optional]** cub::BlockStoreAlgorithm tuning policy enumeration (default: cub::BLOCK_STORE_DIRECT) +//! +//! @tparam BLOCK_DIM_Y +//! **[optional]** The thread block length in threads along the Y dimension (default: 1) +//! +//! @tparam BLOCK_DIM_Z +//! **[optional]** The thread block length in threads along the Z dimension (default: 1) +//! +//! @tparam LEGACY_PTX_ARCH +//! **[optional]** Unused. 
+template +class BlockStore +{ +private: + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + /// Store helper + template + struct StoreInternal; + + template + struct StoreInternal + { + /// Shared memory storage layout type + using TempStorage = NullType; + + /// Linear thread-id + int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + /** + * @brief Store items into a linear segment of memory + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + StoreDirectBlocked(linear_tid, block_itr, items); + } + + /** + * @brief Store items into a linear segment of memory, guarded by range + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + * + * @param[in] valid_items + * Number of valid items to write + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + StoreDirectBlocked(linear_tid, block_itr, items, valid_items); + } + }; + + /** + * BLOCK_STORE_STRIPED specialization of store helper + */ + template + struct StoreInternal + { + /// Shared memory storage layout type + using TempStorage = NullType; + + /// Linear thread-id + int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + /** + * @brief Store items into a linear segment of memory + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void 
Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + StoreDirectStriped(linear_tid, block_itr, items); + } + + /** + * @brief Store items into a linear segment of memory, guarded by range + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + * + * @param[in] valid_items + * Number of valid items to write + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + StoreDirectStriped(linear_tid, block_itr, items, valid_items); + } + }; + + /** + * BLOCK_STORE_VECTORIZE specialization of store helper + */ + template + struct StoreInternal + { + /// Shared memory storage layout type + using TempStorage = NullType; + + /// Linear thread-id + int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + /** + * @brief Store items into a linear segment of memory, + * specialized for native pointer types (attempts vectorization) + * + * @param[in] block_ptr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(T* block_ptr, T (&items)[ITEMS_PER_THREAD]) + { + StoreDirectBlockedVectorized(linear_tid, block_ptr, items); + } + + /** + * @brief Store items into a linear segment of memory, + * specialized for opaque input iterators (skips vectorization) + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + StoreDirectBlocked(linear_tid, block_itr, items); + } + + /** + * @brief Store items into a linear segment of memory, guarded by range + * + * @param[in] block_itr + * The thread block's base output iterator 
for storing to + * + * @param[in] items + * Data to store + * + * @param[in] valid_items + * Number of valid items to write + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + StoreDirectBlocked(linear_tid, block_itr, items, valid_items); + } + }; + + /** + * BLOCK_STORE_TRANSPOSE specialization of store helper + */ + template + struct StoreInternal + { + // BlockExchange utility type for keys + using BlockExchange = BlockExchange; + + /// Shared memory storage layout type + struct _TempStorage : BlockExchange::TempStorage + { + /// Temporary storage for partially-full block guard + volatile int valid_items; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + /// Thread reference to shared storage + _TempStorage& temp_storage; + + /// Linear thread-id + int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + {} + + /** + * @brief Store items into a linear segment of memory + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + BlockExchange(temp_storage).BlockedToStriped(items); + StoreDirectStriped(linear_tid, block_itr, items); + } + + /** + * @brief Store items into a linear segment of memory, guarded by range + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + * + * @param[in] valid_items + * Number of valid items to write + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + 
BlockExchange(temp_storage).BlockedToStriped(items); + if (linear_tid == 0) + { + // Move through volatile smem as a workaround to prevent RF spilling on + // subsequent loads + temp_storage.valid_items = valid_items; + } + __syncthreads(); + StoreDirectStriped(linear_tid, block_itr, items, temp_storage.valid_items); + } + }; + + /** + * BLOCK_STORE_WARP_TRANSPOSE specialization of store helper + */ + template + struct StoreInternal + { + enum + { + WARP_THREADS = CUB_WARP_THREADS(0) + }; + + // Assert BLOCK_THREADS must be a multiple of WARP_THREADS + static_assert(int(BLOCK_THREADS) % int(WARP_THREADS) == 0, "BLOCK_THREADS must be a multiple of WARP_THREADS"); + + // BlockExchange utility type for keys + using BlockExchange = BlockExchange; + + /// Shared memory storage layout type + struct _TempStorage : BlockExchange::TempStorage + { + /// Temporary storage for partially-full block guard + volatile int valid_items; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + /// Thread reference to shared storage + _TempStorage& temp_storage; + + /// Linear thread-id + int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + {} + + /** + * @brief Store items into a linear segment of memory + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + BlockExchange(temp_storage).BlockedToWarpStriped(items); + StoreDirectWarpStriped(linear_tid, block_itr, items); + } + + /** + * @brief Store items into a linear segment of memory, guarded by range + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + * + * 
@param[in] valid_items + * Number of valid items to write + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + BlockExchange(temp_storage).BlockedToWarpStriped(items); + if (linear_tid == 0) + { + // Move through volatile smem as a workaround to prevent RF spilling on + // subsequent loads + temp_storage.valid_items = valid_items; + } + __syncthreads(); + StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); + } + }; + + /** + * BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED specialization of store helper + */ + template + struct StoreInternal + { + enum + { + WARP_THREADS = CUB_WARP_THREADS(0) + }; + + // Assert BLOCK_THREADS must be a multiple of WARP_THREADS + static_assert(int(BLOCK_THREADS) % int(WARP_THREADS) == 0, "BLOCK_THREADS must be a multiple of WARP_THREADS"); + + // BlockExchange utility type for keys + using BlockExchange = BlockExchange; + + /// Shared memory storage layout type + struct _TempStorage : BlockExchange::TempStorage + { + /// Temporary storage for partially-full block guard + volatile int valid_items; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + /// Thread reference to shared storage + _TempStorage& temp_storage; + + /// Linear thread-id + int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE StoreInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , linear_tid(linear_tid) + {} + + /** + * @brief Store items into a linear segment of memory + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + BlockExchange(temp_storage).BlockedToWarpStriped(items); + StoreDirectWarpStriped(linear_tid, block_itr, items); + } + + 
/** + * @brief Store items into a linear segment of memory, guarded by range + * + * @param[in] block_itr + * The thread block's base output iterator for storing to + * + * @param[in] items + * Data to store + * + * @param[in] valid_items + * Number of valid items to write + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + BlockExchange(temp_storage).BlockedToWarpStriped(items); + if (linear_tid == 0) + { + // Move through volatile smem as a workaround to prevent RF spilling on + // subsequent loads + temp_storage.valid_items = valid_items; + } + __syncthreads(); + StoreDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); + } + }; + + /// Internal load implementation to use + using InternalStore = StoreInternal; + + /// Shared memory storage layout type + using _TempStorage = typename InternalStore::TempStorage; + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /// Thread reference to shared storage + _TempStorage& temp_storage; + + /// Linear thread-id + int linear_tid; + +public: + //! @smemstorage{BlockStore} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! @name Collective constructors + //! @{ + + /** + * @brief Collective constructor using a private static allocation of shared memory as temporary storage. + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockStore() + : temp_storage(PrivateStorage()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Collective constructor using the specified memory allocation as temporary storage. 
+ * + * @param temp_storage[in] + * Reference to memory allocation having layout type TempStorage + */ + _CCCL_DEVICE _CCCL_FORCEINLINE BlockStore(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //! @} end member group + //! @name Data movement + //! @{ + + //! @rst + //! Store items into a linear segment of memory + //! + //! - @blocked + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the storing of a "blocked" arrangement + //! of 512 integers across 128 threads (where each thread owns 4 consecutive items) + //! into a linear segment of memory. The store is specialized for ``BLOCK_STORE_WARP_TRANSPOSE``, + //! meaning items are locally reordered among threads so that memory references will be + //! efficiently coalesced using a warp-striped access pattern. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each + //! using BlockStore = cub::BlockStore; + //! + //! // Allocate shared memory for BlockStore + //! __shared__ typename BlockStore::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Store items to linear memory + //! int thread_data[4]; + //! BlockStore(temp_storage).Store(d_data, thread_data); + //! + //! Suppose the set of ``thread_data`` across the block of threads is + //! ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }``. + //! The output ``d_data`` will be ``0, 1, 2, 3, 4, 5, ...``. + //! + //! @endrst + //! + //! @param block_itr[out] + //! The thread block's base output iterator for storing to + //! + //! @param items[in] + //! 
Data to store + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD]) + { + InternalStore(temp_storage, linear_tid).Store(block_itr, items); + } + + //! @rst + //! Store items into a linear segment of memory, guarded by range. + //! + //! - @blocked + //! - @smemreuse + //! + //! Snippet + //! +++++++ + //! + //! The code snippet below illustrates the guarded storing of a "blocked" arrangement + //! of 512 integers across 128 threads (where each thread owns 4 consecutive items) + //! into a linear segment of memory. The store is specialized for ``BLOCK_STORE_WARP_TRANSPOSE``, + //! meaning items are locally reordered among threads so that memory references will be + //! efficiently coalesced using a warp-striped access pattern. + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, int valid_items, ...) + //! { + //! // Specialize BlockStore for a 1D block of 128 threads owning 4 integer items each + //! using BlockStore = cub::BlockStore; + //! + //! // Allocate shared memory for BlockStore + //! __shared__ typename BlockStore::TempStorage temp_storage; + //! + //! // Obtain a segment of consecutive items that are blocked across threads + //! int thread_data[4]; + //! ... + //! + //! // Store items to linear memory + //! int thread_data[4]; + //! BlockStore(temp_storage).Store(d_data, thread_data, valid_items); + //! + //! Suppose the set of ``thread_data`` across the block of threads is + //! ``{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }`` and ``valid_items`` is ``5``. + //! The output ``d_data`` will be ``0, 1, 2, 3, 4, ?, ?, ?, ...``, with + //! only the first two threads being unmasked to store portions of valid data. + //! + //! @endrst + //! + //! @param block_itr[out] + //! The thread block's base output iterator for storing to + //! + //! @param items[in] + //! Data to store + //! + //! @param valid_items[in] + //! 
Number of valid items to write + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Store(OutputIteratorT block_itr, T (&items)[ITEMS_PER_THREAD], int valid_items) + { + InternalStore(temp_storage, linear_tid).Store(block_itr, items, valid_items); + } + + //! @} end member group +}; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document +template > +struct BlockStoreType +{ + using type = cub::BlockStore; +}; +#endif // _CCCL_DOXYGEN_INVOKED + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/radix_rank_sort_operations.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/radix_rank_sort_operations.cuh new file mode 100644 index 0000000000000000000000000000000000000000..35bdfe8ee02f6acbf401a98b60f751ce95b8dc46 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/radix_rank_sort_operations.cuh @@ -0,0 +1,617 @@ +/****************************************************************************** + * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * radix_rank_sort_operations.cuh contains common abstractions, definitions and + * operations used for radix sorting and ranking. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN + +/** \brief Base struct for digit extractor. Contains common code to provide + special handling for floating-point -0.0. + + \note This handles correctly both the case when the keys are + bitwise-complemented after twiddling for descending sort (in onesweep) as + well as when the keys are not bit-negated, but the implementation handles + descending sort separately (in other implementations in CUB). Twiddling + alone maps -0.0f to 0x7fffffff and +0.0f to 0x80000000 for float, which are + subsequent bit patterns and bitwise complements of each other. 
For onesweep, + both -0.0f and +0.0f are mapped to the bit pattern of +0.0f (0x80000000) for + ascending sort, and to the pattern of -0.0f (0x7fffffff) for descending + sort. For all other sorting implementations in CUB, both are always mapped + to +0.0f. Since bit patterns for both -0.0f and +0.0f are next to each other + and only one of them is used, the sorting works correctly. For double, the + same applies, but with 64-bit patterns. +*/ +template ::CATEGORY> +struct BaseDigitExtractor +{ + using TraitsT = Traits; + using UnsignedBits = typename TraitsT::UnsignedBits; + + static _CCCL_HOST_DEVICE _CCCL_FORCEINLINE UnsignedBits ProcessFloatMinusZero(UnsignedBits key) + { + return key; + } +}; + +template +struct BaseDigitExtractor +{ + using TraitsT = Traits; + using UnsignedBits = typename TraitsT::UnsignedBits; + + static _CCCL_HOST_DEVICE _CCCL_FORCEINLINE UnsignedBits ProcessFloatMinusZero(UnsignedBits key) + { + UnsignedBits TWIDDLED_MINUS_ZERO_BITS = + TraitsT::TwiddleIn(UnsignedBits(1) << UnsignedBits(8 * sizeof(UnsignedBits) - 1)); + UnsignedBits TWIDDLED_ZERO_BITS = TraitsT::TwiddleIn(0); + return key == TWIDDLED_MINUS_ZERO_BITS ? TWIDDLED_ZERO_BITS : key; + } +}; + +/** \brief A wrapper type to extract digits. Uses the BFE intrinsic to extract a + * key from a digit. */ +template +struct BFEDigitExtractor : BaseDigitExtractor +{ + using typename BaseDigitExtractor::UnsignedBits; + + ::cuda::std::uint32_t bit_start; + ::cuda::std::uint32_t num_bits; + + explicit _CCCL_DEVICE _CCCL_FORCEINLINE + BFEDigitExtractor(::cuda::std::uint32_t bit_start = 0, ::cuda::std::uint32_t num_bits = 0) + : bit_start(bit_start) + , num_bits(num_bits) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t Digit(UnsignedBits key) const + { + return BFE(this->ProcessFloatMinusZero(key), bit_start, num_bits); + } +}; + +/** \brief A wrapper type to extract digits. Uses a combination of shift and + * bitwise and to extract digits. 
*/ +template +struct ShiftDigitExtractor : BaseDigitExtractor +{ + using typename BaseDigitExtractor::UnsignedBits; + + ::cuda::std::uint32_t bit_start; + ::cuda::std::uint32_t mask; + + explicit _CCCL_DEVICE _CCCL_FORCEINLINE + ShiftDigitExtractor(::cuda::std::uint32_t bit_start = 0, ::cuda::std::uint32_t num_bits = 0) + : bit_start(bit_start) + , mask((1 << num_bits) - 1) + {} + + _CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t Digit(UnsignedBits key) const + { + return ::cuda::std::uint32_t(this->ProcessFloatMinusZero(key) >> UnsignedBits(bit_start)) & mask; + } +}; + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document +namespace detail +{ + +template +struct logic_helper_t; + +template +struct true_t +{ + static constexpr bool value = true; +}; + +template +using all_t = // + ::cuda::std::is_same< // + logic_helper_t, // + logic_helper_t::value...>>; + +struct identity_decomposer_t +{ + template + _CCCL_HOST_DEVICE T& operator()(T& key) const + { + return key; + } +}; + +template +_CCCL_HOST_DEVICE void +for_each_member_impl_helper(F f, const ::cuda::std::tuple& tpl, THRUST_NS_QUALIFIER::index_sequence) +{ + auto sink = {(f(::cuda::std::get(tpl)), 0)...}; + (void) sink; +} + +template +_CCCL_HOST_DEVICE void for_each_member_impl(F f, const ::cuda::std::tuple& tpl) +{ + static_assert(sizeof...(Ts), "Empty aggregates are not supported"); + + // Most radix operations are indifferent to the order of operations. + // Conversely, the digit extractor traverses fields from the least significant + // to the most significant to imitate bitset printing where higher bits are on + // the left. It also maps to intuition, where something coming first is more + // important. Therefore, we traverse fields on the opposite order. 
+ for_each_member_impl_helper(f, tpl, THRUST_NS_QUALIFIER::make_reversed_index_sequence{}); +} + +template +_CCCL_HOST_DEVICE void for_each_member(F f, DecomposerT decomposer, T& aggregate) +{ + for_each_member_impl(f, decomposer(aggregate)); +} + +namespace radix +{ +template +struct is_fundamental_type +{ + static constexpr bool value = false; +}; + +template +struct is_fundamental_type::UnsignedBits>> +{ + static constexpr bool value = true; +}; + +template +struct is_tuple_of_references_to_fundamental_types_t : ::cuda::std::false_type +{}; + +template +struct is_tuple_of_references_to_fundamental_types_t< // + ::cuda::std::tuple, // + typename ::cuda::std::enable_if< // + all_t::value...>::value // + >::type> // + : ::cuda::std::true_type +{}; + +template +using decomposer_check_t = is_tuple_of_references_to_fundamental_types_t>; + +template +struct bit_ordered_conversion_policy_t +{ + using bit_ordered_type = typename Traits::UnsignedBits; + + static _CCCL_HOST_DEVICE bit_ordered_type to_bit_ordered(detail::identity_decomposer_t, bit_ordered_type val) + { + return Traits::TwiddleIn(val); + } + + static _CCCL_HOST_DEVICE bit_ordered_type from_bit_ordered(detail::identity_decomposer_t, bit_ordered_type val) + { + return Traits::TwiddleOut(val); + } +}; + +template +struct bit_ordered_inversion_policy_t +{ + using bit_ordered_type = typename Traits::UnsignedBits; + + static _CCCL_HOST_DEVICE bit_ordered_type inverse(detail::identity_decomposer_t, bit_ordered_type val) + { + return ~val; + } +}; + +template ::value> +struct traits_t +{ + using bit_ordered_type = typename Traits::UnsignedBits; + using bit_ordered_conversion_policy = bit_ordered_conversion_policy_t; + using bit_ordered_inversion_policy = bit_ordered_inversion_policy_t; + + template + using digit_extractor_t = FundamentalExtractorT; + + static _CCCL_HOST_DEVICE bit_ordered_type min_raw_binary_key(detail::identity_decomposer_t) + { + return Traits::LOWEST_KEY; + } + + static _CCCL_HOST_DEVICE 
bit_ordered_type max_raw_binary_key(detail::identity_decomposer_t) + { + return Traits::MAX_KEY; + } + + static _CCCL_HOST_DEVICE int default_end_bit(detail::identity_decomposer_t) + { + return sizeof(T) * 8; + } + + template + static _CCCL_HOST_DEVICE digit_extractor_t + digit_extractor(int begin_bit, int num_bits, detail::identity_decomposer_t) + { + return FundamentalExtractorT(begin_bit, num_bits); + } +}; + +template +struct min_raw_binary_key_f +{ + DecomposerT decomposer; + + template + _CCCL_HOST_DEVICE void operator()(T& field) + { + using traits = traits_t::type>; + using bit_ordered_type = typename traits::bit_ordered_type; + reinterpret_cast(field) = traits::min_raw_binary_key(detail::identity_decomposer_t{}); + } +}; + +template +_CCCL_HOST_DEVICE void min_raw_binary_key(DecomposerT decomposer, T& aggregate) +{ + detail::for_each_member(min_raw_binary_key_f{decomposer}, decomposer, aggregate); +} + +template +struct max_raw_binary_key_f +{ + DecomposerT decomposer; + + template + _CCCL_HOST_DEVICE void operator()(T& field) + { + using traits = traits_t::type>; + using bit_ordered_type = typename traits::bit_ordered_type; + reinterpret_cast(field) = traits::max_raw_binary_key(detail::identity_decomposer_t{}); + } +}; + +template +_CCCL_HOST_DEVICE void max_raw_binary_key(DecomposerT decomposer, T& aggregate) +{ + detail::for_each_member(max_raw_binary_key_f{decomposer}, decomposer, aggregate); +} + +template +struct to_bit_ordered_f +{ + DecomposerT decomposer; + + template + _CCCL_HOST_DEVICE void operator()(T& field) + { + using traits = traits_t::type>; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + auto& ordered_field = reinterpret_cast(field); + ordered_field = bit_ordered_conversion::to_bit_ordered(detail::identity_decomposer_t{}, ordered_field); + } +}; + +template +_CCCL_HOST_DEVICE void to_bit_ordered(DecomposerT decomposer, T& aggregate) +{ + 
detail::for_each_member(to_bit_ordered_f{decomposer}, decomposer, aggregate); +} + +template +struct from_bit_ordered_f +{ + DecomposerT decomposer; + + template + _CCCL_HOST_DEVICE void operator()(T& field) + { + using traits = traits_t::type>; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion = typename traits::bit_ordered_conversion_policy; + + auto& ordered_field = reinterpret_cast(field); + ordered_field = bit_ordered_conversion::from_bit_ordered(detail::identity_decomposer_t{}, ordered_field); + } +}; + +template +_CCCL_HOST_DEVICE void from_bit_ordered(DecomposerT decomposer, T& aggregate) +{ + detail::for_each_member(from_bit_ordered_f{decomposer}, decomposer, aggregate); +} + +template +struct inverse_f +{ + DecomposerT decomposer; + + template + _CCCL_HOST_DEVICE void operator()(T& field) + { + using traits = traits_t::type>; + using bit_ordered_type = typename traits::bit_ordered_type; + + auto& ordered_field = reinterpret_cast(field); + ordered_field = ~ordered_field; + } +}; + +template +_CCCL_HOST_DEVICE void inverse(DecomposerT decomposer, T& aggregate) +{ + detail::for_each_member(inverse_f{decomposer}, decomposer, aggregate); +} + +template +struct default_end_bit_f +{ + int& result; + DecomposerT decomposer; + + template + _CCCL_HOST_DEVICE void operator()(T& field) + { + result += sizeof(field) * 8; + } +}; + +template +_CCCL_HOST_DEVICE int default_end_bit(DecomposerT decomposer, T& aggregate) +{ + int result{}; + detail::for_each_member(default_end_bit_f{result, decomposer}, decomposer, aggregate); + return result; +} + +struct digit_f +{ + ::cuda::std::uint32_t& dst; + ::cuda::std::uint32_t& dst_bit_start; + ::cuda::std::uint32_t& src_bit_start; + ::cuda::std::uint32_t& num_bits; + + template + _CCCL_HOST_DEVICE void operator()(T& src) + { + constexpr ::cuda::std::uint32_t src_size = sizeof(T) * 8; + + if (src_bit_start >= src_size) + { + src_bit_start -= src_size; + } + else + { + using traits = 
traits_t::type>; + using bit_ordered_type = typename traits::bit_ordered_type; + + const ::cuda::std::uint32_t bits_to_copy = (::cuda::std::min)(src_size - src_bit_start, num_bits); + + if (bits_to_copy) + { + bit_ordered_type ordered_src = + BaseDigitExtractor::ProcessFloatMinusZero(reinterpret_cast(src)); + + const ::cuda::std::uint32_t mask = (1 << bits_to_copy) - 1; + dst = dst | (((ordered_src >> src_bit_start) & mask) << dst_bit_start); + + num_bits -= bits_to_copy; + dst_bit_start += bits_to_copy; + } + src_bit_start = 0; + } + } +}; + +template +_CCCL_HOST_DEVICE void +digit(DecomposerT decomposer, + ::cuda::std::uint32_t& dst, + T& src, + ::cuda::std::uint32_t& dst_bit_start, + ::cuda::std::uint32_t& src_bit_start, + ::cuda::std::uint32_t& num_bits) +{ + detail::for_each_member(digit_f{dst, dst_bit_start, src_bit_start, num_bits}, decomposer, src); +} + +template +struct custom_digit_extractor_t +{ + DecomposerT decomposer; + ::cuda::std::uint32_t bit_start; + ::cuda::std::uint32_t num_bits; + + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE + custom_digit_extractor_t(DecomposerT decomposer, ::cuda::std::uint32_t bit_start, ::cuda::std::uint32_t num_bits) + : decomposer(decomposer) + , bit_start(bit_start) + , num_bits(num_bits) + {} + + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t Digit(T& key) const + { + ::cuda::std::uint32_t result{}; + ::cuda::std::uint32_t dst_bit_start{}; + ::cuda::std::uint32_t src_bit_start = bit_start; + ::cuda::std::uint32_t bits_remaining{num_bits}; + digit(decomposer, result, key, dst_bit_start, src_bit_start, bits_remaining); + return result; + } +}; + +struct custom_bit_conversion_policy_t +{ + template + static _CCCL_HOST_DEVICE T to_bit_ordered(DecomposerT decomposer, T val) + { + detail::radix::to_bit_ordered(decomposer, val); + return val; + } + + template + static _CCCL_HOST_DEVICE T from_bit_ordered(DecomposerT decomposer, T val) + { + detail::radix::from_bit_ordered(decomposer, val); + return val; + } +}; 
+ +struct custom_bit_inversion_policy_t +{ + template + static _CCCL_HOST_DEVICE T inverse(DecomposerT decomposer, T val) + { + detail::radix::inverse(decomposer, val); + return val; + } +}; + +template +struct traits_t +{ + using bit_ordered_type = T; + using bit_ordered_conversion_policy = custom_bit_conversion_policy_t; + using bit_ordered_inversion_policy = custom_bit_inversion_policy_t; + + template + using digit_extractor_t = custom_digit_extractor_t; + + template + static _CCCL_HOST_DEVICE bit_ordered_type min_raw_binary_key(DecomposerT decomposer) + { + T val{}; + detail::radix::min_raw_binary_key(decomposer, val); + return val; + } + + template + static _CCCL_HOST_DEVICE bit_ordered_type max_raw_binary_key(DecomposerT decomposer) + { + T val{}; + detail::radix::max_raw_binary_key(decomposer, val); + return val; + } + + template + static _CCCL_HOST_DEVICE int default_end_bit(DecomposerT decomposer) + { + T aggregate{}; + return detail::radix::default_end_bit(decomposer, aggregate); + } + + template + static _CCCL_HOST_DEVICE digit_extractor_t + digit_extractor(int begin_bit, int num_bits, DecomposerT decomposer) + { + return custom_digit_extractor_t(decomposer, begin_bit, num_bits); + } +}; + +} // namespace radix + +} // namespace detail +#endif // _CCCL_DOXYGEN_INVOKED + +//! 
Twiddling keys for radix sort +template +struct RadixSortTwiddle +{ +private: + using traits = detail::radix::traits_t; + using bit_ordered_type = typename traits::bit_ordered_type; + using bit_ordered_conversion_policy = typename traits::bit_ordered_conversion_policy; + using bit_ordered_inversion_policy = typename traits::bit_ordered_inversion_policy; + +public: + template + static _CCCL_HOST_DEVICE _CCCL_FORCEINLINE // + bit_ordered_type + In(bit_ordered_type key, DecomposerT decomposer = {}) + { + key = bit_ordered_conversion_policy::to_bit_ordered(decomposer, key); + _CCCL_IF_CONSTEXPR (IS_DESCENDING) + { + key = bit_ordered_inversion_policy::inverse(decomposer, key); + } + return key; + } + + template + static _CCCL_HOST_DEVICE _CCCL_FORCEINLINE // + bit_ordered_type + Out(bit_ordered_type key, DecomposerT decomposer = {}) + { + _CCCL_IF_CONSTEXPR (IS_DESCENDING) + { + key = bit_ordered_inversion_policy::inverse(decomposer, key); + } + key = bit_ordered_conversion_policy::from_bit_ordered(decomposer, key); + return key; + } + + template + static _CCCL_HOST_DEVICE _CCCL_FORCEINLINE // + bit_ordered_type + DefaultKey(DecomposerT decomposer = {}) + { + return IS_DESCENDING ? traits::min_raw_binary_key(decomposer) : traits::max_raw_binary_key(decomposer); + } +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_histogram_atomic.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_histogram_atomic.cuh new file mode 100644 index 0000000000000000000000000000000000000000..4103641dbe2d073337604dc41bf7c00137ee672f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_histogram_atomic.cuh @@ -0,0 +1,91 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. 
+ * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * The cub::BlockHistogramAtomic class provides atomic-based methods for constructing block-wide + * histograms from data samples partitioned across a CUDA thread block. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief The BlockHistogramAtomic class provides atomic-based methods for constructing block-wide + * histograms from data samples partitioned across a CUDA thread block. + */ +template +struct BlockHistogramAtomic +{ + /// Shared memory storage layout type + struct TempStorage + {}; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockHistogramAtomic(TempStorage& temp_storage) {} + + /** + * @brief Composite data onto an existing histogram + * + * @param[in] items + * Calling thread's input values to histogram + * + * @param[out] histogram + * Reference to shared/device-accessible memory histogram + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Composite(T (&items)[ITEMS_PER_THREAD], CounterT histogram[BINS]) + { + // Update histogram +#pragma unroll + for (int i = 0; i < ITEMS_PER_THREAD; ++i) + { + atomicAdd(histogram + items[i], 1); + } + } +}; +} // namespace detail + +template +using BlockHistogramAtomic CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::BlockHistogramAtomic; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_histogram_sort.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_histogram_sort.cuh new file mode 100644 index 0000000000000000000000000000000000000000..127f30953b29e7f394f06c4cb865ec2b1cde0316 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_histogram_sort.cuh @@ -0,0 +1,261 @@ 
+/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * The cub::BlockHistogramSort class provides sorting-based methods for constructing block-wide + * histograms from data samples partitioned across a CUDA thread block. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief The BlockHistogramSort class provides sorting-based methods for constructing block-wide + * histograms from data samples partitioned across a CUDA thread block. + * + * @tparam T + * Sample type + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam ITEMS_PER_THREAD + * The number of samples per thread + * + * @tparam BINS + * The number of bins into which histogram samples may fall + * + * @tparam BLOCK_DIM_Y + * The thread block length in threads along the Y dimension + * + * @tparam BLOCK_DIM_Z + * The thread block length in threads along the Z dimension + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective (unused) + */ +template +struct BlockHistogramSort +{ + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + // Parameterize BlockRadixSort type for our thread block + using BlockRadixSortT = + BlockRadixSort; + + // Parameterize BlockDiscontinuity type for our thread block + using BlockDiscontinuityT = BlockDiscontinuity; + + /// Shared memory + union _TempStorage + { + // Storage for sorting bin values + typename BlockRadixSortT::TempStorage sort; + + struct Discontinuities + { + // Storage for detecting discontinuities in the tile of sorted bin values + typename BlockDiscontinuityT::TempStorage flag; + + // Storage for noting begin/end offsets of bin runs in the tile of sorted bin values + unsigned int run_begin[BINS]; + unsigned int run_end[BINS]; + } discontinuities; + }; + + /// Alias wrapper 
allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + // Thread fields + _TempStorage& temp_storage; + unsigned int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockHistogramSort(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + // Discontinuity functor + struct DiscontinuityOp + { + // Reference to temp_storage + _TempStorage& temp_storage; + + // Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE DiscontinuityOp(_TempStorage& temp_storage) + : temp_storage(temp_storage) + {} + + // Discontinuity predicate + _CCCL_DEVICE _CCCL_FORCEINLINE bool operator()(const T& a, const T& b, int b_index) + { + if (a != b) + { + // Note the begin/end offsets in shared storage + temp_storage.discontinuities.run_begin[b] = b_index; + temp_storage.discontinuities.run_end[a] = b_index; + + return true; + } + else + { + return false; + } + } + }; + + /** + * @brief Composite data onto an existing histogram + * + * @param[in] items + * Calling thread's input values to histogram + * + * @param[out] histogram + * Reference to shared/device-accessible memory histogram + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Composite(T (&items)[ITEMS_PER_THREAD], CounterT histogram[BINS]) + { + enum + { + TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD + }; + + // Sort bytes in blocked arrangement + BlockRadixSortT(temp_storage.sort).Sort(items); + + __syncthreads(); + + // Initialize the shared memory's run_begin and run_end for each bin + int histo_offset = 0; + +#pragma unroll + for (; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) + { + temp_storage.discontinuities.run_begin[histo_offset + linear_tid] = TILE_SIZE; + temp_storage.discontinuities.run_end[histo_offset + linear_tid] = TILE_SIZE; + } + // Finish up with guarded initialization if necessary + if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) + { 
+ temp_storage.discontinuities.run_begin[histo_offset + linear_tid] = TILE_SIZE; + temp_storage.discontinuities.run_end[histo_offset + linear_tid] = TILE_SIZE; + } + + __syncthreads(); + + int flags[ITEMS_PER_THREAD]; // unused + + // Compute head flags to demarcate contiguous runs of the same bin in the sorted tile + DiscontinuityOp flag_op(temp_storage); + BlockDiscontinuityT(temp_storage.discontinuities.flag).FlagHeads(flags, items, flag_op); + + // Update begin for first item + if (linear_tid == 0) + { + temp_storage.discontinuities.run_begin[items[0]] = 0; + } + + __syncthreads(); + + // Composite into histogram + histo_offset = 0; + +#pragma unroll + for (; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) + { + int thread_offset = histo_offset + linear_tid; + CounterT count = + temp_storage.discontinuities.run_end[thread_offset] - temp_storage.discontinuities.run_begin[thread_offset]; + histogram[thread_offset] += count; + } + + // Finish up with guarded composition if necessary + if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) + { + int thread_offset = histo_offset + linear_tid; + CounterT count = + temp_storage.discontinuities.run_end[thread_offset] - temp_storage.discontinuities.run_begin[thread_offset]; + histogram[thread_offset] += count; + } + } +}; +} // namespace detail + +template +using BlockHistogramSort CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = + detail::BlockHistogramSort; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_raking.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_raking.cuh new file mode 100644 index 0000000000000000000000000000000000000000..90f8f12236f39219ef2b33233bc7a167d9b1e38e --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_raking.cuh @@ -0,0 +1,268 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +/** + * @file + * cub::BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread + * block. Supports non-commutative reduction operators. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread + * block. Supports non-commutative reduction operators. + * + * Supports non-commutative binary reduction operators. Unlike commutative + * reduction operators (e.g., addition), the application of a non-commutative + * reduction operator (e.g, string concatenation) across a sequence of inputs must + * honor the relative ordering of items and partial reductions when applying the + * reduction operator. + * + * Compared to the implementation of BlockReduceRakingCommutativeOnly (which + * does not support non-commutative operators), this implementation requires a + * few extra rounds of inter-thread communication. 
+ * + * @tparam T + * Data type being reduced + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam BLOCK_DIM_Y + * The thread block length in threads along the Y dimension + * + * @tparam BLOCK_DIM_Z + * The thread block length in threads along the Z dimension + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct BlockReduceRaking +{ + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + /// Layout type for padded thread block raking grid + using BlockRakingLayout = BlockRakingLayout; + + /// WarpReduce utility type + using WarpReduce = typename WarpReduce::InternalWarpReduce; + + /// Constants + enum + { + /// Number of raking threads + RAKING_THREADS = BlockRakingLayout::RAKING_THREADS, + + /// Number of raking elements per warp synchronous raking thread + SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH, + + /// Cooperative work can be entirely warp synchronous + WARP_SYNCHRONOUS = (int(RAKING_THREADS) == int(BLOCK_THREADS)), + + /// Whether or not warp-synchronous reduction should be unguarded (i.e., the warp-reduction elements is a power of + /// two + WARP_SYNCHRONOUS_UNGUARDED = PowerOfTwo::VALUE, + + /// Whether or not accesses into smem are unguarded + RAKING_UNGUARDED = BlockRakingLayout::UNGUARDED, + + }; + + /// Shared memory storage layout type + union _TempStorage + { + /// Storage for warp-synchronous reduction + typename WarpReduce::TempStorage warp_storage; + + /// Padded thread block raking grid + typename BlockRakingLayout::TempStorage raking_grid; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + // Thread fields + _TempStorage& temp_storage; + unsigned int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockReduceRaking(TempStorage& temp_storage) + : 
temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] partial + * [lane0 only] Warp-wide aggregate reduction of input items + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T RakingReduction( + ReductionOp reduction_op, T* raking_segment, T partial, int num_valid, Int2Type /*iteration*/) + { + // Update partial if addend is in range + if ((IS_FULL_TILE && RAKING_UNGUARDED) || ((linear_tid * SEGMENT_LENGTH) + ITERATION < num_valid)) + { + T addend = raking_segment[ITERATION]; + partial = reduction_op(partial, addend); + } + return RakingReduction(reduction_op, raking_segment, partial, num_valid, Int2Type()); + } + + /** + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] partial + * [lane0 only] Warp-wide aggregate reduction of input items + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T RakingReduction( + ReductionOp /*reduction_op*/, + T* /*raking_segment*/, + T partial, + int /*num_valid*/, + Int2Type /*iteration*/) + { + return partial; + } + + /** + * @brief Computes a thread block-wide reduction using the specified reduction operator. The + * first num_valid threads each contribute one reduction partial. The return value is + * only valid for thread0. 
+ * + * @param[in] partial + * Calling thread's input partial reductions + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T partial, int num_valid, ReductionOp reduction_op) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp synchronous reduction (unguarded if active threads is a power-of-two) + partial = WarpReduce(temp_storage.warp_storage).template Reduce(partial, num_valid, reduction_op); + } + else + { + // Place partial into shared memory grid. + *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid) = partial; + + __syncthreads(); + + // Reduce parallelism to one warp + if (linear_tid < RAKING_THREADS) + { + // Raking reduction in grid + T* raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + partial = raking_segment[0]; + + partial = RakingReduction(reduction_op, raking_segment, partial, num_valid, Int2Type<1>()); + + int valid_raking_threads = (IS_FULL_TILE) ? RAKING_THREADS : (num_valid + SEGMENT_LENGTH - 1) / SEGMENT_LENGTH; + + // sync before re-using shmem (warp_storage/raking_grid are aliased) + static_assert(RAKING_THREADS <= CUB_PTX_WARP_THREADS, "RAKING_THREADS must be <= warp size."); + unsigned int mask = static_cast((1ull << RAKING_THREADS) - 1); + __syncwarp(mask); + + partial = WarpReduce(temp_storage.warp_storage) + .template Reduce<(IS_FULL_TILE && RAKING_UNGUARDED)>(partial, valid_raking_threads, reduction_op); + } + } + + return partial; + } + + /** + * @brief Computes a thread block-wide reduction using addition (+) as the reduction operator. + * The first num_valid threads each contribute one reduction partial. The return value is + * only valid for thread0. 
+ * + * @param[in] partial + * Calling thread's input partial reductions + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T partial, int num_valid) + { + ::cuda::std::plus<> reduction_op; + + return Reduce(partial, num_valid, reduction_op); + } +}; +} // namespace detail + +template +using BlockReduceRaking CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::BlockReduceRaking; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7841db5f18ab4dc856e7b9c3fcb4b13d2645e868 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_raking_commutative_only.cuh @@ -0,0 +1,242 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across + * a CUDA thread block. Does not support non-commutative reduction operators. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction + * across a CUDA thread block. Does not support non-commutative reduction operators. Does not + * support block sizes that are not a multiple of the warp size. 
+ * + * @tparam T + * Data type being reduced + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam BLOCK_DIM_Y + * The thread block length in threads along the Y dimension + * + * @tparam BLOCK_DIM_Z + * The thread block length in threads along the Z dimension + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct BlockReduceRakingCommutativeOnly +{ + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + // The fall-back implementation to use when BLOCK_THREADS is not a multiple of the warp size or not all threads have + // valid values + using FallBack = detail::BlockReduceRaking; + + /// Constants + enum + { + /// Number of warp threads + WARP_THREADS = CUB_WARP_THREADS(0), + + /// Whether or not to use fall-back + USE_FALLBACK = ((BLOCK_THREADS % WARP_THREADS != 0) || (BLOCK_THREADS <= WARP_THREADS)), + + /// Number of raking threads + RAKING_THREADS = WARP_THREADS, + + /// Number of threads actually sharing items with the raking threads + SHARING_THREADS = CUB_MAX(1, BLOCK_THREADS - RAKING_THREADS), + + /// Number of raking elements per warp synchronous raking thread + SEGMENT_LENGTH = SHARING_THREADS / WARP_THREADS, + }; + + /// WarpReduce utility type + using WarpReduce = WarpReduce; + + /// Layout type for padded thread block raking grid + using BlockRakingLayout = BlockRakingLayout; + + /// Shared memory storage layout type + union _TempStorage + { + struct DefaultStorage + { + /// Storage for warp-synchronous reduction + typename WarpReduce::TempStorage warp_storage; + + /// Padded thread block raking grid + typename BlockRakingLayout::TempStorage raking_grid; + } default_storage; + + /// Fall-back storage for non-commutative block reduction + typename FallBack::TempStorage fallback_storage; + }; + + /// Alias wrapper allowing storage to be unioned + 
struct TempStorage : Uninitialized<_TempStorage> + {}; + + // Thread fields + _TempStorage& temp_storage; + unsigned int linear_tid; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockReduceRakingCommutativeOnly(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + /** + * @brief Computes a thread block-wide reduction using addition (+) as the reduction operator. + * The first num_valid threads each contribute one reduction partial. + * The return value is only valid for thread0. + * + * @param[in] partial + * Calling thread's input partial reductions + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T partial, int num_valid) + { + if (USE_FALLBACK || !FULL_TILE) + { + return FallBack(temp_storage.fallback_storage).template Sum(partial, num_valid); + } + else + { + // Place partial into shared memory grid + if (linear_tid >= RAKING_THREADS) + { + *BlockRakingLayout::PlacementPtr(temp_storage.default_storage.raking_grid, linear_tid - RAKING_THREADS) = + partial; + } + + __syncthreads(); + + // Reduce parallelism to one warp + if (linear_tid < RAKING_THREADS) + { + // Raking reduction in grid + T* raking_segment = BlockRakingLayout::RakingPtr(temp_storage.default_storage.raking_grid, linear_tid); + partial = cub::internal::ThreadReduce(raking_segment, ::cuda::std::plus<>{}, partial); + + // Warp reduction + partial = WarpReduce(temp_storage.default_storage.warp_storage).Sum(partial); + } + } + + return partial; + } + + /** + * @brief Computes a thread block-wide reduction using the specified reduction operator. + * The first num_valid threads each contribute one reduction partial. + * The return value is only valid for thread0. 
+ * + * @param[in] partial + * Calling thread's input partial reductions + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T partial, int num_valid, ReductionOp reduction_op) + { + if (USE_FALLBACK || !FULL_TILE) + { + return FallBack(temp_storage.fallback_storage).template Reduce(partial, num_valid, reduction_op); + } + else + { + // Place partial into shared memory grid + if (linear_tid >= RAKING_THREADS) + { + *BlockRakingLayout::PlacementPtr(temp_storage.default_storage.raking_grid, linear_tid - RAKING_THREADS) = + partial; + } + + __syncthreads(); + + // Reduce parallelism to one warp + if (linear_tid < RAKING_THREADS) + { + // Raking reduction in grid + T* raking_segment = BlockRakingLayout::RakingPtr(temp_storage.default_storage.raking_grid, linear_tid); + partial = cub::internal::ThreadReduce(raking_segment, reduction_op, partial); + + // Warp reduction + partial = WarpReduce(temp_storage.default_storage.warp_storage).Reduce(partial, reduction_op); + } + } + + return partial; + } +}; +} // namespace detail + +template +using BlockReduceRakingCommutativeOnly CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::BlockReduceRakingCommutativeOnly; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_warp_reductions.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_warp_reductions.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2dfa526771fc29cd99fa83176328712f6aa98dd1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_reduce_warp_reductions.cuh @@ -0,0 +1,267 @@ 
+/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction + * across a CUDA thread block. Supports non-commutative reduction operators. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction + * across a CUDA thread block. Supports non-commutative reduction operators. + * @tparam T + * Data type being reduced + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam BLOCK_DIM_Y + * The thread block length in threads along the Y dimension + * + * @tparam BLOCK_DIM_Z + * The thread block length in threads along the Z dimension + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct BlockReduceWarpReductions +{ + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + + /// Number of warp threads + WARP_THREADS = CUB_WARP_THREADS(0), + + /// Number of active warps + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + + /// The logical warp size for warp reductions + LOGICAL_WARP_SIZE = CUB_MIN(BLOCK_THREADS, WARP_THREADS), + + /// Whether or not the logical warp size evenly divides the thread block size + EVEN_WARP_MULTIPLE = (BLOCK_THREADS % LOGICAL_WARP_SIZE == 0) + }; + + /// WarpReduce utility type + using WarpReduce = typename WarpReduce::InternalWarpReduce; + + /// Shared memory storage layout type + struct _TempStorage + { + /// Buffer for warp-synchronous reduction + typename WarpReduce::TempStorage warp_reduce[WARPS]; + + /// Shared totals from each warp-synchronous reduction + T warp_aggregates[WARPS]; + + /// Shared prefix for the entire thread block + T block_prefix; + }; + 
+ /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + // Thread fields + _TempStorage& temp_storage; + int linear_tid; + int warp_id; + int lane_id; + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockReduceWarpReductions(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + , warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS) + , lane_id(::cuda::ptx::get_sreg_laneid()) + {} + + /** + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] warp_aggregate + * [lane0 only] Warp-wide aggregate reduction of input items + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T ApplyWarpAggregates( + ReductionOp reduction_op, T warp_aggregate, int num_valid, Int2Type /*successor_warp*/) + { + if (FULL_TILE || (SUCCESSOR_WARP * LOGICAL_WARP_SIZE < num_valid)) + { + T addend = temp_storage.warp_aggregates[SUCCESSOR_WARP]; + warp_aggregate = reduction_op(warp_aggregate, addend); + } + return ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid, Int2Type()); + } + + /** + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] warp_aggregate + * [lane0 only] Warp-wide aggregate reduction of input items + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T ApplyWarpAggregates( + ReductionOp /*reduction_op*/, T warp_aggregate, int /*num_valid*/, Int2Type /*successor_warp*/) + { + return warp_aggregate; + } + + /** + * @brief Returns block-wide aggregate in thread0. 
+ * + * @param[in] reduction_op + * Binary reduction operator + * + * @param[in] warp_aggregate + * [lane0 only] Warp-wide aggregate reduction of input items + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T ApplyWarpAggregates(ReductionOp reduction_op, T warp_aggregate, int num_valid) + { + // Share lane aggregates + if (lane_id == 0) + { + detail::uninitialized_copy_single(temp_storage.warp_aggregates + warp_id, warp_aggregate); + } + + __syncthreads(); + + // Update total aggregate in warp 0, lane 0 + if (linear_tid == 0) + { + warp_aggregate = ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid, Int2Type<1>()); + } + + return warp_aggregate; + } + + /** + * @brief Computes a thread block-wide reduction using addition (+) as the reduction operator. + * The first num_valid threads each contribute one reduction partial. The return value is + * only valid for thread0. + * + * @param[in] input + * Calling thread's input partial reductions + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Sum(T input, int num_valid) + { + ::cuda::std::plus<> reduction_op; + int warp_offset = (warp_id * LOGICAL_WARP_SIZE); + int warp_num_valid = ((FULL_TILE && EVEN_WARP_MULTIPLE) || (warp_offset + LOGICAL_WARP_SIZE <= num_valid)) + ? LOGICAL_WARP_SIZE + : num_valid - warp_offset; + + // Warp reduction in every warp + T warp_aggregate = + WarpReduce(temp_storage.warp_reduce[warp_id]) + .template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE)>(input, warp_num_valid, ::cuda::std::plus<>{}); + + // Update outputs and block_aggregate with warp-wide aggregates from lane-0s + return ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid); + } + + /** + * @brief Computes a thread block-wide reduction using the specified reduction operator. 
+ * The first num_valid threads each contribute one reduction partial. + * The return value is only valid for thread0. + * + * @param[in] input + * Calling thread's input partial reductions + * + * @param[in] num_valid + * Number of valid elements (may be less than BLOCK_THREADS) + * + * @param[in] reduction_op + * Binary reduction operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Reduce(T input, int num_valid, ReductionOp reduction_op) + { + int warp_offset = warp_id * LOGICAL_WARP_SIZE; + int warp_num_valid = ((FULL_TILE && EVEN_WARP_MULTIPLE) || (warp_offset + LOGICAL_WARP_SIZE <= num_valid)) + ? LOGICAL_WARP_SIZE + : num_valid - warp_offset; + + // Warp reduction in every warp + T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]) + .template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE)>(input, warp_num_valid, reduction_op); + + // Update outputs and block_aggregate with warp-wide aggregates from lane-0s + return ApplyWarpAggregates(reduction_op, warp_aggregate, num_valid); + } +}; +} // namespace detail + +template +using BlockReduceWarpReductions CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::BlockReduceWarpReductions; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_scan_raking.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_scan_raking.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2af4b8693fc990cab577a5dd1ec9d88858913f83 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_scan_raking.cuh @@ -0,0 +1,805 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::BlockScanRaking provides variants of raking-based parallel prefix scan across a + * CUDA thread block. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA + * thread block. + * + * @tparam T + * Data type being scanned + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam BLOCK_DIM_Y + * The thread block length in threads along the Y dimension + * + * @tparam BLOCK_DIM_Z + * The thread block length in threads along the Z dimension + * + * @tparam MEMOIZE + * Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the + * expense of higher register pressure + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct BlockScanRaking +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// Constants + enum + { + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + }; + + /// Layout type for padded thread block raking grid + using BlockRakingLayout = BlockRakingLayout; + + /// Constants + enum + { + /// Number of raking threads + RAKING_THREADS = BlockRakingLayout::RAKING_THREADS, + + /// Number of raking elements per warp synchronous raking thread + SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH, + + /// Cooperative work can be entirely warp synchronous + WARP_SYNCHRONOUS = (int(BLOCK_THREADS) == int(RAKING_THREADS)), + }; + + /// WarpScan utility type + using WarpScan = WarpScan; + + /// Shared memory 
storage layout type + struct _TempStorage + { + /// Buffer for warp-synchronous scan + typename WarpScan::TempStorage warp_scan; + + /// Padded thread block raking grid + typename BlockRakingLayout::TempStorage raking_grid; + + /// Block aggregate + T block_aggregate; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread fields + //--------------------------------------------------------------------- + + // Thread fields + _TempStorage& temp_storage; + unsigned int linear_tid; + T cached_segment[SEGMENT_LENGTH]; + + //--------------------------------------------------------------------- + // Utility methods + //--------------------------------------------------------------------- + + /** + * @brief Templated reduction + * + * @param[in] raking_ptr + * Input array + * + * @param[in] scan_op + * Binary reduction operator + * + * @param[in] raking_partial + * Prefix to seed reduction with + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + GuardedReduce(T* raking_ptr, ScanOp scan_op, T raking_partial, Int2Type /*iteration*/) + { + if ((BlockRakingLayout::UNGUARDED) || (((linear_tid * SEGMENT_LENGTH) + ITERATION) < BLOCK_THREADS)) + { + T addend = raking_ptr[ITERATION]; + raking_partial = scan_op(raking_partial, addend); + } + + return GuardedReduce(raking_ptr, scan_op, raking_partial, Int2Type()); + } + + /** + * @brief Templated reduction (base case) + * + * @param[in] raking_ptr + * Input array + * + * @param[in] scan_op + * Binary reduction operator + * + * @param[in] raking_partial + * Prefix to seed reduction with + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + GuardedReduce(T* /*raking_ptr*/, ScanOp /*scan_op*/, T raking_partial, Int2Type /*iteration*/) + { + return raking_partial; + } + + /** + * @brief Templated copy + * + * @param out + * [out] Out array + * + * @param in + * [in] Input array + */ + 
template + _CCCL_DEVICE _CCCL_FORCEINLINE void CopySegment(T* out, T* in, Int2Type /*iteration*/) + { + out[ITERATION] = in[ITERATION]; + CopySegment(out, in, Int2Type()); + } + + /** + * @brief Templated copy (base case) + * + * @param[out] out + * Out array + * + * @param[in] in + * Input array + */ + _CCCL_DEVICE _CCCL_FORCEINLINE void CopySegment(T* /*out*/, T* /*in*/, Int2Type /*iteration*/) {} + + /// Performs upsweep raking reduction, returning the aggregate + template + _CCCL_DEVICE _CCCL_FORCEINLINE T Upsweep(ScanOp scan_op) + { + T* smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + + // Read data into registers + CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); + + T raking_partial = cached_segment[0]; + + return GuardedReduce(cached_segment, scan_op, raking_partial, Int2Type<1>()); + } + + /// Performs exclusive downsweep raking scan + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveDownsweep(ScanOp scan_op, T raking_partial, bool apply_prefix = true) + { + T* smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + + // Read data back into registers + if (!MEMOIZE) + { + CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); + } + + internal::ThreadScanExclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); + + // Write data back to smem + CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); + } + + /// Performs inclusive downsweep raking scan + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveDownsweep(ScanOp scan_op, T raking_partial, bool apply_prefix = true) + { + T* smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); + + // Read data back into registers + if (!MEMOIZE) + { + CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); + } + + internal::ThreadScanInclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); + + // Write data back to smem + 
CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); + } + + //--------------------------------------------------------------------- + // Constructors + //--------------------------------------------------------------------- + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockScanRaking(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + {} + + //--------------------------------------------------------------------- + // Exclusive scans + //--------------------------------------------------------------------- + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. With no initial value, + * the output computed for thread0 is undefined. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] exclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, ScanOp scan_op) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + WarpScan(temp_storage.warp_scan).ExclusiveScan(input, exclusive_output, scan_op); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Warp-synchronous scan + T exclusive_partial; + WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); + + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, 
exclusive_partial, (linear_tid != 0)); + } + + __syncthreads(); + + // Grab thread prefix from shared memory + exclusive_output = *placement_ptr; + } + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. + * + * @param[in] input + * Calling thread's input items + * + * @param[out] output + * Calling thread's output items (may be aliased to \p input) + * + * @param[in] initial_value + * Initial value to seed the exclusive scan + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& output, const T& initial_value, ScanOp scan_op) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, initial_value, scan_op); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Exclusive Warp-synchronous scan + T exclusive_partial; + WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, initial_value, scan_op); + + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, exclusive_partial); + } + + __syncthreads(); + + // Grab exclusive partial from shared memory + output = *placement_ptr; + } + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. Also provides every + * thread with the block-wide \p block_aggregate of all inputs. 
With no initial value, + * the output computed for thread0 is undefined. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& output, ScanOp scan_op, T& block_aggregate) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, scan_op, block_aggregate); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Warp-synchronous scan + T inclusive_partial; + T exclusive_partial; + WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); + + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); + + // Broadcast aggregate to all threads + if (linear_tid == RAKING_THREADS - 1) + { + temp_storage.block_aggregate = inclusive_partial; + } + } + + __syncthreads(); + + // Grab thread prefix from shared memory + output = *placement_ptr; + + // Retrieve block aggregate + block_aggregate = temp_storage.block_aggregate; + } + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. Also provides every + * thread with the block-wide \p block_aggregate of all inputs. 
+ * + * @param[in] input + * Calling thread's input items + * + * @param[out] output + * Calling thread's output items (may be aliased to \p input) + * + * @param[in] initial_value + * Initial value to seed the exclusive scan + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& output, const T& initial_value, ScanOp scan_op, T& block_aggregate) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, initial_value, scan_op, block_aggregate); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Warp-synchronous scan + T exclusive_partial; + WarpScan(temp_storage.warp_scan) + .ExclusiveScan(upsweep_partial, exclusive_partial, initial_value, scan_op, block_aggregate); + + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, exclusive_partial); + + // Broadcast aggregate to other threads + if (linear_tid == 0) + { + temp_storage.block_aggregate = block_aggregate; + } + } + + __syncthreads(); + + // Grab exclusive partial from shared memory + output = *placement_ptr; + + // Retrieve block aggregate + block_aggregate = temp_storage.block_aggregate; + } + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. 
the call-back functor \p + * block_prefix_callback_op is invoked by the first warp in the block, and the value + * returned by lane0 in that warp is used as the "seed" value that + * logically prefixes the thread block's scan inputs. Also provides every thread with + * the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in-out] block_prefix_callback_op + * [warp0 only] Call-back functor for specifying a thread + * block-wide prefix to be applied to all inputs. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& output, ScanOp scan_op, BlockPrefixCallbackOp& block_prefix_callback_op) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + T block_aggregate; + WarpScan warp_scan(temp_storage.warp_scan); + warp_scan.ExclusiveScan(input, output, scan_op, block_aggregate); + + // Obtain warp-wide prefix in lane0, then broadcast to other lanes + T block_prefix = block_prefix_callback_op(block_aggregate); + block_prefix = warp_scan.Broadcast(block_prefix, 0); + + output = scan_op(block_prefix, output); + if (linear_tid == 0) + { + output = block_prefix; + } + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + WarpScan warp_scan(temp_storage.warp_scan); + + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Warp-synchronous scan + T exclusive_partial, block_aggregate; + warp_scan.ExclusiveScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); + + // Obtain block-wide 
prefix in lane0, then broadcast to other lanes + T block_prefix = block_prefix_callback_op(block_aggregate); + block_prefix = warp_scan.Broadcast(block_prefix, 0); + + // Update prefix with warpscan exclusive partial + T downsweep_prefix = scan_op(block_prefix, exclusive_partial); + if (linear_tid == 0) + { + downsweep_prefix = block_prefix; + } + + // Exclusive raking downsweep scan + ExclusiveDownsweep(scan_op, downsweep_prefix); + } + + __syncthreads(); + + // Grab thread prefix from shared memory + output = *placement_ptr; + } + } + + //--------------------------------------------------------------------- + // Inclusive scans + //--------------------------------------------------------------------- + + /** + * @brief Computes an inclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& output, ScanOp scan_op) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Exclusive Warp-synchronous scan + T exclusive_partial; + WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); + + // Inclusive raking downsweep scan + InclusiveDownsweep(scan_op, 
exclusive_partial, (linear_tid != 0)); + } + + __syncthreads(); + + // Grab thread prefix from shared memory + output = *placement_ptr; + } + } + + /** + * @brief Computes an inclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. Also provides every + * thread with the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& output, ScanOp scan_op, T& block_aggregate) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op, block_aggregate); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + // Raking upsweep reduction across shared partials + T upsweep_partial = Upsweep(scan_op); + + // Warp-synchronous scan + T inclusive_partial; + T exclusive_partial; + WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); + + // Inclusive raking downsweep scan + InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); + + // Broadcast aggregate to all threads + if (linear_tid == RAKING_THREADS - 1) + { + temp_storage.block_aggregate = inclusive_partial; + } + } + + __syncthreads(); + + // Grab thread prefix from shared memory + output = *placement_ptr; + + // Retrieve block aggregate + block_aggregate = 
temp_storage.block_aggregate; + } + } + + /** + * @brief Computes an inclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. the call-back functor \p + * block_prefix_callback_op is invoked by the first warp in the block, and the value + * returned by lane0 in that warp is used as the "seed" value that + * logically prefixes the thread block's scan inputs. Also provides every thread with + * the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in-out] block_prefix_callback_op + * [warp0 only] Call-back functor for specifying a thread + * block-wide prefix to be applied to all inputs. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T input, T& output, ScanOp scan_op, BlockPrefixCallbackOp& block_prefix_callback_op) + { + if (WARP_SYNCHRONOUS) + { + // Short-circuit directly to warp-synchronous scan + T block_aggregate; + WarpScan warp_scan(temp_storage.warp_scan); + warp_scan.InclusiveScan(input, output, scan_op, block_aggregate); + + // Obtain warp-wide prefix in lane0, then broadcast to other lanes + T block_prefix = block_prefix_callback_op(block_aggregate); + block_prefix = warp_scan.Broadcast(block_prefix, 0); + + // Update prefix with exclusive warpscan partial + output = scan_op(block_prefix, output); + } + else + { + // Place thread partial into shared memory raking grid + T* placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); + detail::uninitialized_copy_single(placement_ptr, input); + + __syncthreads(); + + // Reduce parallelism down to just raking threads + if (linear_tid < RAKING_THREADS) + { + WarpScan warp_scan(temp_storage.warp_scan); + + // Raking upsweep reduction across shared partials + T upsweep_partial = 
Upsweep(scan_op); + + // Warp-synchronous scan + T exclusive_partial, block_aggregate; + warp_scan.ExclusiveScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); + + // Obtain block-wide prefix in lane0, then broadcast to other lanes + T block_prefix = block_prefix_callback_op(block_aggregate); + block_prefix = warp_scan.Broadcast(block_prefix, 0); + + // Update prefix with warpscan exclusive partial + T downsweep_prefix = scan_op(block_prefix, exclusive_partial); + if (linear_tid == 0) + { + downsweep_prefix = block_prefix; + } + + // Inclusive raking downsweep scan + InclusiveDownsweep(scan_op, downsweep_prefix); + } + + __syncthreads(); + + // Grab thread prefix from shared memory + output = *placement_ptr; + } + } +}; +} // namespace detail + +template +using BlockScanRaking CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::BlockScanRaking; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_scan_warp_scans.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_scan_warp_scans.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d034d2838eac8d193f0b161ad836f888ea38ed0c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/block/specializations/block_scan_warp_scans.cuh @@ -0,0 +1,547 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ +/** + * @brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA + * thread block. + * + * @tparam BLOCK_DIM_X + * The thread block length in threads along the X dimension + * + * @tparam BLOCK_DIM_Y + * The thread block length in threads along the Y dimension + * + * @tparam BLOCK_DIM_Z + * The thread block length in threads along the Z dimension + * + * @tparam LEGACY_PTX_ARCH + * The PTX compute capability for which to to specialize this collective + */ +template +struct BlockScanWarpScans +{ + //--------------------------------------------------------------------- + // Types and constants + //--------------------------------------------------------------------- + + /// Constants + enum + { + /// Number of warp threads + WARP_THREADS = CUB_WARP_THREADS(0), + + /// The thread block size in threads + BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, + + /// Number of active warps + WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, + }; + + /// WarpScan utility type + using WarpScanT = WarpScan; + + /// WarpScan utility type + using WarpAggregateScan = WarpScan; + + /// Shared memory storage layout type + + struct __align__(32) _TempStorage + { + T warp_aggregates[WARPS]; + + /// Buffer for warp-synchronous scans + typename WarpScanT::TempStorage warp_scan[WARPS]; + + /// Shared prefix for the entire thread block + T block_prefix; + }; + + /// Alias wrapper allowing storage to be unioned + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //--------------------------------------------------------------------- + // Per-thread 
fields + //--------------------------------------------------------------------- + + // Thread fields + _TempStorage& temp_storage; + unsigned int linear_tid; + unsigned int warp_id; + unsigned int lane_id; + + //--------------------------------------------------------------------- + // Constructors + //--------------------------------------------------------------------- + + /// Constructor + _CCCL_DEVICE _CCCL_FORCEINLINE BlockScanWarpScans(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) + , warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS) + , lane_id(::cuda::ptx::get_sreg_laneid()) + {} + + //--------------------------------------------------------------------- + // Utility methods + //--------------------------------------------------------------------- + + /** + * @param[out] warp_prefix + * The calling thread's partial reduction + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ApplyWarpAggregates(T& warp_prefix, ScanOp scan_op, T& block_aggregate, Int2Type /*addend_warp*/) + { + if (warp_id == WARP) + { + warp_prefix = block_aggregate; + } + + T addend = temp_storage.warp_aggregates[WARP]; + block_aggregate = scan_op(block_aggregate, addend); + + ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type()); + } + + /** + * @param[out] warp_prefix + * The calling thread's partial reduction + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregat + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ApplyWarpAggregates(T& /*warp_prefix*/, ScanOp /*scan_op*/, T& /*block_aggregate*/, Int2Type /*addend_warp*/) + {} + + /** + * @brief Use the warp-wide aggregates to compute the calling warp's prefix. 
Also returns + * block-wide aggregate in all threads. + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] warp_aggregate + * [laneWARP_THREADS - 1 only] Warp-wide aggregate reduction of + * input items + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T ComputeWarpPrefix(ScanOp scan_op, T warp_aggregate, T& block_aggregate) + { + // Last lane in each warp shares its warp-aggregate + if (lane_id == WARP_THREADS - 1) + { + detail::uninitialized_copy_single(temp_storage.warp_aggregates + warp_id, warp_aggregate); + } + + __syncthreads(); + + // Accumulate block aggregates and save the one that is our warp's prefix + T warp_prefix; + block_aggregate = temp_storage.warp_aggregates[0]; + + // Use template unrolling (since the PTX backend can't handle unrolling it for SM1x) + // TODO(bgruber): does that still hold today? This is creating a lot of template instantiations + ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<1>()); + /* + #pragma unroll + for (int WARP = 1; WARP < WARPS; ++WARP) + { + if (warp_id == WARP) + warp_prefix = block_aggregate; + + T addend = temp_storage.warp_aggregates[WARP]; + block_aggregate = scan_op(block_aggregate, addend); + } + */ + + return warp_prefix; + } + + /** + * @brief Use the warp-wide aggregates and initial-value to compute the calling warp's prefix. + * Also returns block-wide aggregate in all threads. 
+ * + * @param[in] scan_op + * Binary scan operator + * + * @param[in] warp_aggregate + * [laneWARP_THREADS - 1 only] Warp-wide aggregate reduction of + * input items + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + * + * @param[in] initial_value + * Initial value to seed the exclusive scan + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE T + ComputeWarpPrefix(ScanOp scan_op, T warp_aggregate, T& block_aggregate, const T& initial_value) + { + T warp_prefix = ComputeWarpPrefix(scan_op, warp_aggregate, block_aggregate); + + warp_prefix = scan_op(initial_value, warp_prefix); + + if (warp_id == 0) + { + warp_prefix = initial_value; + } + + return warp_prefix; + } + + //--------------------------------------------------------------------- + // Exclusive scans + //--------------------------------------------------------------------- + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. With no initial value, + * the output computed for thread0 is undefined. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] exclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, ScanOp scan_op) + { + // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. + T block_aggregate; + ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. 
+ * + * @param[in] input + * Calling thread's input items + * + * @param[out] exclusive_output + * Calling thread's output items (may be aliased to \p input) + * + * @param[in] initial_value + * Initial value to seed the exclusive scan + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, const T& initial_value, ScanOp scan_op) + { + T block_aggregate; + ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. Also provides every + * thread with the block-wide \p block_aggregate of all inputs. With no initial value, + * the output computed for thread0 is undefined. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] exclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ExclusiveScan(T input, T& exclusive_output, ScanOp scan_op, T& block_aggregate) + { + // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. + T inclusive_output; + WarpScanT(temp_storage.warp_scan[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); + + // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. 
+ T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); + + // Apply warp prefix to our lane's partial + if (warp_id != 0) + { + exclusive_output = scan_op(warp_prefix, exclusive_output); + if (lane_id == 0) + { + exclusive_output = warp_prefix; + } + } + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. Also provides every + * thread with the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input items + * + * @param[out] exclusive_output + * Calling thread's output items (may be aliased to \p input) + * + * @param[in] initial_value + * Initial value to seed the exclusive scan + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& exclusive_output, const T& initial_value, ScanOp scan_op, T& block_aggregate) + { + // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. + T inclusive_output; + WarpScanT(temp_storage.warp_scan[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); + + // Compute the warp-wide prefix and block-wide aggregate for each warp + T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate, initial_value); + + // Apply warp prefix to our lane's partial + exclusive_output = scan_op(warp_prefix, exclusive_output); + if (lane_id == 0) + { + exclusive_output = warp_prefix; + } + } + + /** + * @brief Computes an exclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. 
the call-back functor \p + * block_prefix_callback_op is invoked by the first warp in the block, and the value + * returned by lane0 in that warp is used as the "seed" value that + * logically prefixes the thread block's scan inputs. Also provides every thread with + * the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] exclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in-out] block_prefix_callback_op + * [warp0 only] Call-back functor for specifying a thread + * block-wide prefix to be applied to all inputs. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ExclusiveScan(T input, T& exclusive_output, ScanOp scan_op, BlockPrefixCallbackOp& block_prefix_callback_op) + { + // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. + T block_aggregate; + ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); + + // Use the first warp to determine the thread block prefix, returning the result in lane0 + if (warp_id == 0) + { + T block_prefix = block_prefix_callback_op(block_aggregate); + if (lane_id == 0) + { + // Share the prefix with all threads + detail::uninitialized_copy_single(&temp_storage.block_prefix, block_prefix); + + exclusive_output = block_prefix; // The block prefix is the exclusive output for tid0 + } + } + + __syncthreads(); + + // Incorporate thread block prefix into outputs + T block_prefix = temp_storage.block_prefix; + if (linear_tid > 0) + { + exclusive_output = scan_op(block_prefix, exclusive_output); + } + } + + //--------------------------------------------------------------------- + // Inclusive scans + //--------------------------------------------------------------------- + + /** + * @brief Computes an inclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. 
+ * + * @param[in] input + * Calling thread's input item + * + * @param[out] inclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOp scan_op) + { + T block_aggregate; + InclusiveScan(input, inclusive_output, scan_op, block_aggregate); + } + + /** + * @brief Computes an inclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. Also provides every + * thread with the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] inclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[out] block_aggregate + * Threadblock-wide aggregate reduction of input items + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void InclusiveScan(T input, T& inclusive_output, ScanOp scan_op, T& block_aggregate) + { + WarpScanT(temp_storage.warp_scan[warp_id]).InclusiveScan(input, inclusive_output, scan_op); + + // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. + T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); + + // Apply warp prefix to our lane's partial + if (warp_id != 0) + { + inclusive_output = scan_op(warp_prefix, inclusive_output); + } + } + + /** + * @brief Computes an inclusive thread block-wide prefix scan using the specified binary \p + * scan_op functor. Each thread contributes one input element. the call-back functor \p + * block_prefix_callback_op is invoked by the first warp in the block, and the value + * returned by lane0 in that warp is used as the "seed" value that + * logically prefixes the thread block's scan inputs. 
Also provides every thread with + * the block-wide \p block_aggregate of all inputs. + * + * @param[in] input + * Calling thread's input item + * + * @param[out] exclusive_output + * Calling thread's output item (may be aliased to \p input) + * + * @param[in] scan_op + * Binary scan operator + * + * @param[in-out] block_prefix_callback_op + * [warp0 only] Call-back functor for specifying a thread + * block-wide prefix to be applied to all inputs. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + InclusiveScan(T input, T& exclusive_output, ScanOp scan_op, BlockPrefixCallbackOp& block_prefix_callback_op) + { + T block_aggregate; + InclusiveScan(input, exclusive_output, scan_op, block_aggregate); + + // Use the first warp to determine the thread block prefix, returning the result in lane0 + if (warp_id == 0) + { + T block_prefix = block_prefix_callback_op(block_aggregate); + if (lane_id == 0) + { + // Share the prefix with all threads + detail::uninitialized_copy_single(&temp_storage.block_prefix, block_prefix); + } + } + + __syncthreads(); + + // Incorporate thread block prefix into outputs + T block_prefix = temp_storage.block_prefix; + exclusive_output = scan_op(block_prefix, exclusive_output); + } +}; +} // namespace detail +template +using BlockScanWarpScans CCCL_DEPRECATED_BECAUSE( + "This class is considered an implementation detail and the public interface will be " + "removed.") = detail::BlockScanWarpScans; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/detail/device_synchronize.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/detail/device_synchronize.cuh new file mode 100644 index 0000000000000000000000000000000000000000..afe6cbd34d082901c3408e0587301e350a3da2c2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/detail/device_synchronize.cuh @@ -0,0 +1,74 @@ +/* + * Copyright 2021 NVIDIA Corporation + * + * Licensed under 
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +namespace detail +{ + +/** + * Call `cudaDeviceSynchronize()` using the proper API for the current CUB and + * CUDA configuration. 
+ */ +_CCCL_EXEC_CHECK_DISABLE +CUB_RUNTIME_FUNCTION inline cudaError_t device_synchronize() +{ + cudaError_t result = cudaErrorNotSupported; + + // Device-side sync is only available under CDPv1: +#if defined(CUB_DETAIL_CDPv1) + +# if ((__CUDACC_VER_MAJOR__ > 11) || ((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 6))) + // CUDA >= 11.6 +# define CUB_TMP_DEVICE_SYNC_IMPL result = __cudaDeviceSynchronizeDeprecationAvoidance(); +# else // CUDA < 11.6: +# define CUB_TMP_DEVICE_SYNC_IMPL result = cudaDeviceSynchronize(); +# endif + +#else // CDPv2 or no CDP: + +# define CUB_TMP_DEVICE_SYNC_IMPL /* unavailable */ + +#endif // CDP version + + NV_IF_TARGET(NV_IS_HOST, (result = cudaDeviceSynchronize();), (CUB_TMP_DEVICE_SYNC_IMPL)); + +#undef CUB_TMP_DEVICE_SYNC_IMPL + + return result; +} + +} // namespace detail + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/detail/type_traits.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/detail/type_traits.cuh new file mode 100644 index 0000000000000000000000000000000000000000..3dba4d10c6cd4b28b3cca9ca898449408e170ae0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/detail/type_traits.cuh @@ -0,0 +1,188 @@ +/****************************************************************************** + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * \file + * Wrappers and extensions around utilities. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +_CCCL_SUPPRESS_DEPRECATED_PUSH +#include +_CCCL_SUPPRESS_DEPRECATED_POP +#include +#if __cccl_lib_mdspan +# include +#endif // __cccl_lib_mdspan +#include // IWYU pragma: keep +#include +#include + +CUB_NAMESPACE_BEGIN +namespace detail +{ + +template +using invoke_result_t = +#if _CCCL_STD_VER < 2017 + typename ::cuda::std::result_of::type; +#else // 2017+ + ::cuda::std::invoke_result_t; +#endif + +template +_CCCL_NODISCARD _CCCL_HOST_DEVICE constexpr bool are_same() +{ + return ::cuda::std::conjunction<::cuda::std::is_same...>::value; +} + +template +_CCCL_NODISCARD _CCCL_HOST_DEVICE constexpr bool is_one_of() +{ + return ::cuda::std::disjunction<::cuda::std::is_same...>::value; +} + +template +_CCCL_NODISCARD _CCCL_HOST_DEVICE constexpr bool always_false() +{ + return false; +} + +template +struct has_binary_call_operator : ::cuda::std::false_type +{}; + +template +struct has_binary_call_operator< + T, + V, + ::cuda::std::void_t()(::cuda::std::declval(), ::cuda::std::declval()))>> + : ::cuda::std::true_type +{}; + +/*********************************************************************************************************************** + * Array-like type traits + **********************************************************************************************************************/ + +template +struct is_fixed_size_random_access_range : ::cuda::std::false_type +{}; + +template +struct is_fixed_size_random_access_range : ::cuda::std::true_type +{}; + +template +struct is_fixed_size_random_access_range<::cuda::std::array, void> : ::cuda::std::true_type +{}; + +#if _CCCL_STD_VER >= 2014 + +template +struct 
is_fixed_size_random_access_range<::cuda::std::span, void> : ::cuda::std::true_type +{}; + +# if __cccl_lib_mdspan + +template +struct is_fixed_size_random_access_range< + ::cuda::std::mdspan, + ::cuda::std::enable_if_t> : ::cuda::std::true_type +{}; + +# endif // __cccl_lib_mdspan +#endif // _CCCL_STD_VER >= 2014 + +template +using is_fixed_size_random_access_range_t = typename is_fixed_size_random_access_range::type; + +/*********************************************************************************************************************** + * static_size: a type trait that returns the number of elements in an Array-like type + **********************************************************************************************************************/ +// static_size is useful where size(obj) cannot be checked at compile time +// e.g. +// using Array = NonTriviallyConstructible[8]; +// std::size(Array{}) // compile error +// static_size() // ok + +template +struct static_size +{ + static_assert(cub::detail::always_false(), "static_size not supported for this type"); +}; + +template +struct static_size : ::cuda::std::integral_constant +{}; + +template +struct static_size<::cuda::std::array, void> : ::cuda::std::integral_constant +{}; + +#if _CCCL_STD_VER >= 2014 + +template +struct static_size<::cuda::std::span, void> : ::cuda::std::integral_constant +{}; + +# if __cccl_lib_mdspan + +template +struct static_size<::cuda::std::mdspan, + ::cuda::std::enable_if_t> + : ::cuda::std::integral_constant +{}; + +# endif // __cccl_lib_mdspan +#endif // _CCCL_STD_VER >= 2014 + +template +_CCCL_NODISCARD _CCCL_HOST_DEVICE _CCCL_FORCEINLINE constexpr ::cuda::std::size_t static_size_v() +{ + return static_size::value; +} + +template +using implicit_prom_t = decltype(+T{}); + +} // namespace detail + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_load.cuh 
b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_load.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7c29916265a7978cca452dbc0e0c527fc6a6d12f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_load.cuh @@ -0,0 +1,383 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * Thread utilities for reading memory using PTX cache modifiers. + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//----------------------------------------------------------------------------- +// Tags and constants +//----------------------------------------------------------------------------- + +/** + * @brief Enumeration of cache modifiers for memory load operations. + */ +enum CacheLoadModifier +{ + LOAD_DEFAULT, ///< Default (no modifier) + LOAD_CA, ///< Cache at all levels + LOAD_CG, ///< Cache at global level + LOAD_CS, ///< Cache streaming (likely to be accessed once) + LOAD_CV, ///< Cache as volatile (including cached system lines) + LOAD_LDG, ///< Cache as texture + LOAD_VOLATILE, ///< Volatile (any memory space) +}; + +/** + * @name Thread I/O (cache modified) + * @{ + */ + +/** + * @brief Thread utility for reading memory using cub::CacheLoadModifier cache modifiers. + * Can be used to load any data type. 
+ * + * @par Example + * @code + * #include // or equivalently + * + * // 32-bit load using cache-global modifier: + * int *d_in; + * int val = cub::ThreadLoad(d_in + threadIdx.x); + * + * // 16-bit load using default modifier + * short *d_in; + * short val = cub::ThreadLoad(d_in + threadIdx.x); + * + * // 256-bit load using cache-volatile modifier + * double4 *d_in; + * double4 val = cub::ThreadLoad(d_in + threadIdx.x); + * + * // 96-bit load using cache-streaming modifier + * struct TestFoo { bool a; short b; }; + * TestFoo *d_struct; + * TestFoo val = cub::ThreadLoad(d_in + threadIdx.x); + * \endcode + * + * @tparam MODIFIER + * [inferred] CacheLoadModifier enumeration + * + * @tparam RandomAccessIterator + * [inferred] The input's iterator type \iterator + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE cub::detail::value_t ThreadLoad(RandomAccessIterator itr); + +//@} end member group + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + +/// Helper structure for templated load iteration (inductive case) +/// \deprecated [Since 2.6.0] Use UnrolledThreadLoad() or UnrolledCopy() instead. 
+template +struct IterateThreadLoad +{ + template + CCCL_DEPRECATED_BECAUSE("Use UnrolledThreadLoad() instead") + static _CCCL_DEVICE _CCCL_FORCEINLINE void Load(T const* ptr, T* vals) + { + vals[COUNT] = ThreadLoad(ptr + COUNT); + IterateThreadLoad::template Load(ptr, vals); + } + + template + CCCL_DEPRECATED_BECAUSE("Use UnrolledCopy() instead") + static _CCCL_DEVICE _CCCL_FORCEINLINE void Dereference(RandomAccessIterator itr, T* vals) + { + vals[COUNT] = itr[COUNT]; + IterateThreadLoad::Dereference(itr, vals); + } +}; + +/// Helper structure for templated load iteration (termination case) +template +struct IterateThreadLoad +{ + template + static _CCCL_DEVICE _CCCL_FORCEINLINE void Load(T const* /*ptr*/, T* /*vals*/) + {} + + template + static _CCCL_DEVICE _CCCL_FORCEINLINE void Dereference(RandomAccessIterator /*itr*/, T* /*vals*/) + {} +}; + +namespace detail +{ +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +UnrolledThreadLoadImpl(T const* src, T* dst, ::cuda::std::integer_sequence) +{ + // TODO(bgruber): replace by fold over comma in C++17 + int dummy[] = {(dst[Is] = ThreadLoad(src + Is), 0)...}; + (void) dummy; +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void +UnrolledCopyImpl(RandomAccessIterator src, T* dst, ::cuda::std::integer_sequence) +{ + // TODO(bgruber): replace by fold over comma in C++17 + int dummy[] = {(dst[Is] = src[Is], 0)...}; + (void) dummy; +} +} // namespace detail + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void UnrolledThreadLoad(T const* src, T* dst) +{ + detail::UnrolledThreadLoadImpl(src, dst, ::cuda::std::make_integer_sequence{}); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE void UnrolledCopy(RandomAccessIterator src, T* dst) +{ + detail::UnrolledCopyImpl(src, dst, ::cuda::std::make_integer_sequence{}); +} + +/** + * Define a uint4 (16B) ThreadLoad specialization for the given Cache load modifier + */ +# define _CUB_LOAD_16(cub_modifier, ptx_modifier) \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE uint4 ThreadLoad(uint4 
const* ptr) \ + { \ + uint4 retval; \ + asm volatile("ld." #ptx_modifier ".v4.u32 {%0, %1, %2, %3}, [%4];" \ + : "=r"(retval.x), "=r"(retval.y), "=r"(retval.z), "=r"(retval.w) \ + : "l"(ptr)); \ + return retval; \ + } \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE ulonglong2 ThreadLoad(ulonglong2 const* ptr) \ + { \ + ulonglong2 retval; \ + asm volatile("ld." #ptx_modifier ".v2.u64 {%0, %1}, [%2];" : "=l"(retval.x), "=l"(retval.y) : "l"(ptr)); \ + return retval; \ + } + +/** + * Define a uint2 (8B) ThreadLoad specialization for the given Cache load modifier + */ +# define _CUB_LOAD_8(cub_modifier, ptx_modifier) \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE ushort4 ThreadLoad(ushort4 const* ptr) \ + { \ + ushort4 retval; \ + asm volatile("ld." #ptx_modifier ".v4.u16 {%0, %1, %2, %3}, [%4];" \ + : "=h"(retval.x), "=h"(retval.y), "=h"(retval.z), "=h"(retval.w) \ + : "l"(ptr)); \ + return retval; \ + } \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE uint2 ThreadLoad(uint2 const* ptr) \ + { \ + uint2 retval; \ + asm volatile("ld." #ptx_modifier ".v2.u32 {%0, %1}, [%2];" : "=r"(retval.x), "=r"(retval.y) : "l"(ptr)); \ + return retval; \ + } \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned long long ThreadLoad( \ + unsigned long long const* ptr) \ + { \ + unsigned long long retval; \ + asm volatile("ld." #ptx_modifier ".u64 %0, [%1];" : "=l"(retval) : "l"(ptr)); \ + return retval; \ + } + +/** + * Define a uint (4B) ThreadLoad specialization for the given Cache load modifier + */ +# define _CUB_LOAD_4(cub_modifier, ptx_modifier) \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned int ThreadLoad(unsigned int const* ptr) \ + { \ + unsigned int retval; \ + asm volatile("ld." 
#ptx_modifier ".u32 %0, [%1];" : "=r"(retval) : "l"(ptr)); \ + return retval; \ + } + +/** + * Define a unsigned short (2B) ThreadLoad specialization for the given Cache load modifier + */ +# define _CUB_LOAD_2(cub_modifier, ptx_modifier) \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned short ThreadLoad( \ + unsigned short const* ptr) \ + { \ + unsigned short retval; \ + asm volatile("ld." #ptx_modifier ".u16 %0, [%1];" : "=h"(retval) : "l"(ptr)); \ + return retval; \ + } + +/** + * Define an unsigned char (1B) ThreadLoad specialization for the given Cache load modifier + */ +# define _CUB_LOAD_1(cub_modifier, ptx_modifier) \ + template <> \ + _CCCL_DEVICE _CCCL_FORCEINLINE unsigned char ThreadLoad( \ + unsigned char const* ptr) \ + { \ + unsigned short retval; \ + asm volatile( \ + "{" \ + " .reg .u8 datum;" \ + " ld." #ptx_modifier ".u8 datum, [%1];" \ + " cvt.u16.u8 %0, datum;" \ + "}" \ + : "=h"(retval) \ + : "l"(ptr)); \ + return (unsigned char) retval; \ + } + +/** + * Define powers-of-two ThreadLoad specializations for the given Cache load modifier + */ +# define _CUB_LOAD_ALL(cub_modifier, ptx_modifier) \ + _CUB_LOAD_16(cub_modifier, ptx_modifier) \ + _CUB_LOAD_8(cub_modifier, ptx_modifier) \ + _CUB_LOAD_4(cub_modifier, ptx_modifier) \ + _CUB_LOAD_2(cub_modifier, ptx_modifier) \ + _CUB_LOAD_1(cub_modifier, ptx_modifier) + +/** + * Define powers-of-two ThreadLoad specializations for the various Cache load modifiers + */ +_CUB_LOAD_ALL(LOAD_CA, ca) +_CUB_LOAD_ALL(LOAD_CG, cg) +_CUB_LOAD_ALL(LOAD_CS, cs) +_CUB_LOAD_ALL(LOAD_CV, cv) +_CUB_LOAD_ALL(LOAD_LDG, global.nc) + +// Macro cleanup +# undef _CUB_LOAD_ALL +# undef _CUB_LOAD_1 +# undef _CUB_LOAD_2 +# undef _CUB_LOAD_4 +# undef _CUB_LOAD_8 +# undef _CUB_LOAD_16 + +/** + * ThreadLoad definition for LOAD_DEFAULT modifier on iterator types + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE cub::detail::value_t +ThreadLoad(RandomAccessIterator itr, Int2Type /*modifier*/, Int2Type /*is_pointer*/) +{ + 
return *itr; +} + +/** + * ThreadLoad definition for LOAD_DEFAULT modifier on pointer types + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE T +ThreadLoad(const T* ptr, Int2Type /*modifier*/, Int2Type /*is_pointer*/) +{ + return *ptr; +} + +/** + * ThreadLoad definition for LOAD_VOLATILE modifier on primitive pointer types + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE T ThreadLoadVolatilePointer(const T* ptr, Int2Type /*is_primitive*/) +{ + T retval = *reinterpret_cast(ptr); + return retval; +} + +/** + * ThreadLoad definition for LOAD_VOLATILE modifier on non-primitive pointer types + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE T ThreadLoadVolatilePointer(const T* ptr, Int2Type /*is_primitive*/) +{ + // Word type for memcpying + using VolatileWord = typename UnitWord::VolatileWord; + constexpr int VOLATILE_MULTIPLE = sizeof(T) / sizeof(VolatileWord); + + T retval; + VolatileWord* words = reinterpret_cast(&retval); + UnrolledCopy(reinterpret_cast(ptr), words); + return retval; +} + +/** + * ThreadLoad definition for LOAD_VOLATILE modifier on pointer types + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE T +ThreadLoad(const T* ptr, Int2Type /*modifier*/, Int2Type /*is_pointer*/) +{ + return ThreadLoadVolatilePointer(ptr, Int2Type::PRIMITIVE>()); +} + +/** + * ThreadLoad definition for generic modifiers on pointer types + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE T ThreadLoad(T const* ptr, Int2Type /*modifier*/, Int2Type /*is_pointer*/) +{ + using DeviceWord = typename UnitWord::DeviceWord; + constexpr int DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord); + + DeviceWord words[DEVICE_MULTIPLE]; + UnrolledThreadLoad(reinterpret_cast(ptr), words); + return *reinterpret_cast(words); +} + +template +_CCCL_DEVICE _CCCL_FORCEINLINE cub::detail::value_t ThreadLoad(RandomAccessIterator itr) +{ + return ThreadLoad(itr, Int2Type(), Int2Type<::cuda::std::is_pointer::value>()); +} + +#endif // _CCCL_DOXYGEN_INVOKED + +CUB_NAMESPACE_END diff --git 
a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_operators.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_operators.cuh new file mode 100644 index 0000000000000000000000000000000000000000..7af32df392cc31bfdddbdc66db7f5c0bf47b9da7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_operators.cuh @@ -0,0 +1,753 @@ +/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2024, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * Simple binary operator functor types + */ + +/****************************************************************************** + * Simple functor operators + ******************************************************************************/ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include // always_false +#include + +#include // cuda::maximum, cuda::minimum +#include // cuda::std::bit_cast +#include // cuda::std::plus +#include // cuda::std::common_type +#include // cuda::std::forward + +#if defined(_CCCL_HAS_NVFP16) +# include +#endif // _CCCL_HAS_NVFP16 + +#if defined(_CCCL_HAS_NVBF16) +_CCCL_DIAG_PUSH +_CCCL_DIAG_SUPPRESS_CLANG("-Wunused-function") +# include +_CCCL_DIAG_POP +#endif // _CCCL_HAS_NVFP16 + +CUB_NAMESPACE_BEGIN + +// TODO(bgruber): deprecate in C++17 with a note: "replace by decltype(cuda::std::not_fn(EqualityOp{}))" +/// @brief Inequality functor (wraps equality functor) +template +struct InequalityWrapper +{ + /// Wrapped equality operator + EqualityOp op; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE InequalityWrapper(EqualityOp op) + : op(op) + {} + + /// Boolean inequality 
operator, returns `t != u` + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE bool operator()(T&& t, U&& u) + { + return !op(::cuda::std::forward(t), ::cuda::std::forward(u)); + } +}; + +using Equality CCCL_DEPRECATED_BECAUSE("use cuda::std::equal_to instead") = ::cuda::std::equal_to<>; +using Inequality CCCL_DEPRECATED_BECAUSE("use cuda::std::not_equal_to instead") = ::cuda::std::not_equal_to<>; +using Sum CCCL_DEPRECATED_BECAUSE("use cuda::std::plus instead") = ::cuda::std::plus<>; +using Difference CCCL_DEPRECATED_BECAUSE("use cuda::std::minus instead") = ::cuda::std::minus<>; +using Division CCCL_DEPRECATED_BECAUSE("use cuda::std::divides instead") = ::cuda::std::divides<>; +using Max CCCL_DEPRECATED_BECAUSE("use cuda::maximum instead") = ::cuda::maximum<>; +using Min CCCL_DEPRECATED_BECAUSE("use cuda::minimum instead") = ::cuda::minimum<>; + +/// @brief Arg max functor (keeps the value and offset of the first occurrence +/// of the larger item) +struct ArgMax +{ + /// Boolean max operator, preferring the item having the smaller offset in + /// case of ties + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE KeyValuePair + operator()(const KeyValuePair& a, const KeyValuePair& b) const + { + // Mooch BUG (device reduce argmax gk110 3.2 million random fp32) + // return ((b.value > a.value) || + // ((a.value == b.value) && (b.key < a.key))) + // ? 
b : a; + + if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) + { + return b; + } + + return a; + } +}; + +/// @brief Arg min functor (keeps the value and offset of the first occurrence +/// of the smallest item) +struct ArgMin +{ + /// Boolean min operator, preferring the item having the smaller offset in + /// case of ties + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE KeyValuePair + operator()(const KeyValuePair& a, const KeyValuePair& b) const + { + // Mooch BUG (device reduce argmax gk110 3.2 million random fp32) + // return ((b.value < a.value) || + // ((a.value == b.value) && (b.key < a.key))) + // ? b : a; + + if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) + { + return b; + } + + return a; + } +}; + +namespace detail +{ +template +struct ScanBySegmentOp +{ + /// Wrapped operator + ScanOpT op; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ScanBySegmentOp() {} + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ScanBySegmentOp(ScanOpT op) + : op(op) + {} + + /** + * @brief Scan operator + * + * @tparam KeyValuePairT + * KeyValuePair pairing of T (value) and int (head flag) + * + * @param[in] first + * First partial reduction + * + * @param[in] second + * Second partial reduction + */ + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE KeyValuePairT operator()(const KeyValuePairT& first, const KeyValuePairT& second) + { + KeyValuePairT retval; + retval.key = first.key | second.key; +#ifdef _NVHPC_CUDA // WAR bug on nvc++ + if (second.key) + { + retval.value = second.value; + } + else + { + // If second.value isn't copied into a temporary here, nvc++ will + // crash while compiling the TestScanByKeyWithLargeTypes test in + // thrust/testing/scan_by_key.cu: + auto v2 = second.value; + retval.value = op(first.value, v2); + } +#else // not nvc++: + // if (second.key) { + // The second partial reduction spans a segment reset, so it's value + // aggregate becomes the running aggregate + // else { + // The 
second partial reduction does not span a reset, so accumulate both + // into the running aggregate + // } + retval.value = (second.key) ? second.value : op(first.value, second.value); +#endif + return retval; + } +}; + +template +struct basic_binary_op_t +{ + static constexpr bool value = false; +}; + +template <> +struct basic_binary_op_t +{ + static constexpr bool value = true; +}; + +template <> +struct basic_binary_op_t +{ + static constexpr bool value = true; +}; + +template <> +struct basic_binary_op_t +{ + static constexpr bool value = true; +}; +} // namespace detail + +/// @brief Default cast functor +template +struct CastOp +{ + /// Cast operator, returns `(B) a` + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE B operator()(A&& a) const + { + return (B) a; + } +}; + +/// @brief Binary operator wrapper for switching non-commutative scan arguments +template +class SwizzleScanOp +{ +private: + /// Wrapped scan operator + ScanOp scan_op; + +public: + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE SwizzleScanOp(ScanOp scan_op) + : scan_op(scan_op) + {} + + /// Switch the scan arguments + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE T operator()(const T& a, const T& b) + { + T _a(a); + T _b(b); + + return scan_op(_b, _a); + } +}; + +/** + * @brief Reduce-by-segment functor. + * + * Given two cub::KeyValuePair inputs `a` and `b` and a binary associative + * combining operator `f(const T &x, const T &y)`, an instance of this functor + * returns a cub::KeyValuePair whose `key` field is `a.key + b.key`, and whose + * `value` field is either `b.value` if `b.key` is non-zero, or + * `f(a.value, b.value)` otherwise. + * + * ReduceBySegmentOp is an associative, non-commutative binary combining + * operator for input sequences of cub::KeyValuePair pairings. Such sequences + * are typically used to represent a segmented set of values to be reduced + * and a corresponding set of {0,1}-valued integer "head flags" demarcating the + * first value of each segment. 
+ * + * @tparam ReductionOpT Binary reduction operator to apply to values + */ +template +struct ReduceBySegmentOp +{ + /// Wrapped reduction operator + ReductionOpT op; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ReduceBySegmentOp() {} + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ReduceBySegmentOp(ReductionOpT op) + : op(op) + {} + + /** + * @brief Scan operator + * + * @tparam KeyValuePairT + * KeyValuePair pairing of T (value) and OffsetT (head flag) + * + * @param[in] first + * First partial reduction + * + * @param[in] second + * Second partial reduction + */ + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE KeyValuePairT operator()(const KeyValuePairT& first, const KeyValuePairT& second) + { + KeyValuePairT retval; + retval.key = first.key + second.key; +#ifdef _NVHPC_CUDA // WAR bug on nvc++ + if (second.key) + { + retval.value = second.value; + } + else + { + // If second.value isn't copied into a temporary here, nvc++ will + // crash while compiling the TestScanByKeyWithLargeTypes test in + // thrust/testing/scan_by_key.cu: + auto v2 = second.value; + retval.value = op(first.value, v2); + } +#else // not nvc++: + // if (second.key) { + // The second partial reduction spans a segment reset, so it's value + // aggregate becomes the running aggregate + // else { + // The second partial reduction does not span a reset, so accumulate both + // into the running aggregate + // } + retval.value = (second.key) ? 
second.value : op(first.value, second.value); +#endif + return retval; + } +}; + +/** + * @tparam ReductionOpT Binary reduction operator to apply to values + */ +template +struct ReduceByKeyOp +{ + /// Wrapped reduction operator + ReductionOpT op; + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ReduceByKeyOp() {} + + /// Constructor + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE ReduceByKeyOp(ReductionOpT op) + : op(op) + {} + + /** + * @brief Scan operator + * + * @param[in] first First partial reduction + * @param[in] second Second partial reduction + */ + template + _CCCL_HOST_DEVICE _CCCL_FORCEINLINE KeyValuePairT operator()(const KeyValuePairT& first, const KeyValuePairT& second) + { + KeyValuePairT retval = second; + + if (first.key == second.key) + { + retval.value = op(first.value, retval.value); + } + + return retval; + } +}; + +//! Deprecated [Since 2.8] +template +struct CCCL_DEPRECATED BinaryFlip +{ + BinaryOpT binary_op; + + _CCCL_HOST_DEVICE explicit BinaryFlip(BinaryOpT binary_op) + : binary_op(binary_op) + {} + + template + _CCCL_DEVICE auto + operator()(T&& t, U&& u) -> decltype(binary_op(::cuda::std::forward(u), ::cuda::std::forward(t))) + { + return binary_op(::cuda::std::forward(u), ::cuda::std::forward(t)); + } +}; + +_CCCL_SUPPRESS_DEPRECATED_PUSH +//! 
Deprecated [Since 2.8] +template +CCCL_DEPRECATED _CCCL_HOST_DEVICE BinaryFlip MakeBinaryFlip(BinaryOpT binary_op) +{ + return BinaryFlip(binary_op); +} +_CCCL_SUPPRESS_DEPRECATED_POP + +#ifndef _CCCL_DOXYGEN_INVOKED // Do not document + +namespace internal +{ + +template +struct SimdMin +{ + static_assert(cub::detail::always_false(), "Unsupported specialization"); +}; + +template <> +struct SimdMin<::cuda::std::int16_t> +{ + using simd_type = ::cuda::std::uint32_t; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t + operator()(::cuda::std::uint32_t a, ::cuda::std::uint32_t b) const + { + return __vmins2(a, b); + } +}; + +template <> +struct SimdMin<::cuda::std::uint16_t> +{ + using simd_type = ::cuda::std::uint32_t; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t + operator()(::cuda::std::uint32_t a, ::cuda::std::uint32_t b) const + { + return __vminu2(a, b); + } +}; + +# if defined(_CCCL_HAS_NVFP16) + +template <> +struct SimdMin<__half> +{ + using simd_type = __half2; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __half2 operator()(__half2 a, __half2 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2half2_rn(::cuda::minimum<>{}(__half2float(a.x), __half2float(b.x)), + ::cuda::minimum<>{}(__half2float(a.y), __half2float(b.y))); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_80, + (return __hmin2(a, b);), + (return __halves2half2(__float2half(::cuda::minimum<>{}(__half2float(a.x), __half2float(b.x))), + __float2half(::cuda::minimum<>{}(__half2float(a.y), __half2float(b.y))));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVFP16) + +# if defined(_CCCL_HAS_NVBF16) + +// NOTE: __halves2bfloat162 is not always available on older CUDA Toolkits for __CUDA_ARCH__ < 800 +_CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __nv_bfloat162 
halves2bfloat162(__nv_bfloat16 a, __nv_bfloat16 b) +{ + ::cuda::std::uint32_t tmp; + auto a_uint16 = ::cuda::std::bit_cast<::cuda::std::uint16_t>(a); + auto b_uint16 = ::cuda::std::bit_cast<::cuda::std::uint16_t>(b); + asm("{mov.b32 %0, {%1,%2};}\n" : "=r"(tmp) : "h"(a_uint16), "h"(b_uint16)); + __nv_bfloat162 ret; + ::memcpy(&ret, &tmp, sizeof(ret)); + return ret; // TODO: replace with ::cuda::std::bit_cast<__nv_bfloat162>(tmp); +} + +template <> +struct SimdMin<__nv_bfloat16> +{ + using simd_type = __nv_bfloat162; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __nv_bfloat162 operator()(__nv_bfloat162 a, __nv_bfloat162 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2bfloat162_rn(::cuda::minimum<>{}(__bfloat162float(a.x), __bfloat162float(b.x)), + ::cuda::minimum<>{}(__bfloat162float(a.y), __bfloat162float(b.y))); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_80, + (return __hmin2(a, b);), + (return cub::internal::halves2bfloat162( + __float2bfloat16(::cuda::minimum<>{}(__bfloat162float(a.x), __bfloat162float(b.x))), + __float2bfloat16(::cuda::minimum<>{}(__bfloat162float(a.y), __bfloat162float(b.y))));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVBF16) + +//---------------------------------------------------------------------------------------------------------------------- + +template +struct SimdMax +{ + static_assert(cub::detail::always_false(), "Unsupported specialization"); +}; + +template <> +struct SimdMax<::cuda::std::int16_t> +{ + using simd_type = ::cuda::std::uint32_t; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t + operator()(::cuda::std::uint32_t a, ::cuda::std::uint32_t b) const + { + return __vmaxs2(a, b); + } +}; + +template <> +struct SimdMax<::cuda::std::uint16_t> +{ + using simd_type = ::cuda::std::uint32_t; + + _CCCL_NODISCARD 
_CCCL_DEVICE _CCCL_FORCEINLINE ::cuda::std::uint32_t + operator()(::cuda::std::uint32_t a, ::cuda::std::uint32_t b) const + { + return __vmaxu2(a, b); + } +}; + +# if defined(_CCCL_HAS_NVFP16) + +template <> +struct SimdMax<__half> +{ + using simd_type = __half2; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __half2 operator()(__half2 a, __half2 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2half2_rn(::cuda::maximum<>{}(__half2float(a.x), __half2float(b.x)), + ::cuda::maximum<>{}(__half2float(a.y), __half2float(b.y))); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_80, + (return __hmax2(a, b);), + (return __halves2half2(__float2half(::cuda::maximum<>{}(__half2float(a.x), __half2float(b.x))), + __float2half(::cuda::maximum<>{}(__half2float(a.y), __half2float(b.y))));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVFP16) + +# if defined(_CCCL_HAS_NVBF16) + +template <> +struct SimdMax<__nv_bfloat16> +{ + using simd_type = __nv_bfloat162; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __nv_bfloat162 operator()(__nv_bfloat162 a, __nv_bfloat162 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2bfloat162_rn(::cuda::maximum<>{}(__bfloat162float(a.x), __bfloat162float(b.x)), + ::cuda::maximum<>{}(__bfloat162float(a.y), __bfloat162float(b.y))); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_80, + (return __hmax2(a, b);), + (return cub::internal::halves2bfloat162( + __float2bfloat16(::cuda::maximum<>{}(__bfloat162float(a.x), __bfloat162float(b.x))), + __float2bfloat16(::cuda::maximum<>{}(__bfloat162float(a.y), __bfloat162float(b.y))));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVBF16) + 
+//---------------------------------------------------------------------------------------------------------------------- + +template +struct SimdSum +{ + static_assert(cub::detail::always_false(), "Unsupported specialization"); +}; + +# if defined(_CCCL_HAS_NVFP16) + +template <> +struct SimdSum<__half> +{ + using simd_type = __half2; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __half2 operator()(__half2 a, __half2 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2half2_rn(__half2float(a.x) + __half2float(b.x), __half2float(a.y) + __half2float(b.y)); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_53, + (return __hadd2(a, b);), + (return __halves2half2(__float2half(__half2float(a.x) + __half2float(b.x)), + __float2half(__half2float(a.y) + __half2float(b.y)));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVFP16) + +# if defined(_CCCL_HAS_NVBF16) + +template <> +struct SimdSum<__nv_bfloat16> +{ + using simd_type = __nv_bfloat162; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __nv_bfloat162 operator()(__nv_bfloat162 a, __nv_bfloat162 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2bfloat162_rn( + __bfloat162float(a.x) + __bfloat162float(b.x), __bfloat162float(a.y) + __bfloat162float(b.y)); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET( + NV_PROVIDES_SM_80, + (return __hadd2(a, b);), + (return cub::internal::halves2bfloat162(__float2bfloat16(__bfloat162float(a.x) + __bfloat162float(b.x)), + __float2bfloat16(__bfloat162float(a.y) + __bfloat162float(b.y)));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVBF16) + 
+//---------------------------------------------------------------------------------------------------------------------- + +template +struct SimdMul +{ + static_assert(cub::detail::always_false(), "Unsupported specialization"); +}; + +# if defined(_CCCL_HAS_NVFP16) + +template <> +struct SimdMul<__half> +{ + using simd_type = __half2; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __half2 operator()(__half2 a, __half2 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2half2_rn(__half2float(a.x) * __half2float(b.x), __half2float(a.y) * __half2float(b.y)); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_53, + (return __hmul2(a, b);), + (return __halves2half2(__float2half(__half2float(a.x) * __half2float(b.x)), + __float2half(__half2float(a.y) * __half2float(b.y)));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVFP16) + +# if defined(_CCCL_HAS_NVBF16) + +template <> +struct SimdMul<__nv_bfloat16> +{ + using simd_type = __nv_bfloat162; + + _CCCL_NODISCARD _CCCL_DEVICE _CCCL_FORCEINLINE __nv_bfloat162 operator()(__nv_bfloat162 a, __nv_bfloat162 b) const + { +# if _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) + return __floats2bfloat162_rn( + __bfloat162float(a.x) * __bfloat162float(b.x), __bfloat162float(a.y) * __bfloat162float(b.y)); +# else // ^^^ _CCCL_CUDACC_BELOW(12) && _CCCL_CUDA_COMPILER(NVHPC) ^^^ / vvv otherwise vvv + NV_IF_TARGET(NV_PROVIDES_SM_80, + (return __hmul2(a, b);), + (return halves2bfloat162(__float2bfloat16(__bfloat162float(a.x) * __bfloat162float(b.x)), + __float2bfloat16(__bfloat162float(a.y) * __bfloat162float(b.y)));)); +# endif // !_CCCL_CUDACC_BELOW(12) || !_CCCL_CUDA_COMPILER(NVHPC) + } +}; + +# endif // defined(_CCCL_HAS_NVBF16) + +//---------------------------------------------------------------------------------------------------------------------- + 
+template +struct CubOperatorToSimdOperator +{ + static_assert(cub::detail::always_false(), "Unsupported specialization"); +}; + +template +struct CubOperatorToSimdOperator<::cuda::minimum<>, T> +{ + using type = SimdMin; + using simd_type = typename type::simd_type; +}; + +template +struct CubOperatorToSimdOperator<::cuda::minimum, T> : CubOperatorToSimdOperator<::cuda::minimum<>, T> +{}; + +template +struct CubOperatorToSimdOperator<::cuda::maximum<>, T> +{ + using type = SimdMax; + using simd_type = typename type::simd_type; +}; + +template +struct CubOperatorToSimdOperator<::cuda::maximum, T> : CubOperatorToSimdOperator<::cuda::maximum<>, T> +{}; + +template +struct CubOperatorToSimdOperator<::cuda::std::plus<>, T> +{ + using type = SimdSum; + using simd_type = typename type::simd_type; +}; + +template +struct CubOperatorToSimdOperator<::cuda::std::plus, T> : CubOperatorToSimdOperator<::cuda::std::plus<>, T> +{}; + +template +struct CubOperatorToSimdOperator<::cuda::std::multiplies<>, T> +{ + using type = SimdMul; + using simd_type = typename type::simd_type; +}; + +template +struct CubOperatorToSimdOperator<::cuda::std::multiplies, T> + : CubOperatorToSimdOperator<::cuda::std::multiplies<>, T> +{}; + +template +using cub_operator_to_simd_operator_t = typename CubOperatorToSimdOperator::type; + +template +using simd_type_t = typename CubOperatorToSimdOperator::simd_type; + +} // namespace internal + +#endif // !_CCCL_DOXYGEN_INVOKED + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_search.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_search.cuh new file mode 100644 index 0000000000000000000000000000000000000000..802d4ec96f8ec95c5620a314a8a94a87682c1e9b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/thread/thread_search.cuh @@ -0,0 +1,196 @@ 
+/****************************************************************************** + * Copyright (c) 2011, Duane Merrill. All rights reserved. + * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + ******************************************************************************/ + +/** + * @file + * Thread utilities for sequential search + */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include + +#include + +#include + +CUB_NAMESPACE_BEGIN + +/** + * Computes the begin offsets into A and B for the specific diagonal + */ +template +_CCCL_HOST_DEVICE _CCCL_FORCEINLINE void MergePathSearch( + OffsetT diagonal, AIteratorT a, BIteratorT b, OffsetT a_len, OffsetT b_len, CoordinateT& path_coordinate) +{ + /// The value type of the input iterator + using T = cub::detail::value_t; + + OffsetT split_min = CUB_MAX(diagonal - b_len, 0); + OffsetT split_max = CUB_MIN(diagonal, a_len); + + while (split_min < split_max) + { + OffsetT split_pivot = (split_min + split_max) >> 1; + if (a[split_pivot] <= b[diagonal - split_pivot - 1]) + { + // Move candidate split range up A, down B + split_min = split_pivot + 1; + } + else + { + // Move candidate split range up B, down A + split_max = split_pivot; + } + } + + path_coordinate.x = CUB_MIN(split_min, a_len); + path_coordinate.y = diagonal - split_min; +} + +/** + * @brief Returns the offset of the first value within @p input which does not compare + * less than @p val + * + * @param[in] input + * Input sequence + * + * @param[in] num_items + * Input sequence length + * + * @param[in] val + * Search key + */ +// TODO(bgruber): deprecate once ::cuda::std::lower_bound is made public +template +_CCCL_DEVICE _CCCL_FORCEINLINE OffsetT LowerBound(InputIteratorT input, OffsetT num_items, T val) +{ + OffsetT retval = 0; + while (num_items > 0) + { + OffsetT half = num_items >> 1; + if (input[retval + half] < val) + { + retval = retval + (half + 1); + num_items = num_items - (half + 
1); + } + else + { + num_items = half; + } + } + + return retval; +} + +/** + * @brief Returns the offset of the first value within @p input which compares + * greater than @p val + * + * @param[in] input + * Input sequence + * + * @param[in] num_items + * Input sequence length + * + * @param[in] val + * Search key + */ +// TODO(bgruber): deprecate once ::cuda::std::upper_bound is made public +template +_CCCL_DEVICE _CCCL_FORCEINLINE OffsetT UpperBound(InputIteratorT input, OffsetT num_items, T val) +{ + OffsetT retval = 0; + while (num_items > 0) + { + OffsetT half = num_items >> 1; + if (val < input[retval + half]) + { + num_items = half; + } + else + { + retval = retval + (half + 1); + num_items = num_items - (half + 1); + } + } + + return retval; +} + +#if defined(__CUDA_FP16_TYPES_EXIST__) +/** + * @param[in] input + * Input sequence + * + * @param[in] num_items + * Input sequence length + * + * @param[in] val + * Search key + */ +template +_CCCL_DEVICE _CCCL_FORCEINLINE OffsetT UpperBound(InputIteratorT input, OffsetT num_items, __half val) +{ + OffsetT retval = 0; + while (num_items > 0) + { + OffsetT half = num_items >> 1; + + bool lt; + NV_IF_TARGET(NV_PROVIDES_SM_53, + (lt = __hlt(val, input[retval + half]);), + (lt = __half2float(val) < __half2float(input[retval + half]);)); + + if (lt) + { + num_items = half; + } + else + { + retval = retval + (half + 1); + num_items = num_items - (half + 1); + } + } + + return retval; +} +#endif // __CUDA_FP16_TYPES_EXIST__ + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_exchange.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_exchange.cuh new file mode 100644 index 0000000000000000000000000000000000000000..79f422f5abe964ebd3b81c3c74ce4f9be054d90f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_exchange.cuh @@ -0,0 +1,409 @@ 
+/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +/** + * @file + * The cub::WarpExchange class provides [collective](../index.html#sec0) + * methods for rearranging data partitioned across a CUDA warp. 
+ */ + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +enum WarpExchangeAlgorithm +{ + WARP_EXCHANGE_SMEM, + WARP_EXCHANGE_SHUFFLE, +}; + +namespace detail +{ +template +using InternalWarpExchangeImpl = + ::cuda::std::_If, + WarpExchangeShfl>; +} // namespace detail + +/** + * @brief The WarpExchange class provides [collective](../index.html#sec0) + * methods for rearranging data partitioned across a CUDA warp. + * + * @tparam T + * The data type to be exchanged. + * + * @tparam ITEMS_PER_THREAD + * The number of items partitioned onto each thread. + * + * @tparam LOGICAL_WARP_THREADS + * [optional] The number of threads per "logical" warp (may be less + * than the number of hardware warp threads). Default is the warp size of the + * targeted CUDA compute-capability (e.g., 32 threads for SM86). Must be a + * power of two. + * + * @tparam LEGACY_PTX_ARCH + * Unused. + * + * @par Overview + * - It is commonplace for a warp of threads to rearrange data items between + * threads. For example, the global memory accesses prefer patterns where + * data items are "striped" across threads (where consecutive threads access + * consecutive items), yet most warp-wide operations prefer a "blocked" + * partitioning of items across threads (where consecutive items belong to a + * single thread). 
+ * - WarpExchange supports the following types of data exchanges: + * - Transposing between [blocked](../index.html#sec5sec3) and + * [striped](../index.html#sec5sec3) arrangements + * - Scattering ranked items to a + * [striped arrangement](../index.html#sec5sec3) + * + * @par A Simple Example + * @par + * The code snippet below illustrates the conversion from a "blocked" to a + * "striped" arrangement of 64 integer items partitioned across 16 threads where + * each thread owns 4 items. + * @par + * @code + * #include // or equivalently + * + * __global__ void ExampleKernel(int *d_data, ...) + * { + * constexpr int warp_threads = 16; + * constexpr int block_threads = 256; + * constexpr int items_per_thread = 4; + * constexpr int warps_per_block = block_threads / warp_threads; + * const int warp_id = static_cast(threadIdx.x) / warp_threads; + * + * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each + * using WarpExchangeT = + * cub::WarpExchange; + * + * // Allocate shared memory for WarpExchange + * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; + * + * // Load a tile of data striped across threads + * int thread_data[items_per_thread]; + * // ... + * + * // Collectively exchange data into a blocked arrangement across threads + * WarpExchangeT(temp_storage[warp_id]).StripedToBlocked(thread_data, thread_data); + * @endcode + * @par + * Suppose the set of striped input @p thread_data across the block of threads + * is { [0,16,32,48], [1,17,33,49], ..., [15, 32, 47, 63] }. + * The corresponding output @p thread_data in those threads will be + * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [60,61,62,63] }. + */ +template +class WarpExchange + : private detail::InternalWarpExchangeImpl +{ + using InternalWarpExchange = + detail::InternalWarpExchangeImpl; + +public: + /// \smemstorage{WarpExchange} + using TempStorage = typename InternalWarpExchange::TempStorage; + + //! 
@name Collective constructors + //! @{ + + WarpExchange() = delete; + + /** + * @brief Collective constructor using the specified memory allocation as + * temporary storage. + */ + explicit _CCCL_DEVICE _CCCL_FORCEINLINE WarpExchange(TempStorage& temp_storage) + : InternalWarpExchange(temp_storage) + {} + + //! @} end member group + //! @name Data movement + //! @{ + + /** + * @brief Transposes data items from blocked arrangement to + * striped arrangement. + * + * @par + * @smemwarpreuse + * + * @par Snippet + * The code snippet below illustrates the conversion from a "blocked" to a + * "striped" arrangement of 64 integer items partitioned across 16 threads + * where each thread owns 4 items. + * @par + * @code + * #include // or equivalently + * + * __global__ void ExampleKernel(int *d_data, ...) + * { + * constexpr int warp_threads = 16; + * constexpr int block_threads = 256; + * constexpr int items_per_thread = 4; + * constexpr int warps_per_block = block_threads / warp_threads; + * const int warp_id = static_cast(threadIdx.x) / warp_threads; + * + * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each + * using WarpExchangeT = cub::WarpExchange; + * + * // Allocate shared memory for WarpExchange + * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; + * + * // Obtain a segment of consecutive items that are blocked across threads + * int thread_data[items_per_thread]; + * // ... + * + * // Collectively exchange data into a striped arrangement across threads + * WarpExchangeT(temp_storage[warp_id]).BlockedToStriped(thread_data, thread_data); + * @endcode + * @par + * Suppose the set of striped input @p thread_data across the block of threads + * is { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [60,61,62,63] }. + * The corresponding output @p thread_data in those threads will be + * { [0,16,32,48], [1,17,33,49], ..., [15, 32, 47, 63] }. 
+ * + * @param[in] input_items + * Items to exchange, converting between blocked and + * striped arrangements. + * + * @param[out] output_items + * Items from exchange, converting between striped and + * blocked arrangements. May be aliased to @p input_items. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + BlockedToStriped(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + InternalWarpExchange::BlockedToStriped(input_items, output_items); + } + + /** + * @brief Transposes data items from striped arrangement to + * blocked arrangement. + * + * @par + * @smemwarpreuse + * + * @par Snippet + * The code snippet below illustrates the conversion from a "striped" to a + * "blocked" arrangement of 64 integer items partitioned across 16 threads + * where each thread owns 4 items. + * @par + * @code + * #include // or equivalently + * + * __global__ void ExampleKernel(int *d_data, ...) + * { + * constexpr int warp_threads = 16; + * constexpr int block_threads = 256; + * constexpr int items_per_thread = 4; + * constexpr int warps_per_block = block_threads / warp_threads; + * const int warp_id = static_cast(threadIdx.x) / warp_threads; + * + * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each + * using WarpExchangeT = cub::WarpExchange; + * + * // Allocate shared memory for WarpExchange + * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; + * + * // Load a tile of data striped across threads + * int thread_data[items_per_thread]; + * // ... + * + * // Collectively exchange data into a blocked arrangement across threads + * WarpExchangeT(temp_storage[warp_id]).StripedToBlocked(thread_data, thread_data); + * @endcode + * @par + * Suppose the set of striped input @p thread_data across the block of threads + * is { [0,16,32,48], [1,17,33,49], ..., [15, 32, 47, 63] }. 
+ * The corresponding output @p thread_data in those threads will be + * { [0,1,2,3], [4,5,6,7], [8,9,10,11], ..., [60,61,62,63] }. + * + * @param[in] input_items + * Items to exchange + * + * @param[out] output_items + * Items from exchange. May be aliased to @p input_items. + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + StripedToBlocked(const InputT (&input_items)[ITEMS_PER_THREAD], OutputT (&output_items)[ITEMS_PER_THREAD]) + { + InternalWarpExchange::StripedToBlocked(input_items, output_items); + } + + /** + * @brief Exchanges valid data items annotated by rank + * into striped arrangement. + * + * @par + * @smemwarpreuse + * + * @par Snippet + * The code snippet below illustrates the conversion from a "scatter" to a + * "striped" arrangement of 64 integer items partitioned across 16 threads + * where each thread owns 4 items. + * @par + * @code + * #include // or equivalently + * + * __global__ void ExampleKernel(int *d_data, ...) + * { + * constexpr int warp_threads = 16; + * constexpr int block_threads = 256; + * constexpr int items_per_thread = 4; + * constexpr int warps_per_block = block_threads / warp_threads; + * const int warp_id = static_cast(threadIdx.x) / warp_threads; + * + * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each + * using WarpExchangeT = cub::WarpExchange; + * + * // Allocate shared memory for WarpExchange + * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; + * + * // Obtain a segment of consecutive items that are blocked across threads + * int thread_data[items_per_thread]; + * int thread_ranks[items_per_thread]; + * // ... 
+ * + * // Collectively exchange data into a striped arrangement across threads + * WarpExchangeT(temp_storage[warp_id]).ScatterToStriped( + * thread_data, thread_ranks); + * @endcode + * @par + * Suppose the set of input @p thread_data across the block of threads + * is `{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }`, and the set of + * @p thread_ranks is `{ [63,62,61,60], ..., [7,6,5,4], [3,2,1,0] }`. The + * corresponding output @p thread_data in those threads will be + * `{ [63, 47, 31, 15], [62, 46, 30, 14], ..., [48, 32, 16, 0] }`. + * + * @tparam OffsetT [inferred] Signed integer type for local offsets + * + * @param[in,out] items Items to exchange + * @param[in] ranks Corresponding scatter ranks + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + ScatterToStriped(InputT (&items)[ITEMS_PER_THREAD], OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + InternalWarpExchange::ScatterToStriped(items, ranks); + } + + /** + * @brief Exchanges valid data items annotated by rank + * into striped arrangement. + * + * @par + * @smemwarpreuse + * + * @par Snippet + * The code snippet below illustrates the conversion from a "scatter" to a + * "striped" arrangement of 64 integer items partitioned across 16 threads + * where each thread owns 4 items. + * @par + * @code + * #include // or equivalently + * + * __global__ void ExampleKernel(int *d_data, ...) 
+ * { + * constexpr int warp_threads = 16; + * constexpr int block_threads = 256; + * constexpr int items_per_thread = 4; + * constexpr int warps_per_block = block_threads / warp_threads; + * const int warp_id = static_cast(threadIdx.x) / warp_threads; + * + * // Specialize WarpExchange for a virtual warp of 16 threads owning 4 integer items each + * using WarpExchangeT = cub::WarpExchange; + * + * // Allocate shared memory for WarpExchange + * __shared__ typename WarpExchangeT::TempStorage temp_storage[warps_per_block]; + * + * // Obtain a segment of consecutive items that are blocked across threads + * int thread_input[items_per_thread]; + * int thread_ranks[items_per_thread]; + * // ... + * + * // Collectively exchange data into a striped arrangement across threads + * int thread_output[items_per_thread]; + * WarpExchangeT(temp_storage[warp_id]).ScatterToStriped( + * thread_input, thread_output, thread_ranks); + * @endcode + * @par + * Suppose the set of input @p thread_input across the block of threads + * is `{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }`, and the set of + * @p thread_ranks is `{ [63,62,61,60], ..., [7,6,5,4], [3,2,1,0] }`. The + * corresponding @p thread_output in those threads will be + * `{ [63, 47, 31, 15], [62, 46, 30, 14], ..., [48, 32, 16, 0] }`. + * + * @tparam OffsetT [inferred] Signed integer type for local offsets + * + * @param[in] input_items + * Items to exchange + * + * @param[out] output_items + * Items from exchange. May be aliased to @p input_items. 
+ * + * @param[in] ranks + * Corresponding scatter ranks + */ + template + _CCCL_DEVICE _CCCL_FORCEINLINE void ScatterToStriped( + const InputT (&input_items)[ITEMS_PER_THREAD], + OutputT (&output_items)[ITEMS_PER_THREAD], + OffsetT (&ranks)[ITEMS_PER_THREAD]) + { + InternalWarpExchange::ScatterToStriped(input_items, output_items, ranks); + } + + //@} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_load.cuh b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_load.cuh new file mode 100644 index 0000000000000000000000000000000000000000..3f11129c35a737ce6c49327d5ee48d784c33f2bc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/_core/include/cupy/_cccl/cub/cub/warp/warp_load.cuh @@ -0,0 +1,619 @@ +/****************************************************************************** + * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ******************************************************************************/ + +//! @file +//! Operations for reading linear tiles of data into the CUDA warp. + +#pragma once + +#include + +#if defined(_CCCL_IMPLICIT_SYSTEM_HEADER_GCC) +# pragma GCC system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_CLANG) +# pragma clang system_header +#elif defined(_CCCL_IMPLICIT_SYSTEM_HEADER_MSVC) +# pragma system_header +#endif // no system header + +#include +#include +#include +#include +#include + +#include + +CUB_NAMESPACE_BEGIN + +//! @rst +//! ``cub::WarpLoadAlgorithm`` enumerates alternative algorithms for :cpp:struct:`cub::WarpLoad` to +//! read a linear segment of data from memory into a CUDA warp. +//! @endrst +enum WarpLoadAlgorithm +{ + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is read directly from memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! The utilization of memory transactions (coalescing) decreases as the + //! access stride between threads increases (i.e., the number items per thread). + //! @endrst + WARP_LOAD_DIRECT, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`striped arrangement ` of data is read directly from memory. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! The utilization of memory transactions (coalescing) doesn't depend on + //! 
the number of items per thread. + //! @endrst + WARP_LOAD_STRIPED, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`blocked arrangement ` of data is read from memory using + //! CUDA's built-in vectorized loads as a coalescing optimization. + //! For example, ``ld.global.v4.s32`` instructions will be generated when ``T = int`` and + //! ``ITEMS_PER_THREAD % 4 == 0``. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) remains high until the the + //! access stride between threads (i.e., the number items per thread) exceeds the + //! maximum vector load width (typically 4 items or 64B, whichever is lower). + //! - The following conditions will prevent vectorization and loading will fall + //! back to cub::WARP_LOAD_DIRECT: + //! + //! - ``ITEMS_PER_THREAD`` is odd + //! - The ``InputIteratorT`` is not a simple pointer type + //! - The block input offset is not quadword-aligned + //! - The data type ``T`` is not a built-in primitive or CUDA vector type + //! (e.g., ``short``, ``int2``, ``double``, ``float2``, etc.) + //! @endrst + WARP_LOAD_VECTORIZE, + + //! @rst + //! Overview + //! ++++++++++++++++++++++++++ + //! + //! A :ref:`striped arrangement ` of data is read efficiently from + //! memory and then locally transposed into a + //! :ref:`blocked arrangement `. + //! + //! Performance Considerations + //! ++++++++++++++++++++++++++ + //! + //! - The utilization of memory transactions (coalescing) remains high + //! regardless of items loaded per thread. + //! - The local reordering incurs slightly longer latencies and throughput than the direct + //! ``cub::WARP_LOAD_DIRECT`` and ``cub::WARP_LOAD_VECTORIZE`` alternatives. + //! @endrst + WARP_LOAD_TRANSPOSE +}; + +//! @rst +//! The WarpLoad class provides :ref:`collective ` data movement methods for +//! loading a linear segment of items from memory into a +//! 
:ref:`blocked arrangement ` across a CUDA thread warp. +//! +//! Overview +//! ++++++++++++++++ +//! +//! - The WarpLoad class provides a single data movement abstraction that can be +//! specialized to implement different cub::WarpLoadAlgorithm strategies. This +//! facilitates different performance policies for different architectures, data +//! types, granularity sizes, etc. +//! - WarpLoad can be optionally specialized by different data movement strategies: +//! +//! #. :cpp:enumerator:`cub::WARP_LOAD_DIRECT`: +//! a :ref:`blocked arrangement ` of data is read directly from +//! memory. +//! #. :cpp:enumerator:`cub::WARP_LOAD_STRIPED`: +//! a :ref:`striped arrangement ` of data is read directly from +//! memory. +//! #. :cpp:enumerator:`cub::WARP_LOAD_VECTORIZE`: +//! a :ref:`blocked arrangement ` of data is read directly from +//! memory using CUDA's built-in vectorized loads as a coalescing optimization. +//! #. :cpp:enumerator:`cub::WARP_LOAD_TRANSPOSE`: +//! a :ref:`striped arrangement ` of data is read directly from +//! memory and is then locally transposed into a +//! :ref:`blocked arrangement `. +//! +//! A Simple Example +//! ++++++++++++++++ +//! +//! The code snippet below illustrates the loading of a linear segment of 64 +//! integers into a "blocked" arrangement across 16 threads where each thread +//! owns 4 consecutive items. The load is specialized for ``WARP_LOAD_TRANSPOSE``, +//! meaning memory references are efficiently coalesced using a warp-striped access +//! pattern (after which items are locally reordered among threads). +//! +//! .. code-block:: c++ +//! +//! #include // or equivalently +//! +//! __global__ void ExampleKernel(int *d_data, ...) +//! { +//! constexpr int warp_threads = 16; +//! constexpr int block_threads = 256; +//! constexpr int items_per_thread = 4; +//! +//! // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each +//! using WarpLoadT = WarpLoad; +//! +//! 
constexpr int warps_in_block = block_threads / warp_threads; +//! constexpr int tile_size = items_per_thread * warp_threads; +//! const int warp_id = static_cast(threadIdx.x) / warp_threads; +//! +//! // Allocate shared memory for WarpLoad +//! __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; +//! +//! // Load a segment of consecutive items that are blocked across threads +//! int thread_data[items_per_thread]; +//! WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, +//! thread_data); +//! +//! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, ...``. +//! The set of ``thread_data`` across the first logical warp of threads in those +//! threads will be: ``{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }``. +//! @endrst +//! +//! @tparam InputT +//! The data type to read into (which must be convertible from the input +//! iterator's value type). +//! +//! @tparam ITEMS_PER_THREAD +//! The number of consecutive items partitioned onto each thread. +//! +//! @tparam ALGORITHM +//! [optional] cub::WarpLoadAlgorithm tuning policy. +//! default: cub::WARP_LOAD_DIRECT. +//! +//! @tparam LOGICAL_WARP_THREADS +//! [optional] The number of threads per "logical" warp (may be less +//! than the number of hardware warp threads). Default is the warp size of the +//! targeted CUDA compute-capability (e.g., 32 threads for SM86). Must be a +//! power of two. +//! +//! @tparam LEGACY_PTX_ARCH +//! Unused. 
+template +class WarpLoad +{ + static constexpr bool IS_ARCH_WARP = LOGICAL_WARP_THREADS == CUB_WARP_THREADS(0); + + static_assert(PowerOfTwo::VALUE, "LOGICAL_WARP_THREADS must be a power of two"); + +private: + /***************************************************************************** + * Algorithmic variants + ****************************************************************************/ + + /// Load helper + template + struct LoadInternal; + + template + struct LoadInternal + { + using TempStorage = NullType; + + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) + { + LoadDirectBlocked(linear_tid, block_itr, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) + { + LoadDirectBlocked(linear_tid, block_itr, items, valid_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) + { + LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); + } + }; + + template + struct LoadInternal + { + using TempStorage = NullType; + + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) + { + LoadDirectStriped(linear_tid, block_itr, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) + { + LoadDirectStriped(linear_tid, block_itr, items, valid_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT 
(&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) + { + LoadDirectStriped(linear_tid, block_itr, items, valid_items, oob_default); + } + }; + + template + struct LoadInternal + { + using TempStorage = NullType; + + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& /*temp_storage*/, int linear_tid) + : linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(InputT* block_ptr, InputT (&items)[ITEMS_PER_THREAD]) + { + InternalLoadDirectBlockedVectorized(linear_tid, block_ptr, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(const InputT* block_ptr, InputT (&items)[ITEMS_PER_THREAD]) + { + InternalLoadDirectBlockedVectorized(linear_tid, block_ptr, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(CacheModifiedInputIterator block_itr, InputT (&items)[ITEMS_PER_THREAD]) + { + InternalLoadDirectBlockedVectorized(linear_tid, block_itr.ptr, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(_InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) + { + LoadDirectBlocked(linear_tid, block_itr, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) + { + LoadDirectBlocked(linear_tid, block_itr, items, valid_items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) + { + LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); + } + }; + + template + struct LoadInternal + { + using WarpExchangeT = WarpExchange; + + struct _TempStorage : WarpExchangeT::TempStorage + {}; + + struct TempStorage : Uninitialized<_TempStorage> + {}; + + _TempStorage& temp_storage; + + int linear_tid; + + _CCCL_DEVICE _CCCL_FORCEINLINE LoadInternal(TempStorage& temp_storage, int linear_tid) + : temp_storage(temp_storage.Alias()) + , 
linear_tid(linear_tid) + {} + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) + { + LoadDirectStriped(linear_tid, block_itr, items); + WarpExchangeT(temp_storage).StripedToBlocked(items, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) + { + LoadDirectStriped(linear_tid, block_itr, items, valid_items); + WarpExchangeT(temp_storage).StripedToBlocked(items, items); + } + + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) + { + LoadDirectStriped(linear_tid, block_itr, items, valid_items, oob_default); + WarpExchangeT(temp_storage).StripedToBlocked(items, items); + } + }; + + /***************************************************************************** + * Type definitions + ****************************************************************************/ + + /// Internal load implementation to use + using InternalLoad = LoadInternal; + + /// Shared memory storage layout type + using _TempStorage = typename InternalLoad::TempStorage; + + /***************************************************************************** + * Utility methods + ****************************************************************************/ + + /// Internal storage allocator + _CCCL_DEVICE _CCCL_FORCEINLINE _TempStorage& PrivateStorage() + { + __shared__ _TempStorage private_storage; + return private_storage; + } + + /***************************************************************************** + * Thread fields + ****************************************************************************/ + + /// Thread reference to shared storage + _TempStorage& temp_storage; + + /// Linear thread-id + int linear_tid; + +public: + /// @smemstorage{WarpLoad} + struct TempStorage : Uninitialized<_TempStorage> + {}; + + //! 
@name Collective constructors + //! @{ + + //! @brief Collective constructor using a private static allocation of + //! shared memory as temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE WarpLoad() + : temp_storage(PrivateStorage()) + , linear_tid( + IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS)) + {} + + //! @brief Collective constructor using the specified memory allocation as + //! temporary storage. + _CCCL_DEVICE _CCCL_FORCEINLINE WarpLoad(TempStorage& temp_storage) + : temp_storage(temp_storage.Alias()) + , linear_tid( + IS_ARCH_WARP ? ::cuda::ptx::get_sreg_laneid() : (::cuda::ptx::get_sreg_laneid() % LOGICAL_WARP_THREADS)) + {} + + //! @} end member group + //! @name Data movement + //! @{ + + //! @rst + //! Load a linear segment of items from memory. + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, ...) + //! { + //! constexpr int warp_threads = 16; + //! constexpr int block_threads = 256; + //! constexpr int items_per_thread = 4; + //! + //! // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each + //! using WarpLoadT = WarpLoad; + //! + //! constexpr int warps_in_block = block_threads / warp_threads; + //! constexpr int tile_size = items_per_thread * warp_threads; + //! const int warp_id = static_cast(threadIdx.x) / warp_threads; + //! + //! // Allocate shared memory for WarpLoad + //! __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; + //! + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[items_per_thread]; + //! WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, + //! thread_data); + //! + //! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, ...``, + //! The set of ``thread_data`` across the first logical warp of threads in those + //! 
threads will be: ``{ [0,1,2,3], [4,5,6,7], ..., [60,61,62,63] }``. + //! @endrst + //! + //! @param[in] block_itr The thread block's base input iterator for loading from + //! @param[out] items Data to load + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD]) + { + InternalLoad(temp_storage, linear_tid).Load(block_itr, items); + } + + //! @rst + //! Load a linear segment of items from memory, guarded by range. + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, int valid_items, ...) + //! { + //! constexpr int warp_threads = 16; + //! constexpr int block_threads = 256; + //! constexpr int items_per_thread = 4; + //! + //! // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each + //! using WarpLoadT = WarpLoad; + //! + //! constexpr int warps_in_block = block_threads / warp_threads; + //! constexpr int tile_size = items_per_thread * warp_threads; + //! const int warp_id = static_cast(threadIdx.x) / warp_threads; + //! + //! // Allocate shared memory for WarpLoad + //! __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; + //! + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[items_per_thread]; + //! WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, + //! thread_data, + //! valid_items); + //! + //! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, ...`` and ``valid_items`` is ``5``. + //! The set of ``thread_data`` across the first logical warp of threads in those threads will be: + //! ``{ [0,1,2,3], [4,?,?,?], ..., [?,?,?,?] }`` with only the first two threads being unmasked to + //! load portions of valid data (and other items remaining unassigned). + //! @endrst + //! + //! @param[in] block_itr The thread block's base input iterator for loading from + //! 
@param[out] items Data to load + //! @param[in] valid_items Number of valid items to load + template + _CCCL_DEVICE _CCCL_FORCEINLINE void Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items) + { + InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items); + } + + //! @rst + //! Load a linear segment of items from memory, guarded by range. + //! + //! @smemwarpreuse + //! + //! Snippet + //! +++++++ + //! + //! .. code-block:: c++ + //! + //! #include // or equivalently + //! + //! __global__ void ExampleKernel(int *d_data, int valid_items, ...) + //! { + //! constexpr int warp_threads = 16; + //! constexpr int block_threads = 256; + //! constexpr int items_per_thread = 4; + //! + //! // Specialize WarpLoad for a warp of 16 threads owning 4 integer items each + //! using WarpLoadT = WarpLoad; + //! + //! constexpr int warps_in_block = block_threads / warp_threads; + //! constexpr int tile_size = items_per_thread * warp_threads; + //! const int warp_id = static_cast(threadIdx.x) / warp_threads; + //! + //! // Allocate shared memory for WarpLoad + //! __shared__ typename WarpLoadT::TempStorage temp_storage[warps_in_block]; + //! + //! // Load a segment of consecutive items that are blocked across threads + //! int thread_data[items_per_thread]; + //! WarpLoadT(temp_storage[warp_id]).Load(d_data + warp_id * tile_size, + //! thread_data, + //! valid_items, + //! -1); + //! + //! Suppose the input ``d_data`` is ``0, 1, 2, 3, 4, 5, ...``, ``valid_items`` is ``5``, and the + //! out-of-bounds default is ``-1``. The set of ``thread_data`` across the first logical warp of + //! threads in those threads will be: ``{ [0,1,2,3], [4,-1,-1,-1], ..., [-1,-1,-1,-1] }`` with + //! only the first two threads being unmasked to load portions of valid data (and other items + //! are assigned ``-1``). + //! @endrst + //! + //! @param[in] block_itr The thread block's base input iterator for loading from + //! 
@param[out] items Data to load + //! @param[in] valid_items Number of valid items to load + //! @param[in] oob_default Default value to assign out-of-bound items + template + _CCCL_DEVICE _CCCL_FORCEINLINE void + Load(InputIteratorT block_itr, InputT (&items)[ITEMS_PER_THREAD], int valid_items, DefaultT oob_default) + { + InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items, oob_default); + } + + //! @} end member group +}; + +CUB_NAMESPACE_END diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..17874d38a8dafe3f079bf5281b98e49ba687d3ec --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/cufft.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:333a4c7a35139648e288d0d49aadf62dc84eb6054abdab7b896d8cfcba1703b6 +size 2440736 diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/device.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/device.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1a932bd4e5b5ea167e4e864172e642376b835a7b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/device.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6d1f6c209a6147189b82412e981fffc6774ff81172d31fa1e40d5fee8c87433 +size 827880 diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/function.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/function.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..7ad75c262a8dc5dd934999b06b0e1188b374aee5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/function.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a623863b33b5ad41b361fda33bbe9df8ae434d1cc1f10b90accff2b7d6744fea +size 1060240 diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..efe4b9bbe03d1a37bba2f53844ffeb96d4416c46 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/graph.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a68a3724c8cd6d8170b00bd3ce91fcf3e38bba7315e30ee1695d8ed58aa08b5c +size 348880 diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/memory_hook.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/memory_hook.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..3e6b4a05e64745fb2bfd0f558fc01b9ddbfa51bc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/memory_hook.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d48901d88ea3bb44d9d223bfc9bce72e83a4655488583ab75c1829d68af109be +size 561480 diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/stream.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/stream.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b270362b69d1e34fa0b493f8fd6aae5cef8c4a39 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/stream.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2d80fca6e63f24f68255c092357529d06236ac54c7efcae69a571ff54bf9e0b +size 893576 diff --git a/vllm/lib/python3.10/site-packages/cupy/cuda/texture.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/cuda/texture.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..1f74d607ae9c1c116a9896949dce134e736b12a0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/cuda/texture.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79749bc6197a30e93e5a3081f8b87d525bc7a5a4ea637203a16ef883309a34de +size 1101464 diff --git a/vllm/lib/python3.10/site-packages/cupy/fft/_cache.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/fft/_cache.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1a3b5b4aa9ae7a58b1cea098da945e1a127d55aa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/fft/_cache.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45dd63aec6f6437a584d14dbfea193f5fa407b2e39b93530305321f6d2c70b2f +size 1229640 diff --git a/vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..f3c51758614fe15773388ec41bd36ed18d56699a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupy/fft/_callback.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8a4672a4230c95ea9ca323950df87abf2e13acfa6156664ed8f89e93f6e452f +size 1434272