| #include "cupy_cub.h" |
| #include <cupy/type_dispatcher.cuh> |
|
|
| #ifndef CUPY_USE_HIP |
| #include <cfloat> |
| #include <cub/device/device_reduce.cuh> |
| #include <cub/device/device_segmented_reduce.cuh> |
| #include <cub/device/device_spmv.cuh> |
| #include <cub/device/device_scan.cuh> |
| #include <cub/device/device_histogram.cuh> |
| #include <cub/iterator/counting_input_iterator.cuh> |
| #include <cub/iterator/transform_input_iterator.cuh> |
| #include <cuda/functional> |
| #include <cuda/std/functional> |
| #else |
| #include <hipcub/device/device_reduce.hpp> |
| #include <hipcub/device/device_segmented_reduce.hpp> |
| #include <hipcub/device/device_scan.hpp> |
| #include <hipcub/device/device_histogram.hpp> |
| #include <rocprim/iterator/counting_iterator.hpp> |
| #include <hipcub/iterator/transform_input_iterator.hpp> |
| #endif |
|
|
|
|
| |
| #ifndef CUPY_USE_HIP |
| |
| |
| |
| |
| |
| |
| using namespace cub; |
| #define CUPY_CUB_NAMESPACE cub |
|
|
// Specialize cub::FpLimits for thrust::complex so that CUB reductions have
// usable Max()/Lowest() identity values for complex types.  Each limit is
// component-wise: both the real and imaginary parts are set to the extreme.
template <>
struct FpLimits<complex<float>>
{
    static __host__ __device__ __forceinline__ complex<float> Max() {
        return (complex<float>(FLT_MAX, FLT_MAX));
    }

    static __host__ __device__ __forceinline__ complex<float> Lowest() {
        return (complex<float>(FLT_MAX * float(-1), FLT_MAX * float(-1)));
    }
};

template <>
struct FpLimits<complex<double>>
{
    static __host__ __device__ __forceinline__ complex<double> Max() {
        return (complex<double>(DBL_MAX, DBL_MAX));
    }

    static __host__ __device__ __forceinline__ complex<double> Lowest() {
        return (complex<double>(DBL_MAX * double(-1), DBL_MAX * double(-1)));
    }
};

// Register the complex types with CUB's type-traits machinery as
// floating-point types so the generic reduction paths accept them.
template <> struct NumericTraits<complex<float>> : BaseTraits<FLOATING_POINT, true, false, unsigned int, complex<float>> {};
template <> struct NumericTraits<complex<double>> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, complex<double>> {};
|
|
| |
// Specialize std::numeric_limits for the non-standard element types used by
// the reductions below.  Only the members this file actually queries are
// provided: infinity() and has_infinity (see _cub_reduce_min/_cub_reduce_max,
// which use infinity() as the reduction identity when has_infinity is true).
namespace std {

template <>
class numeric_limits<thrust::complex<float>> {
  public:
    // (+inf, +inf), component-wise.
    static __host__ __device__ thrust::complex<float> infinity() noexcept {
        return thrust::complex<float>(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
    }

    static constexpr bool has_infinity = true;
};

template <>
class numeric_limits<thrust::complex<double>> {
  public:
    // (+inf, +inf), component-wise.
    static __host__ __device__ thrust::complex<double> infinity() noexcept {
        return thrust::complex<double>(std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity());
    }

    static constexpr bool has_infinity = true;
};

template <>
class numeric_limits<__half> {
  public:
    static __host__ __device__ constexpr __half infinity() noexcept {
        // IEEE-754 binary16 +infinity bit pattern (sign 0, exponent all ones,
        // mantissa 0); reinterpreted below as a __half value.
        unsigned short inf_half = 0x7C00U;
#if (defined(_MSC_VER) && _MSC_VER >= 1920)
#if CUDA_VERSION < 11030
        // MSVC 2019+ with CUDA < 11.3: type-pun through a union.
        // NOTE(review): presumably the reinterpret_cast path does not build
        // on this toolchain combination -- not verifiable from this chunk.
        union caster {
            unsigned short u_;
            __half h_;
        };
        return caster{inf_half}.h_;
#else
        // MSVC 2019+ with CUDA >= 11.3: bit-cast the raw pattern to __half.
        return __builtin_bit_cast(__half, inf_half);
#endif
#else
        return *reinterpret_cast<__half*>(&inf_half);
#endif
    }

    static constexpr bool has_infinity = true;
};

}  // namespace std
|
|
|
|
| #else |
|
|
| |
| |
| |
| #define CUPY_CUB_NAMESPACE hipcub |
|
|
// HIP counterpart of the numeric_limits specializations above.  In addition
// to infinity(), max() and lowest() are provided (component-wise for the
// complex types) for use as reduction identities on the HIP code paths.
namespace std {
template <>
class numeric_limits<thrust::complex<float>> {
  public:
    static __host__ __device__ thrust::complex<float> max() noexcept {
        return thrust::complex<float>(std::numeric_limits<float>::max(), std::numeric_limits<float>::max());
    }

    static __host__ __device__ thrust::complex<float> lowest() noexcept {
        return thrust::complex<float>(-std::numeric_limits<float>::max(), -std::numeric_limits<float>::max());
    }

    static __host__ __device__ thrust::complex<float> infinity() noexcept {
        return thrust::complex<float>(std::numeric_limits<float>::infinity(), std::numeric_limits<float>::infinity());
    }

    static constexpr bool has_infinity = true;
};

template <>
class numeric_limits<thrust::complex<double>> {
  public:
    static __host__ __device__ thrust::complex<double> max() noexcept {
        return thrust::complex<double>(std::numeric_limits<double>::max(), std::numeric_limits<double>::max());
    }

    static __host__ __device__ thrust::complex<double> lowest() noexcept {
        return thrust::complex<double>(-std::numeric_limits<double>::max(), -std::numeric_limits<double>::max());
    }

    static __host__ __device__ thrust::complex<double> infinity() noexcept {
        return thrust::complex<double>(std::numeric_limits<double>::infinity(), std::numeric_limits<double>::infinity());
    }

    static constexpr bool has_infinity = true;
};

// __half limits built from raw IEEE-754 binary16 bit patterns:
//   0x7BFF = largest finite half, 0xFBFF = most negative finite half,
//   0x7C00 = +infinity.
template <>
class numeric_limits<__half> {
  public:
    static __host__ __device__ __half max() noexcept {
        unsigned short max_half = 0x7bff;
        __half max_value = *reinterpret_cast<__half*>(&max_half);
        return max_value;
    }

    static __host__ __device__ __half lowest() noexcept {
        unsigned short lowest_half = 0xfbff;
        __half lowest_value = *reinterpret_cast<__half*>(&lowest_half);
        return lowest_value;
    }

    static __host__ __device__ __half infinity() noexcept {
        unsigned short inf_half = 0x7C00U;
        __half inf_value = *reinterpret_cast<__half*>(&inf_half);
        return inf_value;
    }

    static constexpr bool has_infinity = true;
};
}  // namespace std
|
|
| using namespace hipcub; |
|
|
| #endif |
|
|
// Build -infinity as a __half from its raw IEEE-754 binary16 bit pattern
// 0xFC00 (sign 1, exponent all ones, mantissa 0).  Used as the identity for
// max-reductions over __half, since negating numeric_limits<__half>::infinity()
// is avoided here.
__host__ __device__ __half half_negate_inf() {
    unsigned short bits = 0xFC00U;
    return *reinterpret_cast<__half*>(&bits);
}
|
|
| |
|
|
|
|
| |
| |
| |
|
|
| |
| |
| |
// Binary multiply functor used for product reductions and cumprod scans.
// On CUDA it is simply libcu++'s transparent multiplies; on HIP an
// equivalent hand-rolled functor is used.
#ifdef CUPY_USE_HIP
struct _multiply
{
    template <typename T>
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const
    {
        return a * b;
    }
};
#else
using _multiply = cuda::std::multiplies<>;
#endif
|
|
| |
| |
| |
// Maps a segment index to its starting element offset: i -> i * step.
// Fed with a counting iterator (see seg_offset_itr) this lazily generates
// the offset sequence 0, step, 2*step, ... used by the segmented reductions.
struct _arange
{
private:
    int stride_;  // elements per segment

public:
    __host__ __device__ __forceinline__ _arange(int step): stride_(step) {}
    __host__ __device__ __forceinline__ int operator()(const int &idx) const {
        return idx * stride_;
    }
};
|
|
// Lazy iterator over segment start offsets: a counting iterator (0,1,2,...)
// transformed by _arange into (0, step, 2*step, ...).  Passing `itr` and
// `itr+1` to DeviceSegmentedReduce gives begin/end offsets per segment.
#ifndef CUPY_USE_HIP
typedef TransformInputIterator<int, _arange, CountingInputIterator<int>> seg_offset_itr;
#else
typedef TransformInputIterator<int, _arange, rocprim::counting_iterator<int>> seg_offset_itr;
#endif
|
|
| |
| |
| |
| |
| |
| |
|
|
// __half comparison helpers, compiled only where fp16 support exists
// (CUDA >= 9.2 with SM 5.3+, or any HIP build).  On the device the native
// __half operators/intrinsics are used; on the host the operands are
// widened to float first, since host-side __half arithmetic is unavailable.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
__host__ __device__ __forceinline__ bool half_isnan(const __half& x) {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
    return __hisnan(x);
#else
    // host path: widen to float and test
    return isnan(__half2float(x));
#endif
}

__host__ __device__ __forceinline__ bool half_less(const __half& l, const __half& r) {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
    return l < r;
#else
    // host path: widen to float and compare
    return __half2float(l) < __half2float(r);
#endif
}

__host__ __device__ __forceinline__ bool half_equal(const __half& l, const __half& r) {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
    return l == r;
#else
    // host path: widen to float and compare
    return __half2float(l) == __half2float(r);
#endif
}
#endif
|
|
| #ifdef CUPY_USE_HIP |
|
|
| |
| |
| |
|
|
// HIP path: max-reductions always use hipcub's Max functor, made NaN-aware
// by the operator() specializations below -- if either operand is NaN it is
// returned unchanged, so NaNs propagate through the reduction.
template <typename T>
struct select_max {
    using type = Max;
};

// float: propagate NaN, otherwise return the larger operand.
template <>
__host__ __device__ __forceinline__ float Max::operator()(const float &a, const float &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// double: same policy as float.
template <>
__host__ __device__ __forceinline__ double Max::operator()(const double &a, const double &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// complex<float>: same policy; isnan/operator< here resolve to overloads for
// complex defined elsewhere in the project (not visible in this chunk).
template <>
__host__ __device__ __forceinline__ complex<float> Max::operator()(const complex<float> &a, const complex<float> &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// complex<double>: same policy as complex<float>.
template <>
__host__ __device__ __forceinline__ complex<double> Max::operator()(const complex<double> &a, const complex<double> &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? b : a;}
}

// __half: same policy, using the host/device-safe half_* helpers above.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
__host__ __device__ __forceinline__ __half Max::operator()(const __half &a, const __half &b) const
{
    if (half_isnan(a)) {return a;}
    else if (half_isnan(b)) {return b;}
    else { return half_less(a, b) ? b : a; }
}
#endif
|
|
| |
| |
| |
|
|
// HIP path: min-reductions mirror the max case above -- hipcub's Min functor
// is specialized to propagate NaNs and otherwise return the smaller operand.
template <typename T>
struct select_min {
    using type = Min;
};

// float: propagate NaN, otherwise return the smaller operand.
template <>
__host__ __device__ __forceinline__ float Min::operator()(const float &a, const float &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// double: same policy as float.
template <>
__host__ __device__ __forceinline__ double Min::operator()(const double &a, const double &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// complex<float>: same policy; isnan/operator< here resolve to overloads for
// complex defined elsewhere in the project (not visible in this chunk).
template <>
__host__ __device__ __forceinline__ complex<float> Min::operator()(const complex<float> &a, const complex<float> &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// complex<double>: same policy as complex<float>.
template <>
__host__ __device__ __forceinline__ complex<double> Min::operator()(const complex<double> &a, const complex<double> &b) const
{
    if (isnan(a)) {return a;}
    else if (isnan(b)) {return b;}
    else {return a < b ? a : b;}
}

// __half: same policy, using the host/device-safe half_* helpers above.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
__host__ __device__ __forceinline__ __half Min::operator()(const __half &a, const __half &b) const
{
    if (half_isnan(a)) {return a;}
    else if (half_isnan(b)) {return b;}
    else { return half_less(a, b) ? a : b; }
}
#endif
|
|
| #endif |
|
|
| |
| |
| |
|
|
| |
// NaN-aware argmax over KeyValuePair<index, value>: a NaN value is returned
// unchanged so it dominates the reduction; among non-NaN operands the larger
// value wins, and ties on value are broken in favor of the smaller index.
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMax::operator()(
    const KeyValuePair<int, float> &a,
    const KeyValuePair<int, float> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// double: same policy as float.
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMax::operator()(
    const KeyValuePair<int, double> &a,
    const KeyValuePair<int, double> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// complex<float>: same policy; isnan/comparisons on complex resolve to
// overloads defined elsewhere in the project (not visible in this chunk).
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMax::operator()(
    const KeyValuePair<int, complex<float>> &a,
    const KeyValuePair<int, complex<float>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// complex<double>: same policy as complex<float>.
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMax::operator()(
    const KeyValuePair<int, complex<double>> &a,
    const KeyValuePair<int, complex<double>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// __half: same policy, using the host/device-safe half_* helpers.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMax::operator()(
    const KeyValuePair<int, __half> &a,
    const KeyValuePair<int, __half> &b) const
{
    if (half_isnan(a.value))
        return a;
    else if (half_isnan(b.value))
        return b;
    else if ((half_less(a.value, b.value)) ||
             (half_equal(a.value, b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}
#endif
|
|
| |
| |
| |
|
|
| |
// NaN-aware argmin over KeyValuePair<index, value>: a NaN value is returned
// unchanged so it dominates the reduction; among non-NaN operands the smaller
// value wins, and ties on value are broken in favor of the smaller index.
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, float> ArgMin::operator()(
    const KeyValuePair<int, float> &a,
    const KeyValuePair<int, float> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// double: same policy as float.
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, double> ArgMin::operator()(
    const KeyValuePair<int, double> &a,
    const KeyValuePair<int, double> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// complex<float>: same policy; isnan/comparisons on complex resolve to
// overloads defined elsewhere in the project (not visible in this chunk).
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<float>> ArgMin::operator()(
    const KeyValuePair<int, complex<float>> &a,
    const KeyValuePair<int, complex<float>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// complex<double>: same policy as complex<float>.
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, complex<double>> ArgMin::operator()(
    const KeyValuePair<int, complex<double>> &a,
    const KeyValuePair<int, complex<double>> &b) const
{
    if (isnan(a.value))
        return a;
    else if (isnan(b.value))
        return b;
    else if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}

// __half: same policy, using the host/device-safe half_* helpers.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
__host__ __device__ __forceinline__ KeyValuePair<int, __half> ArgMin::operator()(
    const KeyValuePair<int, __half> &a,
    const KeyValuePair<int, __half> &b) const
{
    if (half_isnan(a.value))
        return a;
    else if (half_isnan(b.value))
        return b;
    else if ((half_less(b.value, a.value)) ||
             (half_equal(a.value, b.value) && (b.key < a.key)))
        return b;
    else
        return a;
}
#endif
|
|
| #ifndef CUPY_USE_HIP |
|
|
| |
| |
| |
|
|
// CUDA path: reductions pick their binary "max" operator via select_max<T>.
// The generic case uses the stock CUB/CCCL maximum functor (cuda::maximum<>
// on newer CCCL, cub::Max otherwise); floating-point and complex types are
// overridden with a NaN-propagating functor below.
template <typename T>
struct select_max {
#if CCCL_VERSION >= 2008000
    using type = cuda::maximum<>;
#else
    using type = cub::Max;
#endif
};

// NaN-aware max: if either operand is NaN it is returned unchanged,
// otherwise the larger operand wins.
template <typename T>
struct nan_handling_max {
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const {
        if (isnan(a)) {return a;}
        else if (isnan(b)) {return b;}
        else {return a < b ? b : a;}
    }
};

template <>
struct select_max<float> {
    using type = nan_handling_max<float>;
};

template <>
struct select_max<double> {
    using type = nan_handling_max<double>;
};

// complex types rely on isnan/operator< overloads for complex defined
// elsewhere in the project (not visible in this chunk).
template <>
struct select_max<complex<float>> {
    using type = nan_handling_max<complex<float>>;
};
template <>
struct select_max<complex<double>> {
    using type = nan_handling_max<complex<double>>;
};

// __half: same NaN policy, via the host/device-safe half_* helpers.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
struct select_max<__half> {
    struct type {
        __host__ __device__ __forceinline__ __half operator()(const __half &a, const __half &b) const {
            if (half_isnan(a)) {return a;}
            else if (half_isnan(b)) {return b;}
            else { return half_less(a, b) ? b : a; }
        }
    };
};
#endif
|
|
| |
| |
| |
// CUDA path: "min" operator selection, mirroring select_max above.
template <typename T>
struct select_min {
#if CCCL_VERSION >= 2008000
    using type = cuda::minimum<>;
#else
    using type = cub::Min;
#endif
};

// NaN-aware min: if either operand is NaN it is returned unchanged,
// otherwise the smaller operand wins.
template <typename T>
struct nan_handling_min {
    __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const {
        if (isnan(a)) {return a;}
        else if (isnan(b)) {return b;}
        else {return a < b ? a : b;}
    }
};

template <>
struct select_min<float> {
    using type = nan_handling_min<float>;
};

template <>
struct select_min<double> {
    using type = nan_handling_min<double>;
};

// complex types rely on isnan/operator< overloads for complex defined
// elsewhere in the project (not visible in this chunk).
template <>
struct select_min<complex<float>> {
    using type = nan_handling_min<complex<float>>;
};
template <>
struct select_min<complex<double>> {
    using type = nan_handling_min<complex<double>>;
};

// __half: same NaN policy, via the host/device-safe half_* helpers.
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
    && (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
template <>
struct select_min<__half> {
    struct type {
        __host__ __device__ __forceinline__ __half operator()(const __half &a, const __half &b) const {
            if (half_isnan(a)) {return a;}
            else if (half_isnan(b)) {return b;}
            else { return half_less(a, b) ? a: b; }
        }
    };
};
#endif
|
|
| #endif |
|
|
| |
|
|
| |
| |
| |
// Full-array sum of num_items elements of T from x into the single-element
// output y.  As with all CUB two-phase calls in this file, a NULL workspace
// makes CUB only write the required temp-storage bytes into workspace_size.
struct _cub_reduce_sum {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        DeviceReduce::Sum(workspace, workspace_size, static_cast<T*>(x),
            static_cast<T*>(y), num_items, s);
    }
};
|
|
// Per-segment sum: segment i covers [offset_start[i], offset_start[i+1]),
// with offsets generated lazily by seg_offset_itr.  One output per segment.
struct _cub_segmented_reduce_sum {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_segments, seg_offset_itr offset_start, cudaStream_t s)
    {
        DeviceSegmentedReduce::Sum(workspace, workspace_size,
            static_cast<T*>(x), static_cast<T*>(y), num_segments,
            offset_start, offset_start+1, s);
    }
};
|
|
| |
| |
| |
// Full-array product: generic Reduce with the _multiply functor and the
// multiplicative identity static_cast<T>(1.0f) as the initial value.
struct _cub_reduce_prod {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        _multiply product_op;

        DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
            static_cast<T*>(y), num_items, product_op, static_cast<T>(1.0f), s);
    }
};
|
|
// Per-segment product: segmented counterpart of _cub_reduce_prod, with
// identity static_cast<T>(1.0f) per segment.
struct _cub_segmented_reduce_prod {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_segments, seg_offset_itr offset_start, cudaStream_t s)
    {
        _multiply product_op;

        DeviceSegmentedReduce::Reduce(workspace, workspace_size,
            static_cast<T*>(x), static_cast<T*>(y), num_segments,
            offset_start, offset_start+1,
            product_op, static_cast<T>(1.0f), s);
    }
};
|
|
| |
| |
| |
// Full-array min.  Types that declare has_infinity (float/double/complex/
// __half via the numeric_limits specializations above) use the NaN-aware
// select_min<T>::type with +infinity as the identity, so NaNs propagate;
// all other types fall back to CUB's stock DeviceReduce::Min.
struct _cub_reduce_min {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
                static_cast<T*>(y), num_items,
                typename select_min<T>::type{}, std::numeric_limits<T>::infinity(), s);
        }
        else
        {
            DeviceReduce::Min(workspace, workspace_size, static_cast<T*>(x),
                static_cast<T*>(y), num_items, s);
        }
    }
};
|
|
// Per-segment min: segmented counterpart of _cub_reduce_min (NaN-aware op
// with +infinity identity for has_infinity types, stock Min otherwise).
struct _cub_segmented_reduce_min {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_segments, seg_offset_itr offset_start, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            DeviceSegmentedReduce::Reduce(workspace, workspace_size,
                static_cast<T*>(x), static_cast<T*>(y), num_segments,
                offset_start, offset_start+1,
                typename select_min<T>::type{}, std::numeric_limits<T>::infinity(), s);
        }
        else
        {
            DeviceSegmentedReduce::Min(workspace, workspace_size,
                static_cast<T*>(x), static_cast<T*>(y), num_segments,
                offset_start, offset_start+1, s);
        }
    }
};
|
|
| |
| |
| |
// Full-array max, mirroring _cub_reduce_min with -infinity as the identity.
// __half is special-cased through half_negate_inf(), which builds -inf from
// its raw bit pattern instead of negating numeric_limits<__half>::infinity()
// (NOTE(review): presumably unary minus on __half is not portable across the
// supported toolchains -- not verifiable from this chunk).
struct _cub_reduce_max {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            if constexpr (std::is_same_v<T, __half>)
            {
                DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
                    static_cast<T*>(y), num_items,
                    typename select_max<T>::type{}, half_negate_inf(), s);
            }
            else
            {
                DeviceReduce::Reduce(workspace, workspace_size, static_cast<T*>(x),
                    static_cast<T*>(y), num_items,
                    typename select_max<T>::type{}, -std::numeric_limits<T>::infinity(), s);
            }
        }
        else
        {
            DeviceReduce::Max(workspace, workspace_size, static_cast<T*>(x),
                static_cast<T*>(y), num_items, s);
        }
    }
};
|
|
// Per-segment max: segmented counterpart of _cub_reduce_max, including the
// __half special case using half_negate_inf() as the identity.
struct _cub_segmented_reduce_max {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_segments, seg_offset_itr offset_start, cudaStream_t s)
    {
        if constexpr (std::numeric_limits<T>::has_infinity)
        {
            if constexpr (std::is_same_v<T, __half>)
            {
                DeviceSegmentedReduce::Reduce(workspace, workspace_size,
                    static_cast<T*>(x), static_cast<T*>(y), num_segments,
                    offset_start, offset_start+1,
                    typename select_max<T>::type{}, half_negate_inf(), s);
            }
            else
            {
                DeviceSegmentedReduce::Reduce(workspace, workspace_size,
                    static_cast<T*>(x), static_cast<T*>(y), num_segments,
                    offset_start, offset_start+1,
                    typename select_max<T>::type{}, -std::numeric_limits<T>::infinity(), s);
            }
        }
        else
        {
            DeviceSegmentedReduce::Max(workspace, workspace_size,
                static_cast<T*>(x), static_cast<T*>(y), num_segments,
                offset_start, offset_start+1, s);
        }
    }
};
|
|
| |
| |
| |
// Full-array argmin: y receives a single KeyValuePair<int, T> (index, value).
// NaN handling comes from the ArgMin::operator() specializations above.
struct _cub_reduce_argmin {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        DeviceReduce::ArgMin(workspace, workspace_size, static_cast<T*>(x),
            static_cast<KeyValuePair<int, T>*>(y), num_items, s);
    }
};
|
|
| |
|
|
| |
| |
| |
// Full-array argmax: y receives a single KeyValuePair<int, T> (index, value).
// NaN handling comes from the ArgMax::operator() specializations above.
struct _cub_reduce_argmax {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* x, void* y,
        int num_items, cudaStream_t s)
    {
        DeviceReduce::ArgMax(workspace, workspace_size, static_cast<T*>(x),
            static_cast<KeyValuePair<int, T>*>(y), num_items, s);
    }
};
|
|
| |
|
|
| |
| |
| |
// CSR sparse matrix-vector product y = A @ x via cub::DeviceSpmv::CsrMV.
// On HIP the body compiles out entirely (DeviceSpmv is unavailable there),
// so the call is a silent no-op; the Python layer is expected to guard this.
struct _cub_device_spmv {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* values,
        void* row_offsets, void* column_indices, void* x, void* y,
        int num_rows, int num_cols, int num_nonzeros, cudaStream_t stream)
    {
#ifndef CUPY_USE_HIP
        DeviceSpmv::CsrMV(workspace, workspace_size, static_cast<T*>(values),
            static_cast<int*>(row_offsets), static_cast<int*>(column_indices),
            static_cast<T*>(x), static_cast<T*>(y), num_rows, num_cols,
            num_nonzeros, stream);
#endif
    }
};
|
|
| |
| |
| |
// Inclusive prefix sum (cumsum) of num_items elements of T.
struct _cub_inclusive_sum {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int num_items, cudaStream_t s)
    {
        DeviceScan::InclusiveSum(workspace, workspace_size, static_cast<T*>(input),
            static_cast<T*>(output), num_items, s);
    }
};
|
|
| |
| |
| |
// Inclusive prefix product (cumprod), implemented as a generic inclusive
// scan with the _multiply functor.
struct _cub_inclusive_product {
    template <typename T>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int num_items, cudaStream_t s)
    {
        _multiply product_op;
        DeviceScan::InclusiveScan(workspace, workspace_size, static_cast<T*>(input),
            static_cast<T*>(output), product_op, num_items, s);
    }
};
|
|
| |
| |
| |
// Histogram with caller-supplied bin edges (DeviceHistogram::HistogramRange).
// `bins` holds n_bins+1 edges of type binT; for integral sample types the
// edges default to double.  Counts are written to `output`.
struct _cub_histogram_range {
    template <typename sampleT,
              typename binT = typename std::conditional<std::is_integral<sampleT>::value, double, sampleT>::type>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int n_bins, void* bins, size_t n_samples, cudaStream_t s) const
    {
        // Map complex sample/bin types to double so this template still
        // instantiates for every dispatched dtype; the public wrapper
        // (cub_device_histogram_range) rejects complex dtypes at runtime
        // before ever reaching this path.
        typedef typename std::conditional<(std::is_same<sampleT, complex<float>>::value || std::is_same<sampleT, complex<double>>::value),
                                          double,
                                          sampleT>::type h_sampleT;
        typedef typename std::conditional<(std::is_same<binT, complex<float>>::value || std::is_same<binT, complex<double>>::value),
                                          double,
                                          binT>::type h_binT;

        // CUB's HistogramRange takes an int sample count; n_samples is
        // narrowed here (callers must keep it within int range).
        int num_samples = n_samples;
        DeviceHistogram::HistogramRange(workspace, workspace_size, static_cast<h_sampleT*>(input),
#ifndef CUPY_USE_HIP
            static_cast<long long*>(output), n_bins, static_cast<h_binT*>(bins), num_samples, s);
#else
            // hipCUB accumulates counts into unsigned long long rather than
            // CUDA's long long.
            static_cast<unsigned long long*>(output), n_bins, static_cast<h_binT*>(bins), num_samples, s);
#endif
    }
};
|
|
| |
| |
| |
// Histogram with evenly spaced bins over [lower, upper]
// (DeviceHistogram::HistogramEven).  CUDA only; HIP builds throw.
struct _cub_histogram_even {
    template <typename sampleT>
    void operator()(void* workspace, size_t& workspace_size, void* input, void* output,
        int& n_bins, int& lower, int& upper, size_t n_samples, cudaStream_t s) const
    {
#ifndef CUPY_USE_HIP
        // Non-integral sampleT collapses to int purely to keep instantiation
        // valid for every dispatched dtype; assumes the caller only routes
        // integral dtypes here -- TODO confirm against the Python layer.
        typedef typename std::conditional<std::is_integral<sampleT>::value, sampleT, int>::type h_sampleT;
        int num_samples = n_samples;
        // NOTE(review): the reason for requiring pointer-sized long long is
        // not visible in this chunk (output is cast to long long* below).
        static_assert(sizeof(long long) == sizeof(intptr_t), "not supported");
        DeviceHistogram::HistogramEven(workspace, workspace_size, static_cast<h_sampleT*>(input),
            static_cast<long long*>(output), n_bins, lower, upper, num_samples, s);
#else
        throw std::runtime_error("HIP is not supported yet");
#endif
    }
};
|
|
| |
| |
| |
|
|
| |
|
|
// Entry point for full-array reductions.  `op` selects the reduction
// (CUPY_CUB_SUM/MIN/MAX/ARGMIN/ARGMAX/PROD) and dtype_dispatcher instantiates
// the matching functor for dtype_id.  A NULL workspace performs the CUB
// temp-storage size query only (see *_get_workspace_size below).
void cub_device_reduce(void* workspace, size_t& workspace_size, void* x, void* y,
    int num_items, cudaStream_t stream, int op, int dtype_id)
{
    if (op == CUPY_CUB_SUM) {
        dtype_dispatcher(dtype_id, _cub_reduce_sum(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else if (op == CUPY_CUB_MIN) {
        dtype_dispatcher(dtype_id, _cub_reduce_min(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else if (op == CUPY_CUB_MAX) {
        dtype_dispatcher(dtype_id, _cub_reduce_max(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else if (op == CUPY_CUB_ARGMIN) {
        dtype_dispatcher(dtype_id, _cub_reduce_argmin(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else if (op == CUPY_CUB_ARGMAX) {
        dtype_dispatcher(dtype_id, _cub_reduce_argmax(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else if (op == CUPY_CUB_PROD) {
        dtype_dispatcher(dtype_id, _cub_reduce_prod(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else {
        throw std::runtime_error("Unsupported operation");
    }
}
|
|
// Size query: run the reduction with a NULL workspace so CUB only reports
// the required temporary-storage bytes, then return them.
size_t cub_device_reduce_get_workspace_size(void* x, void* y, int num_items,
    cudaStream_t stream, int op, int dtype_id)
{
    size_t n_bytes = 0;
    cub_device_reduce(nullptr, n_bytes, x, y, num_items, stream, op, dtype_id);
    return n_bytes;
}
|
|
| |
|
|
// Entry point for segmented (per-row) reductions over equal-sized segments.
// Segment i spans [i*segment_size, (i+1)*segment_size); the offsets are
// produced lazily by transforming a counting iterator with _arange.
void cub_device_segmented_reduce(void* workspace, size_t& workspace_size,
    void* x, void* y, int num_segments, int segment_size,
    cudaStream_t stream, int op, int dtype_id)
{
#ifndef CUPY_USE_HIP
    CountingInputIterator<int> counting(0);
#else
    rocprim::counting_iterator<int> counting(0);
#endif
    seg_offset_itr offsets(counting, _arange(segment_size));

    if (op == CUPY_CUB_SUM) {
        dtype_dispatcher(dtype_id, _cub_segmented_reduce_sum(),
                         workspace, workspace_size, x, y, num_segments, offsets, stream);
    } else if (op == CUPY_CUB_MIN) {
        dtype_dispatcher(dtype_id, _cub_segmented_reduce_min(),
                         workspace, workspace_size, x, y, num_segments, offsets, stream);
    } else if (op == CUPY_CUB_MAX) {
        dtype_dispatcher(dtype_id, _cub_segmented_reduce_max(),
                         workspace, workspace_size, x, y, num_segments, offsets, stream);
    } else if (op == CUPY_CUB_PROD) {
        dtype_dispatcher(dtype_id, _cub_segmented_reduce_prod(),
                         workspace, workspace_size, x, y, num_segments, offsets, stream);
    } else {
        throw std::runtime_error("Unsupported operation");
    }
}
|
|
// Size query for segmented reductions: NULL workspace triggers the CUB
// temp-storage size computation without launching any work.
size_t cub_device_segmented_reduce_get_workspace_size(void* x, void* y,
    int num_segments, int segment_size,
    cudaStream_t stream, int op, int dtype_id)
{
    size_t n_bytes = 0;
    cub_device_segmented_reduce(nullptr, n_bytes, x, y,
                                num_segments, segment_size, stream,
                                op, dtype_id);
    return n_bytes;
}
|
|
| |
|
|
// Entry point for CSR SpMV.  CUDA only: on HIP the body compiles out and the
// call is a silent no-op (the Python layer is expected to guard this).
void cub_device_spmv(void* workspace, size_t& workspace_size, void* values,
    void* row_offsets, void* column_indices, void* x, void* y, int num_rows,
    int num_cols, int num_nonzeros, cudaStream_t stream,
    int dtype_id)
{
#ifndef CUPY_USE_HIP
    return dtype_dispatcher(dtype_id, _cub_device_spmv(),
                            workspace, workspace_size, values, row_offsets,
                            column_indices, x, y, num_rows, num_cols,
                            num_nonzeros, stream);
#endif
}
|
|
// Size query for SpMV: NULL workspace triggers the CUB temp-storage size
// computation.  Returns 0 on HIP, where SpMV is unavailable.
size_t cub_device_spmv_get_workspace_size(void* values, void* row_offsets,
    void* column_indices, void* x, void* y, int num_rows, int num_cols,
    int num_nonzeros, cudaStream_t stream, int dtype_id)
{
    size_t workspace_size = 0;
#ifndef CUPY_USE_HIP
    cub_device_spmv(NULL, workspace_size, values, row_offsets, column_indices,
                    x, y, num_rows, num_cols, num_nonzeros, stream, dtype_id);
#endif
    return workspace_size;
}
|
|
| |
|
|
// Entry point for inclusive scans: CUPY_CUB_CUMSUM -> prefix sum,
// CUPY_CUB_CUMPROD -> prefix product.  A NULL workspace performs the CUB
// temp-storage size query only.
void cub_device_scan(void* workspace, size_t& workspace_size, void* x, void* y,
    int num_items, cudaStream_t stream, int op, int dtype_id)
{
    if (op == CUPY_CUB_CUMSUM) {
        dtype_dispatcher(dtype_id, _cub_inclusive_sum(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else if (op == CUPY_CUB_CUMPROD) {
        dtype_dispatcher(dtype_id, _cub_inclusive_product(),
                         workspace, workspace_size, x, y, num_items, stream);
    } else {
        throw std::runtime_error("Unsupported operation");
    }
}
|
|
// Size query for scans: NULL workspace triggers the CUB temp-storage size
// computation without launching any work.
size_t cub_device_scan_get_workspace_size(void* x, void* y, int num_items,
    cudaStream_t stream, int op, int dtype_id)
{
    size_t n_bytes = 0;
    cub_device_scan(nullptr, n_bytes, x, y, num_items, stream, op, dtype_id);
    return n_bytes;
}
|
|
| |
|
|
// Entry point for ranged histograms (caller-supplied bin edges in `bins`).
// Complex dtypes are rejected up front; _cub_histogram_range's complex
// instantiations exist only to keep the dispatcher compilable.
void cub_device_histogram_range(void* workspace, size_t& workspace_size, void* x, void* y,
    int n_bins, void* bins, size_t n_samples, cudaStream_t stream, int dtype_id)
{
    const bool is_complex = (dtype_id == CUPY_TYPE_COMPLEX64)
                         || (dtype_id == CUPY_TYPE_COMPLEX128);
    if (is_complex) {
        throw std::runtime_error("complex dtype is not yet supported");
    }

    dtype_dispatcher(dtype_id, _cub_histogram_range(),
                     workspace, workspace_size, x, y, n_bins, bins, n_samples, stream);
}
|
|
// Size query for ranged histograms: NULL workspace triggers the CUB
// temp-storage size computation.  Throws for complex dtypes, like the
// underlying call.
size_t cub_device_histogram_range_get_workspace_size(void* x, void* y, int n_bins,
    void* bins, size_t n_samples, cudaStream_t stream, int dtype_id)
{
    size_t n_bytes = 0;
    cub_device_histogram_range(nullptr, n_bytes, x, y, n_bins, bins, n_samples,
                               stream, dtype_id);
    return n_bytes;
}
|
|
// Entry point for evenly-binned histograms.  CUDA only: on HIP the dispatch
// compiles out and the call is a silent no-op (workspace_size stays 0).
void cub_device_histogram_even(void* workspace, size_t& workspace_size, void* x, void* y,
    int n_bins, int lower, int upper, size_t n_samples, cudaStream_t stream, int dtype_id)
{
#ifndef CUPY_USE_HIP
    return dtype_dispatcher(dtype_id, _cub_histogram_even(),
                            workspace, workspace_size, x, y, n_bins, lower, upper, n_samples, stream);
#endif
}
|
|
// Size query for evenly-binned histograms: NULL workspace triggers the CUB
// temp-storage size computation.  Returns 0 on HIP, where this path is
// compiled out.
size_t cub_device_histogram_even_get_workspace_size(void* x, void* y, int n_bins,
    int lower, int upper, size_t n_samples, cudaStream_t stream, int dtype_id)
{
    size_t n_bytes = 0;
    cub_device_histogram_even(nullptr, n_bytes, x, y, n_bins, lower, upper, n_samples,
                              stream, dtype_id);
    return n_bytes;
}
|
|