repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/mdarray.hpp | /*
* Copyright (2019) Sandia Corporation
*
* The source code is licensed under the 3-clause BSD license found in the LICENSE file
* thirdparty/LICENSES/mdarray.license
*/
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#include <raft/core/detail/macros.hpp>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/memory_type.hpp>
#include <raft/core/resources.hpp>
namespace raft {
/**
* @defgroup mdarray_apis multi-dimensional memory-owning type
* @{
*/
/**
* @brief Interface to implement an owning multi-dimensional array
*
 * raft::array_interface is an interface to owning container types for mdspan.
* Check implementation of raft::mdarray which implements raft::array_interface
* using Curiously Recurring Template Pattern.
* This interface calls into method `view()` whose implementation is provided by
* the implementing class. `view()` must return an object of type raft::host_mdspan
 * or raft::device_mdspan or any types derived from them.
*/
template <typename Base>
class array_interface {
 public:
  // NOTE: `public:` is required here — class members default to private, which
  // would make the CRTP interface unusable through a base reference.
  /**
   * @brief Get an mdspan
   */
  auto view() noexcept { return static_cast<Base*>(this)->view(); }
  /**
   * @brief Get an mdspan<const T>
   *
   * The cast must preserve const-ness: casting a `const this` to `Base*`
   * would attempt to cast away const and fail to compile on instantiation.
   */
  auto view() const noexcept { return static_cast<Base const*>(this)->view(); }
};
namespace detail {
/**
 * @brief Detection trait: false unless `T` has a member `view()` whose result
 * is an mdspan. This primary template handles the "no usable view()" case.
 */
template <typename T, typename = void>
struct is_array_interface : std::false_type {};
// Chosen via SFINAE when `declval<T>().view()` is well-formed; true iff the
// returned type is an mdspan.
template <typename T>
struct is_array_interface<T, std::void_t<decltype(std::declval<T>().view())>>
  : std::bool_constant<is_mdspan_v<decltype(std::declval<T>().view())>> {};
// Convenience alias that strips top-level const from T before testing.
template <typename T>
using is_array_interface_t = is_array_interface<std::remove_const_t<T>>;
/**
 * \brief Boolean to determine if template type T is raft::array_interface or derived type
 * or any type that has a member function `view()` that returns either
 * raft::host_mdspan or raft::device_mdspan
 */
template <typename T>
inline constexpr bool is_array_interface_v = is_array_interface<std::remove_const_t<T>>::value;
} // namespace detail
template <typename...>
struct is_array_interface : std::true_type {};
template <typename T1>
struct is_array_interface<T1> : detail::is_array_interface_t<T1> {};
template <typename T1, typename... Tn>
struct is_array_interface<T1, Tn...> : std::conditional_t<detail::is_array_interface_v<T1>,
is_array_interface<Tn...>,
std::false_type> {};
/**
* @\brief Boolean to determine if variadic template types Tn are raft::array_interface
* or derived type or any type that has a member function `view()` that returns either
* raft::host_mdspan or raft::device_mdspan
*/
template <typename... Tn>
inline constexpr bool is_array_interface_v = is_array_interface<Tn...>::value;
/**
* @brief Modified from the c++ mdarray proposal
*
* https://isocpp.org/files/papers/D1684R0.html
*
* mdarray is a container type for mdspan with similar template arguments. However there
 * are some inconsistencies between them. We have made some modifications to fit our
* needs, which are listed below.
*
* - Layout policy is different, the mdarray in raft uses `std::experimental::extent` directly just
 * like `mdspan`, while the `mdarray` in the reference implementation uses variadic
* template.
*
* - Most of the constructors from the reference implementation is removed to make sure
* CUDA stream is honored. Note that this class is not coupled to CUDA and therefore
* will only be used in the case where the device variant is used.
*
* - unique_size is not implemented, which is still working in progress in the proposal
*
* - For container policy, we adopt the alternative approach documented in the proposal
* [sec 2.4.3], which requires an additional make_accessor method for it to be used in
* mdspan. The container policy reference implementation has multiple `access` methods
* that accommodate needs for both mdarray and mdspan. This is more difficult for us
* since the policy might contain states that are unwanted inside a CUDA kernel. Also,
* on host we return a proxy to the actual value as `device_ref` so different access
* methods will have different return type, which is less desirable.
*
* - For the above reasons, copying from other mdarray with different policy type is also
* removed.
*/
template <typename ElementType, typename Extents, typename LayoutPolicy, typename ContainerPolicy>
class mdarray
  : public array_interface<mdarray<ElementType, Extents, LayoutPolicy, ContainerPolicy>> {
  static_assert(!std::is_const<ElementType>::value,
                "Element type for container must not be const.");
 public:
  using extents_type = Extents;
  using layout_type = LayoutPolicy;
  using mapping_type = typename layout_type::template mapping<extents_type>;
  using element_type = ElementType;
  using value_type = std::remove_cv_t<element_type>;
  using index_type = typename extents_type::index_type;
  using difference_type = std::ptrdiff_t;
  using rank_type = typename extents_type::rank_type;
  // Naming: ref impl: container_policy_type, proposal: container_policy
  using container_policy_type = ContainerPolicy;
  using container_type = typename container_policy_type::container_type;
  using pointer = typename container_policy_type::pointer;
  using const_pointer = typename container_policy_type::const_pointer;
  using reference = typename container_policy_type::reference;
  using const_reference = typename container_policy_type::const_reference;
 private:
  // Picks the accessor policy used by views: the const accessor when the
  // element type is const (so `view() const` yields a read-only mdspan),
  // the mutable accessor otherwise.
  template <typename E,
            typename ViewAccessorPolicy =
              std::conditional_t<std::is_const_v<E>,
                                 typename container_policy_type::const_accessor_policy,
                                 typename container_policy_type::accessor_policy>>
  using view_type_impl =
    mdspan<E,
           extents_type,
           layout_type,
           host_device_accessor<ViewAccessorPolicy, container_policy_type::mem_type>>;
 public:
  /**
   * \brief the mdspan type returned by view method.
   */
  using view_type = view_type_impl<element_type>;
  using const_view_type = view_type_impl<element_type const>;
 public:
  // Constructs an empty mdarray (zero-size container). The handle is still
  // forwarded to the policy's create() so resource-aware policies are honored.
  constexpr mdarray(raft::resources const& handle) noexcept(
    std::is_nothrow_default_constructible_v<container_type>)
    : cp_{}, c_{cp_.create(handle, 0)} {};
  constexpr mdarray(mdarray const&) noexcept(std::is_nothrow_copy_constructible_v<container_type>) =
    default;
  constexpr mdarray(mdarray&&) noexcept(std::is_nothrow_move_constructible<container_type>::value) =
    default;
  constexpr auto operator=(mdarray const&) noexcept(
    std::is_nothrow_copy_assignable<container_type>::value) -> mdarray& = default;
  constexpr auto operator=(mdarray&&) noexcept(
    std::is_nothrow_move_assignable<container_type>::value) -> mdarray& = default;
  ~mdarray() noexcept(std::is_nothrow_destructible<container_type>::value) = default;
#ifndef RAFT_MDARRAY_CTOR_CONSTEXPR
#if !(__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ <= 2)
  // Workaround: the storage constructors below are constexpr except when
  // compiled with nvcc 11.0-11.2, which miscompiles them (errors reproduced
  // verbatim below).
  // 11.0:
  // Error: Internal Compiler Error (codegen): "there was an error in verifying the lgenfe output!"
  //
  // 11.2:
  // Call parameter type does not match function signature!
  // i8** null
  // i8* %call14 = call i32 null(void (i8*)* null, i8* null, i8** null), !dbg !1060
  // <unnamed>: parse Invalid record (Producer: 'LLVM7.0.1' Reader: 'LLVM 7.0.1')
#define RAFT_MDARRAY_CTOR_CONSTEXPR constexpr
#else
#define RAFT_MDARRAY_CTOR_CONSTEXPR
#endif // !(__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ <= 2)
#endif // RAFT_MDARRAY_CTOR_CONSTEXPR
  /**
   * @brief The only constructor that can create storage, raft::resources is accepted
   * so that the device implementation can make sure the relevant CUDA stream is
   * being used for allocation.
   */
  RAFT_MDARRAY_CTOR_CONSTEXPR mdarray(raft::resources const& handle,
                                      mapping_type const& m,
                                      container_policy_type const& cp)
    : cp_(cp), map_(m), c_(cp_.create(handle, map_.required_span_size()))
  {
  }
  // NOTE(review): behaviorally identical to the const& overload above —
  // presumably kept so mutable policy lvalues bind here; confirm before
  // consolidating the two overloads.
  RAFT_MDARRAY_CTOR_CONSTEXPR mdarray(raft::resources const& handle,
                                      mapping_type const& m,
                                      container_policy_type& cp)
    : cp_(cp), map_(m), c_(cp_.create(handle, map_.required_span_size()))
  {
  }
#undef RAFT_MDARRAY_CTOR_CONSTEXPR
  /**
   * @brief Get an mdspan
   */
  auto view() noexcept { return view_type(c_.data(), map_, cp_.make_accessor_policy()); }
  /**
   * @brief Get an mdspan<const T>
   */
  auto view() const noexcept
  {
    return const_view_type(c_.data(), map_, cp_.make_accessor_policy());
  }
  // Number of elements spanned by the view.
  [[nodiscard]] constexpr auto size() const noexcept -> std::size_t { return this->view().size(); }
  // Raw pointer to the underlying container storage.
  [[nodiscard]] auto data_handle() noexcept -> pointer { return c_.data(); }
  [[nodiscard]] constexpr auto data_handle() const noexcept -> const_pointer { return c_.data(); }
  /**
   * @brief Indexing operator, use it sparingly since it triggers a device<->host copy.
   */
  template <typename... IndexType>
  auto operator()(IndexType&&... indices)
    -> std::enable_if_t<sizeof...(IndexType) == extents_type::rank() &&
                          (std::is_convertible_v<IndexType, index_type> && ...) &&
                          std::is_constructible_v<extents_type, IndexType...>,
                        /* device policy is not default constructible due to requirement for CUDA
                           stream. */
                        /* std::is_default_constructible_v<container_policy_type> */
                        reference>
  {
    return cp_.access(c_, map_(std::forward<IndexType>(indices)...));
  }
  /**
   * @brief Indexing operator, use it sparingly since it triggers a device<->host copy.
   */
  template <typename... IndexType>
  auto operator()(IndexType&&... indices) const
    -> std::enable_if_t<sizeof...(IndexType) == extents_type::rank() &&
                          (std::is_convertible_v<IndexType, index_type> && ...) &&
                          std::is_constructible_v<extents_type, IndexType...> &&
                          std::is_constructible<mapping_type, extents_type>::value,
                        /* device policy is not default constructible due to requirement for CUDA
                           stream. */
                        /* std::is_default_constructible_v<container_policy_type> */
                        const_reference>
  {
    return cp_.access(c_, map_(std::forward<IndexType>(indices)...));
  }
  // basic_mdarray observers of the domain multidimensional index space (also in basic_mdspan)
  [[nodiscard]] RAFT_INLINE_FUNCTION static constexpr auto rank() noexcept -> rank_type
  {
    return extents_type::rank();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION static constexpr auto rank_dynamic() noexcept -> rank_type
  {
    return extents_type::rank_dynamic();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION static constexpr auto static_extent(size_t r) noexcept
    -> index_type
  {
    return extents_type::static_extent(r);
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto extents() const noexcept -> extents_type
  {
    return map_.extents();
  }
  /**
   * @brief the extent of rank r
   */
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto extent(size_t r) const noexcept -> index_type
  {
    return map_.extents().extent(r);
  }
  // mapping
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto mapping() const noexcept -> mapping_type
  {
    return map_;
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto is_unique() const noexcept -> bool
  {
    return map_.is_unique();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto is_exhaustive() const noexcept -> bool
  {
    return map_.is_exhaustive();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto is_strided() const noexcept -> bool
  {
    return map_.is_strided();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION constexpr auto stride(size_t r) const -> index_type
  {
    return map_.stride(r);
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION static constexpr auto is_always_unique() noexcept -> bool
  {
    return mapping_type::is_always_unique();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION static constexpr auto is_always_exhaustive() noexcept -> bool
  {
    return mapping_type::is_always_exhaustive();
  }
  [[nodiscard]] RAFT_INLINE_FUNCTION static constexpr auto is_always_strided() noexcept -> bool
  {
    return mapping_type::is_always_strided();
  }
 private:
  template <typename, typename, typename, typename>
  friend class mdarray;
 private:
  container_policy_type cp_;  // creates and accesses the underlying container
  mapping_type map_;          // layout mapping: multidimensional index -> linear offset
  container_type c_;          // owning storage sized by map_.required_span_size()
};
/** @} */
/**
 * @defgroup mdarray_reshape Reshaping routines for mdarray
* @{
*/
/**
 * @brief Flatten an object implementing raft::array_interface into a 1-dim array view
 *
 * @tparam array_interface_type Expected type implementing raft::array_interface
 * @param mda raft::array_interface implementing object
 * @return Either raft::host_mdspan or raft::device_mdspan with vector_extent
 * depending on the underlying ContainerPolicy
 */
template <typename array_interface_type,
          std::enable_if_t<is_array_interface_v<array_interface_type>>* = nullptr>
auto flatten(const array_interface_type& mda)
{
  // Delegate to the mdspan overload of flatten on the container's view.
  auto mda_view = mda.view();
  return flatten(mda_view);
}
/**
 * @brief Reshape an object implementing raft::array_interface
 *
 * @tparam array_interface_type Expected type implementing raft::array_interface
 * @tparam Extents raft::extents for dimensions
 * @tparam IndexType the index type of the extents
 * @param mda raft::array_interface implementing object
 * @param new_shape Desired new shape of the input
 * @return raft::host_mdspan or raft::device_mdspan, depending on the underlying
 * ContainerPolicy
 */
template <typename array_interface_type,
          typename IndexType = std::uint32_t,
          size_t... Extents,
          std::enable_if_t<is_array_interface_v<array_interface_type>>* = nullptr>
auto reshape(const array_interface_type& mda, extents<IndexType, Extents...> new_shape)
{
  // Delegate to the mdspan overload of reshape on the container's view.
  auto mda_view = mda.view();
  return reshape(mda_view, new_shape);
}
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_csr_matrix.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/csr_matrix.hpp>
#include <raft/core/host_container_policy.hpp>
#include <raft/core/host_span.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/sparse_types.hpp>
#include <type_traits>
namespace raft {
/**
* \defgroup host_csr_matrix Host CSR Matrix
* @{
*/
/**
* Specialization for a sparsity-preserving compressed structure view which uses host memory
*/
template <typename IndptrType, typename IndicesType, typename NZType>
using host_compressed_structure_view =
  compressed_structure_view<IndptrType, IndicesType, NZType, false>;
/**
 * Specialization for a sparsity-owning compressed structure which uses host memory
 */
template <typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy>
using host_compressed_structure =
  compressed_structure<IndptrType, IndicesType, NZType, false, ContainerPolicy>;
/**
 * Specialization for a csr matrix view which uses host memory
 */
template <typename ElementType, typename IndptrType, typename IndicesType, typename NZType>
using host_csr_matrix_view = csr_matrix_view<ElementType, IndptrType, IndicesType, NZType, false>;
/**
 * General csr matrix on host memory; sparsity ownership is selected through the
 * `sparsity_type` template parameter (OWNING by default).
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy,
          SparsityType sparsity_type = SparsityType::OWNING>
using host_csr_matrix =
  csr_matrix<ElementType, IndptrType, IndicesType, NZType, false, ContainerPolicy, sparsity_type>;
/**
 * Specialization for a sparsity-owning csr matrix which uses host memory
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy>
using host_sparsity_owning_csr_matrix =
  csr_matrix<ElementType, IndptrType, IndicesType, NZType, false, ContainerPolicy>;
/**
 * Specialization for a sparsity-preserving csr matrix which uses host memory
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy>
using host_sparsity_preserving_csr_matrix = csr_matrix<ElementType,
                                                       IndptrType,
                                                       IndicesType,
                                                       NZType,
                                                       false,
                                                       ContainerPolicy,
                                                       SparsityType::PRESERVING>;
// Detection trait: true only for host_csr_matrix_view specializations.
template <typename T>
struct is_host_csr_matrix_view : std::false_type {};
template <typename ElementType, typename IndptrType, typename IndicesType, typename NZType>
struct is_host_csr_matrix_view<host_csr_matrix_view<ElementType, IndptrType, IndicesType, NZType>>
  : std::true_type {};
template <typename T>
constexpr bool is_host_csr_matrix_view_v = is_host_csr_matrix_view<T>::value;
// Detection trait: true only for host_csr_matrix specializations
// (both OWNING and PRESERVING sparsity).
template <typename T>
struct is_host_csr_matrix : std::false_type {};
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T>
          typename ContainerPolicy,
          SparsityType sparsity_type>
struct is_host_csr_matrix<
  host_csr_matrix<ElementType, IndptrType, IndicesType, NZType, ContainerPolicy, sparsity_type>>
  : std::true_type {};
template <typename T>
constexpr bool is_host_csr_matrix_v = is_host_csr_matrix<T>::value;
// True for host csr matrices that own their sparsity (indptr/indices storage).
template <typename T>
constexpr bool is_host_csr_sparsity_owning_v =
  is_host_csr_matrix<T>::value and T::get_sparsity_type() == OWNING;
// True for host csr matrix views and for sparsity-preserving host csr matrices.
template <typename T>
constexpr bool is_host_csr_sparsity_preserving_v = std::disjunction_v<
  is_host_csr_matrix_view<T>,
  std::bool_constant<is_host_csr_matrix<T>::value and T::get_sparsity_type() == PRESERVING>>;
/**
 * Create a sparsity-owning sparse matrix in the compressed-sparse row format. Sparsity-owning
 * means that all of the underlying vectors (data, indptr, indices) are owned by the csr_matrix
 * instance. If not known up front, the sparsity can be ignored in this factory function and
 * `initialize_sparsity()` invoked on the instance once the sparsity is known.
 *
 * @code{.cpp}
 * #include <raft/core/host_resources.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 *
 * raft::resources handle;
 * csr_matrix = raft::make_host_csr_matrix(handle, n_rows, n_cols);
 * ...
 * // compute expected sparsity
 * ...
 * int nnz = 5000;
 * csr_matrix.initialize_sparsity(nnz);
 * @endcode
 *
 * @tparam ElementType
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] handle raft handle for managing expensive resources
 * @param[in] n_rows total number of rows in the matrix
 * @param[in] n_cols total number of columns in the matrix
 * @param[in] nnz number of non-zeros in the matrix if known [optional]
 * @return a sparsity-owning sparse matrix in compressed (csr) format
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_host_csr_matrix(raft::resources const& handle,
                          IndptrType n_rows,
                          IndicesType n_cols,
                          NZType nnz = 0)
{
  // Local alias keeps the lengthy template specialization readable.
  using owning_csr_t =
    host_sparsity_owning_csr_matrix<ElementType, IndptrType, IndicesType, NZType>;
  return owning_csr_t(handle, n_rows, n_cols, nnz);
}
/**
 * Create a sparsity-preserving sparse matrix in the compressed-sparse row format.
 * Sparsity-preserving means that a view of the csr sparsity is supplied, allowing the values in
 * the sparsity to change but not the sparsity itself. The csr_matrix instance does not own the
 * sparsity, the sparsity must be known up front, and cannot be resized later.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 *
 * raft::resources handle;
 * csr_structure = raft::make_host_compressed_structure(handle, n_rows, n_cols);
 * ...
 * // compute expected sparsity
 * ...
 * csr_structure.initialize_sparsity(nnz);
 * csr_matrix = raft::make_host_csr_matrix(handle, csr_structure.view());
 * @endcode
 *
 * @tparam ElementType
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] handle raft handle for managing expensive resources
 * @param[in] structure a sparsity-preserving compressed structural view
 * @return a sparsity-preserving sparse matrix in compressed (csr) format
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_host_csr_matrix(raft::resources const& handle,
                          host_compressed_structure_view<IndptrType, IndicesType, NZType> structure)
{
  using preserving_csr_t =
    host_sparsity_preserving_csr_matrix<ElementType, IndptrType, IndicesType, NZType>;
  return preserving_csr_t(handle, structure);
}
/**
 * Create a non-owning sparse matrix view in the compressed-sparse row format. This is
 * sparsity-preserving, meaning that the underlying sparsity is known and cannot be changed.
 * Use the sparsity-owning csr_matrix if the sparsity needs to be mutable.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following pointer is assumed to reference host memory for a size of nnz
 * float* h_elm_ptr = ...;
 *
 * raft::resources handle;
 * csr_structure = raft::make_host_compressed_structure(handle, n_rows, n_cols, nnz);
 * csr_matrix_view = raft::make_host_csr_matrix_view(h_elm_ptr, csr_structure.view());
 * @endcode
 *
 * @tparam ElementType
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] ptr a pointer to array of nonzero matrix elements on host (size nnz)
 * @param[in] structure a sparsity-preserving compressed sparse structural view
 * @return a sparsity-preserving csr matrix view
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_host_csr_matrix_view(
  ElementType* ptr, host_compressed_structure_view<IndptrType, IndicesType, NZType> structure)
{
  // Wrap the raw pointer in a span sized by the structure's nnz before building the view.
  auto elements = raft::host_span<ElementType>(ptr, structure.get_nnz());
  return host_csr_matrix_view<ElementType, IndptrType, IndicesType, NZType>(elements, structure);
}
/**
 * Create a non-owning sparse matrix view in the compressed-sparse row format. This is
 * sparsity-preserving, meaning that the underlying sparsity is known and cannot be changed.
 * Use the sparsity-owning csr_matrix if the sparsity needs to be mutable.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/host_span.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following span is assumed to be of size nnz
 * raft::host_span<float> h_elm_span;
 *
 * raft::resources handle;
 * csr_structure = raft::make_host_compressed_structure(handle, n_rows, n_cols, nnz);
 * csr_matrix_view = raft::make_host_csr_matrix_view(h_elm_span, csr_structure.view());
 * @endcode
 *
 * @tparam ElementType
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] elements host span containing array of matrix elements (size nnz)
 * @param[in] structure a sparsity-preserving structural view
 * @return a sparsity-preserving csr matrix view
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_host_csr_matrix_view(
  raft::host_span<ElementType> elements,
  host_compressed_structure_view<IndptrType, IndicesType, NZType> structure)
{
  // Guard against a span whose length disagrees with the structure's sparsity.
  RAFT_EXPECTS(elements.size() == structure.get_nnz(),
               "Size of elements must be equal to the nnz from the structure");
  using matrix_view_t = host_csr_matrix_view<ElementType, IndptrType, IndicesType, NZType>;
  return matrix_view_t(elements, structure);
}
/**
 * Create a sparsity-owning compressed structure. This is not sparsity-preserving, meaning that
 * the underlying sparsity does not need to be known upon construction. When not known up front,
 * the allocation of the underlying indices array is delayed until `resize(nnz)` is invoked.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * raft::resources handle;
 * csr_structure = raft::make_host_compressed_structure(handle, n_rows, n_cols, nnz);
 * ...
 * // compute expected sparsity
 * ...
 * csr_structure.initialize_sparsity(nnz);
 * @endcode
 *
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] handle raft handle for managing expensive resources
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of cols
 * @param[in] nnz total number of nonzeros, if known
 * @return a sparsity-owning compressed structure instance
 */
template <typename IndptrType, typename IndicesType, typename NZType = uint64_t>
auto make_host_compressed_structure(raft::resources const& handle,
                                    IndptrType n_rows,
                                    IndicesType n_cols,
                                    NZType nnz = 0)
{
  using structure_t = host_compressed_structure<IndptrType, IndicesType, NZType>;
  return structure_t(handle, n_rows, n_cols, nnz);
}
/**
 * Create a non-owning sparsity-preserved compressed structure view. Sparsity-preserving means that
 * the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
 * sparsity is not known up front.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following pointer is assumed to reference host-accessible memory of size n_rows+1
 * int *indptr = ...;
 *
 * // The following pointer is assumed to reference host-accessible memory of size nnz
 * int *indices = ...;
 *
 * csr_structure_view = raft::make_host_compressed_structure_view(indptr, indices, n_rows,
 *                                                                n_cols, nnz);
 * @endcode
 *
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] indptr structural indptr (size n_rows+1)
 * @param[in] indices structural indices (size nnz)
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of columns
 * @param[in] nnz number of non-zeros
 * @return a sparsity-preserving compressed structural view
 */
template <typename IndptrType, typename IndicesType, typename NZType = uint64_t>
auto make_host_compressed_structure_view(
  IndptrType* indptr, IndicesType* indices, IndptrType n_rows, IndicesType n_cols, NZType nnz)
{
  // Wrap the raw arrays in host spans sized by the CSR invariants before
  // constructing the view.
  auto indptr_span  = raft::host_span<IndptrType>(indptr, n_rows + 1);
  auto indices_span = raft::host_span<IndicesType>(indices, nnz);
  return host_compressed_structure_view<IndptrType, IndicesType, NZType>(
    indptr_span, indices_span, n_cols);
}
/**
 * Create a non-owning sparsity-preserved compressed structure view. Sparsity-preserving means that
 * the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
 * sparsity is not known up front.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/host_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following host span is assumed to be of size n_rows+1
 * raft::host_span<int> indptr;
 *
 * // The following host span is assumed to be of size nnz
 * raft::host_span<int> indices;
 *
 * csr_structure_view = raft::make_host_compressed_structure_view(indptr, indices, n_cols);
 * @endcode
 *
 * @tparam IndptrType
 * @tparam IndicesType
 * @tparam NZType
 * @param[in] indptr structural indptr (size n_rows+1)
 * @param[in] indices structural indices (size nnz)
 * @param[in] n_cols total number of columns
 * @return a sparsity-preserving compressed structural view
 *
 */
template <typename IndptrType, typename IndicesType, typename NZType = uint64_t>
auto make_host_compressed_structure_view(raft::host_span<IndptrType> indptr,
                                         raft::host_span<IndicesType> indices,
                                         IndicesType n_cols)
{
  using structure_view_t = host_compressed_structure_view<IndptrType, IndicesType, NZType>;
  return structure_view_t(indptr, indices, n_cols);
}
/** @} */
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_mdarray.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/host_container_policy.hpp>
#include <raft/core/mdarray.hpp>
namespace raft {
/**
 * @brief mdarray with host container policy
 * @tparam ElementType the data type of the elements
 * @tparam Extents defines the shape
 * @tparam LayoutPolicy policy for indexing strides and layout ordering
 * @tparam ContainerPolicy storage and accessor policy
 */
template <typename ElementType,
          typename Extents,
          typename LayoutPolicy = layout_c_contiguous,
          typename ContainerPolicy = host_vector_policy<ElementType>>
using host_mdarray = mdarray<ElementType, Extents, LayoutPolicy, host_accessor<ContainerPolicy>>;
/**
 * @brief Shorthand for 0-dim host mdarray (scalar).
 * @tparam ElementType the data type of the scalar element
 * @tparam IndexType the index type of the extents
 */
template <typename ElementType, typename IndexType = std::uint32_t>
using host_scalar = host_mdarray<ElementType, scalar_extent<IndexType>>;
/**
 * @brief Shorthand for 1-dim host mdarray.
 * @tparam ElementType the data type of the vector elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
using host_vector = host_mdarray<ElementType, vector_extent<IndexType>, LayoutPolicy>;
/**
 * @brief Shorthand for c-contiguous host matrix.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
using host_matrix = host_mdarray<ElementType, matrix_extent<IndexType>, LayoutPolicy>;
/**
* @defgroup host_mdarray_factories factories to create host mdarrays
* @{
*/
/**
 * @brief Create a host mdarray.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] res raft handle for managing expensive resources
 * @param[in] exts dimensionality of the array (series of integers)
 * @return raft::host_mdarray
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous,
          size_t... Extents>
auto make_host_mdarray(raft::resources& res, extents<IndexType, Extents...> exts)
{
  using mdarray_t = host_mdarray<ElementType, decltype(exts), LayoutPolicy>;
  // Default-construct the host container policy and derive the layout mapping
  // from the requested extents.
  typename mdarray_t::container_policy_type policy;
  typename mdarray_t::mapping_type layout{exts};
  return mdarray_t{res, layout, policy};
}
/**
* @}
*/
/**
 * @brief Create a host mdarray.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param exts dimensionality of the array (series of integers)
 *
 * @deprecated This overload constructs a temporary raft::resources internally.
 * Use the overload that accepts raft::resources instead.
 *
 * @return raft::host_mdarray
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous,
          size_t... Extents>
[[deprecated("Use the overload accepting raft::resources instead")]] auto make_host_mdarray(
  extents<IndexType, Extents...> exts)
{
  using mdarray_t = host_mdarray<ElementType, decltype(exts), LayoutPolicy>;
  typename mdarray_t::mapping_type layout{exts};
  typename mdarray_t::container_policy_type policy;
  // A throwaway resources object is created for the allocation, which is why
  // this overload is deprecated.
  raft::resources res;
  return mdarray_t{res, layout, policy};
}
/**
 * @ingroup host_mdarray_factories
 * @brief Create a 2-dim c-contiguous host mdarray.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] res raft handle for managing expensive resources
 * @param[in] n_rows number or rows in matrix
 * @param[in] n_cols number of columns in matrix
 * @return raft::host_matrix
 */
template <typename ElementType,
          typename IndexType    = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_host_matrix(raft::resources& res, IndexType n_rows, IndexType n_cols)
{
  // Build the 2-d extents first, then hand off to the generic factory.
  auto exts = make_extents<IndexType>(n_rows, n_cols);
  return make_host_mdarray<ElementType, IndexType, LayoutPolicy>(res, exts);
}
/**
 * @brief Create a 2-dim c-contiguous host mdarray.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] n_rows number or rows in matrix
 * @param[in] n_cols number of columns in matrix
 * Note: This function is deprecated and will be removed in a future version. Please use version
 * that accepts raft::resources.
 *
 * @return raft::host_matrix
 */
template <typename ElementType,
          typename IndexType    = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_host_matrix(IndexType n_rows, IndexType n_cols)
{
  // Forward to the (deprecated) extents-only mdarray factory.
  auto exts = make_extents<IndexType>(n_rows, n_cols);
  return make_host_mdarray<ElementType, IndexType, LayoutPolicy>(exts);
}
/**
 * @ingroup host_mdarray_factories
 * @brief Create a host scalar from v.
 *
 * @tparam ElementType the data type of the scalar element
 * @tparam IndexType the index type of the extents
 * @param[in] res raft handle for managing expensive resources
 * @param[in] v scalar type to wrap
 * @return raft::host_scalar
 */
template <typename ElementType, typename IndexType = std::uint32_t>
auto make_host_scalar(raft::resources& res, ElementType const& v)
{
  // FIXME(jiamingy): We can optimize this by using std::array as container policy, which
  // requires some more compile time dispatching. This is enabled in the ref impl but
  // hasn't been ported here yet.
  using scalar_t = host_scalar<ElementType>;
  typename scalar_t::container_policy_type policy;
  scalar_extent<IndexType> exts;
  // Allocate the one-element array, then write the wrapped value into it.
  auto result = scalar_t{res, exts, policy};
  result(0)   = v;
  return result;
}
/**
* @brief Create a host scalar from v.
*
* @tparam ElementType the data type of the scalar element
* @tparam IndexType the index type of the extents
* @param[in] v scalar type to wrap
* Note: This function is deprecated and will be removed in a future version. Please use version
* that accepts raft::resources.
*
* @return raft::host_scalar
*/
template <typename ElementType, typename IndexType = std::uint32_t>
auto make_host_scalar(ElementType const& v)
{
// FIXME(jiamingy): We can optimize this by using std::array as container policy, which
// requires some more compile time dispatching. This is enabled in the ref impl but
// hasn't been ported here yet.
scalar_extent<IndexType> extents;
using policy_t = typename host_scalar<ElementType>::container_policy_type;
policy_t policy;
raft::resources handle;
auto scalar = host_scalar<ElementType>{handle, extents, policy};
scalar(0) = v;
return scalar;
}
/**
 * @ingroup host_mdarray_factories
 * @brief Create a 1-dim host mdarray.
 * @tparam ElementType the data type of the vector elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] res raft handle for managing expensive resources
 * @param[in] n number of elements in vector
 * @return raft::host_vector
 */
template <typename ElementType,
          typename IndexType    = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_host_vector(raft::resources& res, IndexType n)
{
  // Build the 1-d extents first, then hand off to the generic factory.
  auto exts = make_extents<IndexType>(n);
  return make_host_mdarray<ElementType, IndexType, LayoutPolicy>(res, exts);
}
/**
 * @brief Create a 1-dim host mdarray.
 * @tparam ElementType the data type of the vector elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] n number of elements in vector
 *
 * Note: This function is deprecated and will be removed in a future version. Please use version
 * that accepts raft::resources.
 * @return raft::host_vector
 */
template <typename ElementType,
          typename IndexType    = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_host_vector(IndexType n)
{
  // Forward to the (deprecated) extents-only mdarray factory.
  auto exts = make_extents<IndexType>(n);
  return make_host_mdarray<ElementType, IndexType, LayoutPolicy>(exts);
}
} // end namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_span.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/span.hpp>
namespace raft {
/**
* @defgroup device_span one-dimensional device span type
* @{
*/
/**
 * @brief A span class for device pointer.
 *
 * Alias of raft::span with the second template argument fixed to `true`,
 * which marks the viewed memory as device-resident. Non-owning.
 *
 * @tparam T element type
 * @tparam extent static extent of the span; dynamic by default
 */
template <typename T, size_t extent = std::experimental::dynamic_extent>
using device_span = span<T, true, extent>;
/**
* @}
*/
} // end namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/logger.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "logger-macros.hpp"
#include "logger-ext.hpp"
#if !defined(RAFT_COMPILED)
#include "logger-inl.hpp"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/logger-macros.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/**
 * @defgroup logging levels used in raft
 *
 * @note exactly match the corresponding ones (but reverse in terms of value)
 * in spdlog for wrapping purposes
 *
 * @{
 */
// Higher numeric value == more verbose; OFF (0) disables logging entirely.
#define RAFT_LEVEL_TRACE 6
#define RAFT_LEVEL_DEBUG 5
#define RAFT_LEVEL_INFO 4
#define RAFT_LEVEL_WARN 3
#define RAFT_LEVEL_ERROR 2
#define RAFT_LEVEL_CRITICAL 1
#define RAFT_LEVEL_OFF 0
/** @} */
// Compile-time log threshold: RAFT_LOG_* invocations below this level compile
// to no-ops. Defaults to INFO when the build does not define it.
#if !defined(RAFT_ACTIVE_LEVEL)
#define RAFT_ACTIVE_LEVEL RAFT_LEVEL_INFO
#endif
/**
 * @defgroup loggerMacros Helper macros for dealing with logging
 * @{
 */
// Each RAFT_LOG_* macro expands to `void(0)` when RAFT_ACTIVE_LEVEL excludes
// its level, so disabled logging has zero runtime cost. TRACE/DEBUG variants
// additionally prefix the message with "file:line".
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_TRACE)
#define RAFT_LOG_TRACE(fmt, ...) \
  do { \
    std::stringstream ss; \
    ss << raft::detail::format("%s:%d ", __FILE__, __LINE__); \
    ss << raft::detail::format(fmt, ##__VA_ARGS__); \
    raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_TRACE, ss.str().c_str()); \
  } while (0)
#else
#define RAFT_LOG_TRACE(fmt, ...) void(0)
#endif
// Logs the contents of an array (pointer + length) at TRACE level via
// print_vector, prefixed with "file:line" and the pointer's expression text.
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_TRACE)
#define RAFT_LOG_TRACE_VEC(ptr, len) \
  do { \
    std::stringstream ss; \
    ss << raft::detail::format("%s:%d ", __FILE__, __LINE__); \
    print_vector(#ptr, ptr, len, ss); \
    raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_TRACE, ss.str().c_str()); \
  } while (0)
#else
#define RAFT_LOG_TRACE_VEC(ptr, len) void(0)
#endif
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_DEBUG)
#define RAFT_LOG_DEBUG(fmt, ...) \
  do { \
    std::stringstream ss; \
    ss << raft::detail::format("%s:%d ", __FILE__, __LINE__); \
    ss << raft::detail::format(fmt, ##__VA_ARGS__); \
    raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_DEBUG, ss.str().c_str()); \
  } while (0)
#else
#define RAFT_LOG_DEBUG(fmt, ...) void(0)
#endif
// INFO and above skip the file:line prefix and forward the format string
// directly to the logger.
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_INFO)
#define RAFT_LOG_INFO(fmt, ...) \
  raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_INFO, fmt, ##__VA_ARGS__)
#else
#define RAFT_LOG_INFO(fmt, ...) void(0)
#endif
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_WARN)
#define RAFT_LOG_WARN(fmt, ...) \
  raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_WARN, fmt, ##__VA_ARGS__)
#else
#define RAFT_LOG_WARN(fmt, ...) void(0)
#endif
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_ERROR)
#define RAFT_LOG_ERROR(fmt, ...) \
  raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_ERROR, fmt, ##__VA_ARGS__)
#else
#define RAFT_LOG_ERROR(fmt, ...) void(0)
#endif
#if (RAFT_ACTIVE_LEVEL >= RAFT_LEVEL_CRITICAL)
#define RAFT_LOG_CRITICAL(fmt, ...) \
  raft::logger::get(RAFT_NAME).log(RAFT_LEVEL_CRITICAL, fmt, ##__VA_ARGS__)
#else
#define RAFT_LOG_CRITICAL(fmt, ...) void(0)
#endif
/** @} */
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/resources.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "resource/resource_types.hpp"
#include <algorithm>
#include <mutex>
#include <raft/core/error.hpp> // RAFT_EXPECTS
#include <raft/core/logger.hpp>
#include <string>
#include <vector>
namespace raft {
/**
* @brief Resource container which allows lazy-loading and registration
* of resource_factory implementations, which in turn generate resource instances.
*
* This class is intended to be agnostic of the resources it contains and
* does not, itself, differentiate between host and device resources. Downstream
* accessor functions can then register and load resources as needed in order
* to keep its usage somewhat opaque to end-users.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/resource/cuda_stream.hpp>
* #include <raft/core/resource/cublas_handle.hpp>
*
* raft::resources res;
* auto stream = raft::resource::get_cuda_stream(res);
* auto cublas_handle = raft::resource::get_cublas_handle(res);
* @endcode
*/
class resources {
 public:
  // A slot is a (tag, payload) pair; a tag of LAST_KEY marks the slot empty.
  template <typename T>
  using pair_res = std::pair<resource::resource_type, std::shared_ptr<T>>;
  using pair_res_factory = pair_res<resource::resource_factory>;
  using pair_resource    = pair_res<resource::resource>;
  // Pre-sizes both tables with one slot per resource_type, each initialized
  // to an empty placeholder so later .at(resource_type) never goes out of
  // range. No locking needed: the object is not yet shared.
  resources()
    : factories_(resource::resource_type::LAST_KEY), resources_(resource::resource_type::LAST_KEY)
  {
    for (int i = 0; i < resource::resource_type::LAST_KEY; ++i) {
      factories_.at(i) = std::make_pair(resource::resource_type::LAST_KEY,
                                        std::make_shared<resource::empty_resource_factory>());
      resources_.at(i) = std::make_pair(resource::resource_type::LAST_KEY,
                                        std::make_shared<resource::empty_resource>());
    }
  }
  /**
   * @brief Shallow copy of underlying resources instance.
   * Note that this does not create any new resources.
   *
   * The copied tables share the factory/resource objects via shared_ptr.
   * NOTE(review): reads res's tables without taking res.mutex_ — assumes the
   * source instance is not being mutated concurrently during the copy; confirm.
   */
  resources(const resources& res) : factories_(res.factories_), resources_(res.resources_) {}
  // Moves are disabled; only (shallow) copies are supported.
  resources(resources&&)            = delete;
  resources& operator=(resources&&) = delete;
  /**
   * @brief Returns true if a resource_factory has been registered for the
   * given resource_type, false otherwise.
   * @param resource_type resource type to check
   * @return true if resource_factory is registered for the given resource_type
   */
  bool has_resource_factory(resource::resource_type resource_type) const
  {
    std::lock_guard<std::mutex> _(mutex_);
    // An empty slot still carries the LAST_KEY placeholder tag.
    return factories_.at(resource_type).first != resource::resource_type::LAST_KEY;
  }
  /**
   * @brief Register a resource_factory with the current instance.
   * This will overwrite any existing resource factories.
   * @param factory resource factory to register on the current instance
   *
   * Declared const but mutates the mutable tables; this preserves a const
   * outward API while allowing lazy registration.
   */
  void add_resource_factory(std::shared_ptr<resource::resource_factory> factory) const
  {
    std::lock_guard<std::mutex> _(mutex_);
    resource::resource_type rtype = factory.get()->get_resource_type();
    RAFT_EXPECTS(rtype != resource::resource_type::LAST_KEY,
                 "LAST_KEY is a placeholder and not a valid resource factory type.");
    factories_.at(rtype) = std::make_pair(rtype, factory);
    // Clear the corresponding resource, so that on next `get_resource` the new factory is used
    if (resources_.at(rtype).first != resource::resource_type::LAST_KEY) {
      resources_.at(rtype) = std::make_pair(resource::resource_type::LAST_KEY,
                                            std::make_shared<resource::empty_resource>());
    }
  }
  /**
   * @brief Retrieve a resource for the given resource_type and cast to given pointer type.
   * Note that the resources are loaded lazily on-demand and resources which don't yet
   * exist on the current instance will be created using the corresponding factory, if
   * it exists.
   * @tparam res_t pointer type for which retrieved resource will be casted
   * @param resource_type resource type to retrieve
   * @return the given resource, if it exists.
   */
  template <typename res_t>
  res_t* get_resource(resource::resource_type resource_type) const
  {
    std::lock_guard<std::mutex> _(mutex_);
    // Lazy creation: an empty slot is filled from the registered factory.
    if (resources_.at(resource_type).first == resource::resource_type::LAST_KEY) {
      RAFT_EXPECTS(factories_.at(resource_type).first != resource::resource_type::LAST_KEY,
                   "No resource factory has been registered for the given resource %d.",
                   resource_type);
      resource::resource_factory* factory = factories_.at(resource_type).second.get();
      resources_.at(resource_type)        = std::make_pair(
        resource_type, std::shared_ptr<resource::resource>(factory->make_resource()));
    }
    resource::resource* res = resources_.at(resource_type).second.get();
    // Unchecked cast: res_t must match the concrete type produced by the
    // registered factory's make_resource(); the accessor layer is responsible
    // for keeping these in sync.
    return reinterpret_cast<res_t*>(res->get_resource());
  }
 protected:
  // mutable so the const registration/lookup API above can lock and update.
  mutable std::mutex mutex_;
  mutable std::vector<pair_res_factory> factories_;
  mutable std::vector<pair_resource> resources_;
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/copy.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/copy.hpp>
namespace raft {
/**
* @brief Copy data from one mdspan to another with the same extents
*
* This function copies data from one mdspan to another, regardless of whether
* or not the mdspans have the same layout, memory type (host/device/managed)
* or data type. So long as it is possible to convert the data type from source
* to destination, and the extents are equal, this function should be able to
* perform the copy. Any necessary device operations will be stream-ordered via the CUDA stream
* provided by the `raft::resources` argument.
*
* This header includes a custom kernel used for copying data between
* completely arbitrary mdspans on device. To compile this function in a
* non-CUDA translation unit, `raft/core/copy.hpp` may be used instead. The
* pure C++ header will correctly compile even without a CUDA compiler.
* Depending on the specialization, this CUDA header may invoke the kernel and
* therefore require a CUDA compiler.
*
* Limitations: Currently this function does not support copying directly
* between two arbitrary mdspans on different CUDA devices. It is assumed that the caller sets the
* correct CUDA device. Furthermore, host-to-host copies that require a transformation of the
* underlying memory layout are currently not performant, although they are supported.
*
* Note that when copying to an mdspan with a non-unique layout (i.e. the same
* underlying memory is addressed by different element indexes), the source
* data must contain non-unique values for every non-unique destination
* element. If this is not the case, the behavior is undefined. Some copies
* to non-unique layouts which are well-defined will nevertheless fail with an
* exception to avoid race conditions in the underlying copy.
*
* @tparam DstType An mdspan type for the destination container.
* @tparam SrcType An mdspan type for the source container
* @param res raft::resources used to provide a stream for copies involving the
* device.
* @param dst The destination mdspan.
* @param src The source mdspan.
*/
// Overload selected via SFINAE on the return-type alias when this
// source/destination combination requires the custom device copy kernel;
// forwards to the detail implementation.
template <typename DstType, typename SrcType>
detail::mdspan_copyable_with_kernel_t<DstType, SrcType> copy(resources const& res,
                                                             DstType&& dst,
                                                             SrcType&& src)
{
  detail::copy(res, std::forward<DstType>(dst), std::forward<SrcType>(src));
}
// Guard so this non-kernel overload is defined only once when both this
// header and the pure C++ `raft/core/copy.hpp` are included in the same
// translation unit.
#ifndef RAFT_NON_CUDA_COPY_IMPLEMENTED
#define RAFT_NON_CUDA_COPY_IMPLEMENTED
// Overload for copies that do not need the device kernel.
template <typename DstType, typename SrcType>
detail::mdspan_copyable_not_with_kernel_t<DstType, SrcType> copy(resources const& res,
                                                                 DstType&& dst,
                                                                 SrcType&& src)
{
  detail::copy(res, std::forward<DstType>(dst), std::forward<SrcType>(src));
}
#endif
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/cuda_support.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
// Compile-time flag indicating whether this build of RAFT has CUDA support
// (i.e. RAFT_DISABLE_CUDA was not defined).
// `inline constexpr` (C++17) gives a single external-linkage definition across
// all translation units; the previous `auto constexpr static const` spelling
// was redundant (constexpr already implies const) and `static` forced a
// separate internal-linkage copy per translation unit.
#ifndef RAFT_DISABLE_CUDA
inline constexpr bool CUDA_ENABLED = true;
#else
inline constexpr bool CUDA_ENABLED = false;
#endif
}  // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/error.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RAFT_RT_ERROR
#define __RAFT_RT_ERROR
#pragma once
#include <cstdio>
#include <execinfo.h>
#include <iostream>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
namespace raft {
/**
* @defgroup error_handling Exceptions & Error Handling
* @{
*/
/** base exception class for the whole of raft */
class exception : public std::exception {
 public:
  /** default ctor */
  explicit exception() noexcept : std::exception(), msg_() {}
  /**
   * copy ctor
   *
   * Copies the source message verbatim. The call stack is deliberately NOT
   * collected again here: `src` already appended its backtrace at
   * construction time, and re-collecting would append a second, misleading
   * trace (from the copy site) every time the exception is copied.
   */
  exception(exception const& src) noexcept : std::exception(), msg_(src.what()) {}
  /**
   * ctor from an input message; the current call stack is appended to it.
   *
   * Takes the message by non-const value so the std::move below actually
   * moves (a const value parameter would silently force a copy).
   */
  explicit exception(std::string msg) noexcept : std::exception(), msg_(std::move(msg))
  {
    collect_call_stack();
  }
  /** get the message associated with this exception */
  char const* what() const noexcept override { return msg_.c_str(); }

 private:
  /** message associated with this exception */
  std::string msg_;

  /** append call stack info to this exception's message for ease of debug */
  // Courtesy: https://www.gnu.org/software/libc/manual/html_node/Backtraces.html
  void collect_call_stack() noexcept
  {
#ifdef __GNUC__
    constexpr int kMaxStackDepth = 64;
    void* stack[kMaxStackDepth];  // NOLINT
    auto depth = backtrace(stack, kMaxStackDepth);
    std::ostringstream oss;
    oss << std::endl << "Obtained " << depth << " stack frames" << std::endl;
    char** strings = backtrace_symbols(stack, depth);
    if (strings == nullptr) {
      oss << "But no stack trace could be found!" << std::endl;
      msg_ += oss.str();
      return;
    }
    ///@todo: support for demangling of C++ symbol names
    for (int i = 0; i < depth; ++i) {
      oss << "#" << i << " in " << strings[i] << std::endl;
    }
    // backtrace_symbols malloc's the array; caller must free it.
    free(strings);
    msg_ += oss.str();
#endif  // __GNUC__
  }
};
/**
 * @brief Exception thrown when logical precondition is violated.
 *
 * This exception should not be thrown directly and is instead thrown by the
 * RAFT_EXPECTS and RAFT_FAIL macros.
 *
 */
struct logic_error : public raft::exception {
  // Both ctors simply forward the message to raft::exception, which appends
  // the current call stack to it.
  explicit logic_error(char const* const message) : raft::exception(message) {}
  explicit logic_error(std::string const& message) : raft::exception(message) {}
};
/**
 * @brief Exception thrown when attempting to use CUDA features from a non-CUDA
 * build
 *
 */
struct non_cuda_build_error : public raft::exception {
  // Both ctors simply forward the message to raft::exception, which appends
  // the current call stack to it.
  explicit non_cuda_build_error(char const* const message) : raft::exception(message) {}
  explicit non_cuda_build_error(std::string const& message) : raft::exception(message) {}
};
/**
* @}
*/
} // namespace raft
// FIXME: Need to be replaced with RAFT_FAIL
/** macro to throw a runtime error */
// Formats in two passes: a sizing snprintf (null buffer) followed by a real
// write into an exactly-sized buffer; throws raft::exception with the result.
#define THROW(fmt, ...) \
  do { \
    int size1 = \
      std::snprintf(nullptr, 0, "exception occurred! file=%s line=%d: ", __FILE__, __LINE__); \
    int size2 = std::snprintf(nullptr, 0, fmt, ##__VA_ARGS__); \
    if (size1 < 0 || size2 < 0) \
      throw raft::exception("Error in snprintf, cannot handle raft exception."); \
    auto size = size1 + size2 + 1; /* +1 for final '\0' */ \
    auto buf = std::make_unique<char[]>(size_t(size)); \
    std::snprintf(buf.get(), \
                  size1 + 1 /* +1 for '\0' */, \
                  "exception occurred! file=%s line=%d: ", \
                  __FILE__, \
                  __LINE__); \
    std::snprintf(buf.get() + size1, size2 + 1 /* +1 for '\0' */, fmt, ##__VA_ARGS__); \
    std::string msg(buf.get(), buf.get() + size - 1); /* -1 to remove final '\0' */ \
    throw raft::exception(msg); \
  } while (0)
// FIXME: Need to be replaced with RAFT_EXPECTS
/** macro to check for a conditional and assert on failure */
// Evaluates `check` once; on failure forwards fmt/args to THROW.
#define ASSERT(check, fmt, ...) \
  do { \
    if (!(check)) THROW(fmt, ##__VA_ARGS__); \
  } while (0)
/**
 * Macro to append error message to first argument.
 * This should only be called in contexts where it is OK to throw exceptions!
 */
// Same two-pass snprintf technique as THROW, but appends the formatted
// "<prefix>file=...:line=...: <message>" text to `msg` instead of throwing
// (it can still throw raft::exception if snprintf itself fails).
#define SET_ERROR_MSG(msg, location_prefix, fmt, ...) \
  do { \
    int size1 = std::snprintf(nullptr, 0, "%s", location_prefix); \
    int size2 = std::snprintf(nullptr, 0, "file=%s line=%d: ", __FILE__, __LINE__); \
    int size3 = std::snprintf(nullptr, 0, fmt, ##__VA_ARGS__); \
    if (size1 < 0 || size2 < 0 || size3 < 0) \
      throw raft::exception("Error in snprintf, cannot handle raft exception."); \
    auto size = size1 + size2 + size3 + 1; /* +1 for final '\0' */ \
    std::vector<char> buf(size); \
    std::snprintf(buf.data(), size1 + 1 /* +1 for '\0' */, "%s", location_prefix); \
    std::snprintf( \
      buf.data() + size1, size2 + 1 /* +1 for '\0' */, "file=%s line=%d: ", __FILE__, __LINE__); \
    std::snprintf(buf.data() + size1 + size2, size3 + 1 /* +1 for '\0' */, fmt, ##__VA_ARGS__); \
    msg += std::string(buf.data(), buf.data() + size - 1); /* -1 to remove final '\0' */ \
  } while (0)
/**
 * @defgroup assertion Assertion and error macros
 * @{
 */
/**
 * @brief Macro for checking (pre-)conditions that throws an exception when a condition is false
 *
 * @param[in] cond Expression that evaluates to true or false
 * @param[in] fmt String literal description of the reason that cond is expected to be true with
 * optional format tags
 * @throw raft::logic_error if the condition evaluates to false.
 */
#define RAFT_EXPECTS(cond, fmt, ...) \
  do { \
    if (!(cond)) { \
      std::string msg{}; \
      SET_ERROR_MSG(msg, "RAFT failure at ", fmt, ##__VA_ARGS__); \
      throw raft::logic_error(msg); \
    } \
  } while (0)
/**
 * @brief Indicates that an erroneous code path has been taken.
 *
 * @param[in] fmt String literal description of the reason that this code path is erroneous with
 * optional format tags
 * @throw always throws raft::logic_error
 */
#define RAFT_FAIL(fmt, ...) \
  do { \
    std::string msg{}; \
    SET_ERROR_MSG(msg, "RAFT failure at ", fmt, ##__VA_ARGS__); \
    throw raft::logic_error(msg); \
  } while (0)
/**
 * @}
 */
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/temporary_device_buffer.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "device_mdarray.hpp"
#include "device_mdspan.hpp"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>
#include <variant>
namespace raft {
/**
* \defgroup temporary_device_buffer `raft::temporary_device_buffer`
* @{
*/
/**
 * @brief An object which provides temporary access on-device to memory from either a host or device
 * pointer. This object provides a `view()` method that will provide a `raft::device_mdspan` that
 * may be read-only depending on const-qualified nature of the input pointer.
 *
 * @tparam ElementType type of the input
 * @tparam Extents raft::extents
 * @tparam LayoutPolicy layout of the input
 * @tparam ContainerPolicy container to be used to own device memory if needed
 */
template <typename ElementType,
          typename Extents,
          typename LayoutPolicy                        = layout_c_contiguous,
          template <typename> typename ContainerPolicy = device_uvector_policy>
class temporary_device_buffer {
  using view_type    = device_mdspan<ElementType, Extents, LayoutPolicy>;
  using index_type   = typename Extents::index_type;
  // cv-stripped element type, required so an owning copy can be written to
  // even when ElementType is const.
  using element_type = std::remove_cv_t<ElementType>;

  using container_policy = ContainerPolicy<element_type>;
  using owning_device_buffer =
    device_mdarray<element_type, Extents, LayoutPolicy, container_policy>;
  // index 0: non-owning pointer (input already on device);
  // index 1: owning device copy (input was host memory).
  using data_store = std::variant<ElementType*, owning_device_buffer>;

  static constexpr bool is_const_pointer_ = std::is_const_v<ElementType>;

 public:
  // Copying is disabled (a copy would duplicate the write-back); moves are
  // allowed so factory functions can return by value.
  temporary_device_buffer(temporary_device_buffer const&)            = delete;
  temporary_device_buffer& operator=(temporary_device_buffer const&) = delete;

  constexpr temporary_device_buffer(temporary_device_buffer&&)            = default;
  constexpr temporary_device_buffer& operator=(temporary_device_buffer&&) = default;

  /**
   * @brief Construct a new temporary device buffer object
   *
   * If `data` is not resident on a CUDA device, an owning device copy of the
   * `length_` elements is made (stream-ordered on the handle's stream);
   * otherwise the pointer is wrapped without copying.
   *
   * @param handle raft::resources
   * @param data input pointer
   * @param extents dimensions of input array
   * @param write_back if true, any writes to the `view()` of this object will be copied
   * back if the original pointer was in host memory
   */
  temporary_device_buffer(resources const& handle,
                          ElementType* data,
                          Extents extents,
                          bool write_back = false)
    : stream_(resource::get_cuda_stream(handle)),
      original_data_(data),
      extents_{extents},
      write_back_(write_back),
      // total element count = product of all extents
      length_([this]() {
        std::size_t length = 1;
        for (std::size_t i = 0; i < extents_.rank(); ++i) {
          length *= extents_.extent(i);
        }
        return length;
      }()),
      device_id_{get_device_for_address(data)}
  {
    // device_id_ == -1 is treated throughout as "pointer is host memory"
    // (assumed contract of get_device_for_address — TODO confirm).
    if (device_id_ == -1) {
      typename owning_device_buffer::mapping_type layout{extents_};
      typename owning_device_buffer::container_policy_type policy{};

      owning_device_buffer device_data{handle, layout, policy};
      raft::copy(device_data.data_handle(), data, length_, resource::get_cuda_stream(handle));
      data_ = data_store{std::in_place_index<1>, std::move(device_data)};
    } else {
      data_ = data_store{std::in_place_index<0>, data};
    }
  }

  // The device->host write-back copy can fail, so the destructor is only
  // noexcept when ElementType is const (i.e. no write-back is possible).
  ~temporary_device_buffer() noexcept(is_const_pointer_)
  {
    // only need to write data back for non const pointers
    // when write_back=true and original pointer is in
    // host memory
    if constexpr (not is_const_pointer_) {
      if (write_back_ && device_id_ == -1) {
        raft::copy(original_data_, std::get<1>(data_).data_handle(), length_, stream_);
      }
    }
  }

  /**
   * @brief Returns a `raft::device_mdspan`
   *
   * Views the owning device copy when one was made, otherwise wraps the
   * original (already device-resident) pointer.
   *
   * @return raft::device_mdspan
   */
  auto view() -> view_type
  {
    if (device_id_ == -1) {
      return std::get<1>(data_).view();
    } else {
      return make_mdspan<ElementType, index_type, LayoutPolicy, false, true>(original_data_,
                                                                             extents_);
    }
  }

 private:
  rmm::cuda_stream_view stream_;
  ElementType* original_data_;   // caller's pointer; write-back target
  data_store data_;
  Extents extents_;
  bool write_back_;
  std::size_t length_;           // element count, not bytes
  int device_id_;                // -1 => original pointer was host memory
};
/**@}*/
/**
* \defgroup temporary_device_buffer_factories Temporary device buffer factories
* @{
*/
/**
 * @brief Factory to create a `raft::temporary_device_buffer`
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 *
 * raft::resources handle;
 *
 * // Initialize raft::device_mdarray and raft::extents
 * // Can be either raft::device_mdarray or raft::host_mdarray
 * auto exts  = raft::make_extents<int>(5);
 * auto array = raft::make_device_mdarray<int, int>(handle, exts);
 *
 * auto d_buf = raft::make_temporary_device_buffer(handle, array.data_handle(), exts);
 * @endcode
 *
 * @tparam ElementType type of the input
 * @tparam IndexType index type of `raft::extents`
 * @tparam LayoutPolicy layout of the input
 * @tparam ContainerPolicy container to be used to own device memory if needed
 * @tparam Extents variadic dimensions for `raft::extents`
 * @param handle raft::resources
 * @param data input pointer
 * @param extents dimensions of input array
 * @param write_back if true, any writes to the `view()` of this object will be copied
 * back if the original pointer was in host memory
 * @return raft::temporary_device_buffer
 */
template <typename ElementType,
          typename IndexType                           = std::uint32_t,
          typename LayoutPolicy                        = layout_c_contiguous,
          template <typename> typename ContainerPolicy = device_uvector_policy,
          size_t... Extents>
auto make_temporary_device_buffer(raft::resources const& handle,
                                  ElementType* data,
                                  raft::extents<IndexType, Extents...> extents,
                                  bool write_back = false)
{
  // Thin forwarding wrapper: deduces the buffer's Extents type from the
  // argument so callers never spell it out.
  return temporary_device_buffer<ElementType, decltype(extents), LayoutPolicy, ContainerPolicy>(
    handle, data, extents, write_back);
}
/**
 * @brief Factory to create a `raft::temporary_device_buffer` which produces a
 * read-only `raft::device_mdspan` from `view()` method with
 * `write_back=false`
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 *
 * raft::resources handle;
 *
 * // Initialize raft::device_mdarray and raft::extents
 * // Can be either raft::device_mdarray or raft::host_mdarray
 * auto exts  = raft::make_extents<int>(5);
 * auto array = raft::make_device_mdarray<int, int>(handle, exts);
 *
 * auto d_buf = raft::make_readonly_temporary_device_buffer(handle, array.data_handle(), exts);
 * @endcode
 *
 * @tparam ElementType type of the input
 * @tparam IndexType index type of `raft::extents`
 * @tparam LayoutPolicy layout of the input
 * @tparam ContainerPolicy container to be used to own device memory if needed
 * @tparam Extents variadic dimensions for `raft::extents`
 * @param handle raft::resources
 * @param data input pointer
 * @param extents dimensions of input array
 * @return raft::temporary_device_buffer
 */
template <typename ElementType,
          typename IndexType                           = std::uint32_t,
          typename LayoutPolicy                        = layout_c_contiguous,
          template <typename> typename ContainerPolicy = device_uvector_policy,
          size_t... Extents>
auto make_readonly_temporary_device_buffer(raft::resources const& handle,
                                           ElementType* data,
                                           raft::extents<IndexType, Extents...> extents)
{
  // Const-qualifying the element type makes the resulting view() read-only;
  // write_back is forced off since nothing can be written through it.
  using readonly_buffer_t = temporary_device_buffer<std::add_const_t<ElementType>,
                                                    decltype(extents),
                                                    LayoutPolicy,
                                                    ContainerPolicy>;
  return readonly_buffer_t{handle, data, extents, false};
}
/**
 * @brief Factory to create a `raft::temporary_device_buffer` whose `view()`
 *        method yields a writeable `raft::device_mdspan`, with changes copied
 *        back to the original pointer (`write_back=true`)
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 *
 * raft::resources handle;
 *
 * // Initialize raft::host_mdarray and raft::extents
 * // Can be either raft::device_mdarray or raft::host_mdarray
 * auto exts  = raft::make_extents<int>(5);
 * auto array = raft::make_host_mdarray<int, int>(handle, exts);
 *
 * auto d_buf = raft::make_writeback_temporary_device_buffer(handle, array.data_handle(), exts);
 * @endcode
 *
 * @tparam ElementType type of the input; must not be const-qualified
 * @tparam IndexType index type of `raft::extents`
 * @tparam LayoutPolicy layout of the input
 * @tparam ContainerPolicy container to be used to own device memory if needed
 * @tparam Extents variadic dimensions for `raft::extents`
 * @param handle raft::resources
 * @param data input pointer
 * @param extents dimensions of input array
 * @return raft::temporary_device_buffer
 */
template <typename ElementType,
          typename IndexType                           = std::uint32_t,
          typename LayoutPolicy                        = layout_c_contiguous,
          template <typename> typename ContainerPolicy = device_uvector_policy,
          size_t... Extents,
          typename = std::enable_if_t<not std::is_const_v<ElementType>>>
auto make_writeback_temporary_device_buffer(raft::resources const& handle,
                                            ElementType* data,
                                            raft::extents<IndexType, Extents...> extents)
{
  // Write-back only makes sense for mutable elements; the enable_if above
  // rejects const element types at overload resolution time.
  using buffer_type =
    temporary_device_buffer<ElementType, decltype(extents), LayoutPolicy, ContainerPolicy>;
  return buffer_type(handle, data, extents, true);
}
/**@}*/
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/serialize.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/mdspan_numpy_serializer.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <iostream>
#include <vector>
/**
* Collection of serialization functions for RAFT data types
*/
namespace raft {
/**
 * @brief Serialize a host mdspan to an output stream in NumPy (.npy) format.
 *
 * @tparam ElementType element type of the mdspan
 * @tparam Extents extents type of the mdspan
 * @tparam LayoutPolicy layout of the mdspan
 * @tparam AccessorPolicy accessor of the mdspan
 * @param[in] os output stream to write the serialized array to
 * @param[in] obj host mdspan to serialize
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void serialize_mdspan(
  const raft::resources&,
  std::ostream& os,
  const raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  // Host data is directly accessible: no staging copy or stream sync needed.
  // The unnamed resources parameter keeps the overload set uniform.
  detail::numpy_serializer::serialize_host_mdspan(os, obj);
}
/**
 * @brief Serialize a device mdspan to an output stream in NumPy (.npy) format.
 *
 * The device data is staged through a temporary host buffer; the copy is
 * issued on the handle's CUDA stream and synchronized before writing.
 *
 * @tparam ElementType element type of the mdspan
 * @tparam Extents extents type of the mdspan
 * @tparam LayoutPolicy layout of the mdspan; must be C- or F-contiguous
 * @tparam AccessorPolicy accessor of the mdspan
 * @param[in] handle resources providing the CUDA stream for the copy
 * @param[in] os output stream to write the serialized array to
 * @param[in] obj device mdspan to serialize
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void serialize_mdspan(
  const raft::resources& handle,
  std::ostream& os,
  const raft::device_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  static_assert(std::is_same_v<LayoutPolicy, raft::layout_c_contiguous> ||
                  std::is_same_v<LayoutPolicy, raft::layout_f_contiguous>,
                "The serializer only supports row-major and column-major layouts");
  using obj_t = raft::device_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
  // Copy to host before serializing
  // For contiguous layouts, size() == product of dimensions
  std::vector<typename obj_t::value_type> tmp(obj.size());
  cudaStream_t stream = resource::get_cuda_stream(handle);
  raft::update_host(tmp.data(), obj.data_handle(), obj.size(), stream);
  // Wait for the async copy to complete before reading tmp on the host.
  resource::sync_stream(handle);
  using inner_accessor_type = typename obj_t::accessor_type::accessor_type;
  // Re-wrap the host staging buffer with the same extents/layout so the host
  // serializer can be reused unchanged.
  auto tmp_mdspan =
    raft::host_mdspan<ElementType, Extents, LayoutPolicy, raft::host_accessor<inner_accessor_type>>(
      tmp.data(), obj.extents());
  detail::numpy_serializer::serialize_host_mdspan(os, tmp_mdspan);
}
/**
 * @brief Serialize a managed mdspan to an output stream in NumPy (.npy) format.
 *
 * Managed (unified) memory is host-accessible, so the data is re-wrapped as a
 * host mdspan and written without a staging copy. The resources parameter is
 * unused and therefore unnamed.
 *
 * @tparam ElementType element type of the mdspan
 * @tparam Extents extents type of the mdspan
 * @tparam LayoutPolicy layout of the mdspan
 * @tparam AccessorPolicy accessor of the mdspan
 * @param[in] os output stream to write the serialized array to
 * @param[in] obj managed mdspan to serialize
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void serialize_mdspan(
  const raft::resources&,
  std::ostream& os,
  const raft::managed_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  using obj_t               = raft::managed_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
  using inner_accessor_type = typename obj_t::accessor_type::accessor_type;
  // View the managed allocation through a host accessor; no copy is made.
  auto tmp_mdspan =
    raft::host_mdspan<ElementType, Extents, LayoutPolicy, raft::host_accessor<inner_accessor_type>>(
      obj.data_handle(), obj.extents());
  detail::numpy_serializer::serialize_host_mdspan(os, tmp_mdspan);
}
/**
 * @brief Deserialize a NumPy (.npy) formatted stream into a host mdspan.
 *
 * The mdspan's extents must match the serialized array. The resources
 * parameter is unused and therefore unnamed.
 *
 * @tparam ElementType element type of the mdspan
 * @tparam Extents extents type of the mdspan
 * @tparam LayoutPolicy layout of the mdspan
 * @tparam AccessorPolicy accessor of the mdspan
 * @param[in] is input stream positioned at the serialized array
 * @param[out] obj host mdspan to fill with the deserialized data
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_mdspan(
  const raft::resources&,
  std::istream& is,
  raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  detail::numpy_serializer::deserialize_host_mdspan(is, obj);
}
/**
 * @brief Deserialize a NumPy (.npy) formatted stream into a device mdspan.
 *
 * The data is first deserialized into a temporary host buffer, then copied to
 * the device on the handle's CUDA stream, which is synchronized before return.
 *
 * @tparam ElementType element type of the mdspan
 * @tparam Extents extents type of the mdspan
 * @tparam LayoutPolicy layout of the mdspan; must be C- or F-contiguous
 * @tparam AccessorPolicy accessor of the mdspan
 * @param[in] handle resources providing the CUDA stream for the copy
 * @param[in] is input stream positioned at the serialized array
 * @param[out] obj device mdspan to fill with the deserialized data
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_mdspan(
  const raft::resources& handle,
  std::istream& is,
  raft::device_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  static_assert(std::is_same_v<LayoutPolicy, raft::layout_c_contiguous> ||
                  std::is_same_v<LayoutPolicy, raft::layout_f_contiguous>,
                "The serializer only supports row-major and column-major layouts");
  using obj_t = raft::device_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
  // Deserialize into a host staging buffer, then copy to device.
  // For contiguous layouts, size() == product of dimensions
  std::vector<typename obj_t::value_type> tmp(obj.size());
  using inner_accessor_type = typename obj_t::accessor_type::accessor_type;
  auto tmp_mdspan =
    raft::host_mdspan<ElementType, Extents, LayoutPolicy, raft::host_accessor<inner_accessor_type>>(
      tmp.data(), obj.extents());
  detail::numpy_serializer::deserialize_host_mdspan(is, tmp_mdspan);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  raft::update_device(obj.data_handle(), tmp.data(), obj.size(), stream);
  // Keep tmp alive until the async copy has completed.
  resource::sync_stream(handle);
}
/**
 * @brief Overload accepting an rvalue host mdspan (e.g. a temporary view);
 * simply forwards to the lvalue overload.
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_mdspan(
  const raft::resources& handle,
  std::istream& is,
  raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>&& obj)
{
  deserialize_mdspan(handle, is, obj);
}
/**
 * @brief Deserialize a NumPy (.npy) formatted stream into a managed mdspan.
 *
 * Managed (unified) memory is host-accessible, so the data is written directly
 * through a host-accessor view without a staging copy. The handle parameter is
 * not used by this overload.
 *
 * @tparam ElementType element type of the mdspan
 * @tparam Extents extents type of the mdspan
 * @tparam LayoutPolicy layout of the mdspan
 * @tparam AccessorPolicy accessor of the mdspan
 * @param[in] handle unused for managed memory
 * @param[in] is input stream positioned at the serialized array
 * @param[out] obj managed mdspan to fill with the deserialized data
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_mdspan(
  const raft::resources& handle,
  std::istream& is,
  raft::managed_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  using obj_t               = raft::managed_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
  using inner_accessor_type = typename obj_t::accessor_type::accessor_type;
  // View the managed allocation through a host accessor; no copy is made.
  auto tmp_mdspan =
    raft::host_mdspan<ElementType, Extents, LayoutPolicy, raft::host_accessor<inner_accessor_type>>(
      obj.data_handle(), obj.extents());
  detail::numpy_serializer::deserialize_host_mdspan(is, tmp_mdspan);
}
/**
 * @brief Overload accepting an rvalue managed mdspan (e.g. a temporary view);
 * simply forwards to the lvalue overload.
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_mdspan(
  const raft::resources& handle,
  std::istream& is,
  raft::managed_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>&& obj)
{
  deserialize_mdspan(handle, is, obj);
}
/**
 * @brief Overload accepting an rvalue device mdspan (e.g. a temporary view);
 * simply forwards to the lvalue overload.
 */
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_mdspan(
  const raft::resources& handle,
  std::istream& is,
  raft::device_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>&& obj)
{
  deserialize_mdspan(handle, is, obj);
}
/**
 * @brief Serialize a single host-side scalar to an output stream in NumPy
 * (.npy) format. The resources parameter is unused and therefore unnamed.
 *
 * @tparam T scalar type
 * @param[in] os output stream to write the serialized scalar to
 * @param[in] value scalar to serialize
 */
template <typename T>
inline void serialize_scalar(const raft::resources&, std::ostream& os, const T& value)
{
  detail::numpy_serializer::serialize_scalar(os, value);
}
/**
 * @brief Deserialize a single scalar from a NumPy (.npy) formatted stream.
 * The resources parameter is unused and therefore unnamed.
 *
 * @tparam T scalar type
 * @param[in] is input stream positioned at the serialized scalar
 * @return the deserialized scalar value
 */
template <typename T>
inline T deserialize_scalar(const raft::resources&, std::istream& is)
{
  return detail::numpy_serializer::deserialize_scalar<T>(is);
}
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/sparse_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/logger.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/span.hpp>
#include <raft/core/sparse_types.hpp>
namespace raft {
/**
* \defgroup sparse_types Sparse API vocabulary
* @{
*/
/** Whether a sparse object owns its sparsity structure (OWNING) or only
 *  references a structure owned elsewhere (PRESERVING). */
enum SparsityType { OWNING, PRESERVING };
/**
 * Maintains metadata about the structure and sparsity of a sparse matrix.
 *
 * @tparam RowType type used for row indices/counts
 * @tparam ColType type used for column indices/counts
 * @tparam NZType type used for the number of non-zeros
 * @tparam is_device nonzero when the described matrix data is device-resident
 */
template <typename RowType, typename ColType, typename NZType, int is_device>
class sparse_structure {
 public:
  using row_type = RowType;
  using col_type = ColType;
  using nnz_type = NZType;

  /**
   * Constructor when sparsity is already known
   * @param n_rows total number of rows in matrix
   * @param n_cols total number of columns in matrix
   * @param nnz sparsity of matrix
   */
  sparse_structure(row_type n_rows, col_type n_cols, nnz_type nnz)
    : n_rows_(n_rows), n_cols_(n_cols), nnz_(nnz){};

  /**
   * Constructor when sparsity is not yet known
   * @param n_rows total number of rows in matrix
   * @param n_cols total number of columns in matrix
   */
  sparse_structure(row_type n_rows, col_type n_cols) : n_rows_(n_rows), n_cols_(n_cols), nnz_(0) {}

  /**
   * Return the sparsity of the matrix (this will be 0 when sparsity is not yet known)
   * @return sparsity of matrix
   */
  nnz_type get_nnz() const { return nnz_; }

  /**
   * Return the total number of rows in the matrix
   * @return total number of rows in the matrix
   */
  row_type get_n_rows() const { return n_rows_; }

  /**
   * Return the total number of columns in the matrix
   * @return total number of columns
   */
  col_type get_n_cols() const { return n_cols_; }

  /**
   * Initialize the matrix sparsity when it was not known
   * upon construction.
   * @param nnz number of non-zeros in the matrix
   */
  virtual void initialize_sparsity(nnz_type nnz) { nnz_ = nnz; }

 protected:
  row_type n_rows_;
  col_type n_cols_;
  nnz_type nnz_;
};
/**
 * A non-owning view of a sparse matrix, which includes a
 * structure component coupled with its elements/weights
 *
 * @tparam ElementType type of the non-zero element values
 * @tparam StructureType sparse structure type; must expose a `view_type`
 * @tparam is_device true when the viewed memory is device-resident
 */
template <typename ElementType, typename StructureType, bool is_device>
class sparse_matrix_view {
 public:
  using element_type        = ElementType;
  using structure_view_type = typename StructureType::view_type;

  /**
   * @param element_span span over the non-zero element values
   * @param structure_view sparsity-preserving view of the matrix structure
   */
  sparse_matrix_view(raft::span<ElementType, is_device> element_span,
                     structure_view_type structure_view)
    : element_span_(element_span), structure_view_(structure_view)
  {
    // FIXME: Validate structure sizes match span size.
  }

  /**
   * Return a view of the structure underlying this matrix
   * @return structure view
   */
  structure_view_type structure_view() { return structure_view_; }

  /**
   * Return a span of the nonzero elements of the matrix
   * @return span of the nonzero elements of the matrix
   */
  span<element_type, is_device> get_elements() { return element_span_; }

 protected:
  // Protected (not private): format-specific view types presumably derive from
  // this class and access these members directly -- confirm before narrowing.
  raft::span<element_type, is_device> element_span_;
  structure_view_type structure_view_;
};
/**
* TODO: Need to support the following types of configurations:
* 1. solid: immutable_sparse_matrix_view<const ElementType, const StructureType>
* - This is an immutable view type, nothing can change.
* 2. liquid: sparse_matrix<ElementType, const StructureType>
* - sparse_matrix owning container w/ StructureType=immutable view?
* 3. gas: sparse_matrix<ElementType, StructureType>
* - sparse_matrix owning container w/ StructureType owning container?
*/
/**
 * An owning container for a sparse matrix, which includes a
 * structure component coupled with its elements/weights
 *
 * @tparam ElementType type of the non-zero element values
 * @tparam StructureType sparse structure type (owning container or view)
 * @tparam ViewType sparsity-preserving view type returned by `view()`
 * @tparam is_device true when the element container is device-resident
 * @tparam ContainerPolicy policy supplying the element container type
 */
template <typename ElementType,
          typename StructureType,
          typename ViewType,
          bool is_device,
          template <typename T>
          typename ContainerPolicy>
class sparse_matrix {
 public:
  using view_type             = ViewType;
  using element_type          = typename view_type::element_type;
  using structure_type        = StructureType;
  using row_type              = typename structure_type::row_type;
  using col_type              = typename structure_type::col_type;
  using nnz_type              = typename structure_type::nnz_type;
  using structure_view_type   = typename structure_type::view_type;
  using container_policy_type = ContainerPolicy<element_type>;
  using container_type        = typename container_policy_type::container_type;

  // constructor that owns the data and the structure
  // NOTE(review): the element container is created with size 0 even when
  // nnz > 0; callers appear expected to call initialize_sparsity() once the
  // sparsity is known -- confirm this is intentional.
  sparse_matrix(raft::resources const& handle,
                row_type n_rows,
                col_type n_cols,
                nnz_type nnz = 0) noexcept(std::is_nothrow_default_constructible_v<container_type>)
    : structure_{handle, n_rows, n_cols, nnz}, cp_{}, c_elements_{cp_.create(handle, 0)} {};

  // Constructor that owns the data but not the structure
  // This constructor is only callable with a `structure_type == *_structure_view`
  // which makes it okay to copy
  sparse_matrix(raft::resources const& handle, structure_type structure) noexcept(
    std::is_nothrow_default_constructible_v<container_type>)
    : structure_{structure}, cp_{}, c_elements_{cp_.create(handle, structure_.get_nnz())} {};

  constexpr sparse_matrix(sparse_matrix const&) noexcept(
    std::is_nothrow_copy_constructible_v<container_type>) = default;
  constexpr sparse_matrix(sparse_matrix&&) noexcept(
    std::is_nothrow_move_constructible<container_type>::value) = default;

  constexpr auto operator=(sparse_matrix const&) noexcept(
    std::is_nothrow_copy_assignable<container_type>::value) -> sparse_matrix& = default;
  constexpr auto operator=(sparse_matrix&&) noexcept(
    std::is_nothrow_move_assignable<container_type>::value) -> sparse_matrix& = default;

  ~sparse_matrix() noexcept(std::is_nothrow_destructible<container_type>::value) = default;

  // Allocate storage for nnz elements once the sparsity becomes known.
  // NOTE(review): only the element container is resized here; the structure's
  // nnz is not updated -- subclasses/callers must keep the two in sync.
  void initialize_sparsity(nnz_type nnz) { c_elements_.resize(nnz); };

  // Span over the owned non-zero values, sized by the structure's nnz.
  raft::span<ElementType, is_device> get_elements()
  {
    return raft::span<ElementType, is_device>(c_elements_.data(), structure_.get_nnz());
  }

  /**
   * Return a view of the structure underlying this matrix
   * @return structure view (provided by the concrete format subclass)
   */
  virtual structure_view_type structure_view() = 0;

  /**
   * Return a sparsity-preserving view of this sparse matrix
   * @return view of this sparse matrix
   */
  view_type view()
  {
    auto struct_view = structure_view();
    auto element_span =
      raft::span<ElementType, is_device>(c_elements_.data(), struct_view.get_nnz());
    return view_type(element_span, struct_view);
  }

 protected:
  structure_type structure_;
  container_policy_type cp_;
  container_type c_elements_;
};
/* @} */
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_container_policy.hpp | /*
* Copyright (2019) Sandia Corporation
*
* The source code is licensed under the 3-clause BSD license found in the LICENSE file
* thirdparty/LICENSES/mdarray.license
*/
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/util/cudart_utils.hpp>
#include <raft/core/detail/span.hpp> // dynamic_extent
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/device_ptr.h>
namespace raft {
/**
 * @brief A simplified version of thrust::device_reference with support for CUDA stream.
 *
 * Acts as a proxy for a single element in device memory: conversion to
 * `value_type` copies the element device-to-host, and assignment copies a host
 * value host-to-device. Both transfers use the stream captured at construction.
 */
template <typename T>
class device_reference {
 public:
  using value_type    = typename std::remove_cv_t<T>;
  using pointer       = thrust::device_ptr<T>;
  using const_pointer = thrust::device_ptr<T const>;

 private:
  // Const-correct pointer: device_reference<T const> stores a const_pointer.
  std::conditional_t<std::is_const<T>::value, const_pointer, pointer> ptr_;
  rmm::cuda_stream_view stream_;

 public:
  /**
   * @param ptr device pointer to the referenced element
   * @param stream CUDA stream on which transfers are issued
   */
  device_reference(thrust::device_ptr<T> ptr, rmm::cuda_stream_view stream)
    : ptr_{ptr}, stream_{stream}
  {
  }

  // Read: copy the referenced device element into a host value.
  operator value_type() const  // NOLINT
  {
    auto* raw = ptr_.get();
    value_type v{};
    update_host(&v, raw, 1, stream_);
    return v;
  }

  // Write: copy a host value into the referenced device element.
  auto operator=(T const& other) -> device_reference&
  {
    auto* raw = ptr_.get();
    update_device(raw, &other, 1, stream_);
    return *this;
  }
};
/**
 * @brief A thin wrapper over rmm::device_uvector for implementing the mdarray container policy.
 *
 * Single-element access goes through `device_reference` proxies, allowing host
 * code to read/write individual device elements on the vector's stream.
 */
template <typename T>
class device_uvector {
  rmm::device_uvector<T> data_;

 public:
  using value_type      = T;
  using size_type       = std::size_t;
  using reference       = device_reference<T>;
  using const_reference = device_reference<T const>;
  using pointer         = value_type*;
  using const_pointer   = value_type const*;
  using iterator        = pointer;
  using const_iterator  = const_pointer;

 public:
  ~device_uvector()                          = default;
  device_uvector(device_uvector&&) noexcept  = default;
  // Copy construction/assignment re-allocate on the source vector's stream.
  device_uvector(device_uvector const& that) : data_{that.data_, that.data_.stream()} {}

  auto operator=(device_uvector<T> const& that) -> device_uvector<T>&
  {
    data_ = rmm::device_uvector<T>{that.data_, that.data_.stream()};
    return *this;
  }
  auto operator=(device_uvector<T>&& that) noexcept -> device_uvector<T>& = default;

  /**
   * @brief Default ctor is deleted as it doesn't accept stream.
   */
  device_uvector() = delete;
  /**
   * @brief Ctor that accepts a size, stream and an optional mr.
   */
  explicit device_uvector(std::size_t size, rmm::cuda_stream_view stream) : data_{size, stream} {}
  /**
   * @brief Ctor that accepts a size, stream and a memory resource.
   */
  explicit device_uvector(std::size_t size,
                          rmm::cuda_stream_view stream,
                          rmm::mr::device_memory_resource* mr)
    : data_{size, stream, mr}
  {
  }
  /**
   * @brief Index operator that returns a proxy to the actual data.
   */
  template <typename Index>
  auto operator[](Index i) noexcept -> reference
  {
    return device_reference<T>{thrust::device_ptr<T>{data_.data() + i}, data_.stream()};
  }
  /**
   * @brief Index operator that returns a proxy to the actual data.
   */
  template <typename Index>
  auto operator[](Index i) const noexcept
  {
    return device_reference<T const>{thrust::device_ptr<T const>{data_.data() + i}, data_.stream()};
  }

  // Delegates to rmm::device_uvector::resize on the stored stream.
  void resize(size_type size) { data_.resize(size, data_.stream()); }

  [[nodiscard]] auto data() noexcept -> pointer { return data_.data(); }
  [[nodiscard]] auto data() const noexcept -> const_pointer { return data_.data(); }
};
/**
 * @brief A container policy for device mdarray.
 *
 * Creates `device_uvector` containers on the CUDA stream held by the supplied
 * `raft::resources`, optionally using a user-provided device memory resource.
 */
template <typename ElementType>
class device_uvector_policy {
 public:
  using element_type   = ElementType;
  using container_type = device_uvector<element_type>;
  // FIXME(jiamingy): allocator type is not supported by rmm::device_uvector
  using pointer               = typename container_type::pointer;
  using const_pointer         = typename container_type::const_pointer;
  using reference             = device_reference<element_type>;
  using const_reference       = device_reference<element_type const>;
  using accessor_policy       = std::experimental::default_accessor<element_type>;
  using const_accessor_policy = std::experimental::default_accessor<element_type const>;

 public:
  // Allocate a container of n elements on the resources' CUDA stream,
  // honoring the optional memory resource set at construction.
  auto create(raft::resources const& res, size_t n) -> container_type
  {
    if (mr_ == nullptr) {
      // NB: not using the workspace resource by default!
      // The workspace resource is for short-lived temporary allocations.
      return container_type(n, resource::get_cuda_stream(res));
    } else {
      return container_type(n, resource::get_cuda_stream(res), mr_);
    }
  }

  constexpr device_uvector_policy() = default;
  // Route all subsequent allocations through a specific memory resource.
  constexpr explicit device_uvector_policy(rmm::mr::device_memory_resource* mr) noexcept : mr_(mr)
  {
  }

  // Proxy-based element access (see device_reference).
  [[nodiscard]] constexpr auto access(container_type& c, size_t n) const noexcept -> reference
  {
    return c[n];
  }
  [[nodiscard]] constexpr auto access(container_type const& c, size_t n) const noexcept
    -> const_reference
  {
    return c[n];
  }

  [[nodiscard]] auto make_accessor_policy() noexcept { return accessor_policy{}; }
  [[nodiscard]] auto make_accessor_policy() const noexcept { return const_accessor_policy{}; }

 private:
  // Optional allocator override; nullptr means "use the stream's default".
  rmm::mr::device_memory_resource* mr_{nullptr};
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/copy.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/copy.hpp>
namespace raft {
#ifndef RAFT_NON_CUDA_COPY_IMPLEMENTED
#define RAFT_NON_CUDA_COPY_IMPLEMENTED
/**
* @brief Copy data from one mdspan to another with the same extents
*
* This function copies data from one mdspan to another, regardless of whether
* or not the mdspans have the same layout, memory type (host/device/managed)
* or data type. So long as it is possible to convert the data type from source
* to destination, and the extents are equal, this function should be able to
* perform the copy.
*
* This header does _not_ include the custom kernel used for copying data
* between completely arbitrary mdspans on device. For arbitrary copies of this
* kind, `#include <raft/core/copy.cuh>` instead. Specializations of this
* function that require the custom kernel will be SFINAE-omitted when this
* header is used instead of `copy.cuh`. This header _does_ support
* device-to-device copies that can be performed with cuBLAS or a
* straightforward cudaMemcpy. Any necessary device operations will be stream-ordered via the CUDA
* stream provided by the `raft::resources` argument.
*
* Limitations: Currently this function does not support copying directly
* between two arbitrary mdspans on different CUDA devices. It is assumed that the caller sets the
* correct CUDA device. Furthermore, host-to-host copies that require a transformation of the
* underlying memory layout are currently not performant, although they are supported.
*
* Note that when copying to an mdspan with a non-unique layout (i.e. the same
* underlying memory is addressed by different element indexes), the source
* data must contain non-unique values for every non-unique destination
* element. If this is not the case, the behavior is undefined. Some copies
* to non-unique layouts which are well-defined will nevertheless fail with an
* exception to avoid race conditions in the underlying copy.
*
* @tparam DstType An mdspan type for the destination container.
* @tparam SrcType An mdspan type for the source container
* @param res raft::resources used to provide a stream for copies involving the
* device.
* @param dst The destination mdspan.
* @param src The source mdspan.
*/
// The SFINAE'd return type removes this overload from the candidate set when
// the copy would require the custom device kernel; those cases need
// <raft/core/copy.cuh> instead (see the comment block above).
template <typename DstType, typename SrcType>
detail::mdspan_copyable_not_with_kernel_t<DstType, SrcType> copy(resources const& res,
                                                                 DstType&& dst,
                                                                 SrcType&& src)
{
  detail::copy(res, std::forward<DstType>(dst), std::forward<SrcType>(src));
}
#endif
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_coo_matrix.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/coo_matrix.hpp>
#include <raft/core/host_container_policy.hpp>
#include <raft/core/host_span.hpp>
#include <raft/core/sparse_types.hpp>
namespace raft {
/**
* \defgroup host_coo_matrix Host COO Matrix
* @{
*/
/**
* Specialization for a sparsity-preserving coordinate structure view which uses host memory
*/
template <typename RowType, typename ColType, typename NZType>
using host_coordinate_structure_view = coordinate_structure_view<RowType, ColType, NZType, false>;

/**
 * Specialization for a sparsity-owning coordinate structure which uses host memory
 */
template <typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy>
using host_coordinate_structure =
  coordinate_structure<RowType, ColType, NZType, false, ContainerPolicy>;

/**
 * Specialization for a coo matrix view which uses host memory
 */
template <typename ElementType, typename RowType, typename ColType, typename NZType>
using host_coo_matrix_view = coo_matrix_view<ElementType, RowType, ColType, NZType, false>;

/**
 * Specialization for a coo matrix which uses host memory; the `sparsity_type`
 * parameter selects between sparsity-owning (default) and sparsity-preserving
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy,
          SparsityType sparsity_type                     = SparsityType::OWNING>
using host_coo_matrix =
  coo_matrix<ElementType, RowType, ColType, NZType, false, ContainerPolicy, sparsity_type>;

/**
 * Specialization for a sparsity-owning coo matrix which uses host memory
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy>
using host_sparsity_owning_coo_matrix =
  coo_matrix<ElementType, RowType, ColType, NZType, false, ContainerPolicy>;

/**
 * Specialization for a sparsity-preserving coo matrix which uses host memory
 */
template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T> typename ContainerPolicy = host_vector_policy>
using host_sparsity_preserving_coo_matrix = coo_matrix<ElementType,
                                                       RowType,
                                                       ColType,
                                                       NZType,
                                                       false,
                                                       ContainerPolicy,
                                                       SparsityType::PRESERVING>;
/** Trait: true iff T is a `host_coo_matrix_view` specialization */
template <typename T>
struct is_host_coo_matrix_view : std::false_type {};

template <typename ElementType, typename RowType, typename ColType, typename NZType>
struct is_host_coo_matrix_view<host_coo_matrix_view<ElementType, RowType, ColType, NZType>>
  : std::true_type {};

/** Convenience variable template for `is_host_coo_matrix_view` */
template <typename T>
constexpr bool is_host_coo_matrix_view_v = is_host_coo_matrix_view<T>::value;

/** Trait: true iff T is a `host_coo_matrix` specialization */
template <typename T>
struct is_host_coo_matrix : std::false_type {};

template <typename ElementType,
          typename RowType,
          typename ColType,
          typename NZType,
          template <typename T>
          typename ContainerPolicy,
          SparsityType sparsity_type>
struct is_host_coo_matrix<
  host_coo_matrix<ElementType, RowType, ColType, NZType, ContainerPolicy, sparsity_type>>
  : std::true_type {};

/** Convenience variable template for `is_host_coo_matrix` */
template <typename T>
constexpr bool is_host_coo_matrix_v = is_host_coo_matrix<T>::value;

/** True when T is a host COO matrix that owns its sparsity structure */
template <typename T>
constexpr bool is_host_coo_sparsity_owning_v =
  is_host_coo_matrix<T>::value and T::get_sparsity_type() == OWNING;

/** True when T is a host COO matrix that preserves (does not own) its sparsity */
template <typename T>
constexpr bool is_host_coo_sparsity_preserving_v =
  is_host_coo_matrix<T>::value and T::get_sparsity_type() == PRESERVING;
/**
* Create a sparsity-owning sparse matrix in the coordinate format. sparsity-owning means that
* all of the underlying vectors (data, indptr, indices) are owned by the coo_matrix instance. If
* not known up front, the sparsity can be ignored in this factory function and `resize()` invoked
* on the instance once the sparsity is known.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
*
* raft::resources handle;
* coo_matrix = raft::make_host_coo_matrix(handle, n_rows, n_cols);
* ...
* // compute expected sparsity
* ...
* int nnz = 5000;
* coo_matrix.initialize_sparsity(nnz);
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] handle raft handle for managing expensive resources
* @param[in] n_rows total number of rows in the matrix
* @param[in] n_cols total number of columns in the matrix
* @param[in] nnz number of non-zeros in the matrix if known [optional]
* @return a sparsity-owning sparse matrix in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_host_coo_matrix(raft::resources const& handle,
                          RowType n_rows,
                          ColType n_cols,
                          NZType nnz = 0)
{
  // Delegate to the sparsity-owning specialization; nnz may be left at 0 and
  // supplied later via initialize_sparsity().
  using matrix_type = host_sparsity_owning_coo_matrix<ElementType, RowType, ColType, NZType>;
  return matrix_type(handle, n_rows, n_cols, nnz);
}
/**
* Create a sparsity-preserving sparse matrix in the coordinate format. sparsity-preserving means
* that a view of the coo sparsity is supplied, allowing the values in the sparsity to change but
* not the sparsity itself. The coo_matrix instance does not own the sparsity, the sparsity must
* be known up front, and cannot be resized later.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
*
* raft::resources handle;
* coo_structure = raft::make_host_coordinate_structure(handle, n_rows, n_cols);
* ...
* // compute expected sparsity
* ...
* coo_structure.initialize_sparsity(nnz);
* coo_matrix = raft::make_host_coo_matrix(handle, coo_structure.view());
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] handle raft handle for managing expensive resources
* @param[in] structure a sparsity-preserving coordinate structural view
* @return a sparsity-preserving sparse matrix in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_host_coo_matrix(raft::resources const& handle,
                          host_coordinate_structure_view<RowType, ColType, NZType> structure)
{
  // The returned matrix owns only its element values; the sparsity pattern
  // remains with the caller-supplied structure view.
  using matrix_type = host_sparsity_preserving_coo_matrix<ElementType, RowType, ColType, NZType>;
  return matrix_type(handle, structure);
}
/**
* Create a non-owning sparse matrix view in the coordinate format. This is sparsity-preserving,
* meaning that the underlying sparsity is known and cannot be changed. Use the sparsity-owning
* coo_matrix if sparsity needs to be mutable.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
* int nnz = 5000;
*
* // The following pointer is assumed to reference host-accessible memory for a size of nnz
* float* h_elm_ptr = ...;
*
* raft::resources handle;
* coo_structure = raft::make_host_coordinate_structure(handle, n_rows, n_cols, nnz);
* coo_matrix_view = raft::make_host_coo_matrix_view(handle, h_elm_ptr, coo_structure.view());
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] ptr a pointer to array of nonzero matrix elements on host (size nnz)
* @param[in] structure a sparsity-preserving coordinate structural view
* @return a sparsity-preserving sparse matrix in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_host_coo_matrix_view(ElementType* ptr,
                               host_coordinate_structure_view<RowType, ColType, NZType> structure)
{
  // The structural view knows its nnz, so wrap the raw pointer in a span of exactly
  // that many elements before handing it to the view constructor.
  auto elements = raft::host_span<ElementType>(ptr, structure.get_nnz());
  return host_coo_matrix_view<ElementType, RowType, ColType, NZType>(elements, structure);
}
/**
* Create a non-owning sparse matrix view in the coordinate format. This is sparsity-preserving,
* meaning that the underlying sparsity is known and cannot be changed. Use the sparsity-owning
* coo_matrix if sparsity needs to be mutable.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_span.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
* int nnz = 5000;
*
* // The following span is assumed to be of size nnz
* raft::host_span<float> h_elm_ptr;
*
* raft::resources handle;
* coo_structure = raft::make_host_coordinate_structure(handle, n_rows, n_cols, nnz);
 * coo_matrix_view = raft::make_host_coo_matrix_view(h_elm_ptr, coo_structure.view());
* @endcode
*
* @tparam ElementType
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] elements a host span containing nonzero matrix elements (size nnz)
* @param[in] structure a sparsity-preserving coordinate structural view
 * @return a sparsity-preserving sparse matrix view in coordinate (coo) format
*/
template <typename ElementType, typename RowType, typename ColType, typename NZType>
auto make_host_coo_matrix_view(raft::host_span<ElementType> elements,
                               host_coordinate_structure_view<RowType, ColType, NZType> structure)
{
  // The element array must match the sparsity (nnz) recorded in the structural view.
  RAFT_EXPECTS(elements.size() == structure.get_nnz(),
               "Size of elements must be equal to the nnz from the structure");
  using coo_view_t = host_coo_matrix_view<ElementType, RowType, ColType, NZType>;
  return coo_view_t(elements, structure);
}
/**
* Create a sparsity-owning coordinate structure object. If not known up front, this object can be
* resized() once the sparsity (number of non-zeros) is known, postponing the allocation of the
* underlying data arrays.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
* int nnz = 5000;
*
* raft::resources handle;
* coo_structure = raft::make_host_coordinate_structure(handle, n_rows, n_cols, nnz);
 * ...
* // compute expected sparsity
* ...
* coo_structure.initialize_sparsity(nnz);
* @endcode
*
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] handle raft handle for managing expensive resources on host
* @param[in] n_rows total number of rows
* @param[in] n_cols total number of cols
* @param[in] nnz number of non-zeros
* @return a sparsity-owning coordinate structure instance
*/
template <typename RowType, typename ColType, typename NZType>
auto make_host_coordinate_structure(raft::resources const& handle,
                                    RowType n_rows,
                                    ColType n_cols,
                                    NZType nnz = 0)
{
  // nnz may legitimately be 0 here; the caller can invoke initialize_sparsity()
  // later, once the actual number of non-zeros is known.
  using structure_t = host_coordinate_structure<RowType, ColType, NZType>;
  return structure_t(handle, n_rows, n_cols, nnz);
}
/**
* Create a non-owning sparsity-preserved coordinate structure view. Sparsity-preserving means that
* the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
* sparsity is not known up front.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
* int nnz = 5000;
*
* // The following pointers are assumed to reference host-accessible memory of size nnz
* int *rows = ...;
* int *cols = ...;
*
* raft::resources handle;
 * coo_structure = raft::make_host_coordinate_structure_view(rows, cols, n_rows, n_cols,
 *                                                           nnz);
* @endcode
*
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] rows pointer to row indices array on host (size nnz)
* @param[in] cols pointer to column indices array on host (size nnz)
* @param[in] n_rows total number of rows
* @param[in] n_cols total number of columns
* @param[in] nnz number of non-zeros
* @return a sparsity-preserving coordinate structural view
*/
template <typename RowType, typename ColType, typename NZType>
auto make_host_coordinate_structure_view(
  RowType* rows, ColType* cols, RowType n_rows, ColType n_cols, NZType nnz)
{
  // Both index arrays are expected to hold exactly nnz entries.
  auto row_span = raft::host_span<RowType>(rows, nnz);
  auto col_span = raft::host_span<ColType>(cols, nnz);
  return host_coordinate_structure_view<RowType, ColType, NZType>(
    row_span, col_span, n_rows, n_cols);
}
/**
* Create a non-owning sparsity-preserved coordinate structure view. Sparsity-preserving means that
* the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
* sparsity is not known up front.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/core/host_coo_matrix.hpp>
*
* int n_rows = 100000;
* int n_cols = 10000;
* int nnz = 5000;
*
* // The following host spans are assumed to be of size nnz
* raft::host_span<int> rows;
* raft::host_span<int> cols;
*
* raft::resources handle;
 * coo_structure = raft::make_host_coordinate_structure_view(rows, cols, n_rows, n_cols);
* @endcode
*
* @tparam RowType
* @tparam ColType
* @tparam NZType
* @param[in] rows a host span containing row indices (size nnz)
* @param[in] cols a host span containing column indices (size nnz)
* @param[in] n_rows total number of rows
* @param[in] n_cols total number of columns
* @return a sparsity-preserving coordinate structural view
*/
template <typename RowType, typename ColType, typename NZType>
auto make_host_coordinate_structure_view(raft::host_span<RowType> rows,
                                         raft::host_span<ColType> cols,
                                         RowType n_rows,
                                         ColType n_cols)
{
  // The spans carry the nnz implicitly; only the dense dimensions are passed alongside.
  using structure_view_t = host_coordinate_structure_view<RowType, ColType, NZType>;
  return structure_view_t(rows, cols, n_rows, n_cols);
}
/** @} */
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_mdspan.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/memory_type.hpp>
namespace raft {
// Accessor wrapper tagging an mdspan as referencing device-accessible memory.
template <typename AccessorPolicy>
using device_accessor = host_device_accessor<AccessorPolicy, memory_type::device>;
// Accessor wrapper tagging an mdspan as referencing managed memory
// (accessible from both host and device).
template <typename AccessorPolicy>
using managed_accessor = host_device_accessor<AccessorPolicy, memory_type::managed>;
/**
 * @brief std::experimental::mdspan with device tag to avoid accessing incorrect memory location.
 */
template <typename ElementType,
          typename Extents,
          typename LayoutPolicy = layout_c_contiguous,
          typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
using device_mdspan = mdspan<ElementType, Extents, LayoutPolicy, device_accessor<AccessorPolicy>>;
/**
 * @brief std::experimental::mdspan with managed tag: data may be touched from host and device.
 */
template <typename ElementType,
          typename Extents,
          typename LayoutPolicy = layout_c_contiguous,
          typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
using managed_mdspan = mdspan<ElementType, Extents, LayoutPolicy, managed_accessor<AccessorPolicy>>;
// Primary template: B == false means T is not an mdspan at all.
template <typename T, bool B>
struct is_device_mdspan : std::false_type {};
// T is an mdspan; the verdict is whether its accessor permits device access.
template <typename T>
struct is_device_mdspan<T, true> : std::bool_constant<T::accessor_type::is_device_accessible> {};
/**
 * @brief Boolean to determine if template type T is either raft::device_mdspan or a derived type
 */
template <typename T>
using is_device_mdspan_t = is_device_mdspan<T, is_mdspan_v<T>>;
template <typename T>
using is_input_device_mdspan_t = is_device_mdspan<T, is_input_mdspan_v<T>>;
template <typename T>
using is_output_device_mdspan_t = is_device_mdspan<T, is_output_mdspan_v<T>>;
// Primary template: B == false means T is not an mdspan at all.
template <typename T, bool B>
struct is_managed_mdspan : std::false_type {};
// T is an mdspan; the verdict is whether its accessor permits managed access.
template <typename T>
struct is_managed_mdspan<T, true> : std::bool_constant<T::accessor_type::is_managed_accessible> {};
/**
 * @brief Boolean to determine if template type T is either raft::managed_mdspan or a derived type
 */
template <typename T>
using is_managed_mdspan_t = is_managed_mdspan<T, is_mdspan_v<T>>;
template <typename T>
using is_input_managed_mdspan_t = is_managed_mdspan<T, is_input_mdspan_v<T>>;
template <typename T>
using is_output_managed_mdspan_t = is_managed_mdspan<T, is_output_mdspan_v<T>>;
/**
 * @brief Boolean to determine if variadic template types Tn are either raft::device_mdspan or a
 * derived type
 */
template <typename... Tn>
inline constexpr bool is_device_mdspan_v = std::conjunction_v<is_device_mdspan_t<Tn>...>;
template <typename... Tn>
inline constexpr bool is_input_device_mdspan_v =
  std::conjunction_v<is_input_device_mdspan_t<Tn>...>;
template <typename... Tn>
inline constexpr bool is_output_device_mdspan_v =
  std::conjunction_v<is_output_device_mdspan_t<Tn>...>;
// SFINAE helpers: enabled only when all Tn are (input/output) device mdspans.
template <typename... Tn>
using enable_if_device_mdspan = std::enable_if_t<is_device_mdspan_v<Tn...>>;
template <typename... Tn>
using enable_if_input_device_mdspan = std::enable_if_t<is_input_device_mdspan_v<Tn...>>;
template <typename... Tn>
using enable_if_output_device_mdspan = std::enable_if_t<is_output_device_mdspan_v<Tn...>>;
/**
 * @brief Boolean to determine if variadic template types Tn are either raft::managed_mdspan or a
 * derived type
 */
template <typename... Tn>
inline constexpr bool is_managed_mdspan_v = std::conjunction_v<is_managed_mdspan_t<Tn>...>;
template <typename... Tn>
inline constexpr bool is_input_managed_mdspan_v =
  std::conjunction_v<is_input_managed_mdspan_t<Tn>...>;
template <typename... Tn>
inline constexpr bool is_output_managed_mdspan_v =
  std::conjunction_v<is_output_managed_mdspan_t<Tn>...>;
// SFINAE helpers: enabled only when all Tn are (input/output) managed mdspans.
template <typename... Tn>
using enable_if_managed_mdspan = std::enable_if_t<is_managed_mdspan_v<Tn...>>;
template <typename... Tn>
using enable_if_input_managed_mdspan = std::enable_if_t<is_input_managed_mdspan_v<Tn...>>;
template <typename... Tn>
using enable_if_output_managed_mdspan = std::enable_if_t<is_output_managed_mdspan_v<Tn...>>;
/**
 * @brief Shorthand for 0-dim device mdspan (scalar).
 * @tparam ElementType the data type of the scalar element
 * @tparam IndexType the index type of the extents
 */
template <typename ElementType, typename IndexType = std::uint32_t>
using device_scalar_view = device_mdspan<ElementType, scalar_extent<IndexType>>;
/**
 * @brief Shorthand for 1-dim device mdspan.
 * @tparam ElementType the data type of the vector elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
using device_vector_view = device_mdspan<ElementType, vector_extent<IndexType>, LayoutPolicy>;
/**
 * @brief Shorthand for c-contiguous device matrix view.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
using device_matrix_view = device_mdspan<ElementType, matrix_extent<IndexType>, LayoutPolicy>;
/**
 * @brief Shorthand for 128 byte aligned device matrix view.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy must be of type layout_{left/right}_padded
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_right_padded<ElementType>,
          typename = enable_if_layout_padded<ElementType, LayoutPolicy>>
using device_aligned_matrix_view =
  device_mdspan<ElementType,
                matrix_extent<IndexType>,
                LayoutPolicy,
                std::experimental::aligned_accessor<ElementType, detail::alignment::value>>;
/**
* @brief Create a 2-dim 128 byte aligned mdspan instance for device pointer. It's
* expected that the given layout policy match the layout of the underlying
* pointer.
* @tparam ElementType the data type of the matrix elements
* @tparam LayoutPolicy must be of type layout_{left/right}_padded
* @tparam IndexType the index type of the extents
* @param[in] ptr on device to wrap
* @param[in] n_rows number of rows in pointer
* @param[in] n_cols number of columns in pointer
*/
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_right_padded<ElementType>>
auto make_device_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
{
using data_handle_type =
typename std::experimental::aligned_accessor<ElementType,
detail::alignment::value>::data_handle_type;
static_assert(std::is_same<LayoutPolicy, layout_left_padded<ElementType>>::value ||
std::is_same<LayoutPolicy, layout_right_padded<ElementType>>::value);
assert(reinterpret_cast<std::uintptr_t>(ptr) ==
std::experimental::details::alignTo(reinterpret_cast<std::uintptr_t>(ptr),
detail::alignment::value));
data_handle_type aligned_pointer = ptr;
matrix_extent<IndexType> extents{n_rows, n_cols};
return device_aligned_matrix_view<ElementType, IndexType, LayoutPolicy>{aligned_pointer, extents};
}
/**
* @brief Create a raft::managed_mdspan
* @tparam ElementType the data type of the matrix elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy policy for strides and layout ordering
* @param ptr Pointer to the data
* @param exts dimensionality of the array (series of integers)
* @return raft::managed_mdspan
*/
template <typename ElementType,
          typename IndexType    = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous,
          size_t... Extents>
auto make_managed_mdspan(ElementType* ptr, extents<IndexType, Extents...> exts)
{
  // Managed memory is reachable from both sides, so both accessibility flags are set.
  constexpr bool host_accessible   = true;
  constexpr bool device_accessible = true;
  return make_mdspan<ElementType, IndexType, LayoutPolicy, host_accessible, device_accessible>(
    ptr, exts);
}
/**
* @brief Create a 0-dim (scalar) mdspan instance for device value.
*
* @tparam ElementType the data type of the matrix elements
* @tparam IndexType the index type of the extents
* @param[in] ptr on device to wrap
*/
template <typename ElementType, typename IndexType = std::uint32_t>
auto make_device_scalar_view(ElementType* ptr)
{
  // A scalar view has rank 0, so its extents object is simply default-constructed.
  return device_scalar_view<ElementType, IndexType>{ptr, scalar_extent<IndexType>{}};
}
/**
* @brief Create a 2-dim c-contiguous mdspan instance for device pointer. It's
* expected that the given layout policy match the layout of the underlying
* pointer.
* @tparam ElementType the data type of the matrix elements
* @tparam LayoutPolicy policy for strides and layout ordering
* @tparam IndexType the index type of the extents
* @param[in] ptr on device to wrap
* @param[in] n_rows number of rows in pointer
* @param[in] n_cols number of columns in pointer
*/
template <typename ElementType,
          typename IndexType    = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_device_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
{
  // Wrap the raw device pointer in a 2-d view; the caller guarantees that the
  // chosen layout policy matches how the data was actually laid out.
  return device_matrix_view<ElementType, IndexType, LayoutPolicy>{
    ptr, matrix_extent<IndexType>{n_rows, n_cols}};
}
/**
* @brief Create a 2-dim mdspan instance for device pointer with a strided layout
* that is restricted to stride 1 in the trailing dimension. It's
* expected that the given layout policy match the layout of the underlying
* pointer.
* @tparam ElementType the data type of the matrix elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy policy for strides and layout ordering
* @param[in] ptr on device to wrap
* @param[in] n_rows number of rows in pointer
* @param[in] n_cols number of columns in pointer
* @param[in] stride leading dimension / stride of data
*/
template <typename ElementType, typename IndexType, typename LayoutPolicy = layout_c_contiguous>
auto make_device_strided_matrix_view(ElementType* ptr,
IndexType n_rows,
IndexType n_cols,
IndexType stride)
{
constexpr auto is_row_major = std::is_same_v<LayoutPolicy, layout_c_contiguous>;
IndexType stride0 = is_row_major ? (stride > 0 ? stride : n_cols) : 1;
IndexType stride1 = is_row_major ? 1 : (stride > 0 ? stride : n_rows);
assert(is_row_major ? stride0 >= n_cols : stride1 >= n_rows);
matrix_extent<IndexType> extents{n_rows, n_cols};
auto layout = make_strided_layout(extents, std::array<IndexType, 2>{stride0, stride1});
return device_matrix_view<ElementType, IndexType, layout_stride>{ptr, layout};
}
/**
* @brief Create a 1-dim mdspan instance for device pointer.
* @tparam ElementType the data type of the vector elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy policy for strides and layout ordering
* @param[in] ptr on device to wrap
* @param[in] n number of elements in pointer
* @return raft::device_vector_view
*/
template <typename ElementType, typename IndexType, typename LayoutPolicy = layout_c_contiguous>
auto make_device_vector_view(ElementType* ptr, IndexType n)
{
return device_vector_view<ElementType, IndexType, LayoutPolicy>{ptr, n};
}
/**
* @brief Create a 1-dim mdspan instance for device pointer.
* @tparam ElementType the data type of the vector elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy policy for strides and layout ordering
* @param[in] ptr on device to wrap
* @param[in] mapping The layout mapping to use for this vector
* @return raft::device_vector_view
*/
template <typename ElementType, typename IndexType, typename LayoutPolicy = layout_c_contiguous>
auto make_device_vector_view(
ElementType* ptr,
const typename LayoutPolicy::template mapping<vector_extent<IndexType>>& mapping)
{
return device_vector_view<ElementType, IndexType, LayoutPolicy>{ptr, mapping};
}
/**
* @brief Construct a strided vector layout mapping
*
* Usage example:
* @code{.cpp}
* #include <raft/core/device_mdspan.hpp>
*
* int n_elements = 10;
* int stride = 10;
* auto vector = raft::make_device_vector_view(vector_ptr,
* raft::make_vector_strided_layout(n_elements, stride));
* @endcode
*
* @tparam IndexType the index type of the extents
* @param[in] n the number of elements in the vector
* @param[in] stride the stride between elements in the vector
*/
template <typename IndexType>
auto make_vector_strided_layout(IndexType n, IndexType stride)
{
return make_strided_layout(vector_extent<IndexType>{n}, std::array<IndexType, 1>{stride});
}
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/cusparse_macros.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse.h>
#include <raft/core/error.hpp>
///@todo: enable this once logging is enabled
// #include <cuml/common/logger.hpp>
#define _CUSPARSE_ERR_TO_STR(err) \
case err: return #err;
// Notes:
//(1.) CUDA_VER_10_1_UP aggregates all the CUDA version selection logic;
//(2.) to enforce a lower version,
//
//`#define CUDA_ENFORCE_LOWER
// #include <raft/sparse/detail/cusparse_wrappers.h>`
//
// (i.e., before including this header)
//
#define CUDA_VER_10_1_UP (CUDART_VERSION >= 10100)
namespace raft {
/**
* @ingroup error_handling
* @{
*/
/**
* @brief Exception thrown when a cuSparse error is encountered.
*/
struct cusparse_error : public raft::exception {
  // Construct from a C string; the message is forwarded to the raft::exception base.
  explicit cusparse_error(char const* const message) : raft::exception(message) {}
  // Construct from a std::string message.
  explicit cusparse_error(std::string const& message) : raft::exception(message) {}
};
/**
* @}
*/
namespace sparse {
namespace detail {
// Convert a cuSparse status code into a human-readable string for error reporting.
inline const char* cusparse_error_to_string(cusparseStatus_t err)
{
#if defined(CUDART_VERSION) && CUDART_VERSION >= 10100
  // CUDA >= 10.1 ships an official conversion routine; prefer it.
  return cusparseGetErrorString(err);
#else   // CUDART_VERSION
  // Older toolkits: stringify each known status via the _CUSPARSE_ERR_TO_STR macro.
  switch (err) {
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_SUCCESS);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_NOT_INITIALIZED);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_ALLOC_FAILED);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_INVALID_VALUE);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_ARCH_MISMATCH);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_EXECUTION_FAILED);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_INTERNAL_ERROR);
    _CUSPARSE_ERR_TO_STR(CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    default: return "CUSPARSE_STATUS_UNKNOWN";
  };
#endif  // CUDART_VERSION
}
} // namespace detail
} // namespace sparse
} // namespace raft
#undef _CUSPARSE_ERR_TO_STR
/**
* @ingroup assertion
* @{
*/
/**
* @brief Error checking macro for cuSparse runtime API functions.
*
* Invokes a cuSparse runtime API function call, if the call does not return
* CUSPARSE_STATUS_SUCCESS, throws an exception detailing the cuSparse error that occurred
*/
#define RAFT_CUSPARSE_TRY(call) \
do { \
cusparseStatus_t const status = (call); \
if (CUSPARSE_STATUS_SUCCESS != status) { \
std::string msg{}; \
SET_ERROR_MSG(msg, \
"cuSparse error encountered at: ", \
"call='%s', Reason=%d:%s", \
#call, \
status, \
raft::sparse::detail::cusparse_error_to_string(status)); \
throw raft::cusparse_error(msg); \
} \
} while (0)
/**
* @}
*/
// FIXME: Remove after consumer rename
#ifndef CUSPARSE_TRY
#define CUSPARSE_TRY(call) RAFT_CUSPARSE_TRY(call)
#endif
// FIXME: Remove after consumer rename
#ifndef CUSPARSE_CHECK
#define CUSPARSE_CHECK(call) CUSPARSE_TRY(call)
#endif
/**
* @ingroup assertion
* @{
*/
//@todo: use logger here once logging is enabled
/** check for cusparse runtime API errors but do not assert */
#define RAFT_CUSPARSE_TRY_NO_THROW(call) \
do { \
cusparseStatus_t err = call; \
if (err != CUSPARSE_STATUS_SUCCESS) { \
printf("CUSPARSE call='%s' got errorcode=%d err=%s", \
#call, \
err, \
raft::sparse::detail::cusparse_error_to_string(err)); \
} \
} while (0)
/**
* @}
*/
// FIXME: Remove after consumer rename
#ifndef CUSPARSE_CHECK_NO_THROW
#define CUSPARSE_CHECK_NO_THROW(call) RAFT_CUSPARSE_TRY_NO_THROW(call)
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_span.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/span.hpp>
namespace raft {
/**
 * @defgroup host_span one-dimensional host span type
* @{
*/
/**
* @brief A span class for host pointer.
*/
template <typename T, size_t extent = std::experimental::dynamic_extent>
using host_span = span<T, false, extent>;
/**
* @}
*/
} // end namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_setter.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime_api.h>
#include <raft/core/logger.hpp>
#include <raft/util/cuda_rt_essentials.hpp>
namespace raft {
/**
* @brief A scoped setter for the active CUDA device
*
* On construction, the device_setter will set the active CUDA device to the
* indicated value. On deletion, the active CUDA device will be set back to
* its previous value. If the call to set the new active device fails, an
* exception will be thrown. If the call to set the device back to its
* previously selected value throws, an error will be logged, but no
* exception will be thrown.
*
 * @param device_id The ID of the CUDA device to make active
*
*/
struct device_setter {
  /** Query the id of the CUDA device that is currently active. */
  static auto get_current_device()
  {
    int device_id{};
    RAFT_CUDA_TRY(cudaGetDevice(&device_id));
    return device_id;
  }
  /** Query how many CUDA devices are currently available. */
  static auto get_device_count()
  {
    int device_count{};
    RAFT_CUDA_TRY(cudaGetDeviceCount(&device_count));
    return device_count;
  }

  /** Remember the currently active device, then activate `new_device`. Throws on failure. */
  explicit device_setter(int new_device) : prev_device_{get_current_device()}
  {
    RAFT_CUDA_TRY(cudaSetDevice(new_device));
  }
  /** Restore the previously active device; best-effort, never throws. */
  ~device_setter() { RAFT_CUDA_TRY_NO_THROW(cudaSetDevice(prev_device_)); }

 private:
  // Device that was active when this object was constructed.
  int prev_device_;
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/comms.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <memory>
#include <raft/core/error.hpp>
#include <vector>
namespace raft {
namespace comms {
/**
* @defgroup comms_types Common mnmg comms types
* @{
*/
// Handle identifying an asynchronous point-to-point request (see isend/irecv/waitall).
typedef unsigned int request_t;
// Element datatypes that can be communicated through the comms abstraction.
enum class datatype_t { CHAR, UINT8, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT64 };
// Reduction operations supported by allreduce/reduce/reducescatter.
enum class op_t { SUM, PROD, MIN, MAX };
/**
 * The resulting status of distributed stream synchronization
 */
enum class status_t {
  SUCCESS,  // Synchronization successful
  ERROR,    // An error occurred querying sync status
  ABORT     // A failure occurred in sync, queued operations aborted
};
// Compile-time mapping from a C++ type to its datatype_t tag. The primary
// template is intentionally declared but not defined: instantiating it with an
// unsupported type is an error, so only the specializations below are usable.
template <typename value_t>
constexpr datatype_t
get_type();
template <>
constexpr datatype_t
get_type<char>()
{
  return datatype_t::CHAR;
}
template <>
constexpr datatype_t
get_type<uint8_t>()
{
  return datatype_t::UINT8;
}
template <>
constexpr datatype_t
get_type<int>()
{
  return datatype_t::INT32;
}
template <>
constexpr datatype_t
get_type<uint32_t>()
{
  return datatype_t::UINT32;
}
template <>
constexpr datatype_t
get_type<int64_t>()
{
  return datatype_t::INT64;
}
template <>
constexpr datatype_t
get_type<uint64_t>()
{
  return datatype_t::UINT64;
}
template <>
constexpr datatype_t
get_type<float>()
{
  return datatype_t::FLOAT32;
}
template <>
constexpr datatype_t
get_type<double>()
{
  return datatype_t::FLOAT64;
}
/**
* @}
*/
/**
* @defgroup comms_iface MNMG Communicator Interface
* @{
*/
// Abstract communicator interface implemented by concrete backends.
// NOTE(review): the declaration order of these pure virtuals defines the vtable
// layout; do not reorder them without rebuilding all consumers.
class comms_iface {
 public:
  virtual ~comms_iface() {}
  // Size of the communicator clique and this process' rank within it.
  virtual int get_size() const = 0;
  virtual int get_rank() const = 0;
  // Split the clique into sub-cliques by color; key orders ranks within each.
  virtual std::unique_ptr<comms_iface> comm_split(int color, int key) const = 0;
  // Blocking barrier across all ranks.
  virtual void barrier() const = 0;
  // Synchronize collectives queued on `stream`, allowing failures to propagate.
  virtual status_t sync_stream(cudaStream_t stream) const = 0;
  // Asynchronous point-to-point messaging; complete requests via waitall().
  // Sizes are in bytes.
  virtual void isend(const void* buf, size_t size, int dest, int tag, request_t* request) const = 0;
  virtual void irecv(void* buf, size_t size, int source, int tag, request_t* request) const = 0;
  virtual void waitall(int count, request_t array_of_requests[]) const = 0;
  // Collective operations; counts are in elements of `datatype`.
  virtual void allreduce(const void* sendbuff,
                         void* recvbuff,
                         size_t count,
                         datatype_t datatype,
                         op_t op,
                         cudaStream_t stream) const = 0;
  virtual void bcast(
    void* buff, size_t count, datatype_t datatype, int root, cudaStream_t stream) const = 0;
  virtual void bcast(const void* sendbuff,
                     void* recvbuff,
                     size_t count,
                     datatype_t datatype,
                     int root,
                     cudaStream_t stream) const = 0;
  virtual void reduce(const void* sendbuff,
                      void* recvbuff,
                      size_t count,
                      datatype_t datatype,
                      op_t op,
                      int root,
                      cudaStream_t stream) const = 0;
  virtual void allgather(const void* sendbuff,
                         void* recvbuff,
                         size_t sendcount,
                         datatype_t datatype,
                         cudaStream_t stream) const = 0;
  virtual void allgatherv(const void* sendbuf,
                          void* recvbuf,
                          const size_t* recvcounts,
                          const size_t* displs,
                          datatype_t datatype,
                          cudaStream_t stream) const = 0;
  virtual void gather(const void* sendbuff,
                      void* recvbuff,
                      size_t sendcount,
                      datatype_t datatype,
                      int root,
                      cudaStream_t stream) const = 0;
  virtual void gatherv(const void* sendbuf,
                       void* recvbuf,
                       size_t sendcount,
                       const size_t* recvcounts,
                       const size_t* displs,
                       datatype_t datatype,
                       int root,
                       cudaStream_t stream) const = 0;
  virtual void reducescatter(const void* sendbuff,
                             void* recvbuff,
                             size_t recvcount,
                             datatype_t datatype,
                             op_t op,
                             cudaStream_t stream) const = 0;
  // if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
  virtual void device_send(const void* buf, size_t size, int dest, cudaStream_t stream) const = 0;
  // if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
  virtual void device_recv(void* buf, size_t size, int source, cudaStream_t stream) const = 0;
  virtual void device_sendrecv(const void* sendbuf,
                               size_t sendsize,
                               int dest,
                               void* recvbuf,
                               size_t recvsize,
                               int source,
                               cudaStream_t stream) const = 0;
  virtual void device_multicast_sendrecv(const void* sendbuf,
                                         std::vector<size_t> const& sendsizes,
                                         std::vector<size_t> const& sendoffsets,
                                         std::vector<int> const& dests,
                                         void* recvbuf,
                                         std::vector<size_t> const& recvsizes,
                                         std::vector<size_t> const& recvoffsets,
                                         std::vector<int> const& sources,
                                         cudaStream_t stream) const = 0;
  // Group multiple point-to-point calls so the backend can fuse/schedule them together.
  virtual void group_start() const = 0;
  virtual void group_end() const = 0;
};
/**
* @}
*/
/**
* @defgroup comms_t Base Communicator Proxy
* @{
*/
class comms_t {
public:
comms_t(std::unique_ptr<comms_iface> impl) : impl_(impl.release())
{
ASSERT(nullptr != impl_.get(), "ERROR: Invalid comms_iface used!");
}
/**
* Virtual Destructor to enable polymorphism
*/
virtual ~comms_t() {}
/**
* Returns the size of the communicator clique
*/
int get_size() const { return impl_->get_size(); }
/**
* Returns the local rank
*/
int get_rank() const { return impl_->get_rank(); }
/**
* Splits the current communicator clique into sub-cliques matching
* the given color and key
*
* @param color ranks w/ the same color are placed in the same communicator
* @param key controls rank assignment
*/
std::unique_ptr<comms_iface> comm_split(int color, int key) const
{
return impl_->comm_split(color, key);
}
/**
* Performs a collective barrier synchronization
*/
void barrier() const { impl_->barrier(); }
/**
* Some collective communications implementations (eg. NCCL) might use asynchronous
* collectives that are explicitly synchronized. It's important to always synchronize
* using this method to allow failures to propagate, rather than `cudaStreamSynchronize()`,
* to prevent the potential for deadlocks.
*
* @param stream the cuda stream to sync collective operations on
*/
status_t sync_stream(cudaStream_t stream) const { return impl_->sync_stream(stream); }
/**
* Performs an asynchronous point-to-point send
* @tparam value_t the type of data to send
* @param buf pointer to array of data to send
* @param size number of elements in buf
* @param dest destination rank
* @param tag a tag to use for the receiver to filter
* @param request pointer to hold returned request_t object.
* This will be used in `waitall()` to synchronize until the message is delivered (or fails).
*/
template <typename value_t>
void isend(const value_t* buf, size_t size, int dest, int tag, request_t* request) const
{
impl_->isend(static_cast<const void*>(buf), size * sizeof(value_t), dest, tag, request);
}
/**
* Performs an asynchronous point-to-point receive
* @tparam value_t the type of data to be received
* @param buf pointer to (initialized) array that will hold received data
* @param size number of elements in buf
* @param source source rank
* @param tag a tag to use for message filtering
* @param request pointer to hold returned request_t object.
* This will be used in `waitall()` to synchronize until the message is delivered (or fails).
*/
template <typename value_t>
void irecv(value_t* buf, size_t size, int source, int tag, request_t* request) const
{
impl_->irecv(static_cast<void*>(buf), size * sizeof(value_t), source, tag, request);
}
/**
* Synchronize on an array of request_t objects returned from isend/irecv
* @param count number of requests to synchronize on
* @param array_of_requests an array of request_t objects returned from isend/irecv
*/
void waitall(int count, request_t array_of_requests[]) const
{
impl_->waitall(count, array_of_requests);
}
/**
* Perform an allreduce collective
* @tparam value_t datatype of underlying buffers
* @param sendbuff data to reduce
* @param recvbuff buffer to hold the reduced result
* @param count number of elements in sendbuff
* @param op reduction operation to perform
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void allreduce(
const value_t* sendbuff, value_t* recvbuff, size_t count, op_t op, cudaStream_t stream) const
{
impl_->allreduce(static_cast<const void*>(sendbuff),
static_cast<void*>(recvbuff),
count,
get_type<value_t>(),
op,
stream);
}
/**
* Broadcast data from one rank to the rest
* @tparam value_t datatype of underlying buffers
* @param buff buffer to send
* @param count number of elements if buff
* @param root the rank initiating the broadcast
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void bcast(value_t* buff, size_t count, int root, cudaStream_t stream) const
{
impl_->bcast(static_cast<void*>(buff), count, get_type<value_t>(), root, stream);
}
/**
* Broadcast data from one rank to the rest
* @tparam value_t datatype of underlying buffers
* @param sendbuff buffer containing data to broadcast (only used in root)
* @param recvbuff buffer to receive broadcasted data
* @param count number of elements if buff
* @param root the rank initiating the broadcast
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void bcast(
const value_t* sendbuff, value_t* recvbuff, size_t count, int root, cudaStream_t stream) const
{
impl_->bcast(static_cast<const void*>(sendbuff),
static_cast<void*>(recvbuff),
count,
get_type<value_t>(),
root,
stream);
}
/**
* Reduce data from many ranks down to a single rank
* @tparam value_t datatype of underlying buffers
* @param sendbuff buffer containing data to reduce
* @param recvbuff buffer containing reduced data (only needs to be initialized on root)
* @param count number of elements in sendbuff
* @param op reduction operation to perform
* @param root rank to store the results
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void reduce(const value_t* sendbuff,
value_t* recvbuff,
size_t count,
op_t op,
int root,
cudaStream_t stream) const
{
impl_->reduce(static_cast<const void*>(sendbuff),
static_cast<void*>(recvbuff),
count,
get_type<value_t>(),
op,
root,
stream);
}
/**
* Gathers data from each rank onto all ranks
* @tparam value_t datatype of underlying buffers
* @param sendbuff buffer containing data to gather
* @param recvbuff buffer containing gathered data from all ranks
* @param sendcount number of elements in send buffer
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void allgather(const value_t* sendbuff,
value_t* recvbuff,
size_t sendcount,
cudaStream_t stream) const
{
impl_->allgather(static_cast<const void*>(sendbuff),
static_cast<void*>(recvbuff),
sendcount,
get_type<value_t>(),
stream);
}
/**
* Gathers data from all ranks and delivers to combined data to all ranks
* @tparam value_t datatype of underlying buffers
* @param sendbuf buffer containing data to send
* @param recvbuf buffer containing data to receive
* @param recvcounts pointer to an array (of length num_ranks size) containing the number of
* elements that are to be received from each rank
* @param displs pointer to an array (of length num_ranks size) to specify the displacement
* (relative to recvbuf) at which to place the incoming data from each rank
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void allgatherv(const value_t* sendbuf,
value_t* recvbuf,
const size_t* recvcounts,
const size_t* displs,
cudaStream_t stream) const
{
impl_->allgatherv(static_cast<const void*>(sendbuf),
static_cast<void*>(recvbuf),
recvcounts,
displs,
get_type<value_t>(),
stream);
}
/**
* Gathers data from each rank onto all ranks
* @tparam value_t datatype of underlying buffers
* @param sendbuff buffer containing data to gather
* @param recvbuff buffer containing gathered data from all ranks
* @param sendcount number of elements in send buffer
* @param root rank to store the results
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void gather(const value_t* sendbuff,
value_t* recvbuff,
size_t sendcount,
int root,
cudaStream_t stream) const
{
impl_->gather(static_cast<const void*>(sendbuff),
static_cast<void*>(recvbuff),
sendcount,
get_type<value_t>(),
root,
stream);
}
/**
* Gathers data from all ranks and delivers to combined data to all ranks
* @tparam value_t datatype of underlying buffers
* @param sendbuf buffer containing data to send
* @param recvbuf buffer containing data to receive
* @param sendcount number of elements in send buffer
* @param recvcounts pointer to an array (of length num_ranks size) containing the number of
* elements that are to be received from each rank
* @param displs pointer to an array (of length num_ranks size) to specify the displacement
* (relative to recvbuf) at which to place the incoming data from each rank
* @param root rank to store the results
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void gatherv(const value_t* sendbuf,
value_t* recvbuf,
size_t sendcount,
const size_t* recvcounts,
const size_t* displs,
int root,
cudaStream_t stream) const
{
impl_->gatherv(static_cast<const void*>(sendbuf),
static_cast<void*>(recvbuf),
sendcount,
recvcounts,
displs,
get_type<value_t>(),
root,
stream);
}
/**
* Reduces data from all ranks then scatters the result across ranks
* @tparam value_t datatype of underlying buffers
* @param sendbuff buffer containing data to send (size recvcount * num_ranks)
* @param recvbuff buffer containing received data
* @param recvcount number of items to receive
* @param op reduction operation to perform
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void reducescatter(const value_t* sendbuff,
value_t* recvbuff,
size_t recvcount,
op_t op,
cudaStream_t stream) const
{
impl_->reducescatter(static_cast<const void*>(sendbuff),
static_cast<void*>(recvbuff),
recvcount,
get_type<value_t>(),
op,
stream);
}
/**
* Performs a point-to-point send
*
* if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock.
*
* @tparam value_t the type of data to send
* @param buf pointer to array of data to send
* @param size number of elements in buf
* @param dest destination rank
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void device_send(const value_t* buf, size_t size, int dest, cudaStream_t stream) const
{
impl_->device_send(static_cast<const void*>(buf), size * sizeof(value_t), dest, stream);
}
/**
* Performs a point-to-point receive
*
* if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock.
*
* @tparam value_t the type of data to be received
* @param buf pointer to (initialized) array that will hold received data
* @param size number of elements in buf
* @param source source rank
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void device_recv(value_t* buf, size_t size, int source, cudaStream_t stream) const
{
impl_->device_recv(static_cast<void*>(buf), size * sizeof(value_t), source, stream);
}
/**
* Performs a point-to-point send/receive
*
* @tparam value_t the type of data to be sent & received
* @param sendbuf pointer to array of data to send
* @param sendsize number of elements in sendbuf
* @param dest destination rank
* @param recvbuf pointer to (initialized) array that will hold received data
* @param recvsize number of elements in recvbuf
* @param source source rank
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void device_sendrecv(const value_t* sendbuf,
size_t sendsize,
int dest,
value_t* recvbuf,
size_t recvsize,
int source,
cudaStream_t stream) const
{
impl_->device_sendrecv(static_cast<const void*>(sendbuf),
sendsize * sizeof(value_t),
dest,
static_cast<void*>(recvbuf),
recvsize * sizeof(value_t),
source,
stream);
}
/**
* Performs a multicast send/receive
*
* @tparam value_t the type of data to be sent & received
* @param sendbuf pointer to array of data to send
* @param sendsizes numbers of elements to send
* @param sendoffsets offsets in a number of elements from sendbuf
* @param dests destination ranks
* @param recvbuf pointer to (initialized) array that will hold received data
* @param recvsizes numbers of elements to recv
* @param recvoffsets offsets in a number of elements from recvbuf
* @param sources source ranks
* @param stream CUDA stream to synchronize operation
*/
template <typename value_t>
void device_multicast_sendrecv(const value_t* sendbuf,
std::vector<size_t> const& sendsizes,
std::vector<size_t> const& sendoffsets,
std::vector<int> const& dests,
value_t* recvbuf,
std::vector<size_t> const& recvsizes,
std::vector<size_t> const& recvoffsets,
std::vector<int> const& sources,
cudaStream_t stream) const
{
auto sendbytesizes = sendsizes;
auto sendbyteoffsets = sendoffsets;
for (size_t i = 0; i < sendsizes.size(); ++i) {
sendbytesizes[i] *= sizeof(value_t);
sendbyteoffsets[i] *= sizeof(value_t);
}
auto recvbytesizes = recvsizes;
auto recvbyteoffsets = recvoffsets;
for (size_t i = 0; i < recvsizes.size(); ++i) {
recvbytesizes[i] *= sizeof(value_t);
recvbyteoffsets[i] *= sizeof(value_t);
}
impl_->device_multicast_sendrecv(static_cast<const void*>(sendbuf),
sendbytesizes,
sendbyteoffsets,
dests,
static_cast<void*>(recvbuf),
recvbytesizes,
recvbyteoffsets,
sources,
stream);
}
/**
* Multiple collectives & device send/receive operations placed between group_start() and
* group_end() are merged into one big operation. Internally, this function is a wrapper for
* ncclGroupStart().
*/
void group_start() const { impl_->group_start(); }
/**
* Multiple collectives & device send/receive operations placed between group_start() and
* group_end() are merged into one big operation. Internally, this function is a wrapper for
* ncclGroupEnd().
*/
void group_end() const { impl_->group_end(); }
private:
std::unique_ptr<comms_iface> impl_;
};
/**
* @}
*/
} // namespace comms
} // namespace raft
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <raft/core/device_container_policy.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/resources.hpp>
namespace raft {
/**
 * @brief mdarray with device container policy
 * @tparam ElementType the data type of the elements
 * @tparam Extents defines the shape
 * @tparam LayoutPolicy policy for indexing strides and layout ordering
 * @tparam ContainerPolicy storage and accessor policy
 */
template <typename ElementType,
          typename Extents,
          typename LayoutPolicy = layout_c_contiguous,
          typename ContainerPolicy = device_uvector_policy<ElementType>>
using device_mdarray =
  mdarray<ElementType, Extents, LayoutPolicy, device_accessor<ContainerPolicy>>;
/**
 * @brief Shorthand for 0-dim device mdarray (scalar).
 * @tparam ElementType the data type of the scalar element
 * @tparam IndexType the index type of the extents
 */
template <typename ElementType, typename IndexType = std::uint32_t>
using device_scalar = device_mdarray<ElementType, scalar_extent<IndexType>>;
/**
 * @brief Shorthand for 1-dim device mdarray.
 * @tparam ElementType the data type of the vector elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
using device_vector = device_mdarray<ElementType, vector_extent<IndexType>, LayoutPolicy>;
/**
 * @brief Shorthand for c-contiguous device matrix.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
using device_matrix = device_mdarray<ElementType, matrix_extent<IndexType>, LayoutPolicy>;
/**
 * @brief Create a device mdarray owning its memory.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param handle raft::resources
 * @param exts dimensionality of the array (series of integers)
 * @return raft::device_mdarray
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous,
          size_t... Extents>
auto make_device_mdarray(raft::resources const& handle, extents<IndexType, Extents...> exts)
{
  using mdarray_t = device_mdarray<ElementType, decltype(exts), LayoutPolicy>;
  // Build the layout mapping from the extents and pair it with a
  // default-constructed container policy for the owning array.
  typename mdarray_t::mapping_type mapping{exts};
  typename mdarray_t::container_policy_type container_policy{};
  return mdarray_t{handle, mapping, container_policy};
}
/**
 * @brief Create a device mdarray allocating through a caller-provided memory resource.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param handle raft::resources
 * @param mr rmm memory resource used for allocating the memory for the array
 * @param exts dimensionality of the array (series of integers)
 * @return raft::device_mdarray
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous,
          size_t... Extents>
auto make_device_mdarray(raft::resources const& handle,
                         rmm::mr::device_memory_resource* mr,
                         extents<IndexType, Extents...> exts)
{
  using mdarray_t = device_mdarray<ElementType, decltype(exts), LayoutPolicy>;
  // The container policy wraps the user-supplied memory resource so every
  // allocation made by this array goes through `mr`.
  typename mdarray_t::mapping_type mapping{exts};
  typename mdarray_t::container_policy_type container_policy{mr};
  return mdarray_t{handle, mapping, container_policy};
}
/**
 * @brief Create a 2-dim c-contiguous device mdarray.
 *
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] handle raft handle for managing expensive resources
 * @param[in] n_rows number or rows in matrix
 * @param[in] n_cols number of columns in matrix
 * @return raft::device_matrix
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_device_matrix(raft::resources const& handle, IndexType n_rows, IndexType n_cols)
{
  // Delegate to the generic factory with a 2-d extents object.
  auto exts = make_extents<IndexType>(n_rows, n_cols);
  return make_device_mdarray<ElementType, IndexType, LayoutPolicy>(handle, exts);
}
/**
 * @brief Create a device scalar from v.
 *
 * @tparam ElementType the data type of the scalar element
 * @tparam IndexType the index type of the extents
 * @param[in] handle raft handle for managing expensive cuda resources
 * @param[in] v scalar to wrap on device
 * @return raft::device_scalar
 */
template <typename ElementType, typename IndexType = std::uint32_t>
auto make_device_scalar(raft::resources const& handle, ElementType const& v)
{
  using scalar_t = device_scalar<ElementType>;
  // Allocate a 0-dim array, then copy the host value into its single slot.
  scalar_extent<IndexType> ext;
  typename scalar_t::container_policy_type container_policy{};
  auto result  = scalar_t{handle, ext, container_policy};
  result(0)    = v;
  return result;
}
/**
 * @brief Create a 1-dim device mdarray.
 * @tparam ElementType the data type of the vector elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] handle raft handle for managing expensive cuda resources
 * @param[in] n number of elements in vector
 * @return raft::device_vector
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_device_vector(raft::resources const& handle, IndexType n)
{
  // Delegate to the generic factory with a 1-d extents object.
  auto exts = make_extents<IndexType>(n);
  return make_device_mdarray<ElementType, IndexType, LayoutPolicy>(handle, exts);
}
} // end namespace raft
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>
#ifdef _RAFT_HAS_CUDA
#include <cub/cub.cuh>
#include <raft/util/cuda_utils.cuh> // raft::shfl_xor
#endif
namespace raft {
/**
* \brief A key identifier paired with a corresponding value
*
*/
template <typename _Key, typename _Value>
struct KeyValuePair {
  typedef _Key Key;     ///< Key data type
  typedef _Value Value; ///< Value data type

  Key key;     ///< Item key
  Value value; ///< Item value

  /// Constructor
  KeyValuePair() = default;

#ifdef _RAFT_HAS_CUDA
  /// Conversion Constructor to allow integration w/ cub
  RAFT_INLINE_FUNCTION KeyValuePair(cub::KeyValuePair<_Key, _Value> kvp)
    : key(kvp.key), value(kvp.value)
  {
  }

  RAFT_INLINE_FUNCTION operator cub::KeyValuePair<_Key, _Value>()
  {
    return cub::KeyValuePair(key, value);
  }
#endif

  /// Constructor
  RAFT_INLINE_FUNCTION KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}

  /// Inequality operator. Marked const (fix: it previously was not, which
  /// prevented comparing const KeyValuePair objects while operator< and
  /// operator> already were const).
  RAFT_INLINE_FUNCTION bool operator!=(const KeyValuePair& b) const
  {
    return (value != b.value) || (key != b.key);
  }

  /// Ordering: by key first, breaking ties by value.
  RAFT_INLINE_FUNCTION bool operator<(const KeyValuePair<_Key, _Value>& b) const
  {
    return (key < b.key) || ((key == b.key) && value < b.value);
  }

  /// Reverse ordering: by key first, breaking ties by value.
  RAFT_INLINE_FUNCTION bool operator>(const KeyValuePair<_Key, _Value>& b) const
  {
    return (key > b.key) || ((key == b.key) && value > b.value);
  }
};
#ifdef _RAFT_HAS_CUDA
/// Butterfly-shuffle a KeyValuePair across a warp: each member is shuffled
/// independently with raft::shfl_xor and the pair is reassembled.
template <typename _Key, typename _Value>
RAFT_INLINE_FUNCTION KeyValuePair<_Key, _Value> shfl_xor(const KeyValuePair<_Key, _Value>& input,
                                                         int laneMask,
                                                         int width    = WarpSize,
                                                         uint32_t mask = 0xffffffffu)
{
  auto shuffled_key   = shfl_xor(input.key, laneMask, width, mask);
  auto shuffled_value = shfl_xor(input.value, laneMask, width, mask);
  return KeyValuePair<_Key, _Value>(shuffled_key, shuffled_value);
}
#endif
} // end namespace raft
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
/// Where a buffer physically lives / which processors may dereference it.
enum class memory_type { host, device, managed, pinned };

/// True when a pointer of this memory type may be dereferenced from device code.
auto constexpr is_device_accessible(memory_type mem_type)
{
  switch (mem_type) {
    case memory_type::device:
    case memory_type::managed: return true;
    default: return false;
  }
}

/// True when a pointer of this memory type may be dereferenced from host code.
auto constexpr is_host_accessible(memory_type mem_type)
{
  switch (mem_type) {
    case memory_type::host:
    case memory_type::managed:
    case memory_type::pinned: return true;
    default: return false;
  }
}

/// True when both host and device may dereference the pointer (managed memory).
auto constexpr is_host_device_accessible(memory_type mem_type)
{
  return is_device_accessible(mem_type) && is_host_accessible(mem_type);
}
namespace detail {
/// Map a pair of accessibility flags back to the memory_type that provides them.
/// Instantiating with both flags false is a compile-time error.
template <bool is_host_accessible, bool is_device_accessible>
auto constexpr memory_type_from_access()
{
  // The condition depends on the template parameters, so this only fires on
  // an invalid (neither-accessible) instantiation.
  static_assert(is_host_accessible || is_device_accessible,
                "Must be either host or device accessible to return a valid memory type");
  if constexpr (is_host_accessible && is_device_accessible) {
    return memory_type::managed;
  } else if constexpr (is_host_accessible) {
    return memory_type::host;
  } else {
    return memory_type::device;
  }
}
} // end namespace detail
} // end namespace raft
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use util/cudart_utils.hpp instead.
*/
#pragma once
#include <raft/util/cudart_utils.hpp>
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __RAFT_RT_CUSOLVER_MACROS_H
#define __RAFT_RT_CUSOLVER_MACROS_H
#pragma once
#include <cusolverDn.h>
#include <cusolverSp.h>
///@todo: enable this once logging is enabled
// #include <cuml/common/logger.hpp>
#include <raft/util/cudart_utils.hpp>
#include <type_traits>
#define _CUSOLVER_ERR_TO_STR(err) \
case err: return #err;
namespace raft {
/**
* @ingroup error_handling
* @{
*/
/**
* @brief Exception thrown when a cuSOLVER error is encountered.
*/
struct cusolver_error : public raft::exception {
  // Construct from a C-string message.
  explicit cusolver_error(char const* const message) : raft::exception(message) {}
  // Construct from a std::string message.
  explicit cusolver_error(std::string const& message) : raft::exception(message) {}
};
/**
* @}
*/
namespace linalg {
namespace detail {
/**
 * @brief Map a cusolverStatus_t error code to its symbolic name.
 *
 * @param err status code returned by a cuSOLVER API call
 * @return string literal naming the status; "CUSOLVER_STATUS_UNKNOWN" for
 *         codes not covered by the cases below
 */
inline const char* cusolver_error_to_string(cusolverStatus_t err)
{
  // Each macro expands to `case <code>: return "<code>";`.
  switch (err) {
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_SUCCESS);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_NOT_INITIALIZED);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_ALLOC_FAILED);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_INVALID_VALUE);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_ARCH_MISMATCH);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_EXECUTION_FAILED);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_INTERNAL_ERROR);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_ZERO_PIVOT);
    _CUSOLVER_ERR_TO_STR(CUSOLVER_STATUS_NOT_SUPPORTED);
    default: return "CUSOLVER_STATUS_UNKNOWN";
  }  // fix: removed stray ';' after the switch (empty statement)
}
} // namespace detail
} // namespace linalg
} // namespace raft
#undef _CUSOLVER_ERR_TO_STR
/**
* @ingroup assertion
* @{
*/
/**
* @brief Error checking macro for cuSOLVER runtime API functions.
*
* Invokes a cuSOLVER runtime API function call, if the call does not return
* CUSolver_STATUS_SUCCESS, throws an exception detailing the cuSOLVER error that occurred
*/
#define RAFT_CUSOLVER_TRY(call) \
do { \
cusolverStatus_t const status = (call); \
if (CUSOLVER_STATUS_SUCCESS != status) { \
std::string msg{}; \
SET_ERROR_MSG(msg, \
"cuSOLVER error encountered at: ", \
"call='%s', Reason=%d:%s", \
#call, \
status, \
raft::linalg::detail::cusolver_error_to_string(status)); \
throw raft::cusolver_error(msg); \
} \
} while (0)
// FIXME: remove after consumer rename
#ifndef CUSOLVER_TRY
#define CUSOLVER_TRY(call) RAFT_CUSOLVER_TRY(call)
#endif
// /**
// * @brief check for cuda runtime API errors but log error instead of raising
// * exception.
// */
#define RAFT_CUSOLVER_TRY_NO_THROW(call) \
do { \
cusolverStatus_t const status = call; \
if (CUSOLVER_STATUS_SUCCESS != status) { \
printf("CUSOLVER call='%s' at file=%s line=%d failed with %s\n", \
#call, \
__FILE__, \
__LINE__, \
raft::linalg::detail::cusolver_error_to_string(status)); \
} \
} while (0)
/**
* @}
*/
// FIXME: remove after cuml rename
#ifndef CUSOLVER_CHECK
#define CUSOLVER_CHECK(call) CUSOLVER_TRY(call)
#endif
#ifndef CUSOLVER_CHECK_NO_THROW
#define CUSOLVER_CHECK_NO_THROW(call) CUSOLVER_TRY_NO_THROW(call)
#endif
#endif | 0 |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/mdspan_util.cuh> // native_popc
#include <raft/core/device_container_policy.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/map.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/util/device_atomics.cuh>
#include <thrust/for_each.h>
namespace raft::core {
/**
* @defgroup bitset Bitset
* @{
*/
/**
* @brief View of a RAFT Bitset.
*
* This lightweight structure stores a pointer to a bitset in device memory with it's length.
* It provides a test() device function to check if a given index is set in the bitset.
*
* @tparam bitset_t Underlying type of the bitset array. Default is uint32_t.
* @tparam index_t Indexing type used. Default is uint32_t.
*/
template <typename bitset_t = uint32_t, typename index_t = uint32_t>
struct bitset_view {
  // Number of bits packed into one word of the underlying storage type
  // (e.g. 32 for uint32_t).
  static constexpr index_t bitset_element_size = sizeof(bitset_t) * 8;

  // Wrap a raw device pointer; the view does not own the memory.
  _RAFT_HOST_DEVICE bitset_view(bitset_t* bitset_ptr, index_t bitset_len)
    : bitset_ptr_{bitset_ptr}, bitset_len_{bitset_len}
  {
  }
  /**
   * @brief Create a bitset view from a device vector view of the bitset.
   *
   * @param bitset_span Device vector view of the bitset
   * @param bitset_len Number of bits in the bitset
   */
  _RAFT_HOST_DEVICE bitset_view(raft::device_vector_view<bitset_t, index_t> bitset_span,
                                index_t bitset_len)
    : bitset_ptr_{bitset_span.data_handle()}, bitset_len_{bitset_len}
  {
  }
  /**
   * @brief Device function to test if a given index is set in the bitset.
   *
   * @param sample_index Single index to test
   * @return bool True if index has not been unset in the bitset
   */
  inline _RAFT_DEVICE auto test(const index_t sample_index) const -> bool
  {
    // Locate the word holding the bit, then the bit's position inside it.
    const bitset_t bit_element = bitset_ptr_[sample_index / bitset_element_size];
    const index_t bit_index = sample_index % bitset_element_size;
    const bool is_bit_set = (bit_element & (bitset_t{1} << bit_index)) != 0;
    return is_bit_set;
  }
  /**
   * @brief Device function to test if a given index is set in the bitset.
   *
   * Convenience alias for test().
   *
   * @param sample_index Single index to test
   * @return bool True if index has not been unset in the bitset
   */
  inline _RAFT_DEVICE auto operator[](const index_t sample_index) const -> bool
  {
    return test(sample_index);
  }
  /**
   * @brief Device function to set a given index to set_value in the bitset.
   *
   * Uses atomicOr/atomicAnd on the containing word, so concurrent set() calls
   * from different threads are safe (last writer to the same bit wins).
   *
   * @param sample_index index to set
   * @param set_value Value to set the bit to (true or false)
   */
  inline _RAFT_DEVICE void set(const index_t sample_index, bool set_value) const
  {
    const index_t bit_element = sample_index / bitset_element_size;
    const index_t bit_index = sample_index % bitset_element_size;
    const bitset_t bitmask = bitset_t{1} << bit_index;
    if (set_value) {
      atomicOr(bitset_ptr_ + bit_element, bitmask);
    } else {
      // Clearing: AND with the complement of the single-bit mask.
      const bitset_t bitmask2 = ~bitmask;
      atomicAnd(bitset_ptr_ + bit_element, bitmask2);
    }
  }
  /**
   * @brief Get the device pointer to the bitset.
   */
  inline _RAFT_HOST_DEVICE auto data() -> bitset_t* { return bitset_ptr_; }
  inline _RAFT_HOST_DEVICE auto data() const -> const bitset_t* { return bitset_ptr_; }
  /**
   * @brief Get the number of bits of the bitset representation.
   */
  inline _RAFT_HOST_DEVICE auto size() const -> index_t { return bitset_len_; }
  /**
   * @brief Get the number of elements used by the bitset representation.
   */
  inline _RAFT_HOST_DEVICE auto n_elements() const -> index_t
  {
    // Round up: a partial trailing word still needs a full storage element.
    return raft::ceildiv(bitset_len_, bitset_element_size);
  }
  // Non-owning mdspan over the underlying words (not over individual bits).
  inline auto to_mdspan() -> raft::device_vector_view<bitset_t, index_t>
  {
    return raft::make_device_vector_view<bitset_t, index_t>(bitset_ptr_, n_elements());
  }
  inline auto to_mdspan() const -> raft::device_vector_view<const bitset_t, index_t>
  {
    return raft::make_device_vector_view<const bitset_t, index_t>(bitset_ptr_, n_elements());
  }

 private:
  bitset_t* bitset_ptr_;  // non-owning device pointer to the packed words
  index_t bitset_len_;    // logical length in bits (not words)
};
/**
 * @brief RAFT Bitset.
 *
 * This structure encapsulates a bitset in device memory. It provides a view() method to get a
 * device-usable lightweight view of the bitset.
 * Each index is represented by a single bit in the bitset. The total number of bytes used is
 * ceil(bitset_len / 8).
 * @tparam bitset_t Underlying type of the bitset array. Default is uint32_t.
 * @tparam index_t Indexing type used. Default is uint32_t.
 */
template <typename bitset_t = uint32_t, typename index_t = uint32_t>
struct bitset {
  // Number of bits stored in a single bitset_t word.
  static constexpr index_t bitset_element_size = sizeof(bitset_t) * 8;

  /**
   * @brief Construct a new bitset object with a list of indices to unset.
   *
   * @param res RAFT resources
   * @param mask_index List of indices to unset in the bitset
   * @param bitset_len Length of the bitset
   * @param default_value Default value to set the bits to. Default is true.
   */
  bitset(const raft::resources& res,
         raft::device_vector_view<const index_t, index_t> mask_index,
         index_t bitset_len,
         bool default_value = true)
    : bitset_{std::size_t(raft::ceildiv(bitset_len, bitset_element_size)),
              raft::resource::get_cuda_stream(res)},
      bitset_len_{bitset_len}
  {
    reset(res, default_value);
    // The masked indices receive the opposite of the default value.
    set(res, mask_index, !default_value);
  }

  /**
   * @brief Construct a new bitset object
   *
   * @param res RAFT resources
   * @param bitset_len Length of the bitset
   * @param default_value Default value to set the bits to. Default is true.
   */
  bitset(const raft::resources& res, index_t bitset_len, bool default_value = true)
    : bitset_{std::size_t(raft::ceildiv(bitset_len, bitset_element_size)),
              raft::resource::get_cuda_stream(res)},
      bitset_len_{bitset_len}
  {
    reset(res, default_value);
  }
  // Disable copy construction/assignment: the bitset uniquely owns device memory.
  bitset(const bitset&)            = delete;
  bitset(bitset&&)                 = default;
  bitset& operator=(const bitset&) = delete;
  bitset& operator=(bitset&&)      = default;

  /**
   * @brief Create a device-usable view of the bitset.
   *
   * @return bitset_view<bitset_t, index_t>
   */
  inline auto view() -> raft::core::bitset_view<bitset_t, index_t>
  {
    return bitset_view<bitset_t, index_t>(to_mdspan(), bitset_len_);
  }
  /** @brief Create a read-only device-usable view of the bitset. */
  [[nodiscard]] inline auto view() const -> raft::core::bitset_view<const bitset_t, index_t>
  {
    return bitset_view<const bitset_t, index_t>(to_mdspan(), bitset_len_);
  }

  /**
   * @brief Get the device pointer to the bitset.
   */
  inline auto data() -> bitset_t* { return bitset_.data(); }
  inline auto data() const -> const bitset_t* { return bitset_.data(); }
  /**
   * @brief Get the number of bits of the bitset representation.
   */
  inline auto size() const -> index_t { return bitset_len_; }
  /**
   * @brief Get the number of elements used by the bitset representation.
   */
  inline auto n_elements() const -> index_t
  {
    return raft::ceildiv(bitset_len_, bitset_element_size);
  }
  /** @brief Get an mdspan view of the current bitset */
  inline auto to_mdspan() -> raft::device_vector_view<bitset_t, index_t>
  {
    return raft::make_device_vector_view<bitset_t, index_t>(bitset_.data(), n_elements());
  }
  [[nodiscard]] inline auto to_mdspan() const -> raft::device_vector_view<const bitset_t, index_t>
  {
    return raft::make_device_vector_view<const bitset_t, index_t>(bitset_.data(), n_elements());
  }

  /** @brief Resize the bitset. If the requested size is larger, new memory is allocated and set to
   * the default value.
   * @param res RAFT resources
   * @param new_bitset_len new size of the bitset
   * @param default_value default value to initialize the new bits to
   */
  void resize(const raft::resources& res, index_t new_bitset_len, bool default_value = true)
  {
    auto old_size = raft::ceildiv(bitset_len_, bitset_element_size);
    auto new_size = raft::ceildiv(new_bitset_len, bitset_element_size);
    bitset_.resize(new_size);
    bitset_len_ = new_bitset_len;
    if (old_size < new_size) {
      // If the new size is larger, set the new bits to the default value
      thrust::fill_n(resource::get_thrust_policy(res),
                     bitset_.data() + old_size,
                     new_size - old_size,
                     default_value ? ~bitset_t{0} : bitset_t{0});
    }
  }

  /**
   * @brief Test a list of indices in a bitset.
   *
   * @tparam output_t Output type of the test. Default is bool.
   * @param res RAFT resources
   * @param queries List of indices to test
   * @param output List of outputs
   */
  template <typename output_t = bool>
  void test(const raft::resources& res,
            raft::device_vector_view<const index_t, index_t> queries,
            raft::device_vector_view<output_t, index_t> output) const
  {
    RAFT_EXPECTS(output.extent(0) == queries.extent(0), "Output and queries must be same size");
    auto bitset_view = view();
    raft::linalg::map(
      res,
      output,
      [bitset_view] __device__(index_t query) { return output_t(bitset_view.test(query)); },
      queries);
  }

  /**
   * @brief Set a list of indices in a bitset to set_value.
   *
   * @param res RAFT resources
   * @param mask_index indices to remove from the bitset
   * @param set_value Value to set the bits to (true or false)
   */
  void set(const raft::resources& res,
           raft::device_vector_view<const index_t, index_t> mask_index,
           bool set_value = false)
  {
    auto this_bitset_view = view();
    thrust::for_each_n(resource::get_thrust_policy(res),
                       mask_index.data_handle(),
                       mask_index.extent(0),
                       [this_bitset_view, set_value] __device__(const index_t sample_index) {
                         this_bitset_view.set(sample_index, set_value);
                       });
  }

  /**
   * @brief Flip all the bits in a bitset.
   * @param res RAFT resources
   */
  void flip(const raft::resources& res)
  {
    auto bitset_span = this->to_mdspan();
    raft::linalg::map(
      res,
      bitset_span,
      [] __device__(bitset_t element) { return bitset_t(~element); },
      raft::make_const_mdspan(bitset_span));
  }

  /**
   * @brief Reset the bits in a bitset.
   *
   * @param res RAFT resources
   * @param default_value Value to set the bits to (true or false)
   */
  void reset(const raft::resources& res, bool default_value = true)
  {
    thrust::fill_n(resource::get_thrust_policy(res),
                   bitset_.data(),
                   n_elements(),
                   default_value ? ~bitset_t{0} : bitset_t{0});
  }

  /**
   * @brief Returns the number of bits set to true in count_gpu_scalar.
   *
   * @param[in] res RAFT resources
   * @param[out] count_gpu_scalar Device scalar to store the count
   */
  void count(const raft::resources& res, raft::device_scalar_view<index_t> count_gpu_scalar)
  {
    auto n_elements_ = n_elements();
    auto count_gpu =
      raft::make_device_vector_view<index_t, index_t>(count_gpu_scalar.data_handle(), 1);
    auto bitset_matrix_view = raft::make_device_matrix_view<const bitset_t, index_t, col_major>(
      bitset_.data(), n_elements_, 1);

    // Mask off the unused (padding) bits of the last word so they are not counted.
    bitset_t n_last_element = (bitset_len_ % bitset_element_size);
    bitset_t last_element_mask =
      n_last_element ? (bitset_t)((bitset_t{1} << n_last_element) - bitset_t{1}) : ~bitset_t{0};
    raft::linalg::coalesced_reduction(
      res,
      bitset_matrix_view,
      count_gpu,
      index_t{0},
      false,
      [last_element_mask, n_elements_] __device__(bitset_t element, index_t index) {
        index_t result = 0;
        if constexpr (bitset_element_size == 64) {
          if (index == n_elements_ - 1)
            result = index_t(raft::detail::popc(element & last_element_mask));
          else
            result = index_t(raft::detail::popc(element));
        } else {  // Needed because popc is not overloaded for 16 and 8 bit elements
          if (index == n_elements_ - 1)
            result = index_t(raft::detail::popc(uint32_t{element} & last_element_mask));
          else
            result = index_t(raft::detail::popc(uint32_t{element}));
        }

        return result;
      });
  }

  /**
   * @brief Returns the number of bits set to true.
   *
   * @param res RAFT resources
   * @return index_t Number of bits set to true
   */
  auto count(const raft::resources& res) -> index_t
  {
    // Use an index_t zero (was a double literal 0.0, which silently converted).
    auto count_gpu_scalar = raft::make_device_scalar<index_t>(res, index_t{0});
    count(res, count_gpu_scalar.view());
    index_t count_cpu = 0;
    raft::update_host(
      &count_cpu, count_gpu_scalar.data_handle(), 1, resource::get_cuda_stream(res));
    resource::sync_stream(res);
    return count_cpu;
  }

  /**
   * @brief Checks if any of the bits are set to true in the bitset.
   * @param res RAFT resources
   */
  bool any(const raft::resources& res) { return count(res) > 0; }
  /**
   * @brief Checks if all of the bits are set to true in the bitset.
   * @param res RAFT resources
   */
  bool all(const raft::resources& res) { return count(res) == bitset_len_; }
  /**
   * @brief Checks if none of the bits are set to true in the bitset.
   * @param res RAFT resources
   */
  bool none(const raft::resources& res) { return count(res) == 0; }

 private:
  raft::device_uvector<bitset_t> bitset_;  // Owning device storage, one bit per index.
  index_t bitset_len_;                     // Logical number of bits.
};
/** @} */
} // end namespace raft::core
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_resources_manager.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <memory>
#include <optional>
#include <raft/core/device_resources.hpp>
#include <raft/core/device_setter.hpp>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/optional.h>
namespace raft {
/**
* @brief A singleton used to easily generate a raft::device_resources object
*
* Many calls to RAFT functions require a `raft::device_resources` object
* to provide CUDA resources like streams and stream pools. The
* `raft::device_resources_manager` singleton provides a straightforward method to create those
* objects in a way that allows consumers of RAFT to limit total consumption of device resources
* without actively managing streams or other CUDA-specific objects.
*
* To control the resources a consuming application will use, the
* resource manager provides setters for a variety of values. For
* instance, to ensure that no more than `N` CUDA streams are used per
* device, a consumer might call
* `raft::device_resources_manager::set_streams_per_device(N)`. Note that all of these
* setters must be used prior to retrieving the first `device_resources` from
* the manager. Setters invoked after this will log a warning but have no
* effect.
*
* After calling all desired setters, consumers can simply call
* `auto res = raft::device_resources_manager::get_device_resources();` to get a valid
* device_resources object for the current device based on previously-set
* parameters. Importantly, calling `get_device_resources()` again from the same
* thread is guaranteed to return a `device_resources` object with the same
* underlying CUDA stream and (if a non-zero number of stream pools has been
* requested) stream pool.
*
* Typical usage might look something like the following:
* @code
* void initialize_application() {
* raft::device_resources_manager::set_streams_per_device(16);
* }
*
* void foo_called_from_multiple_threads() {
* auto res = raft::device_resources_manager::get_device_resources();
* // Call RAFT function using res
* res.sync_stream() // Ensure work completes before returning
* }
* @endcode
*
* Note that all public methods of the `device_resources_manager` are thread-safe,
* but the manager is designed to minimize locking required for
* retrieving `device_resources` objects. Each thread must acquire a lock
* exactly once per device when calling `get_device_resources`. Subsequent calls
* will still be thread-safe but will not require a lock.
*
* All public methods of the `device_resources_manager` are static. Please see
* documentation of those methods for additional usage information.
*
*/
struct device_resources_manager {
  device_resources_manager(device_resources_manager const&) = delete;
  void operator=(device_resources_manager const&)           = delete;

 private:
  device_resources_manager() {}
  ~device_resources_manager()
  {
    // Ensure that we destroy any pool memory resources before CUDA context is
    // lost
    per_device_components_.clear();
  }

  // Get an id used to identify this thread for the purposes of assigning
  // (in round-robin fashion) the same resources to the thread on subsequent calls to
  // `get_device_resources`
  static auto get_thread_id()
  {
    static std::atomic<std::size_t> thread_counter{};
    thread_local std::size_t id = ++thread_counter;
    return id;
  }

  // This struct holds the various parameters used to control
  // construction of the underlying resources shared by all
  // `device_resources` objects returned by `get_device_resources`
  struct resource_params {
    // The total number of primary streams to be used by the
    // application. If no value is provided, the default stream per thread
    // is used.
    std::optional<std::size_t> stream_count{std::nullopt};
    // The total number of stream pools to be used by the application
    std::size_t pool_count{};
    // How many streams to assign to each pool
    std::size_t pool_size{rmm::cuda_stream_pool::default_size};
    // If a memory pool is requested (max_mem_pool_size is non-zero), use
    // this initial size for the pool in bytes. Must be a multiple of 256.
    // If nullopt, use half of the available memory on the current
    // device.
    thrust::optional<std::size_t> init_mem_pool_size{thrust::nullopt};
    // If set to any non-zero value, create a memory pool with this
    // maximum size. If nullopt, use up to the entire available memory of the
    // device
    thrust::optional<std::size_t> max_mem_pool_size{std::size_t{}};
    // Limit on workspace memory for the returned device_resources object
    std::optional<std::size_t> workspace_allocation_limit{std::nullopt};
    // Optional specification of separate workspace memory resources for each
    // device. The integer in each pair indicates the device for this memory
    // resource.
    std::vector<std::pair<std::shared_ptr<rmm::mr::device_memory_resource>, int>> workspace_mrs{};
    // NOTE: a dead, empty `get_workspace_memory_resource(int)` placeholder
    // (deduced void return, never called) was removed from this struct.
  } params_;

  // This struct stores the underlying resources to be shared among
  // `device_resources` objects returned by this manager.
  struct resource_components {
    // Construct all underlying resources indicated by `params` for the
    // indicated device. This includes primary streams, stream pools, and
    // a memory pool if requested.
    //
    // NOTE: the initializer lambdas below capture `this` and read
    // `device_id_`, which is safe only because `device_id_` is declared
    // (and therefore initialized) before the members they initialize.
    resource_components(int device_id, resource_params const& params)
      : device_id_{device_id},
        streams_{[&params, this]() {
          auto scoped_device = device_setter{device_id_};
          auto result        = std::unique_ptr<rmm::cuda_stream_pool>{nullptr};
          if (params.stream_count) {
            result = std::make_unique<rmm::cuda_stream_pool>(*params.stream_count);
          }
          return result;
        }()},
        pools_{[&params, this]() {
          auto scoped_device = device_setter{device_id_};
          auto result        = std::vector<std::shared_ptr<rmm::cuda_stream_pool>>{};
          if (params.pool_size != 0) {
            for (auto i = std::size_t{}; i < params.pool_count; ++i) {
              result.push_back(std::make_shared<rmm::cuda_stream_pool>(params.pool_size));
            }
          } else if (params.pool_count != 0) {
            RAFT_LOG_WARN("Stream pools of size 0 requested; no pools will be created");
          }
          return result;
        }()},
        pool_mr_{[&params, this]() {
          auto scoped_device = device_setter{device_id_};
          auto result =
            std::shared_ptr<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>>{nullptr};
          // If max_mem_pool_size is nullopt or non-zero, create a pool memory
          // resource
          if (params.max_mem_pool_size.value_or(1) != 0) {
            auto* upstream =
              dynamic_cast<rmm::mr::cuda_memory_resource*>(rmm::mr::get_current_device_resource());
            if (upstream != nullptr) {
              result =
                std::make_shared<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>>(
                  upstream, params.init_mem_pool_size, params.max_mem_pool_size);
              rmm::mr::set_current_device_resource(result.get());
            } else {
              RAFT_LOG_WARN(
                "Pool allocation requested, but other memory resource has already been set and "
                "will not be overwritten");
            }
          }
          return result;
        }()},
        workspace_mr_{[&params, this]() {
          auto result = std::shared_ptr<rmm::mr::device_memory_resource>{nullptr};
          auto iter   = std::find_if(std::begin(params.workspace_mrs),
                                   std::end(params.workspace_mrs),
                                   [this](auto&& pair) { return pair.second == device_id_; });
          if (iter != std::end(params.workspace_mrs)) { result = iter->first; }
          return result;
        }()},
        // Propagate the requested workspace allocation limit. Previously this
        // member was never copied from `params`, so
        // `set_workspace_allocation_limit` silently had no effect on the
        // device_resources returned by the manager.
        workspace_allocation_limit_{params.workspace_allocation_limit}
    {
    }
    // Get the id of the device associated with the constructed resource
    // components
    [[nodiscard]] auto get_device_id() const { return device_id_; }
    // Get the total number of streams available for this application
    [[nodiscard]] auto stream_count() const
    {
      auto result = std::size_t{};
      if (streams_) { result = streams_->get_pool_size(); }
      return result;
    }
    // Get the stream assigned to this host thread. Note that the same stream
    // may be used by multiple threads, but any given thread will always use
    // the same stream
    [[nodiscard]] auto get_stream() const
    {
      auto result = rmm::cuda_stream_per_thread;
      if (stream_count() != 0) { result = streams_->get_stream(get_thread_id() % stream_count()); }
      return result;
    }
    // Get the total number of stream pools available for this
    // application
    [[nodiscard]] auto pool_count() const { return pools_.size(); }
    // Get the stream pool assigned to this host thread. Note that the same stream pool
    // may be used by multiple threads, but any given thread will always use
    // the same stream pool
    [[nodiscard]] auto get_pool() const
    {
      auto result = std::shared_ptr<rmm::cuda_stream_pool>{nullptr};
      if (pool_count() != 0) { result = pools_[get_thread_id() % pool_count()]; }
      return result;
    }
    // Return a (possibly null) shared_ptr to the pool memory resource
    // created for this device by the manager
    [[nodiscard]] auto get_pool_memory_resource() const { return pool_mr_; }
    // Return the RAFT workspace allocation limit that will be used by
    // `device_resources` returned from this manager
    [[nodiscard]] auto get_workspace_allocation_limit() const
    {
      return workspace_allocation_limit_;
    }
    // Return a (possibly null) shared_ptr to the memory resource that will
    // be used for workspace allocations by `device_resources` returned from
    // this manager
    [[nodiscard]] auto get_workspace_memory_resource() { return workspace_mr_; }

   private:
    int device_id_;
    std::unique_ptr<rmm::cuda_stream_pool> streams_;
    std::vector<std::shared_ptr<rmm::cuda_stream_pool>> pools_;
    std::shared_ptr<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>> pool_mr_;
    std::shared_ptr<rmm::mr::device_memory_resource> workspace_mr_;
    std::optional<std::size_t> workspace_allocation_limit_{std::nullopt};
  };

  // Mutex used to lock access to shared data until after the first
  // `get_device_resources` call in each thread
  mutable std::mutex manager_mutex_{};
  // Indicates whether or not `get_device_resources` has been called by any
  // host thread
  bool params_finalized_{};
  // Container for underlying device resources to be re-used across host
  // threads for each device
  std::vector<resource_components> per_device_components_;
  // Container for device_resources objects shared among threads. The index
  // of the outer vector is the thread id of the thread requesting resources
  // modulo the total number of resources managed by this object. The inner
  // vector contains all resources associated with that id across devices
  // in any order.
  std::vector<std::vector<raft::device_resources>> resources_{};

  // Return a lock for accessing shared data
  [[nodiscard]] auto get_lock() const { return std::unique_lock{manager_mutex_}; }

  // Retrieve the underlying resources to be shared across the
  // application for the indicated device. This method acquires a lock the
  // first time it is called in each thread for a specific device to ensure that the
  // underlying resources have been correctly initialized exactly once across
  // all host threads.
  auto const& get_device_resources_(int device_id)
  {
    // Each thread maintains an independent list of devices it has
    // accessed. If it has not marked a device as initialized, it
    // acquires a lock to initialize it exactly once. This means that each
    // thread will lock once for a particular device and not proceed until
    // some thread has actually generated the corresponding device
    // components
    thread_local auto initialized_devices = std::vector<int>{};
    auto res_iter                         = decltype(std::end(resources_[0])){};
    if (std::find(std::begin(initialized_devices), std::end(initialized_devices), device_id) ==
        std::end(initialized_devices)) {
      // Only lock if we have not previously accessed this device on this
      // thread
      auto lock = get_lock();
      initialized_devices.push_back(device_id);
      // If we are building components, do not allow any further changes to
      // resource parameters.
      params_finalized_ = true;

      if (resources_.empty()) {
        // We will potentially need as many device_resources objects as there are combinations of
        // streams and pools on a given device.
        resources_.resize(std::max(params_.stream_count.value_or(1), std::size_t{1}) *
                          std::max(params_.pool_count, std::size_t{1}));
      }

      auto res_idx = get_thread_id() % resources_.size();
      // Check to see if we have constructed device_resources for the
      // requested device at the index assigned to this thread
      res_iter = std::find_if(std::begin(resources_[res_idx]),
                              std::end(resources_[res_idx]),
                              [device_id](auto&& res) { return res.get_device() == device_id; });

      if (res_iter == std::end(resources_[res_idx])) {
        // Even if we have not yet built device_resources for the current
        // device, we may have already built the underlying components, since
        // multiple device_resources may point to the same components.
        auto component_iter = std::find_if(
          std::begin(per_device_components_),
          std::end(per_device_components_),
          [device_id](auto&& components) { return components.get_device_id() == device_id; });
        if (component_iter == std::end(per_device_components_)) {
          // Build components for this device if we have not yet done so on
          // another thread
          per_device_components_.emplace_back(device_id, params_);
          component_iter = std::prev(std::end(per_device_components_));
        }
        auto scoped_device = device_setter(device_id);
        // Build the device_resources object for this thread out of shared
        // components
        resources_[res_idx].emplace_back(component_iter->get_stream(),
                                         component_iter->get_pool(),
                                         component_iter->get_workspace_memory_resource(),
                                         component_iter->get_workspace_allocation_limit());
        res_iter = std::prev(std::end(resources_[res_idx]));
      }
    } else {
      auto res_idx = get_thread_id() % resources_.size();
      // If we have previously accessed this device on this thread, we do not
      // need to lock. We know that this thread already initialized the
      // resources it requires for this device if no other thread had already done so, so we simply
      // retrieve the previously-generated resources.
      res_iter = std::find_if(std::begin(resources_[res_idx]),
                              std::end(resources_[res_idx]),
                              [device_id](auto&& res) { return res.get_device() == device_id; });
    }
    return *res_iter;
  }

  // Thread-safe setter for the number of streams
  void set_streams_per_device_(std::optional<std::size_t> num_streams)
  {
    auto lock = get_lock();
    if (params_finalized_) {
      RAFT_LOG_WARN(
        "Attempted to set device_resources_manager properties after resources have already been "
        "retrieved");
    } else {
      params_.stream_count = num_streams;
    }
  }

  // Thread-safe setter for the number and size of stream pools
  void set_stream_pools_per_device_(std::size_t num_pools, std::size_t num_streams)
  {
    auto lock = get_lock();
    if (params_finalized_) {
      RAFT_LOG_WARN(
        "Attempted to set device_resources_manager properties after resources have already been "
        "retrieved");
    } else {
      params_.pool_count = num_pools;
      params_.pool_size  = num_streams;
    }
  }

  // Thread-safe setter for the RAFT workspace allocation limit
  void set_workspace_allocation_limit_(std::size_t memory_limit)
  {
    auto lock = get_lock();
    if (params_finalized_) {
      RAFT_LOG_WARN(
        "Attempted to set device_resources_manager properties after resources have already been "
        "retrieved");
    } else {
      params_.workspace_allocation_limit.emplace(memory_limit);
    }
  }

  // Thread-safe setter for the maximum memory pool size
  void set_max_mem_pool_size_(std::optional<std::size_t> memory_limit)
  {
    auto lock = get_lock();
    if (params_finalized_) {
      RAFT_LOG_WARN(
        "Attempted to set device_resources_manager properties after resources have already been "
        "retrieved");
    } else {
      if (memory_limit) {
        params_.max_mem_pool_size.emplace(*memory_limit);
      } else {
        params_.max_mem_pool_size = thrust::nullopt;
      }
    }
  }

  // Thread-safe setter for the initial memory pool size
  void set_init_mem_pool_size_(std::optional<std::size_t> init_memory)
  {
    auto lock = get_lock();
    if (params_finalized_) {
      RAFT_LOG_WARN(
        "Attempted to set device_resources_manager properties after resources have already been "
        "retrieved");
    } else {
      if (init_memory) {
        params_.init_mem_pool_size.emplace(*init_memory);
      } else {
        params_.init_mem_pool_size = thrust::nullopt;
      }
    }
  }

  // Thread-safe setter for workspace memory resources
  void set_workspace_memory_resource_(std::shared_ptr<rmm::mr::device_memory_resource> mr,
                                      int device_id)
  {
    auto lock = get_lock();
    if (params_finalized_) {
      RAFT_LOG_WARN(
        "Attempted to set device_resources_manager properties after resources have already been "
        "retrieved");
    } else {
      auto iter = std::find_if(std::begin(params_.workspace_mrs),
                               std::end(params_.workspace_mrs),
                               [device_id](auto&& pair) { return pair.second == device_id; });
      if (iter != std::end(params_.workspace_mrs)) {
        iter->first = mr;
      } else {
        params_.workspace_mrs.emplace_back(mr, device_id);
      }
    }
  }

  // Retrieve the instance of this singleton
  static auto& get_manager()
  {
    static auto manager = device_resources_manager{};
    return manager;
  }

 public:
  /**
   * @brief Retrieve device_resources to be used with the RAFT API
   *
   * This thread-safe method ensures that a `device_resources` object with
   * the same underlying stream and stream pool is returned every time it is
   * called by the same host thread. This means that if `get_device_resources` is
   * used to provide all `device_resources` in an application, then
   * `raft::get_device_resources().sync_stream()` and (if a stream pool is used)
   * raft::get_device_resources().sync_stream_pool() are guaranteed to synchronize all
   * work previously submitted to the device by this host thread.
   *
   * If the max memory pool size set with `set_max_mem_pool_size` is non-zero,
   * the first call of this method will also create a memory pool to be used
   * for all RMM-based allocations on device.
   *
   * @param device_id int If provided, the device for which resources should
   * be returned. Defaults to active CUDA device.
   */
  static auto const& get_device_resources(int device_id = device_setter::get_current_device())
  {
    return get_manager().get_device_resources_(device_id);
  }

  /**
   * @brief Set the total number of CUDA streams to be used per device
   *
   * If nullopt, the default stream per thread will be used
   * (essentially allowing as many streams as there are host threads).
   * Otherwise, all returned `device_resources` will draw their streams from this
   * limited pool.
   *
   * Limiting the total number of streams can be desirable for a number of
   * reasons, but it is most often used in consuming applications to
   * prevent a large number of host threads from flooding the device with
   * simultaneous requests that may exhaust device memory or other
   * resources.
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_streams_per_device(std::optional<std::size_t> num_streams)
  {
    get_manager().set_streams_per_device_(num_streams);
  }

  /**
   * @brief Set the total number and size of CUDA stream pools to be used per device
   *
   * Setting the number of stream pools to a non-zero value will provide a
   * pool of stream pools that can be shared among host threads. This can be
   * useful for the same reason it is useful to limit the total number of
   * primary streams assigned to `device_resources` for each host thread.
   * Repeated calls to `get_device_resources` on a given host thread are
   * guaranteed to return `device_resources` with the same underlying stream
   * pool.
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_stream_pools_per_device(
    std::size_t num_pools, std::size_t num_streams = rmm::cuda_stream_pool::default_size)
  {
    get_manager().set_stream_pools_per_device_(num_pools, num_streams);
  }
  /**
   * @brief Set the maximum size of temporary RAFT workspaces
   *
   * Note that this limits only the size of temporary workspace
   * allocations. To cap the device memory generally available for all device
   * allocations made with RMM, use
   * `raft::device_manager::set_max_mem_pool_size`
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_workspace_allocation_limit(std::size_t memory_limit)
  {
    get_manager().set_workspace_allocation_limit_(memory_limit);
  }

  /**
   * @brief Set the maximum size of the device memory pool
   *
   * If set to 0, no memory pool will be used. If set to nullopt, the memory
   * pool is allowed to grow to the size of available device memory.
   *
   * Note that the pool will not actually be created until the first call
   * to `raft::device_manager::get_device_resources(device_id)`, after which it will become
   * the current RMM device memory resource for the indicated device. If the
   * current RMM device memory resource has already been set to some
   * non-default resource, no pool resource will be created and a warning will be emitted. It is
   * assumed that applications which have set a memory resource already wish to manage RMM
   * themselves.
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_max_mem_pool_size(std::optional<std::size_t> max_mem)
  {
    get_manager().set_max_mem_pool_size_(max_mem);
  }

  /**
   * @brief Set the initial size of the device memory pool
   *
   * If set to nullopt, the memory pool starts with half of the available
   * device memory.
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_init_mem_pool_size(std::optional<std::size_t> init_mem)
  {
    get_manager().set_init_mem_pool_size_(init_mem);
  }

  /**
   * @brief Request a device memory pool with specified parameters
   *
   * This convenience method essentially combines
   * `set_init_mem_pool_size` and `set_max_mem_pool_size`. It is provided
   * primarily to allow users who want a memory pool but do not want to choose
   * specific pool sizes to simply call
   * `raft::device_manager::set_memory_pool()` and enable a memory pool using
   * RMM defaults (initialize with half of available memory, allow to grow
   * to all available memory).
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_mem_pool(std::optional<std::size_t> init_mem = std::nullopt,
                           std::optional<std::size_t> max_mem  = std::nullopt)
  {
    set_init_mem_pool_size(init_mem);
    set_max_mem_pool_size(max_mem);
  }

  /**
   * @brief Set the workspace memory resource to be used on a specific device
   *
   * RAFT device_resources objects can be built with a separate memory
   * resource for allocating temporary workspaces. If a (non-nullptr) memory
   * resource is provided by this setter, it will be used as the
   * workspace memory resource for all `device_resources` returned for the
   * indicated device.
   *
   * If called after the first call to
   * `raft::device_resources_manager::get_device_resources`, no change will be made,
   * and a warning will be emitted.
   */
  static void set_workspace_memory_resource(std::shared_ptr<rmm::mr::device_memory_resource> mr,
                                            int device_id = device_setter::get_current_device())
  {
    get_manager().set_workspace_memory_resource_(mr, device_id);
  }
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/nvtx.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <optional>
#include <raft/core/detail/nvtx.hpp>
/**
* \section Usage
*
* To add NVTX ranges to your code, use the `nvtx::range` RAII object. A
* range begins when the object is created, and ends when the object is
* destroyed.
*
* The example below creates nested NVTX ranges. The range `fun_scope` spans
* the whole function, while the range `epoch_scope` spans an iteration
* (and appears 5 times in the timeline).
* \code{.cpp}
* #include <raft/common/nvtx.hpp>
* void some_function(int k){
* // Begins a NVTX range with the message "some_function_{k}"
* // The range ends when some_function() returns
 *   common::nvtx::range fun_scope{"some_function_%d", k};
*
* for(int i = 0; i < 5; i++){
* common::nvtx::range epoch_scope{"epoch-%d", i};
* // some logic inside the loop
* }
* }
* \endcode
*
* \section Domains
*
* All NVTX ranges are assigned to domains. A domain defines a named timeline in
* the Nsight Systems view. By default, we put all ranges into a domain `domain::app`
* named "application". This is controlled by the template parameter `Domain`.
*
* The example below defines a domain and uses it in a function.
* \code{.cpp}
* #include <raft/common/nvtx.hpp>
*
* struct my_app_domain {
* static constexpr char const* name{"my application"};
 *   };
*
* void some_function(int k){
* // This NVTX range appears in the timeline named "my application" in Nsight Systems.
 *   common::nvtx::range<my_app_domain> fun_scope{"some_function_%d", k};
* // some logic inside the loop
* }
* \endcode
*/
namespace raft::common::nvtx {
namespace domain {
/** @brief The default NVTX domain. */
struct app {
static constexpr char const* name{"application"};
};
/** @brief This NVTX domain is supposed to be used within raft. */
struct raft {
static constexpr char const* name{"raft"};
};
} // namespace domain
/**
* @brief Push a named NVTX range.
*
* @tparam Domain optional struct that defines the NVTX domain message;
* You can create a new domain with a custom message as follows:
* \code{.cpp}
* struct custom_domain { static constexpr char const* name{"custom message"}; }
* \endcode
* NB: make sure to use the same domain for `push_range` and `pop_range`.
* @param format range name format (accepts printf-style arguments)
* @param args the arguments for the printf-style formatting
*/
template <typename Domain = domain::app, typename... Args>
inline void push_range(const char* format, Args... args)
{
  // Thin forwarding wrapper over the implementation in raft/core/detail/nvtx.hpp
  // (a no-op there when NVTX support is compiled out).
  detail::push_range<Domain, Args...>(format, args...);
}
/**
* @brief Pop the latest range.
*
* @tparam Domain optional struct that defines the NVTX domain message;
* You can create a new domain with a custom message as follows:
* \code{.cpp}
* struct custom_domain { static constexpr char const* name{"custom message"}; }
* \endcode
* NB: make sure to use the same domain for `push_range` and `pop_range`.
*/
template <typename Domain = domain::app>
inline void pop_range()
{
  // Thin forwarding wrapper over the implementation in raft/core/detail/nvtx.hpp.
  detail::pop_range<Domain>();
}
/**
* @brief Push a named NVTX range that would be popped at the end of the object lifetime.
*
* Refer to \ref Usage for the usage examples.
*
* @tparam Domain optional struct that defines the NVTX domain message;
* You can create a new domain with a custom message as follows:
* \code{.cpp}
* struct custom_domain { static constexpr char const* name{"custom message"}; }
* \endcode
*/
template <typename Domain = domain::app>
class range {
 public:
  /**
   * Push a named NVTX range.
   * At the end of the object lifetime, pop the range back.
   *
   * @param format range name format (accepts printf-style arguments)
   * @param args the arguments for the printf-style formatting
   */
  template <typename... Args>
  explicit range(const char* format, Args... args)
  {
    push_range<Domain, Args...>(format, args...);
  }
  /** Pop the range pushed by the constructor. */
  ~range() { pop_range<Domain>(); }
  /* This object is not meant to be touched. */
  // Non-copyable and non-movable: the push/pop pair must stay tied to one scope.
  range(const range&) = delete;
  range(range&&) = delete;
  auto operator=(const range&) -> range& = delete;
  auto operator=(range&&) -> range& = delete;
  // Heap allocation is deleted so the lifetime is always scope-bound (stack RAII only).
  static auto operator new(std::size_t) -> void* = delete;
  static auto operator new[](std::size_t) -> void* = delete;
};
} // namespace raft::common::nvtx
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_container_policy.hpp | /*
* Copyright (2019) Sandia Corporation
*
* The source code is licensed under the 3-clause BSD license found in the LICENSE file
* thirdparty/LICENSES/mdarray.license
*/
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resources.hpp>
#include <vector>
namespace raft {
/**
* @brief A container policy for host mdarray.
*/
template <typename ElementType, typename Allocator = std::allocator<ElementType>>
class host_vector_policy {
 public:
  // Storage and element typedefs consumed by mdarray.
  using element_type = ElementType;
  using container_type = std::vector<element_type, Allocator>;
  using allocator_type = typename container_type::allocator_type;
  using pointer = typename container_type::pointer;
  using const_pointer = typename container_type::const_pointer;
  using reference = element_type&;
  using const_reference = element_type const&;
  // Accessor policies handed to mdspan views built over the container.
  using accessor_policy = std::experimental::default_accessor<element_type>;
  using const_accessor_policy = std::experimental::default_accessor<element_type const>;
 public:
  /** Allocate a value-initialized std::vector of n elements (the resources handle is unused on
   *  host). */
  auto create(raft::resources const&, size_t n) -> container_type { return container_type(n); }
  constexpr host_vector_policy() noexcept(std::is_nothrow_default_constructible_v<ElementType>) =
    default;
  /** Unchecked mutable element access. */
  [[nodiscard]] constexpr auto access(container_type& c, size_t n) const noexcept -> reference
  {
    return c[n];
  }
  /** Unchecked read-only element access. */
  [[nodiscard]] constexpr auto access(container_type const& c, size_t n) const noexcept
    -> const_reference
  {
    return c[n];
  }
  /** Accessor policy factories used when building (const) mdspan views. */
  [[nodiscard]] auto make_accessor_policy() noexcept { return accessor_policy{}; }
  [[nodiscard]] auto make_accessor_policy() const noexcept { return const_accessor_policy{}; }
};
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/math.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <type_traits>
#include <raft/core/detail/macros.hpp>
#if defined(_RAFT_HAS_CUDA)
#include <cuda_bf16.h>
#include <cuda_fp16.h>
#endif
namespace raft {
/**
* @defgroup math_functions Mathematical Functions
* @{
*/
/** Absolute value: dispatches to the native ::abs (device) / std::abs (host) overloads
 *  for the types that have them. */
template <typename T>
RAFT_INLINE_FUNCTION auto abs(T x)
  -> std::enable_if_t<std::is_same_v<float, T> || std::is_same_v<double, T> ||
                        std::is_same_v<int, T> || std::is_same_v<long int, T> ||
                        std::is_same_v<long long int, T>,
                      T>
{
#ifdef __CUDA_ARCH__
  return ::abs(x);
#else
  return std::abs(x);
#endif
}
/** Absolute value: generic constexpr fallback for every other type; only requires
 *  operator< against T{0} and unary negation. */
template <typename T>
constexpr RAFT_INLINE_FUNCTION auto abs(T x)
  -> std::enable_if_t<!std::is_same_v<float, T> && !std::is_same_v<double, T> &&
                        !std::is_same_v<int, T> && !std::is_same_v<long int, T> &&
                        !std::is_same_v<long long int, T>,
                      T>
{
  return x < T{0} ? -x : x;
}
/** Inverse cosine */
template <typename T>
RAFT_INLINE_FUNCTION auto acos(T x)
{
  // Device code uses the CUDA Math API; host code uses <cmath>.
#ifdef __CUDA_ARCH__
  return ::acos(x);
#else
  return std::acos(x);
#endif
}
/** Inverse sine */
template <typename T>
RAFT_INLINE_FUNCTION auto asin(T x)
{
#ifdef __CUDA_ARCH__
  return ::asin(x);
#else
  return std::asin(x);
#endif
}
/** Inverse hyperbolic tangent */
template <typename T>
RAFT_INLINE_FUNCTION auto atanh(T x)
{
#ifdef __CUDA_ARCH__
  return ::atanh(x);
#else
  return std::atanh(x);
#endif
}
/** Cosine */
template <typename T,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(((!std::is_same_v<T, __half> &&
(!std::is_same_v<T, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto cos(T x)
{
#ifdef __CUDA_ARCH__
return ::cos(x);
#else
return std::cos(x);
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> cos(T x)
{
#if (__CUDA_ARCH__ >= 530)
return ::hcos(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
cos(T x)
{
#if (__CUDA_ARCH__ >= 800)
return ::hcos(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/** Sine */
template <typename T,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(((!std::is_same_v<T, __half> &&
(!std::is_same_v<T, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto sin(T x)
{
#ifdef __CUDA_ARCH__
return ::sin(x);
#else
return std::sin(x);
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> sin(T x)
{
#if (__CUDA_ARCH__ >= 530)
return ::hsin(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
sin(T x)
{
#if (__CUDA_ARCH__ >= 800)
return ::hsin(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/** Sine and cosine
 *
 * @param[in]  x input value; float or double only (enforced by the enable_if)
 * @param[out] s computed sine
 * @param[out] c computed cosine
 */
template <typename T>
RAFT_INLINE_FUNCTION std::enable_if_t<std::is_same_v<float, T> || std::is_same_v<double, T>> sincos(
  const T& x, T* s, T* c)
{
#ifdef __CUDA_ARCH__
  // CUDA provides a combined sincos that computes both results in one call.
  ::sincos(x, s, c);
#else
  *s = std::sin(x);
  *c = std::cos(x);
#endif
}
/** Hyperbolic tangent */
template <typename T>
RAFT_INLINE_FUNCTION auto tanh(T x)
{
#ifdef __CUDA_ARCH__
  return ::tanh(x);
#else
  return std::tanh(x);
#endif
}
/** Exponential function */
template <typename T,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(((!std::is_same_v<T, __half> &&
(!std::is_same_v<T, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto exp(T x)
{
#ifdef __CUDA_ARCH__
return ::exp(x);
#else
return std::exp(x);
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> exp(T x)
{
#if (__CUDA_ARCH__ >= 530)
return ::hexp(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
exp(T x)
{
#if (__CUDA_ARCH__ >= 800)
return ::hexp(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/** Natural logarithm */
template <typename T,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(((!std::is_same_v<T, __half> &&
(!std::is_same_v<T, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto log(T x)
{
#ifdef __CUDA_ARCH__
return ::log(x);
#else
return std::log(x);
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> log(T x)
{
#if (__CUDA_ARCH__ >= 530)
return ::hlog(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
log(T x)
{
#if (__CUDA_ARCH__ >= 800)
return ::hlog(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/**
* @brief The CUDA Math API has overloads for all combinations of float/double. We provide similar
* functionality while wrapping around std::max, which only supports arguments of the same type.
* However, though the CUDA Math API supports combinations of unsigned and signed integers, this is
* very error-prone so we do not support that and require the user to cast instead. (e.g the max of
* -1 and 1u is 4294967295u...)
*
* When no overload matches, we provide a generic implementation but require that both types be the
* same (and that the less-than operator be defined).
* @{
*/
template <
typename T1,
typename T2,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(RAFT_DEPAREN(
((!std::is_same_v<T1, __half> && !std::is_same_v<T2, __half>) ||
(!std::is_same_v<T1, nv_bfloat16> && !std::is_same_v<T2, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto max(const T1& x, const T2& y)
{
#ifdef __CUDA_ARCH__
// Combinations of types supported by the CUDA Math API
if constexpr ((std::is_integral_v<T1> && std::is_integral_v<T2> && std::is_same_v<T1, T2>) ||
((std::is_same_v<T1, float> || std::is_same_v<T1, double>)&&(
std::is_same_v<T2, float> || std::is_same_v<T2, double>))) {
return ::max(x, y);
}
// Else, check that the types are the same and provide a generic implementation
else {
static_assert(
std::is_same_v<T1, T2>,
"No native max overload for these types. Both argument types must be the same to use "
"the generic max. Please cast appropriately.");
return (x < y) ? y : x;
}
#else
if constexpr (std::is_same_v<T1, float> && std::is_same_v<T2, double>) {
return std::max(static_cast<double>(x), y);
} else if constexpr (std::is_same_v<T1, double> && std::is_same_v<T2, float>) {
return std::max(x, static_cast<double>(y));
} else {
static_assert(
std::is_same_v<T1, T2>,
"std::max requires that both argument types be the same. Please cast appropriately.");
return std::max(x, y);
}
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> max(T x,
T y)
{
#if (__CUDA_ARCH__ >= 530)
return ::__hmax(x, y);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
max(T x, T y)
{
#if (__CUDA_ARCH__ >= 800)
return ::__hmax(x, y);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/** Many-argument overload to avoid verbose nested calls or use with variadic arguments */
template <typename T1, typename T2, typename... Args>
RAFT_INLINE_FUNCTION auto max(const T1& x, const T2& y, Args&&... args)
{
return raft::max(x, raft::max(y, std::forward<Args>(args)...));
}
/** One-argument overload for convenience when using with variadic arguments */
template <typename T>
constexpr RAFT_INLINE_FUNCTION auto max(const T& x)
{
return x;
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> max(T x)
{
#if (__CUDA_ARCH__ >= 530)
return x;
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
max(T x)
{
#if (__CUDA_ARCH__ >= 800)
return x;
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/**
 * @brief Minimum of two or more values.
*
* The CUDA Math API has overloads for all combinations of float/double. We provide similar
* functionality while wrapping around std::min, which only supports arguments of the same type.
* However, though the CUDA Math API supports combinations of unsigned and signed integers, this is
* very error-prone so we do not support that and require the user to cast instead. (e.g the min of
* -1 and 1u is 1u...)
*
* When no overload matches, we provide a generic implementation but require that both types be the
* same (and that the less-than operator be defined).
* @{
*/
template <
typename T1,
typename T2,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(RAFT_DEPAREN(
((!std::is_same_v<T1, __half> && !std::is_same_v<T2, __half>) ||
(!std::is_same_v<T1, nv_bfloat16> && !std::is_same_v<T2, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto min(const T1& x, const T2& y)
{
#ifdef __CUDA_ARCH__
// Combinations of types supported by the CUDA Math API
if constexpr ((std::is_integral_v<T1> && std::is_integral_v<T2> && std::is_same_v<T1, T2>) ||
((std::is_same_v<T1, float> || std::is_same_v<T1, double>)&&(
std::is_same_v<T2, float> || std::is_same_v<T2, double>))) {
return ::min(x, y);
}
// Else, check that the types are the same and provide a generic implementation
else {
static_assert(
std::is_same_v<T1, T2>,
"No native min overload for these types. Both argument types must be the same to use "
"the generic min. Please cast appropriately.");
return (y < x) ? y : x;
}
#else
if constexpr (std::is_same_v<T1, float> && std::is_same_v<T2, double>) {
return std::min(static_cast<double>(x), y);
} else if constexpr (std::is_same_v<T1, double> && std::is_same_v<T2, float>) {
return std::min(x, static_cast<double>(y));
} else {
static_assert(
std::is_same_v<T1, T2>,
"std::min requires that both argument types be the same. Please cast appropriately.");
return std::min(x, y);
}
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> min(T x,
T y)
{
#if (__CUDA_ARCH__ >= 530)
return ::__hmin(x, y);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
min(T x, T y)
{
#if (__CUDA_ARCH__ >= 800)
return ::__hmin(x, y);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/** Many-argument overload to avoid verbose nested calls or use with variadic arguments */
template <typename T1, typename T2, typename... Args>
RAFT_INLINE_FUNCTION auto min(const T1& x, const T2& y, Args&&... args)
{
return raft::min(x, raft::min(y, std::forward<Args>(args)...));
}
/** One-argument overload for convenience when using with variadic arguments */
template <typename T>
constexpr RAFT_INLINE_FUNCTION auto min(const T& x)
{
return x;
}
#if defined(_RAFT_HAS_CUDA)
/** One-argument overload for convenience when using with variadic arguments (identity for
 *  __half). The enable_if must test __half here — testing nv_bfloat16 made this overload
 *  unreachable for __half and ambiguous with the nv_bfloat16 overload below; this now
 *  mirrors the corresponding one-argument max overload. */
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> min(T x)
{
#if (__CUDA_ARCH__ >= 530)
  return x;
#else
  // Fail during template instantiation if the compute capability doesn't support this operation
  static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
  return T{};
#endif
}
/** One-argument overload for convenience when using with variadic arguments (identity for
 *  nv_bfloat16). */
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
min(T x)
{
#if (__CUDA_ARCH__ >= 800)
  return x;
#else
  // Fail during template instantiation if the compute capability doesn't support this operation
  static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
  return T{};
#endif
}
#endif
/** Power: x raised to y; mixed argument types follow the promotion rules of
 *  ::pow (device) / std::pow (host). */
template <typename T1, typename T2>
RAFT_INLINE_FUNCTION auto pow(T1 x, T2 y)
{
#ifdef __CUDA_ARCH__
  return ::pow(x, y);
#else
  return std::pow(x, y);
#endif
}
/** Square root */
template <typename T,
std::enable_if_t<CUDA_CONDITION_ELSE_TRUE(((!std::is_same_v<T, __half> &&
(!std::is_same_v<T, nv_bfloat16>)))),
int> = 0>
RAFT_INLINE_FUNCTION auto sqrt(T x)
{
#ifdef __CUDA_ARCH__
return ::sqrt(x);
#else
return std::sqrt(x);
#endif
}
#if defined(_RAFT_HAS_CUDA)
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, __half>, __half> sqrt(T x)
{
#if (__CUDA_ARCH__ >= 530)
return ::hsqrt(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "__half is only supported on __CUDA_ARCH__ >= 530");
return T{};
#endif
}
template <typename T>
RAFT_DEVICE_INLINE_FUNCTION typename std::enable_if_t<std::is_same_v<T, nv_bfloat16>, nv_bfloat16>
sqrt(T x)
{
#if (__CUDA_ARCH__ >= 800)
return ::hsqrt(x);
#else
// Fail during template instantiation if the compute capability doesn't support this operation
static_assert(sizeof(T) != sizeof(T), "nv_bfloat16 is only supported on __CUDA_ARCH__ >= 800");
return T{};
#endif
}
#endif
/** Sign of val as an int: -1 if negative, 0 if zero, +1 if positive. */
template <typename T>
RAFT_INLINE_FUNCTION auto sgn(T val) -> int
{
  const int is_positive = T(0) < val;
  const int is_negative = val < T(0);
  return is_positive - is_negative;
}
/** @} */
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/host_mdspan.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <raft/core/mdspan.hpp>
#include <raft/core/memory_type.hpp>
#include <raft/core/host_device_accessor.hpp>
namespace raft {
template <typename AccessorPolicy>
using host_accessor = host_device_accessor<AccessorPolicy, memory_type::host>;
/**
* @brief std::experimental::mdspan with host tag to avoid accessing incorrect memory location.
*/
template <typename ElementType,
typename Extents,
typename LayoutPolicy = layout_c_contiguous,
typename AccessorPolicy = std::experimental::default_accessor<ElementType>>
using host_mdspan = mdspan<ElementType, Extents, LayoutPolicy, host_accessor<AccessorPolicy>>;
// Primary template: B is is_mdspan_v<T> (see the alias below); non-mdspans are false.
template <typename T, bool B>
struct is_host_mdspan : std::false_type {};
// Specialization for genuine mdspans: true iff the accessor reports host accessibility.
template <typename T>
struct is_host_mdspan<T, true> : std::bool_constant<T::accessor_type::is_host_accessible> {};
/**
* @\brief Boolean to determine if template type T is either raft::host_mdspan or a derived type
*/
template <typename T>
using is_host_mdspan_t = is_host_mdspan<T, is_mdspan_v<T>>;
template <typename T>
using is_input_host_mdspan_t = is_host_mdspan<T, is_input_mdspan_v<T>>;
template <typename T>
using is_output_host_mdspan_t = is_host_mdspan<T, is_output_mdspan_v<T>>;
/**
* @\brief Boolean to determine if variadic template types Tn are either raft::host_mdspan or a
* derived type
*/
template <typename... Tn>
inline constexpr bool is_host_mdspan_v = std::conjunction_v<is_host_mdspan_t<Tn>...>;
template <typename... Tn>
inline constexpr bool is_input_host_mdspan_v = std::conjunction_v<is_input_host_mdspan_t<Tn>...>;
template <typename... Tn>
inline constexpr bool is_output_host_mdspan_v = std::conjunction_v<is_output_host_mdspan_t<Tn>...>;
template <typename... Tn>
using enable_if_host_mdspan = std::enable_if_t<is_input_mdspan_v<Tn...>>;
template <typename... Tn>
using enable_if_input_host_mdspan = std::enable_if_t<is_input_host_mdspan_v<Tn...>>;
template <typename... Tn>
using enable_if_output_host_mdspan = std::enable_if_t<is_output_host_mdspan_v<Tn...>>;
/**
* @brief Shorthand for 0-dim host mdspan (scalar).
* @tparam ElementType the data type of the scalar element
* @tparam IndexType the index type of the extents
*/
template <typename ElementType, typename IndexType = std::uint32_t>
using host_scalar_view = host_mdspan<ElementType, scalar_extent<IndexType>>;
/**
* @brief Shorthand for 1-dim host mdspan.
* @tparam ElementType the data type of the vector elements
* @tparam IndexType the index type of the extents
*/
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_c_contiguous>
using host_vector_view = host_mdspan<ElementType, vector_extent<IndexType>, LayoutPolicy>;
/**
* @brief Shorthand for c-contiguous host matrix view.
* @tparam ElementType the data type of the matrix elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy policy for strides and layout ordering
*/
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_c_contiguous>
using host_matrix_view = host_mdspan<ElementType, matrix_extent<IndexType>, LayoutPolicy>;
/**
* @brief Shorthand for 128 byte aligned host matrix view.
* @tparam ElementType the data type of the matrix elements
* @tparam IndexType the index type of the extents
* @tparam LayoutPolicy must be of type layout_{left/right}_padded
*/
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_right_padded<ElementType>,
typename = enable_if_layout_padded<ElementType, LayoutPolicy>>
using host_aligned_matrix_view =
host_mdspan<ElementType,
matrix_extent<IndexType>,
LayoutPolicy,
std::experimental::aligned_accessor<ElementType, detail::alignment::value>>;
/**
 * @brief Create a 2-dim 128 byte aligned mdspan instance for host pointer. It's
 * expected that the given layout policy match the layout of the underlying
 * pointer.
 * @tparam ElementType the data type of the matrix elements
 * @tparam LayoutPolicy must be of type layout_{left/right}_padded
 * @tparam IndexType the index type of the extents
 * @param[in] ptr on host to wrap; must be aligned to detail::alignment::value
 * @param[in] n_rows number of rows in pointer
 * @param[in] n_cols number of columns in pointer
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_right_padded<ElementType>>
auto make_host_aligned_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
{
  using data_handle_type =
    typename std::experimental::aligned_accessor<ElementType,
                                                 detail::alignment::value>::data_handle_type;
  // Only padded layouts are accepted here (enforced at compile time).
  static_assert(std::is_same<LayoutPolicy, layout_left_padded<ElementType>>::value ||
                std::is_same<LayoutPolicy, layout_right_padded<ElementType>>::value);
  // Debug-only check that the pointer actually carries the promised alignment.
  assert(reinterpret_cast<std::uintptr_t>(ptr) ==
         std::experimental::details::alignTo(reinterpret_cast<std::uintptr_t>(ptr),
                                             detail::alignment::value));
  data_handle_type aligned_pointer = ptr;
  matrix_extent<IndexType> extents{n_rows, n_cols};
  return host_aligned_matrix_view<ElementType, IndexType, LayoutPolicy>{aligned_pointer, extents};
}
/**
 * @brief Wrap a host value in a 0-dim (scalar) mdspan view.
 *
 * @tparam ElementType the data type of the scalar element
 * @tparam IndexType the index type of the extents
 * @param[in] ptr host pointer to wrap
 */
template <typename ElementType, typename IndexType = std::uint32_t>
auto make_host_scalar_view(ElementType* ptr)
{
  return host_scalar_view<ElementType, IndexType>{ptr, scalar_extent<IndexType>{}};
}
/**
 * @brief Wrap a host pointer as a 2-dim c-contiguous matrix view. The given
 * layout policy is expected to match the actual layout of the underlying
 * pointer.
 * @tparam ElementType the data type of the matrix elements
 * @tparam IndexType the index type of the extents
 * @tparam LayoutPolicy policy for strides and layout ordering
 * @param[in] ptr on host to wrap
 * @param[in] n_rows number of rows in pointer
 * @param[in] n_cols number of columns in pointer
 */
template <typename ElementType,
          typename IndexType = std::uint32_t,
          typename LayoutPolicy = layout_c_contiguous>
auto make_host_matrix_view(ElementType* ptr, IndexType n_rows, IndexType n_cols)
{
  return host_matrix_view<ElementType, IndexType, LayoutPolicy>{
    ptr, matrix_extent<IndexType>{n_rows, n_cols}};
}
/**
* @brief Create a 1-dim mdspan instance for host pointer.
* @tparam ElementType the data type of the vector elements
* @tparam IndexType the index type of the extents
* @param[in] ptr on host to wrap
* @param[in] n number of elements in pointer
* @return raft::host_vector_view
*/
template <typename ElementType,
typename IndexType = std::uint32_t,
typename LayoutPolicy = layout_c_contiguous>
auto make_host_vector_view(ElementType* ptr, IndexType n)
{
return host_vector_view<ElementType, IndexType, LayoutPolicy>{ptr, n};
}
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/core/device_csr_matrix.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/csr_matrix.hpp>
#include <raft/core/device_container_policy.hpp>
#include <raft/core/device_span.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/sparse_types.hpp>
#include <type_traits>
namespace raft {
/**
* \defgroup device_csr_matrix Device CSR Matrix Types
* @{
*/
/**
 * Specialization for a sparsity-preserving compressed structure view which uses device memory
 */
template <typename IndptrType, typename IndicesType, typename NZType>
using device_compressed_structure_view =
  compressed_structure_view<IndptrType, IndicesType, NZType, true>;
/**
 * Specialization for a sparsity-owning compressed structure which uses device memory
 */
template <typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy>
using device_compressed_structure =
  compressed_structure<IndptrType, IndicesType, NZType, true, ContainerPolicy>;
/**
 * Specialization for a csr matrix view which uses device memory
 */
template <typename ElementType, typename IndptrType, typename IndicesType, typename NZType>
using device_csr_matrix_view = csr_matrix_view<ElementType, IndptrType, IndicesType, NZType, true>;
/**
 * Specialization for a csr matrix which uses device memory. Whether the sparsity structure
 * is owned or merely preserved is selected by `sparsity_type` (OWNING by default).
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy,
          SparsityType sparsity_type = SparsityType::OWNING>
using device_csr_matrix =
  csr_matrix<ElementType, IndptrType, IndicesType, NZType, true, ContainerPolicy, sparsity_type>;
/**
 * Specialization for a sparsity-owning csr matrix which uses device memory
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy>
using device_sparsity_owning_csr_matrix =
  csr_matrix<ElementType, IndptrType, IndicesType, NZType, true, ContainerPolicy>;
/**
 * Specialization for a sparsity-preserving csr matrix which uses device memory
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T> typename ContainerPolicy = device_uvector_policy>
using device_sparsity_preserving_csr_matrix = csr_matrix<ElementType,
                                                         IndptrType,
                                                         IndicesType,
                                                         NZType,
                                                         true,
                                                         ContainerPolicy,
                                                         SparsityType::PRESERVING>;
/** Primary template: `T` is not a device csr matrix view. */
template <typename T>
struct is_device_csr_matrix_view : std::false_type {};
/** Specialization: any instantiation of `device_csr_matrix_view` matches. */
template <typename ElementType, typename IndptrType, typename IndicesType, typename NZType>
struct is_device_csr_matrix_view<
  device_csr_matrix_view<ElementType, IndptrType, IndicesType, NZType>> : std::true_type {};
/** Convenience variable template for `is_device_csr_matrix_view`. */
template <typename T>
constexpr bool is_device_csr_matrix_view_v = is_device_csr_matrix_view<T>::value;
/** Primary template: `T` is not a device csr matrix. */
template <typename T>
struct is_device_csr_matrix : std::false_type {};
/** Specialization: any instantiation of `device_csr_matrix` matches (either sparsity type). */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType,
          template <typename T>
          typename ContainerPolicy,
          SparsityType sparsity_type>
struct is_device_csr_matrix<
  device_csr_matrix<ElementType, IndptrType, IndicesType, NZType, ContainerPolicy, sparsity_type>>
  : std::true_type {};
/** Convenience variable template for `is_device_csr_matrix`. */
template <typename T>
constexpr bool is_device_csr_matrix_v = is_device_csr_matrix<T>::value;
/** True when `T` is a device csr matrix that owns its sparsity structure. */
template <typename T>
constexpr bool is_device_csr_sparsity_owning_v =
  is_device_csr_matrix<T>::value and T::get_sparsity_type() == OWNING;
/** True when `T` is a device csr matrix that preserves (does not own) its sparsity. */
template <typename T>
constexpr bool is_device_csr_sparsity_preserving_v =
  is_device_csr_matrix<T>::value and T::get_sparsity_type() == PRESERVING;
/**
 * Create a sparsity-owning sparse matrix in the compressed-sparse row (csr) format.
 * Sparsity-owning means that all of the underlying vectors (data, indptr, indices) are
 * owned by the csr_matrix instance. If not known up front, the sparsity can be ignored
 * in this factory function and set on the instance once it is known.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 *
 * raft::resources handle;
 * csr_matrix = raft::make_device_csr_matrix(handle, n_rows, n_cols);
 * ...
 * // compute expected sparsity
 * ...
 * int nnz = 5000;
 * csr_matrix.initialize_sparsity(nnz);
 * @endcode
 * @tparam ElementType type of the nonzero values
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] handle a raft handle for managing expensive device resources
 * @param[in] n_rows total number of rows in the matrix
 * @param[in] n_cols total number of columns in the matrix
 * @param[in] nnz number of non-zeros in the matrix if known [optional]
 * @return a sparsity-owning sparse matrix in compressed (csr) format
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_device_csr_matrix(raft::resources const& handle,
                            IndptrType n_rows,
                            IndicesType n_cols,
                            NZType nnz = 0)
{
  // Name the concrete owning matrix type once, then construct it directly.
  using owning_csr_t =
    device_sparsity_owning_csr_matrix<ElementType, IndptrType, IndicesType, NZType>;
  return owning_csr_t(handle, n_rows, n_cols, nnz);
}
/**
 * Create a sparsity-preserving sparse matrix in the compressed-sparse row format.
 * sparsity-preserving means that a view of the csr sparsity is supplied, allowing the values in
 * the sparsity to change but not the sparsity itself. The csr_matrix instance does not own the
 * sparsity, the sparsity must be known up front, and cannot be resized later.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 *
 * raft::resources handle;
 * csr_structure = raft::make_device_compressed_structure(handle, n_rows, n_cols);
 * ...
 * // compute expected sparsity
 * ...
 * csr_structure.initialize_sparsity(nnz);
 * csr_matrix = raft::make_device_csr_matrix(handle, csr_structure.view());
 * @endcode
 *
 * @tparam ElementType type of the nonzero values
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] handle raft handle for managing expensive device resources
 * @param[in] structure a sparsity-preserving compressed structural view
 * @return a sparsity-preserving sparse matrix in compressed (csr) format
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_device_csr_matrix(
  raft::resources const& handle,
  device_compressed_structure_view<IndptrType, IndicesType, NZType> structure)
{
  return device_sparsity_preserving_csr_matrix<ElementType, IndptrType, IndicesType, NZType>(
    handle, structure);
}
/**
 * Create a non-owning sparse matrix view in the compressed-sparse row format. This is
 * sparsity-preserving, meaning that the underlying sparsity is known and cannot be changed.
 * Use the sparsity-owning csr_matrix if sparsity needs to be mutable.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following pointer is assumed to reference device memory for a size of nnz
 * float* d_elm_ptr = ...;
 *
 * raft::resources handle;
 * csr_structure = raft::make_device_compressed_structure(handle, n_rows, n_cols, nnz);
 * csr_matrix_view = raft::make_device_csr_matrix_view(d_elm_ptr, csr_structure.view());
 * @endcode
 *
 * @tparam ElementType type of the nonzero values
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] ptr a pointer to array of nonzero matrix elements on device (size nnz)
 * @param[in] structure a sparsity-preserving compressed sparse structural view
 * @return a sparsity-preserving csr matrix view
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_device_csr_matrix_view(
  ElementType* ptr, device_compressed_structure_view<IndptrType, IndicesType, NZType> structure)
{
  return device_csr_matrix_view<ElementType, IndptrType, IndicesType, NZType>(
    raft::device_span<ElementType>(ptr, structure.get_nnz()), structure);
}
/**
 * Create a non-owning sparse matrix view in the compressed-sparse row format. This is
 * sparsity-preserving, meaning that the underlying sparsity is known and cannot be changed. Use the
 * sparsity-owning csr_matrix if sparsity needs to be mutable.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_span.hpp>
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following span is assumed to be of size nnz
 * raft::device_span<float> d_elm_ptr;
 *
 * raft::resources handle;
 * csr_structure = raft::make_device_compressed_structure(handle, n_rows, n_cols, nnz);
 * csr_matrix_view = raft::make_device_csr_matrix_view(d_elm_ptr, csr_structure.view());
 * @endcode
 *
 * @tparam ElementType type of the nonzero values
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] elements device span containing array of matrix elements (size nnz)
 * @param[in] structure a sparsity-preserving structural view
 * @return a sparsity-preserving csr matrix view
 */
template <typename ElementType,
          typename IndptrType,
          typename IndicesType,
          typename NZType = uint64_t>
auto make_device_csr_matrix_view(
  raft::device_span<ElementType> elements,
  device_compressed_structure_view<IndptrType, IndicesType, NZType> structure)
{
  // The element count must match the structure's nonzero count exactly.
  RAFT_EXPECTS(elements.size() == structure.get_nnz(),
               "Size of elements must be equal to the nnz from the structure");
  return device_csr_matrix_view<ElementType, IndptrType, IndicesType, NZType>(elements, structure);
}
/**
 * Create a sparsity-owning compressed structure. This is not sparsity-preserving, meaning that
 * the underlying sparsity does not need to be known upon construction. When not known up front,
 * the allocation of the underlying indices array is delayed until `resize(nnz)` is invoked.
 *
 * @code{.cpp}
 * #include <raft/core/resources.hpp>
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * raft::resources handle;
 * csr_structure = raft::make_device_compressed_structure(handle, n_rows, n_cols, nnz);
 * ...
 * // compute expected sparsity
 * ...
 * csr_structure.initialize_sparsity(nnz);
 * @endcode
 *
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] handle raft handle for managing expensive device resources
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of cols
 * @param[in] nnz total number of nonzeros, if known
 * @return a sparsity-owning compressed structure instance
 */
template <typename IndptrType, typename IndicesType, typename NZType = uint64_t>
auto make_device_compressed_structure(raft::resources const& handle,
                                      IndptrType n_rows,
                                      IndicesType n_cols,
                                      NZType nnz = 0)
{
  // Alias the owning structure type for readability before constructing it.
  using owning_structure_t = device_compressed_structure<IndptrType, IndicesType, NZType>;
  return owning_structure_t(handle, n_rows, n_cols, nnz);
}
/**
 * Create a non-owning sparsity-preserved compressed structure view. Sparsity-preserving means that
 * the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
 * sparsity is not known up front.
 *
 * @code{.cpp}
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following pointer is assumed to reference device memory of size n_rows+1
 * int *indptr = ...;
 *
 * // The following pointer is assumed to reference device memory of size nnz
 * int *indices = ...;
 *
 * csr_structure = raft::make_device_compressed_structure_view(indptr, indices, n_rows,
 * n_cols, nnz);
 * @endcode
 *
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] indptr structural indptr (size n_rows+1)
 * @param[in] indices structural indices (size nnz)
 * @param[in] n_rows total number of rows
 * @param[in] n_cols total number of columns
 * @param[in] nnz number of non-zeros
 * @return a sparsity-preserving compressed structural view
 */
template <typename IndptrType, typename IndicesType, typename NZType = uint64_t>
auto make_device_compressed_structure_view(
  IndptrType* indptr, IndicesType* indices, IndptrType n_rows, IndicesType n_cols, NZType nnz)
{
  // Wrap the raw device pointers in spans sized from the structural arguments.
  return device_compressed_structure_view<IndptrType, IndicesType, NZType>(
    raft::device_span<IndptrType>(indptr, n_rows + 1),
    raft::device_span<IndicesType>(indices, nnz),
    n_cols);
}
/**
 * Create a non-owning sparsity-preserved compressed structure view. Sparsity-preserving means that
 * the underlying sparsity is known and cannot be changed. Use the sparsity-owning version if the
 * sparsity is not known up front.
 *
 * @code{.cpp}
 * #include <raft/core/device_csr_matrix.hpp>
 *
 * int n_rows = 100000;
 * int n_cols = 10000;
 * int nnz = 5000;
 *
 * // The following device span is assumed to be of size n_rows+1
 * raft::device_span<int> indptr;
 *
 * // The following device span is assumed to be of size nnz
 * raft::device_span<int> indices;
 *
 * csr_structure = raft::make_device_compressed_structure_view(indptr, indices, n_cols);
 * @endcode
 *
 * @tparam IndptrType type of the row-offsets array
 * @tparam IndicesType type of the column-indices array
 * @tparam NZType type used to count nonzeros
 * @param[in] indptr structural indptr (size n_rows+1)
 * @param[in] indices structural indices (size nnz)
 * @param[in] n_cols total number of columns
 * @return a sparsity-preserving compressed structural view
 *
 */
template <typename IndptrType, typename IndicesType, typename NZType = uint64_t>
auto make_device_compressed_structure_view(raft::device_span<IndptrType> indptr,
                                           raft::device_span<IndicesType> indices,
                                           IndicesType n_cols)
{
  return device_compressed_structure_view<IndptrType, IndicesType, NZType>(indptr, indices, n_cols);
}
/** @} */
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/device_properties.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <raft/core/resource/device_id.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::resource {
/** Resource owning a cached `cudaDeviceProp` for a single device. */
class device_properties_resource : public resource {
 public:
  /** Queries and caches the properties of device `dev_id`. */
  device_properties_resource(int dev_id)
  {
    RAFT_CUDA_TRY_NO_THROW(cudaGetDeviceProperties(&prop_, dev_id));
  }
  void* get_resource() override { return &prop_; }
  ~device_properties_resource() override {}

 private:
  cudaDeviceProp prop_;  // populated once in the constructor
};
/**
 * @defgroup resource_device_props Device properties resource functions
 * @{
 */
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class device_properties_resource_factory : public resource_factory {
 public:
  device_properties_resource_factory(int dev_id) : dev_id_(dev_id) {}
  resource_type get_resource_type() override { return resource_type::DEVICE_PROPERTIES; }
  resource* make_resource() override { return new device_properties_resource(dev_id_); }

 private:
  int dev_id_;  // device whose properties the produced resource caches
};
/**
 * Load a cudaDeviceProp from a res (and populate it on the res if needed).
 * @param res raft res object for managing resources
 * @return populated cuda device properties instance
 */
inline cudaDeviceProp& get_device_properties(resources const& res)
{
  // Lazily register the factory (for the res's current device) on first request.
  if (!res.has_resource_factory(resource_type::DEVICE_PROPERTIES)) {
    int dev_id = get_device_id(res);
    res.add_resource_factory(std::make_shared<device_properties_resource_factory>(dev_id));
  }
  return *res.get_resource<cudaDeviceProp>(resource_type::DEVICE_PROPERTIES);
};
/**
* @}
*/
} // namespace raft::resource | 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cusolver_dn_handle.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cuda_stream.hpp"
#include <cusolverDn.h>
#include <raft/core/cusolver_macros.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace raft::resource {
/**
 * Resource owning a cuSOLVER dense (Dn) handle bound to a CUDA stream;
 * the handle is destroyed together with the resource.
 */
class cusolver_dn_resource : public resource {
 public:
  cusolver_dn_resource(rmm::cuda_stream_view stream)
  {
    RAFT_CUSOLVER_TRY_NO_THROW(cusolverDnCreate(&cusolver_res));
    RAFT_CUSOLVER_TRY_NO_THROW(cusolverDnSetStream(cusolver_res, stream));
  }
  void* get_resource() override { return &cusolver_res; }
  ~cusolver_dn_resource() override { RAFT_CUSOLVER_TRY_NO_THROW(cusolverDnDestroy(cusolver_res)); }

 private:
  cusolverDnHandle_t cusolver_res;
};
/**
 * @defgroup resource_cusolver_dn cuSolver DN handle resource functions
 * @{
 */
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class cusolver_dn_resource_factory : public resource_factory {
 public:
  cusolver_dn_resource_factory(rmm::cuda_stream_view stream) : stream_(stream) {}
  resource_type get_resource_type() override { return resource_type::CUSOLVER_DN_HANDLE; }
  resource* make_resource() override { return new cusolver_dn_resource(stream_); }

 private:
  rmm::cuda_stream_view stream_;  // stream the created handle is bound to
};
/**
 * Load a cusolverDnHandle_t from raft res if it exists, otherwise
 * add it and return it.
 * @param[in] res the raft resources object
 * @return cusolver dn handle
 */
inline cusolverDnHandle_t get_cusolver_dn_handle(resources const& res)
{
  // Lazily create the handle, bound to the res's main CUDA stream.
  if (!res.has_resource_factory(resource_type::CUSOLVER_DN_HANDLE)) {
    cudaStream_t stream = get_cuda_stream(res);
    res.add_resource_factory(std::make_shared<cusolver_dn_resource_factory>(stream));
  }
  return *res.get_resource<cusolverDnHandle_t>(resource_type::CUSOLVER_DN_HANDLE);
};
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/stream_view.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resources.hpp>
#include <raft/core/stream_view.hpp>
#ifndef RAFT_DISABLE_CUDA
#include <raft/core/resource/cuda_stream.hpp>
#endif
namespace raft::resource {
/** Resource wrapping a raft::stream_view (usable in CUDA-free builds). */
struct stream_view_resource : public resource {
  stream_view_resource(raft::stream_view view = raft::stream_view_per_thread) : stream(view) {}
  void* get_resource() override { return &stream; }
  ~stream_view_resource() override {}

 private:
  raft::stream_view stream;
};
/**
 * Factory that knows how to construct a specific raft::resource to populate
 * the resources instance.
 */
struct stream_view_resource_factory : public resource_factory {
 public:
  stream_view_resource_factory(raft::stream_view view = raft::stream_view_per_thread) : stream(view)
  {
  }
  resource_type get_resource_type() override { return resource_type::STREAM_VIEW; }
  resource* make_resource() override { return new stream_view_resource(stream); }

 private:
  raft::stream_view stream;
};
/**
 * @defgroup resource_stream_view stream resource functions compatible with
 * non-CUDA builds
 * @{
 */
/**
 * Load a raft::stream_view from a resources instance (and populate it on the res
 * if needed).
 * @param res raft res object for managing resources
 * @return the stream view stored on the resources instance
 */
inline raft::stream_view get_stream_view(resources const& res)
{
  // Register a default (per-thread) stream view on first use.
  if (!res.has_resource_factory(resource_type::STREAM_VIEW)) {
    res.add_resource_factory(std::make_shared<stream_view_resource_factory>());
  }
  return *res.get_resource<raft::stream_view>(resource_type::STREAM_VIEW);
};
/**
 * Set a raft::stream_view on a resources instance (overwrites any existing
 * stream view resource).
 * @param[in] res raft resources object for managing resources
 * @param[in] view raft stream view
 */
inline void set_stream_view(resources const& res, raft::stream_view view)
{
  res.add_resource_factory(std::make_shared<stream_view_resource_factory>(view));
};
/**
 * @brief synchronize a specific stream
 *
 * @param[in] res the raft resources object (unused; kept for API symmetry)
 * @param[in] stream stream to synchronize
 */
inline void sync_stream_view(const resources& res, raft::stream_view stream)
{
  stream.interruptible_synchronize();
}
/**
 * @brief synchronize main stream on the resources instance
 */
inline void sync_stream_view(const resources& res) { sync_stream_view(res, get_stream_view(res)); }
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cuda_stream_pool.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <raft/core/resource/cuda_event.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/detail/stream_sync_event.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <rmm/cuda_stream_pool.hpp>
namespace raft::resource {
/** Resource holding a (possibly null) shared rmm::cuda_stream_pool. */
class cuda_stream_pool_resource : public resource {
 public:
  cuda_stream_pool_resource(std::shared_ptr<rmm::cuda_stream_pool> stream_pool)
    : stream_pool_(stream_pool)
  {
  }
  ~cuda_stream_pool_resource() override {}
  void* get_resource() override { return &stream_pool_; }

 private:
  std::shared_ptr<rmm::cuda_stream_pool> stream_pool_{nullptr};
};
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class cuda_stream_pool_resource_factory : public resource_factory {
 public:
  // The default-constructed factory produces a resource holding a null pool,
  // i.e. "no stream pool set".
  cuda_stream_pool_resource_factory(std::shared_ptr<rmm::cuda_stream_pool> stream_pool = {nullptr})
    : stream_pool_(stream_pool)
  {
  }
  resource_type get_resource_type() override { return resource_type::CUDA_STREAM_POOL; }
  resource* make_resource() override { return new cuda_stream_pool_resource(stream_pool_); }

 private:
  std::shared_ptr<rmm::cuda_stream_pool> stream_pool_{nullptr};
};
/**
 * Check whether a non-null stream pool has been set on the resources instance.
 * NOTE(review): fetches the resource directly without registering a factory first;
 * presumably a CUDA_STREAM_POOL factory is already registered by this point —
 * TODO confirm against resources::get_resource behavior.
 */
inline bool is_stream_pool_initialized(const resources& res)
{
  return *res.get_resource<std::shared_ptr<rmm::cuda_stream_pool>>(
           resource_type::CUDA_STREAM_POOL) != nullptr;
}
/**
 * @defgroup resource_stream_pool CUDA Stream pool resource functions
 * @{
 */
/**
 * Load a cuda_stream_pool, and create a new one if it doesn't already exist
 * @param res raft res object for managing resources
 * @return reference to the stream pool stored on the resources instance
 */
inline const rmm::cuda_stream_pool& get_cuda_stream_pool(const resources& res)
{
  // Register a default (null-pool) factory if none exists yet.
  if (!res.has_resource_factory(resource_type::CUDA_STREAM_POOL)) {
    res.add_resource_factory(std::make_shared<cuda_stream_pool_resource_factory>());
  }
  // Double dereference: the resource stores a shared_ptr to the pool.
  return *(
    *res.get_resource<std::shared_ptr<rmm::cuda_stream_pool>>(resource_type::CUDA_STREAM_POOL));
};
/**
 * Explicitly set a stream pool on the current res. Note that this will overwrite
 * an existing stream pool on the res.
 * @param res the raft resources object
 * @param stream_pool shared pointer to the pool to install (may be null)
 */
inline void set_cuda_stream_pool(const resources& res,
                                 std::shared_ptr<rmm::cuda_stream_pool> stream_pool)
{
  res.add_resource_factory(std::make_shared<cuda_stream_pool_resource_factory>(stream_pool));
};
/** Return the number of streams in the pool, or 0 when no pool has been set. */
inline std::size_t get_stream_pool_size(const resources& res)
{
  if (!is_stream_pool_initialized(res)) { return 0; }
  return get_cuda_stream_pool(res).get_pool_size();
}
/**
 * @brief return stream from pool
 *
 * Fails (via RAFT_EXPECTS) when no stream pool has been initialized.
 */
inline rmm::cuda_stream_view get_stream_from_stream_pool(const resources& res)
{
  RAFT_EXPECTS(is_stream_pool_initialized(res), "ERROR: rmm::cuda_stream_pool was not initialized");
  return get_cuda_stream_pool(res).get_stream();
}
/**
 * @brief return stream from pool at index
 *
 * Fails (via RAFT_EXPECTS) when no stream pool has been initialized.
 */
inline rmm::cuda_stream_view get_stream_from_stream_pool(const resources& res,
                                                         std::size_t stream_idx)
{
  RAFT_EXPECTS(is_stream_pool_initialized(res), "ERROR: rmm::cuda_stream_pool was not initialized");
  return get_cuda_stream_pool(res).get_stream(stream_idx);
}
/**
 * @brief return stream from pool if size > 0, else main stream on res
 */
inline rmm::cuda_stream_view get_next_usable_stream(const resources& res)
{
  return is_stream_pool_initialized(res) ? get_stream_from_stream_pool(res) : get_cuda_stream(res);
}
/**
 * @brief return stream from pool at index if size > 0, else main stream on res
 *
 * @param[in] res the raft resources object
 * @param[in] stream_idx the required index of the stream in the stream pool if available
 */
inline rmm::cuda_stream_view get_next_usable_stream(const resources& res, std::size_t stream_idx)
{
  return is_stream_pool_initialized(res) ? get_stream_from_stream_pool(res, stream_idx)
                                         : get_cuda_stream(res);
}
/**
 * @brief synchronize the stream pool on the res
 *
 * Iterates all pool streams; does nothing when get_stream_pool_size() is 0.
 *
 * @param[in] res the raft resources object
 */
inline void sync_stream_pool(const resources& res)
{
  for (std::size_t i = 0; i < get_stream_pool_size(res); i++) {
    sync_stream(res, get_cuda_stream_pool(res).get_stream(i));
  }
}
/**
 * @brief synchronize subset of stream pool
 *
 * Fails (via RAFT_EXPECTS) when no stream pool has been initialized.
 *
 * @param[in] res the raft resources object
 * @param[in] stream_indices the indices of the streams in the stream pool to synchronize
 */
inline void sync_stream_pool(const resources& res, const std::vector<std::size_t>& stream_indices)
{
  RAFT_EXPECTS(is_stream_pool_initialized(res), "ERROR: rmm::cuda_stream_pool was not initialized");
  for (const auto& stream_index : stream_indices) {
    sync_stream(res, get_cuda_stream_pool(res).get_stream(stream_index));
  }
}
/**
 * @brief ask stream pool to wait on last event in main stream
 *
 * Records an event on the main stream and makes every pool stream wait on it,
 * so work launched on pool streams afterwards is ordered after prior
 * main-stream work.
 *
 * @param[in] res the raft resources object
 */
inline void wait_stream_pool_on_stream(const resources& res)
{
  // Ensure the stream-pool resource exists (a null pool means size 0 below).
  if (!res.has_resource_factory(resource_type::CUDA_STREAM_POOL)) {
    res.add_resource_factory(std::make_shared<cuda_stream_pool_resource_factory>());
  }
  cudaEvent_t event = detail::get_cuda_stream_sync_event(res);
  RAFT_CUDA_TRY(cudaEventRecord(event, get_cuda_stream(res)));
  for (std::size_t i = 0; i < get_stream_pool_size(res); i++) {
    RAFT_CUDA_TRY(cudaStreamWaitEvent(get_cuda_stream_pool(res).get_stream(i), event, 0));
  }
}
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/thrust_policy.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <rmm/exec_policy.hpp>
namespace raft::resource {
/** Resource owning an rmm::exec_policy bound to a CUDA stream. */
class thrust_policy_resource : public resource {
 public:
  thrust_policy_resource(rmm::cuda_stream_view stream_view)
    : thrust_policy_(std::make_unique<rmm::exec_policy>(stream_view))
  {
  }
  void* get_resource() override { return thrust_policy_.get(); }
  ~thrust_policy_resource() override {}

 private:
  std::unique_ptr<rmm::exec_policy> thrust_policy_;
};
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class thrust_policy_resource_factory : public resource_factory {
 public:
  thrust_policy_resource_factory(rmm::cuda_stream_view stream_view) : stream_view_(stream_view) {}
  resource_type get_resource_type() override { return resource_type::THRUST_POLICY; }
  resource* make_resource() override { return new thrust_policy_resource(stream_view_); }

 private:
  rmm::cuda_stream_view stream_view_;  // stream the created policy executes on
};
/**
 * @defgroup resource_thrust_policy Thrust policy resource functions
 * @{
 */
/**
 * Load a thrust policy from a res (and populate it on the res if needed).
 * @param res raft res object for managing resources
 * @return thrust execution policy
 */
inline rmm::exec_policy& get_thrust_policy(resources const& res)
{
  // Lazily create the policy on the res's main CUDA stream on first request.
  if (!res.has_resource_factory(resource_type::THRUST_POLICY)) {
    rmm::cuda_stream_view stream = get_cuda_stream(res);
    res.add_resource_factory(std::make_shared<thrust_policy_resource_factory>(stream));
  }
  return *res.get_resource<rmm::exec_policy>(resource_type::THRUST_POLICY);
};
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cuda_stream.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <raft/core/interruptible.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
namespace raft::resource {
/** Resource holding the main rmm::cuda_stream_view for a resources instance. */
class cuda_stream_resource : public resource {
 public:
  cuda_stream_resource(rmm::cuda_stream_view stream_view = rmm::cuda_stream_per_thread)
    : stream(stream_view)
  {
  }
  void* get_resource() override { return &stream; }
  ~cuda_stream_resource() override {}

 private:
  rmm::cuda_stream_view stream;
};
/**
 * Factory that knows how to construct a specific raft::resource to populate
 * the resources instance.
 */
class cuda_stream_resource_factory : public resource_factory {
 public:
  // Defaults to the per-thread default stream when no stream is supplied.
  cuda_stream_resource_factory(rmm::cuda_stream_view stream_view = rmm::cuda_stream_per_thread)
    : stream(stream_view)
  {
  }
  resource_type get_resource_type() override { return resource_type::CUDA_STREAM_VIEW; }
  resource* make_resource() override { return new cuda_stream_resource(stream); }

 private:
  rmm::cuda_stream_view stream;
};
/**
 * @defgroup resource_cuda_stream CUDA stream resource functions
 * @{
 */
/**
 * Load a rmm::cuda_stream_view from a resources instance (and populate it on the res
 * if needed). The default is the per-thread default stream.
 * @param res raft res object for managing resources
 * @return the main cuda stream view for this resources instance
 */
inline rmm::cuda_stream_view get_cuda_stream(resources const& res)
{
  if (!res.has_resource_factory(resource_type::CUDA_STREAM_VIEW)) {
    res.add_resource_factory(std::make_shared<cuda_stream_resource_factory>());
  }
  return *res.get_resource<rmm::cuda_stream_view>(resource_type::CUDA_STREAM_VIEW);
};
/**
 * Set a rmm::cuda_stream_view on a resources instance (overwrites any existing
 * main stream).
 * @param[in] res raft resources object for managing resources
 * @param[in] stream_view cuda stream view
 */
inline void set_cuda_stream(resources const& res, rmm::cuda_stream_view stream_view)
{
  res.add_resource_factory(std::make_shared<cuda_stream_resource_factory>(stream_view));
};
/**
 * @brief synchronize a specific stream
 *
 * Waits through raft::interruptible so the wait can be cancelled.
 *
 * @param[in] res the raft resources object (unused; kept for API symmetry)
 * @param[in] stream stream to synchronize
 */
inline void sync_stream(const resources& res, rmm::cuda_stream_view stream)
{
  interruptible::synchronize(stream);
}
/**
 * @brief synchronize main stream on the resources instance
 */
inline void sync_stream(const resources& res) { sync_stream(res, get_cuda_stream(res)); }
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cusolver_sp_handle.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusolverSp.h>
#include <raft/core/cusolver_macros.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::resource {
/**
*
*/
class cusolver_sp_resource : public resource {
public:
cusolver_sp_resource(rmm::cuda_stream_view stream)
{
RAFT_CUSOLVER_TRY_NO_THROW(cusolverSpCreate(&cusolver_res));
RAFT_CUSOLVER_TRY_NO_THROW(cusolverSpSetStream(cusolver_res, stream));
}
void* get_resource() override { return &cusolver_res; }
~cusolver_sp_resource() override { RAFT_CUSOLVER_TRY_NO_THROW(cusolverSpDestroy(cusolver_res)); }
private:
cusolverSpHandle_t cusolver_res;
};
/**
* Factory that knows how to construct a
* specific raft::resource to populate
* the res_t.
*/
class cusolver_sp_resource_factory : public resource_factory {
public:
cusolver_sp_resource_factory(rmm::cuda_stream_view stream) : stream_(stream) {}
resource_type get_resource_type() override { return resource_type::CUSOLVER_SP_HANDLE; }
resource* make_resource() override { return new cusolver_sp_resource(stream_); }
private:
rmm::cuda_stream_view stream_;
};
/**
* @defgroup resource_cusolver_sp cuSolver SP handle resource functions
* @{
*/
/**
 * Load a cusolverSpHandle_t from raft res if it exists, otherwise
 * add it and return it.
 * @param[in] res the raft resources object
 * @return cusolver sp handle, bound to the current main stream of `res`
 */
inline cusolverSpHandle_t get_cusolver_sp_handle(resources const& res)
{
  if (!res.has_resource_factory(resource_type::CUSOLVER_SP_HANDLE)) {
    cudaStream_t stream = get_cuda_stream(res);
    res.add_resource_factory(std::make_shared<cusolver_sp_resource_factory>(stream));
  }
  auto ret = *res.get_resource<cusolverSpHandle_t>(resource_type::CUSOLVER_SP_HANDLE);
  // Re-bind the handle to the current main stream on every access, mirroring
  // get_cublas_handle: the stream stored on `res` may have been replaced
  // (via set_cuda_stream) after the handle was first created, and the cached
  // handle would otherwise keep submitting work to the stale stream.
  RAFT_CUSOLVER_TRY(cusolverSpSetStream(ret, get_cuda_stream(res)));
  return ret;
};
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cuda_event.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::resource {
/** Owns a CUDA event for the lifetime of the resource. */
class cuda_event_resource : public resource {
 public:
  cuda_event_resource()
  {
    // Timing is disabled: the event is used for stream ordering only,
    // which makes record/wait operations cheaper.
    RAFT_CUDA_TRY_NO_THROW(cudaEventCreateWithFlags(&event_, cudaEventDisableTiming));
  }
  ~cuda_event_resource() override { RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(event_)); }
  void* get_resource() override { return &event_; }

 private:
  cudaEvent_t event_;
};
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/device_id.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::resource {
/** Caches the CUDA device id that was current at construction time. */
class device_id_resource : public resource {
 public:
  device_id_resource() : dev_id_{-1} { RAFT_CUDA_TRY_NO_THROW(cudaGetDevice(&dev_id_)); }
  void* get_resource() override { return &dev_id_; }
  ~device_id_resource() override = default;

 private:
  // -1 until (and unless) cudaGetDevice succeeds.
  int dev_id_;
};
/**
* Factory that knows how to construct a
* specific raft::resource to populate
* the res_t.
*/
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class device_id_resource_factory : public resource_factory {
 public:
  // Identifies the slot this factory fills.
  resource_type get_resource_type() override { return resource_type::DEVICE_ID; }
  // Each call materializes a fresh device-id resource.
  resource* make_resource() override { return new device_id_resource{}; }
};
/**
* @defgroup resource_device_id Device ID resource functions
* @{
*/
/**
* Load a device id from a res (and populate it on the res if needed).
* @param res raft res object for managing resources
* @return device id
*/
inline int get_device_id(resources const& res)
{
  // Lazily register the factory the first time the device id is requested.
  constexpr auto kKey = resource_type::DEVICE_ID;
  if (!res.has_resource_factory(kKey)) {
    res.add_resource_factory(std::make_shared<device_id_resource_factory>());
  }
  return *res.get_resource<int>(kKey);
}
/**
* @}
*/
} // namespace raft::resource | 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cusparse_handle.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusparse_v2.h>
#include <raft/core/cusparse_macros.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::resource {
class cusparse_resource : public resource {
public:
cusparse_resource(rmm::cuda_stream_view stream)
{
RAFT_CUSPARSE_TRY_NO_THROW(cusparseCreate(&cusparse_res));
RAFT_CUSPARSE_TRY_NO_THROW(cusparseSetStream(cusparse_res, stream));
}
~cusparse_resource() { RAFT_CUSPARSE_TRY_NO_THROW(cusparseDestroy(cusparse_res)); }
void* get_resource() override { return &cusparse_res; }
private:
cusparseHandle_t cusparse_res;
};
/**
* Factory that knows how to construct a
* specific raft::resource to populate
* the res_t.
*/
class cusparse_resource_factory : public resource_factory {
public:
cusparse_resource_factory(rmm::cuda_stream_view stream) : stream_(stream) {}
resource_type get_resource_type() override { return resource_type::CUSPARSE_HANDLE; }
resource* make_resource() override { return new cusparse_resource(stream_); }
private:
rmm::cuda_stream_view stream_;
};
/**
* @defgroup resource_cusparse cuSparse handle resource functions
* @{
*/
/**
 * Load a cusparseHandle_t from raft res if it exists, otherwise
 * add it and return it.
 * @param[in] res the raft resources object
 * @return cusparse handle, bound to the current main stream of `res`
 */
inline cusparseHandle_t get_cusparse_handle(resources const& res)
{
  if (!res.has_resource_factory(resource_type::CUSPARSE_HANDLE)) {
    rmm::cuda_stream_view stream = get_cuda_stream(res);
    res.add_resource_factory(std::make_shared<cusparse_resource_factory>(stream));
  }
  auto ret = *res.get_resource<cusparseHandle_t>(resource_type::CUSPARSE_HANDLE);
  // Re-bind the current main stream on every access, as get_cublas_handle does:
  // the stream stored on `res` may have been replaced (via set_cuda_stream)
  // since the handle was first created, and the cached handle would otherwise
  // keep submitting work to the stale stream.
  RAFT_CUSPARSE_TRY(cusparseSetStream(ret, get_cuda_stream(res)));
  return ret;
};
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/sub_comms.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/comms.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::resource {
/** Holds the name -> subcommunicator map for a resources instance. */
class sub_comms_resource : public resource {
 public:
  sub_comms_resource() : communicators_{} {}
  ~sub_comms_resource() override = default;
  // Exposes the map itself; callers cast back to the map type.
  void* get_resource() override { return &communicators_; }

 private:
  std::unordered_map<std::string, std::shared_ptr<comms::comms_t>> communicators_;
};
/**
* Factory that knows how to construct a
* specific raft::resource to populate
* the res_t.
*/
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class sub_comms_resource_factory : public resource_factory {
 public:
  resource_type get_resource_type() override { return resource_type::SUB_COMMUNICATOR; }
  // Creates an initially-empty name -> subcommunicator map.
  resource* make_resource() override { return new sub_comms_resource{}; }
};
/**
* @defgroup resource_sub_comms Subcommunicator resource functions
* @{
*/
/**
 * Fetch a named subcommunicator previously registered via set_subcomm.
 * @param[in] res the raft resources object
 * @param[in] key name of the subcommunicator
 * @return reference to the subcommunicator
 */
inline const comms::comms_t& get_subcomm(const resources& res, std::string key)
{
  if (!res.has_resource_factory(resource_type::SUB_COMMUNICATOR)) {
    res.add_resource_factory(std::make_shared<sub_comms_resource_factory>());
  }
  auto sub_comms =
    res.get_resource<std::unordered_map<std::string, std::shared_ptr<comms::comms_t>>>(
      resource_type::SUB_COMMUNICATOR);
  // Use find() rather than at(): with at(), a missing key escaped as a bare
  // std::out_of_range before the RAFT_EXPECTS below could ever fire, bypassing
  // the intended diagnostic message.
  auto it = sub_comms->find(key);
  RAFT_EXPECTS(it != sub_comms->end() && nullptr != it->second.get(),
               "ERROR: Subcommunicator was not initialized");
  return *it->second;
}
/** Register a named subcommunicator on the resources instance. */
inline void set_subcomm(resources const& res,
                        std::string key,
                        std::shared_ptr<comms::comms_t> subcomm)
{
  // Make sure the map resource exists before inserting into it.
  if (!res.has_resource_factory(resource_type::SUB_COMMUNICATOR)) {
    res.add_resource_factory(std::make_shared<sub_comms_resource_factory>());
  }
  using map_t     = std::unordered_map<std::string, std::shared_ptr<comms::comms_t>>;
  auto* sub_comms = res.get_resource<map_t>(resource_type::SUB_COMMUNICATOR);
  sub_comms->insert({key, subcomm});
}
/**
* @}
*/
} // namespace raft::resource | 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/cublas_handle.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cublas_v2.h>
#include <raft/core/cublas_macros.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::resource {
class cublas_resource : public resource {
public:
cublas_resource(rmm::cuda_stream_view stream)
{
RAFT_CUBLAS_TRY_NO_THROW(cublasCreate(&cublas_res));
RAFT_CUBLAS_TRY_NO_THROW(cublasSetStream(cublas_res, stream));
}
~cublas_resource() override { RAFT_CUBLAS_TRY_NO_THROW(cublasDestroy(cublas_res)); }
void* get_resource() override { return &cublas_res; }
private:
cublasHandle_t cublas_res;
};
/**
* Factory that knows how to construct a
* specific raft::resource to populate
* the res_t.
*/
class cublas_resource_factory : public resource_factory {
public:
cublas_resource_factory(rmm::cuda_stream_view stream) : stream_(stream) {}
resource_type get_resource_type() override { return resource_type::CUBLAS_HANDLE; }
resource* make_resource() override { return new cublas_resource(stream_); }
private:
rmm::cuda_stream_view stream_;
};
/**
* @defgroup resource_cublas cuBLAS handle resource functions
* @{
*/
/**
* Load a cublasres_t from raft res if it exists, otherwise
* add it and return it.
* @param[in] res the raft resources object
* @return cublas handle
*/
inline cublasHandle_t get_cublas_handle(resources const& res)
{
  if (!res.has_resource_factory(resource_type::CUBLAS_HANDLE)) {
    // First access: create the handle bound to the current main stream.
    cudaStream_t stream = get_cuda_stream(res);
    res.add_resource_factory(std::make_shared<cublas_resource_factory>(stream));
  }
  auto ret = *res.get_resource<cublasHandle_t>(resource_type::CUBLAS_HANDLE);
  // The stream on `res` may have been replaced (set_cuda_stream) since the
  // handle was created; re-bind the current main stream on every access so
  // cuBLAS work is ordered on the stream the caller expects.
  RAFT_CUBLAS_TRY(cublasSetStream(ret, get_cuda_stream(res)));
  return ret;
};
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/comms.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/comms.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::resource {
/** Holds a shared communicator handle for a resources instance. */
class comms_resource : public resource {
 public:
  // Parameter name fixed (was misspelled "comnumicator").
  comms_resource(std::shared_ptr<comms::comms_t> communicator) : communicator_(communicator) {}
  // Exposes the shared_ptr itself; callers cast back to
  // std::shared_ptr<comms::comms_t>*.
  void* get_resource() override { return &communicator_; }
  ~comms_resource() override = default;

 private:
  std::shared_ptr<comms::comms_t> communicator_;
};
/**
* Factory that knows how to construct a
* specific raft::resource to populate
* the res_t.
*/
/**
 * Factory that knows how to construct a
 * specific raft::resource to populate
 * the res_t.
 */
class comms_resource_factory : public resource_factory {
 public:
  comms_resource_factory(std::shared_ptr<comms::comms_t> communicator) : comm_{communicator} {}
  resource_type get_resource_type() override { return resource_type::COMMUNICATOR; }
  resource* make_resource() override { return new comms_resource(comm_); }

 private:
  // Shared with every resource instance this factory creates.
  std::shared_ptr<comms::comms_t> comm_;
};
/**
* @defgroup resource_comms Comms resource functions
* @{
*/
/** Whether a communicator has been set on `res` (see set_comms). */
inline bool comms_initialized(resources const& res)
{
  return res.has_resource_factory(resource_type::COMMUNICATOR);
}
/** Fetch the communicator; set_comms must have been called beforehand. */
inline comms::comms_t const& get_comms(resources const& res)
{
  RAFT_EXPECTS(comms_initialized(res), "ERROR: Communicator was not initialized\n");
  auto comm_ptr = res.get_resource<std::shared_ptr<comms::comms_t>>(resource_type::COMMUNICATOR);
  return **comm_ptr;
}
/** Install (or replace) the communicator on the resources instance. */
inline void set_comms(resources const& res, std::shared_ptr<comms::comms_t> communicator)
{
  auto factory = std::make_shared<comms_resource_factory>(communicator);
  res.add_resource_factory(std::move(factory));
}
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/device_memory_resource.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/operators.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cstddef>
#include <optional>
namespace raft::resource {
/**
* \defgroup device_memory_resource Device memory resources
* @{
*/
/**
 * An RMM device memory resource wrapped in a limiting adaptor, used to bound
 * the total size of raft's temporary workspace allocations.
 */
class limiting_memory_resource : public resource {
 public:
  /**
   * @param mr upstream memory resource that performs the actual allocations
   * @param allocation_limit cap (in bytes) enforced by the limiting adaptor
   * @param alignment optional alignment forwarded to the limiting adaptor
   */
  limiting_memory_resource(std::shared_ptr<rmm::mr::device_memory_resource> mr,
                           std::size_t allocation_limit,
                           std::optional<std::size_t> alignment)
    : upstream_(mr), mr_(make_adaptor(mr, allocation_limit, alignment))
  {
  }
  // Exposes the limiting adaptor itself (not the upstream resource).
  auto get_resource() -> void* override { return &mr_; }
  ~limiting_memory_resource() override = default;

 private:
  // Keeps the upstream resource alive: the adaptor below holds only a raw
  // pointer to it (see make_adaptor).
  std::shared_ptr<rmm::mr::device_memory_resource> upstream_;
  rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource> mr_;
  static inline auto make_adaptor(std::shared_ptr<rmm::mr::device_memory_resource> upstream,
                                  std::size_t limit,
                                  std::optional<std::size_t> alignment)
    -> rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource>
  {
    // The adaptor stores this raw pointer; lifetime is guaranteed by
    // `upstream_` held alongside it in the constructed object.
    auto p = upstream.get();
    if (alignment.has_value()) {
      return rmm::mr::limiting_resource_adaptor(p, limit, alignment.value());
    } else {
      return rmm::mr::limiting_resource_adaptor(p, limit);
    }
  }
};
/**
* Factory that knows how to construct a specific raft::resource to populate
* the resources instance.
*/
class workspace_resource_factory : public resource_factory {
 public:
  /**
   * @param mr upstream memory resource backing the workspace; when null, the
   *   current RMM device resource is used (wrapped without taking ownership).
   * @param allocation_limit cap in bytes on workspace allocations; defaults to
   *   half the device's total memory (see default_allocation_limit()).
   * @param alignment optional alignment forwarded to the limiting adaptor.
   */
  explicit workspace_resource_factory(
    std::shared_ptr<rmm::mr::device_memory_resource> mr = {nullptr},
    std::optional<std::size_t> allocation_limit = std::nullopt,
    std::optional<std::size_t> alignment = std::nullopt)
    : allocation_limit_(allocation_limit.value_or(default_allocation_limit())),
      alignment_(alignment),
      mr_(mr ? mr : default_plain_resource())
  {
  }
  auto get_resource_type() -> resource_type override { return resource_type::WORKSPACE_RESOURCE; }
  // Wraps the configured upstream in a limiting adaptor (limiting_memory_resource).
  auto make_resource() -> resource* override
  {
    return new limiting_memory_resource(mr_, allocation_limit_, alignment_);
  }
  /** Construct a sensible default pool memory resource. */
  static inline auto default_pool_resource(std::size_t limit)
    -> std::shared_ptr<rmm::mr::device_memory_resource>
  {
    // Set the default granularity to 1 GiB
    constexpr std::size_t kOneGb = 1024lu * 1024lu * 1024lu;
    // The initial size of the pool. The choice of this value only affects the performance a little
    // bit. Heuristics:
    //   1) the pool shouldn't be too big from the beginning independently of the limit;
    //   2) otherwise, set it to half the max size to avoid too many resize calls.
    auto min_size = std::min<std::size_t>(kOneGb, limit / 2lu);
    // The pool is going to be place behind the limiting resource adaptor. This means the user won't
    // be able to allocate more than 'limit' bytes of memory anyway. At the same time, the pool
    // itself may consume a little bit more memory than the 'limit' due to memory fragmentation.
    // Therefore, we look for a compromise, such that:
    //   1) 'limit' is accurate - the user should be more likely to run into the limiting
    //      resource adaptor bad_alloc error than into the pool bad_alloc error.
    //   2) The pool doesn't grab too much memory on top of the 'limit'.
    auto max_size = std::min<std::size_t>(limit + kOneGb / 2lu, limit * 3lu / 2lu);
    auto upstream = rmm::mr::get_current_device_resource();
    RAFT_LOG_DEBUG(
      "Setting the workspace pool resource; memory limit = %zu, initial pool size = %zu, max pool "
      "size = %zu.",
      limit,
      min_size,
      max_size);
    return std::make_shared<rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>>(
      upstream, min_size, max_size);
  }
  /**
   * Get the global memory resource wrapped into an unmanaged shared_ptr (with no deleter).
   *
   * Note: the lifetime of the underlying `rmm::mr::get_current_device_resource()` is managed
   * somewhere else, since it's passed by a raw pointer. Hence, this shared_ptr wrapper is not
   * allowed to delete the pointer on destruction.
   */
  static inline auto default_plain_resource() -> std::shared_ptr<rmm::mr::device_memory_resource>
  {
    // void_op{} is a no-op deleter: the shared_ptr observes but never frees.
    return std::shared_ptr<rmm::mr::device_memory_resource>{rmm::mr::get_current_device_resource(),
                                                            void_op{}};
  }

 private:
  // Resolved limit in bytes (never nullopt past construction).
  std::size_t allocation_limit_;
  std::optional<std::size_t> alignment_;
  std::shared_ptr<rmm::mr::device_memory_resource> mr_;
  static inline auto default_allocation_limit() -> std::size_t
  {
    std::size_t free_size{};
    std::size_t total_size{};
    RAFT_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    // Note, the workspace does not claim all this memory from the start, so it's still usable by
    // the main resource as well.
    // This limit is merely an order for algorithm internals to plan the batching accordingly.
    return total_size / 2;
  }
};
/**
* Load a temp workspace resource from a resources instance (and populate it on the res
* if needed).
*
* @param res raft resources object for managing resources
* @return device memory resource object
*/
/**
 * Load a temp workspace resource from a resources instance (and populate it
 * on the res if needed).
 *
 * @param res raft resources object for managing resources
 * @return pointer to the limiting adaptor wrapping the workspace resource
 */
inline auto get_workspace_resource(resources const& res)
  -> rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource>*
{
  using adaptor_t = rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource>;
  if (!res.has_resource_factory(resource_type::WORKSPACE_RESOURCE)) {
    res.add_resource_factory(std::make_shared<workspace_resource_factory>());
  }
  return res.get_resource<adaptor_t>(resource_type::WORKSPACE_RESOURCE);
}
/** Get the total size of the workspace resource. */
inline auto get_workspace_total_bytes(resources const& res) -> size_t
{
  // "Total" is the configured allocation limit, not the physical memory size.
  return get_workspace_resource(res)->get_allocation_limit();
};
/** Get the already allocated size of the workspace resource. */
inline auto get_workspace_used_bytes(resources const& res) -> size_t
{
  // Bytes currently allocated through the limiting adaptor.
  return get_workspace_resource(res)->get_allocated_bytes();
};
/** Get the available size of the workspace resource. */
/** Get the available size of the workspace resource. */
inline auto get_workspace_free_bytes(resources const& res) -> size_t
{
  // Free = configured limit minus what is currently allocated.
  const auto* ws = get_workspace_resource(res);
  return ws->get_allocation_limit() - ws->get_allocated_bytes();
}
/**
* Set a temporary workspace resource on a resources instance.
*
* @param res raft resources object for managing resources
* @param mr an optional RMM device_memory_resource
* @param allocation_limit
* the total amount of memory in bytes available to the temporary workspace resources.
* @param alignment optional alignment requirements passed to RMM allocations
*
*/
inline void set_workspace_resource(resources const& res,
                                   std::shared_ptr<rmm::mr::device_memory_resource> mr = {nullptr},
                                   std::optional<std::size_t> allocation_limit = std::nullopt,
                                   std::optional<std::size_t> alignment = std::nullopt)
{
  // Replaces any previously configured workspace resource.
  auto factory = std::make_shared<workspace_resource_factory>(mr, allocation_limit, alignment);
  res.add_resource_factory(std::move(factory));
}
/**
* Set the temporary workspace resource to a pool on top of the global memory resource
* (`rmm::mr::get_current_device_resource()`.
*
* @param res raft resources object for managing resources
* @param allocation_limit
* the total amount of memory in bytes available to the temporary workspace resources;
* if not provided, a last used or default limit is used.
*
*/
inline void set_workspace_to_pool_resource(
  resources const& res, std::optional<std::size_t> allocation_limit = std::nullopt)
{
  // Only query the current workspace when no limit is given: the query itself
  // may lazily create the workspace resource, so it must stay conditional.
  if (!allocation_limit.has_value()) { allocation_limit = get_workspace_total_bytes(res); }
  auto pool = workspace_resource_factory::default_pool_resource(*allocation_limit);
  res.add_resource_factory(
    std::make_shared<workspace_resource_factory>(std::move(pool), allocation_limit, std::nullopt));
}
/**
* Set the temporary workspace resource the same as the global memory resource
* (`rmm::mr::get_current_device_resource()`.
*
* Note, the workspace resource is always limited; the limit here defines how much of the global
* memory resource can be consumed by the workspace allocations.
*
* @param res raft resources object for managing resources
* @param allocation_limit
* the total amount of memory in bytes available to the temporary workspace resources.
*/
inline void set_workspace_to_global_resource(
  resources const& res, std::optional<std::size_t> allocation_limit = std::nullopt)
{
  // The plain (non-pooling) global resource, wrapped without ownership.
  auto global_mr = workspace_resource_factory::default_plain_resource();
  res.add_resource_factory(std::make_shared<workspace_resource_factory>(
    std::move(global_mr), allocation_limit, std::nullopt));
}
/** @} */
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/resource/resource_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft::resource {
/**
* @defgroup resource_types Core resource vocabulary types
* @{
*/
/**
* @brief Resource types can apply to any resource and don't have to be host- or device-specific.
*/
// NOTE(review): enumerator order/values look load-bearing (LAST_KEY is
// "reserved for the last key", suggesting values are used as lookup indices
// by the resources registry) — do not reorder or renumber; confirm against
// the resources implementation before adding entries anywhere but before
// LAST_KEY.
enum resource_type {
  // device-specific resource types
  CUBLAS_HANDLE = 0,       // cublas handle
  CUSOLVER_DN_HANDLE,      // cusolver dn handle
  CUSOLVER_SP_HANDLE,      // cusolver sp handle
  CUSPARSE_HANDLE,         // cusparse handle
  CUDA_STREAM_VIEW,        // view of a cuda stream
  CUDA_STREAM_POOL,        // cuda stream pool
  CUDA_STREAM_SYNC_EVENT,  // cuda event for syncing streams
  COMMUNICATOR,            // raft communicator
  SUB_COMMUNICATOR,        // raft sub communicator
  DEVICE_PROPERTIES,       // cuda device properties
  DEVICE_ID,               // cuda device id
  STREAM_VIEW,             // view of a cuda stream or a placeholder in
                           // CUDA-free builds
  THRUST_POLICY,           // thrust execution policy
  WORKSPACE_RESOURCE,      // rmm device memory resource
  LAST_KEY                 // reserved for the last key
};
/**
* @brief A resource constructs and contains an instance of
* some pre-determined object type and facades that object
* behind a common API.
*/
class resource {
 public:
  // Type-erased accessor: callers cast the returned pointer to the concrete
  // type associated with the corresponding resource_type.
  virtual void* get_resource() = 0;
  // Virtual so concrete resources are destroyed correctly through base pointers.
  virtual ~resource() {}
};
/** A placeholder resource holding nothing; get_resource() yields nullptr. */
class empty_resource : public resource {
 public:
  empty_resource() = default;
  ~empty_resource() override = default;
  void* get_resource() override { return nullptr; }
};
/**
* @brief A resource factory knows how to construct an instance of
* a specific raft::resource::resource.
*/
class resource_factory {
 public:
  /**
   * @brief Return the resource_type associated with the current factory
   * @return resource_type corresponding to the current factory
   */
  virtual resource_type get_resource_type() = 0;
  /**
   * @brief Construct an instance of the factory's underlying resource.
   * @return resource instance
   *
   * NOTE(review): implementations here return `new ...` (heap allocations),
   * so ownership presumably passes to the caller — confirm against the
   * resources registry; empty_resource_factory below returns a non-owned
   * pointer instead.
   */
  virtual resource* make_resource() = 0;
  virtual ~resource_factory() {}
};
/**
* @brief A resource factory knows how to construct an instance of
* a specific raft::resource::resource.
*/
class empty_resource_factory : public resource_factory {
 public:
  empty_resource_factory() : resource_factory() {}
  /**
   * @brief Return the resource_type associated with the current factory
   * @return resource_type corresponding to the current factory
   */
  resource_type get_resource_type() override { return resource_type::LAST_KEY; }
  /**
   * @brief Construct an instance of the factory's underlying resource.
   * @return resource instance
   *
   * NOTE(review): returns a pointer to a member rather than a heap
   * allocation — callers must not take ownership of (delete) this pointer;
   * confirm the resource registry treats this factory accordingly.
   */
  resource* make_resource() override { return &res; }

 private:
  empty_resource res;
};
/**
* @}
*/
} // namespace raft::resource
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core/resource | rapidsai_public_repos/raft/cpp/include/raft/core/resource/detail/device_memory_resource.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/logger.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <mutex>
#include <set>
#include <string>
namespace raft::resource::detail {
/**
* Warn a user of the calling algorithm if they use the default non-pooled memory allocator,
* as it may hurt the performance.
*
* This helper function is designed to produce the warning once for a given `user_name`.
*
* @param[in] res
* @param[in] user_name the name of the algorithm or any other identification.
*
*/
inline void warn_non_pool_workspace(resources const& res, std::string user_name)
{
  // Detect if the plain cuda memory resource is used for the workspace
  if (rmm::mr::cuda_memory_resource{}.is_equal(*get_workspace_resource(res)->get_upstream())) {
    // Function-local statics shared by all callers: the set of names already
    // warned about, guarded by the mutex below.
    static std::set<std::string> notified_names{};
    static std::mutex mutex{};
    std::lock_guard<std::mutex> guard(mutex);
    auto [it, inserted] = notified_names.insert(std::move(user_name));
    // Only the first insertion of a given name emits the warning.
    if (inserted) {
      RAFT_LOG_WARN(
        "[%s] the default cuda resource is used for the raft workspace allocations. This may lead "
        "to a significant slowdown for this algorithm. Consider using the default pool resource "
        "(`raft::resource::set_workspace_to_pool_resource`) or set your own resource explicitly "
        "(`raft::resource::set_workspace_resource`).",
        it->c_str());
    }
  }
}
} // namespace raft::resource::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core/resource | rapidsai_public_repos/raft/cpp/include/raft/core/resource/detail/stream_sync_event.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuda_runtime.h>
#include <raft/core/resource/cuda_event.hpp>
#include <raft/core/resource/resource_types.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::resource::detail {
/**
* Factory that knows how to construct a specific raft::resource to populate
* the res_t.
*/
/**
 * Factory that knows how to construct a specific raft::resource to populate
 * the res_t.
 */
class cuda_stream_sync_event_resource_factory : public resource_factory {
 public:
  // Produces the cuda event used for synchronizing the main stream.
  resource_type get_resource_type() override { return resource_type::CUDA_STREAM_SYNC_EVENT; }
  resource* make_resource() override { return new cuda_event_resource{}; }
};
/**
* Load a cudaEvent from a resources instance (and populate it on the resources instance)
* if needed) for syncing the main cuda stream.
* @param res raft resources instance for managing resources
* @return
*/
inline cudaEvent_t& get_cuda_stream_sync_event(resources const& res)
{
  // Lazily create the sync event on first use.
  constexpr auto kKey = resource_type::CUDA_STREAM_SYNC_EVENT;
  if (!res.has_resource_factory(kKey)) {
    res.add_resource_factory(std::make_shared<cuda_stream_sync_event_resource_factory>());
  }
  return *res.get_resource<cudaEvent_t>(kKey);
}
} // namespace raft::resource::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/callback_sink.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <mutex>
#define SPDLOG_HEADER_ONLY
#include <spdlog/common.h>
#include <spdlog/details/log_msg.h>
#include <spdlog/sinks/base_sink.h>
namespace spdlog::sinks {
// Signature for user-supplied log handlers: receives the spdlog level cast to
// int and the fully formatted, null-terminated log message.
typedef void (*LogCallback)(int lvl, const char* msg);
/**
 * @brief spdlog sink that forwards each formatted record to a C-style callback,
 * writing to stdout when no callback is set.
 *
 * Useful for routing log output across a language boundary (e.g. to a Python
 * handler) where only a plain function pointer can be passed.
 */
template <class Mutex>
class CallbackSink : public base_sink<Mutex> {
 public:
  // NOTE(review): `tag` is accepted but never stored or used by this sink —
  // confirm whether callers rely on it before removing.
  explicit CallbackSink(std::string tag = "spdlog",
                        LogCallback callback = nullptr,
                        void (*flush)() = nullptr)
    : _callback{callback}, _flush{flush} {};
  // Replace the message handler; nullptr restores the stdout fallback.
  void set_callback(LogCallback callback) { _callback = callback; }
  // Replace the flush handler; nullptr restores std::flush on stdout.
  void set_flush(void (*flush)()) { _flush = flush; }
 protected:
  // Called by spdlog (under Mutex) for every accepted record: format it, then
  // hand it to the callback or print it.
  void sink_it_(const details::log_msg& msg) override
  {
    spdlog::memory_buf_t formatted;
    base_sink<Mutex>::formatter_->format(msg, formatted);
    std::string msg_string = fmt::to_string(formatted);
    if (_callback) {
      _callback(static_cast<int>(msg.level), msg_string.c_str());
    } else {
      std::cout << msg_string;
    }
  }
  // Called by spdlog when a flush is requested.
  void flush_() override
  {
    if (_flush) {
      _flush();
    } else {
      std::cout << std::flush;
    }
  }
  LogCallback _callback;  // message handler, may be nullptr
  void (*_flush)();       // flush handler, may be nullptr
};
// Thread-safe and single-threaded instantiations, following spdlog's _mt/_st
// naming convention.
using callback_sink_mt = CallbackSink<std::mutex>;
using callback_sink_st = CallbackSink<details::null_mutex>;
} // end namespace spdlog::sinks
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/span.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits> // numeric_limits
#include <raft/core/detail/macros.hpp>
#include <raft/core/mdspan_types.hpp>
#include <type_traits>
namespace raft {
template <class ElementType, bool is_device, std::size_t Extent>
class span;
namespace detail {
/*!
 * The extent E of the span returned by subspan is determined as follows:
 *
 * - If Count is not dynamic_extent, Count;
 * - Otherwise, if Extent is not dynamic_extent, Extent - Offset;
 * - Otherwise, dynamic_extent.
 */
template <std::size_t Extent, std::size_t Offset, std::size_t Count>
struct extent_value_t
  : public std::integral_constant<
      std::size_t,
      Count != dynamic_extent ? Count : (Extent != dynamic_extent ? Extent - Offset : Extent)> {};
/*!
 * If N is dynamic_extent, the extent of the returned span E is also
 * dynamic_extent; otherwise it is std::size_t(sizeof(T)) * N.
 */
template <typename T, std::size_t Extent>
struct extent_as_bytes_value_t
  : public std::integral_constant<std::size_t,
                                  Extent == dynamic_extent ? Extent : sizeof(T) * Extent> {};
// A span with static extent From may convert to one with extent To only when
// the extents match exactly or either side is dynamic_extent.
template <std::size_t From, std::size_t To>
struct is_allowed_extent_conversion_t
  : public std::integral_constant<bool,
                                  From == To || From == dynamic_extent || To == dynamic_extent> {};
// Element conversion is allowed only when a From array pointer implicitly
// converts to a To array pointer (i.e. qualification conversions such as
// T -> const T; never unrelated types).
template <class From, class To>
struct is_allowed_element_type_conversion_t
  : public std::integral_constant<bool, std::is_convertible<From (*)[], To (*)[]>::value> {};
// Detection idiom: true only for specializations of raft::span.
template <class T>
struct is_span_oracle_t : std::false_type {};
template <class T, bool is_device, std::size_t Extent>
struct is_span_oracle_t<span<T, is_device, Extent>> : std::true_type {};
// Strips cv-qualifiers before testing for span-ness.
template <class T>
struct is_span_t : public is_span_oracle_t<typename std::remove_cv<T>::type> {};
/**
 * Device-compatible equivalent of std::lexicographical_compare: returns true
 * iff the range [first1, last1) compares lexicographically less than
 * [first2, last2) under a default-constructed Compare.
 */
template <class InputIt1, class InputIt2, class Compare>
_RAFT_HOST_DEVICE constexpr auto lexicographical_compare(InputIt1 first1,
                                                         InputIt1 last1,
                                                         InputIt2 first2,
                                                         InputIt2 last2) -> bool
{
  Compare comp;
  while (first1 != last1) {
    // Second range exhausted first: it is a (possibly equal-length) prefix,
    // so the first range is not less.
    if (first2 == last2) { return false; }
    if (comp(*first1, *first2)) { return true; }
    if (comp(*first2, *first1)) { return false; }
    ++first1;
    ++first2;
  }
  // First range exhausted: it is less iff it is a strict prefix.
  return first2 != last2;
}
// Storage for a span with a static extent: only the pointer is stored; the
// size is the compile-time constant Extent.
template <typename T, std::size_t Extent>
struct span_storage {
 private:
  T* ptr_{nullptr};
 public:
  constexpr span_storage() noexcept = default;
  // The runtime size argument is ignored; the extent is fixed at compile time.
  constexpr span_storage(T* ptr, std::size_t) noexcept : ptr_{ptr} {}
  [[nodiscard]] constexpr auto size() const noexcept -> std::size_t { return Extent; }
  [[nodiscard]] constexpr auto data() const noexcept -> T* { return ptr_; }
};
// Specialization for dynamic_extent: the size is carried at runtime.
template <typename T>
struct span_storage<T, dynamic_extent> {
 private:
  T* ptr_{nullptr};
  std::size_t size_{0};
 public:
  constexpr span_storage() noexcept = default;
  constexpr span_storage(T* ptr, std::size_t size) noexcept : ptr_{ptr}, size_{size} {}
  [[nodiscard]] constexpr auto size() const noexcept -> std::size_t { return size_; }
  [[nodiscard]] constexpr auto data() const noexcept -> T* { return ptr_; }
};
} // namespace detail
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/mdspan_util.cuh | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/thirdparty/mdspan/include/experimental/mdspan>
#include <raft/core/detail/macros.hpp>
#include <tuple>
#include <utility>
namespace raft::detail {
// Expand a C array into a std::tuple element-by-element via the supplied
// index sequence.
template <class T, std::size_t N, std::size_t... Idx>
MDSPAN_INLINE_FUNCTION constexpr auto arr_to_tup(T (&arr)[N], std::index_sequence<Idx...>)
{
  return std::make_tuple(arr[Idx]...);
}
// Convenience overload that generates the 0..N-1 index sequence itself.
template <class T, std::size_t N>
MDSPAN_INLINE_FUNCTION constexpr auto arr_to_tup(T (&arr)[N])
{
  return arr_to_tup(arr, std::make_index_sequence<N>{});
}
/**
 * Portable population count (number of set bits) used when no compiler or
 * hardware intrinsic is available.
 */
template <typename T>
MDSPAN_INLINE_FUNCTION auto native_popc(T v) -> int32_t
{
  int32_t count = 0;
  while (v != 0) {
    v &= v - 1;  // Kernighan's trick: clear the lowest set bit
    ++count;
  }
  return count;
}
// Population count for 32-bit values; dispatches to the fastest primitive
// available for the current compilation target (CUDA intrinsic, compiler
// builtin, or the portable fallback above).
MDSPAN_INLINE_FUNCTION auto popc(uint32_t v) -> int32_t
{
#if defined(__CUDA_ARCH__)
  return __popc(v);
#elif defined(__GNUC__) || defined(__clang__)
  return __builtin_popcount(v);
#else
  return native_popc(v);
#endif  // compiler
}
// 64-bit overload of the above.
MDSPAN_INLINE_FUNCTION auto popc(uint64_t v) -> int32_t
{
#if defined(__CUDA_ARCH__)
  return __popcll(v);
#elif defined(__GNUC__) || defined(__clang__)
  return __builtin_popcountll(v);
#else
  return native_popc(v);
#endif  // compiler
}
} // end namespace raft::detail | 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/mdspan_numpy_serializer.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
namespace raft {
namespace detail {
namespace numpy_serializer {
/**
* A small implementation of NumPy serialization format.
* Reference: https://numpy.org/doc/1.23/reference/generated/numpy.lib.format.html
*
* Adapted from https://github.com/llohse/libnpy/blob/master/include/npy.hpp, using the following
* license:
*
* MIT License
*
* Copyright (c) 2021 Leon Merten Lohse
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define RAFT_NUMPY_LITTLE_ENDIAN_CHAR '<'
#define RAFT_NUMPY_BIG_ENDIAN_CHAR '>'
#define RAFT_NUMPY_NO_ENDIAN_CHAR '|'
#define RAFT_NUMPY_MAGIC_STRING "\x93NUMPY"
#define RAFT_NUMPY_MAGIC_STRING_LENGTH 6
#if RAFT_SYSTEM_LITTLE_ENDIAN == 1
#define RAFT_NUMPY_HOST_ENDIAN_CHAR RAFT_NUMPY_LITTLE_ENDIAN_CHAR
#else // RAFT_SYSTEM_LITTLE_ENDIAN == 1
#define RAFT_NUMPY_HOST_ENDIAN_CHAR RAFT_NUMPY_BIG_ENDIAN_CHAR
#endif // RAFT_SYSTEM_LITTLE_ENDIAN == 1
using ndarray_len_t = std::uint64_t;
/**
 * @brief NumPy dtype descriptor, e.g. '<f4' for little-endian 32-bit float.
 */
struct dtype_t {
  const char byteorder;         // '<' (little), '>' (big), or '|' (not applicable)
  const char kind;              // 'f' float, 'i' signed int, 'u' unsigned int, 'c' complex
  const unsigned int itemsize;  // element size in bytes
  // Render as a NumPy typestr such as "<f4".
  std::string to_string() const
  {
    char buf[16] = {0};
    // snprintf (rather than sprintf) bounds the write to the buffer size.
    std::snprintf(buf, sizeof(buf), "%c%c%u", byteorder, kind, itemsize);
    return std::string(buf);
  }
  bool operator==(const dtype_t& other) const
  {
    return (byteorder == other.byteorder && kind == other.kind && itemsize == other.itemsize);
  }
};
/**
 * @brief Parsed contents of a .npy file header: dtype, memory order, and
 * array shape.
 */
struct header_t {
  const dtype_t dtype;                      // element type descriptor
  const bool fortran_order;                 // true = column-major, false = row-major
  const std::vector<ndarray_len_t> shape;   // one extent per dimension; empty = scalar
  bool operator==(const header_t& other) const
  {
    return (dtype == other.dtype && fortran_order == other.fortran_order && shape == other.shape);
  }
};
// Trait detecting std::complex<T> specializations.
template <class T>
struct is_complex : std::false_type {};
template <class T>
struct is_complex<std::complex<T>> : std::true_type {};
// Map a C++ floating-point type to its NumPy dtype (kind 'f').
template <typename T, typename std::enable_if_t<std::is_floating_point_v<T>, bool> = true>
inline dtype_t get_numpy_dtype()
{
  return {RAFT_NUMPY_HOST_ENDIAN_CHAR, 'f', sizeof(T)};
}
// Signed integers (kind 'i'); 1-byte types use the "no endian" marker '|'
// because byte order is meaningless for a single byte.
template <typename T,
          typename std::enable_if_t<std::is_integral_v<T> && std::is_signed_v<T>, bool> = true>
inline dtype_t get_numpy_dtype()
{
  const char endian_char =
    (sizeof(T) == 1 ? RAFT_NUMPY_NO_ENDIAN_CHAR : RAFT_NUMPY_HOST_ENDIAN_CHAR);
  return {endian_char, 'i', sizeof(T)};
}
// Unsigned integers (kind 'u'); same 1-byte endianness rule as above.
template <typename T,
          typename std::enable_if_t<std::is_integral_v<T> && std::is_unsigned_v<T>, bool> = true>
inline dtype_t get_numpy_dtype()
{
  const char endian_char =
    (sizeof(T) == 1 ? RAFT_NUMPY_NO_ENDIAN_CHAR : RAFT_NUMPY_HOST_ENDIAN_CHAR);
  return {endian_char, 'u', sizeof(T)};
}
// std::complex<T> maps to NumPy's complex kind 'c'.
template <typename T, typename std::enable_if_t<is_complex<T>{}, bool> = true>
inline dtype_t get_numpy_dtype()
{
  return {RAFT_NUMPY_HOST_ENDIAN_CHAR, 'c', sizeof(T)};
}
// Enums serialize as their underlying integer type.
template <typename T, typename std::enable_if_t<std::is_enum_v<T>, bool> = true>
inline dtype_t get_numpy_dtype()
{
  return get_numpy_dtype<std::underlying_type_t<T>>();
}
/**
 * Render a vector as a Python tuple literal: "()" when empty, "(x,)" for a
 * single element (trailing comma, as Python requires), "(a, b, c)" otherwise.
 */
template <typename T>
inline std::string tuple_to_string(const std::vector<T>& tuple)
{
  std::ostringstream out;
  out << "(";
  for (std::size_t i = 0; i < tuple.size(); ++i) {
    if (i > 0) { out << ", "; }
    out << tuple[i];
  }
  // Python's one-element tuple syntax needs a trailing comma.
  if (tuple.size() == 1) { out << ","; }
  out << ")";
  return out.str();
}
// Render a header as the Python dict literal stored in a .npy file, e.g.
// {'descr': '<f4', 'fortran_order': False, 'shape': (3, 4)}
inline std::string header_to_string(const header_t& header)
{
  std::ostringstream oss;
  oss << "{'descr': '" << header.dtype.to_string()
      << "', 'fortran_order': " << (header.fortran_order ? "True" : "False")
      << ", 'shape': " << tuple_to_string(header.shape) << "}";
  return oss.str();
}
// Strip leading and trailing spaces and tabs; returns "" for all-whitespace
// (or empty) input.
inline std::string trim(const std::string& str)
{
  auto const first = str.find_first_not_of(" \t");
  if (first == std::string::npos) { return ""; }
  auto const last = str.find_last_not_of(" \t");
  return str.substr(first, last - first + 1);
}
// A poor man's parser for Python dictionary
// TODO(hcho3): Consider writing a proper parser
// Limitation: can only parse a flat dictionary; all values are assumed to non-objects
// Limitation: must know all the keys ahead of time; you get undefined behavior if you omit any key
//
// @param str  the dict literal, e.g. "{'a': 1, 'b': (2,)}"
// @param keys every key expected to appear in the dict
// @return map from key to its (trimmed) value substring
inline std::map<std::string, std::string> parse_pydict(std::string str,
                                                       const std::vector<std::string>& keys)
{
  std::map<std::string, std::string> result;
  // Unwrap dictionary
  str = trim(str);
  RAFT_EXPECTS(str.front() == '{' && str.back() == '}', "Expected a Python dictionary");
  str = str.substr(1, str.length() - 2);
  // Get the position of each key and put it in the list
  std::vector<std::pair<std::size_t, std::string>> positions;
  for (auto const& key : keys) {
    std::size_t pos = str.find("'" + key + "'");
    RAFT_EXPECTS(pos != std::string::npos, "Missing '%s' key.", key.c_str());
    positions.emplace_back(pos, key);
  }
  // Sort the list (by position, so adjacent entries delimit each value slice)
  std::sort(positions.begin(), positions.end());
  // Extract each key-value pair: each slice runs from this key's position up
  // to the next key's position (or end of string for the last key).
  for (std::size_t i = 0; i < positions.size(); ++i) {
    std::string key = positions[i].second;
    std::size_t begin = positions[i].first;
    std::size_t end = (i + 1 < positions.size() ? positions[i + 1].first : std::string::npos);
    std::string raw_value = trim(str.substr(begin, end - begin));
    // Drop the separating comma left at the end of all but the last slice.
    if (raw_value.back() == ',') { raw_value.pop_back(); }
    std::size_t sep_pos = raw_value.find_first_of(":");
    if (sep_pos == std::string::npos) {
      result[key] = "";
    } else {
      result[key] = trim(raw_value.substr(sep_pos + 1));
    }
  }
  return result;
}
// Unwrap a single-quoted Python string literal, e.g. "'<f4'" -> "<f4".
inline std::string parse_pystring(std::string str)
{
  RAFT_EXPECTS(str.front() == '\'' && str.back() == '\'', "Invalid Python string: %s", str.c_str());
  return str.substr(1, str.length() - 2);
}
// Parse a Python boolean literal ("True"/"False"); fails on anything else.
inline bool parse_pybool(std::string str)
{
  if (str == "True") {
    return true;
  } else if (str == "False") {
    return false;
  } else {
    RAFT_FAIL("Invalid Python boolean: %s", str.c_str());
  }
}
// Split a Python tuple literal such as "(3, 4)" into its trimmed elements.
// A trailing comma (one-element tuple "(3,)") yields a single element since
// getline produces no token after the final comma.
inline std::vector<std::string> parse_pytuple(std::string str)
{
  std::vector<std::string> result;
  str = trim(str);
  RAFT_EXPECTS(str.front() == '(' && str.back() == ')', "Invalid Python tuple: %s", str.c_str());
  str = str.substr(1, str.length() - 2);
  std::istringstream iss(str);
  for (std::string token; std::getline(iss, token, ',');) {
    result.push_back(trim(token));
  }
  return result;
}
// Parse a NumPy typestr like "<f4" into a dtype_t, validating that the
// byteorder and kind characters are ones this serializer understands.
inline dtype_t parse_descr(std::string typestr)
{
  RAFT_EXPECTS(typestr.length() >= 3, "Invalid typestr: Too short");
  char byteorder_c = typestr.at(0);
  char kind_c = typestr.at(1);
  std::string itemsize_s = typestr.substr(2);
  const char endian_chars[] = {
    RAFT_NUMPY_LITTLE_ENDIAN_CHAR, RAFT_NUMPY_BIG_ENDIAN_CHAR, RAFT_NUMPY_NO_ENDIAN_CHAR};
  const char numtype_chars[] = {'f', 'i', 'u', 'c'};
  RAFT_EXPECTS(std::find(std::begin(endian_chars), std::end(endian_chars), byteorder_c) !=
                 std::end(endian_chars),
               "Invalid typestr: unrecognized byteorder %c",
               byteorder_c);
  RAFT_EXPECTS(std::find(std::begin(numtype_chars), std::end(numtype_chars), kind_c) !=
                 std::end(numtype_chars),
               "Invalid typestr: unrecognized kind %c",
               kind_c);
  unsigned int itemsize = std::stoul(itemsize_s);
  return {byteorder_c, kind_c, itemsize};
}
// Write the .npy magic string ("\x93NUMPY") followed by format version 1.0.
inline void write_magic(std::ostream& os)
{
  os.write(RAFT_NUMPY_MAGIC_STRING, RAFT_NUMPY_MAGIC_STRING_LENGTH);
  RAFT_EXPECTS(os.good(), "Error writing magic string");
  // Use version 1.0
  os.put(1);
  os.put(0);
  RAFT_EXPECTS(os.good(), "Error writing magic string");
}
// Read and validate the magic string plus version bytes; only format 1.0 is
// accepted.
inline void read_magic(std::istream& is)
{
  char magic_buf[RAFT_NUMPY_MAGIC_STRING_LENGTH + 2] = {0};
  is.read(magic_buf, RAFT_NUMPY_MAGIC_STRING_LENGTH + 2);
  RAFT_EXPECTS(is.good(), "Error reading magic string");
  RAFT_EXPECTS(std::memcmp(magic_buf, RAFT_NUMPY_MAGIC_STRING, RAFT_NUMPY_MAGIC_STRING_LENGTH) == 0,
               "The given stream does not have a valid NumPy format.");
  std::uint8_t version_major = magic_buf[RAFT_NUMPY_MAGIC_STRING_LENGTH];
  std::uint8_t version_minor = magic_buf[RAFT_NUMPY_MAGIC_STRING_LENGTH + 1];
  RAFT_EXPECTS(version_major == 1 && version_minor == 0,
               "Unsupported NumPy version: %d.%d",
               version_major,
               version_minor);
}
// Write the full .npy preamble: magic, version, little-endian HEADER_LEN, and
// the header dict padded with spaces (plus a final '\n') so that the data
// section starts on a 64-byte boundary.
inline void write_header(std::ostream& os, const header_t& header)
{
  std::string header_dict = header_to_string(header);
  // magic(6) + version(2) + HEADER_LEN(2) + dict + newline(1)
  std::size_t preamble_length = RAFT_NUMPY_MAGIC_STRING_LENGTH + 2 + 2 + header_dict.length() + 1;
  RAFT_EXPECTS(preamble_length < 255 * 255, "Header too long");
  // Enforce 64-byte alignment
  std::size_t padding_len = 64 - preamble_length % 64;
  std::string padding(padding_len, ' ');
  write_magic(os);
  // Write header length (as 16-bit little-endian, per format version 1.0)
  std::uint8_t header_len_le16[2];
  std::uint16_t header_len =
    static_cast<std::uint16_t>(header_dict.length() + padding.length() + 1);
  header_len_le16[0] = (header_len >> 0) & 0xff;
  header_len_le16[1] = (header_len >> 8) & 0xff;
  os.put(header_len_le16[0]);
  os.put(header_len_le16[1]);
  RAFT_EXPECTS(os.good(), "Error writing HEADER_LEN");
  os << header_dict << padding << "\n";
  RAFT_EXPECTS(os.good(), "Error writing header dict");
}
// Read the raw header-dict bytes that follow the magic/version prefix,
// using the little-endian HEADER_LEN field to size the read.
inline std::string read_header_bytes(std::istream& is)
{
  read_magic(is);
  // Read header length
  std::uint8_t header_len_le16[2];
  is.read(reinterpret_cast<char*>(header_len_le16), 2);
  RAFT_EXPECTS(is.good(), "Error while reading HEADER_LEN");
  const std::uint32_t header_length = (header_len_le16[0] << 0) | (header_len_le16[1] << 8);
  std::vector<char> header_bytes(header_length);
  is.read(header_bytes.data(), header_length);
  RAFT_EXPECTS(is.good(), "Error while reading the header");
  return std::string(header_bytes.data(), header_length);
}
// Read and parse a complete .npy header into a header_t, rejecting data whose
// byte order does not match this machine's.
inline header_t read_header(std::istream& is)
{
  std::string header_bytes = read_header_bytes(is);
  // remove trailing newline
  RAFT_EXPECTS(header_bytes.back() == '\n', "Invalid NumPy header");
  header_bytes.pop_back();
  // parse the header dict
  auto header_dict = parse_pydict(header_bytes, {"descr", "fortran_order", "shape"});
  dtype_t descr = parse_descr(parse_pystring(header_dict["descr"]));
  bool fortran_order = parse_pybool(header_dict["fortran_order"]);
  std::vector<ndarray_len_t> shape;
  auto shape_tup_str = parse_pytuple(header_dict["shape"]);
  for (const auto& e : shape_tup_str) {
    shape.push_back(static_cast<ndarray_len_t>(std::stoul(e)));
  }
  // Cross-endian loading is not supported; fail with a clear explanation.
  RAFT_EXPECTS(
    descr.byteorder == RAFT_NUMPY_HOST_ENDIAN_CHAR || descr.byteorder == RAFT_NUMPY_NO_ENDIAN_CHAR,
    "The mdspan was serialized on a %s machine but you're attempting to load it on "
    "a %s machine. This use case is not currently supported.",
    (RAFT_SYSTEM_LITTLE_ENDIAN ? "big-endian" : "little-endian"),
    (RAFT_SYSTEM_LITTLE_ENDIAN ? "little-endian" : "big-endian"));
  return {descr, fortran_order, shape};
}
// Serialize a host-accessible mdspan to `os` in .npy format: header first,
// then the raw element bytes. Only contiguous (row- or column-major) layouts
// are supported, so the data can be written as a single block.
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void serialize_host_mdspan(
  std::ostream& os,
  const raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  static_assert(std::is_same_v<LayoutPolicy, raft::layout_c_contiguous> ||
                  std::is_same_v<LayoutPolicy, raft::layout_f_contiguous>,
                "The serializer only supports row-major and column-major layouts");
  using obj_t = raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
  const auto dtype = get_numpy_dtype<ElementType>();
  // NumPy expresses column-major as fortran_order=True.
  const bool fortran_order = std::is_same_v<LayoutPolicy, raft::layout_f_contiguous>;
  std::vector<ndarray_len_t> shape;
  for (typename obj_t::rank_type i = 0; i < obj.rank(); ++i) {
    shape.push_back(obj.extent(i));
  }
  const header_t header = {dtype, fortran_order, shape};
  write_header(os, header);
  // For contiguous layouts, size() == product of dimensions
  os.write(reinterpret_cast<const char*>(obj.data_handle()), obj.size() * sizeof(ElementType));
  RAFT_EXPECTS(os.good(), "Error writing content of mdspan");
}
// Serialize a single scalar as a zero-dimensional .npy array (shape "()").
template <typename T>
inline void serialize_scalar(std::ostream& os, const T& value)
{
  const auto dtype = get_numpy_dtype<T>();
  const bool fortran_order = false;
  const std::vector<ndarray_len_t> shape{};
  const header_t header = {dtype, fortran_order, shape};
  write_header(os, header);
  os.write(reinterpret_cast<const char*>(&value), sizeof(T));
  RAFT_EXPECTS(os.good(), "Error serializing a scalar");
}
// Deserialize .npy data from `is` into a pre-allocated host-accessible mdspan.
// The stream's dtype, layout (fortran_order), rank, and per-dimension extents
// must all match `obj` exactly; mismatches raise via RAFT_EXPECTS.
template <typename ElementType, typename Extents, typename LayoutPolicy, typename AccessorPolicy>
inline void deserialize_host_mdspan(
  std::istream& is,
  const raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>& obj)
{
  static_assert(std::is_same_v<LayoutPolicy, raft::layout_c_contiguous> ||
                  std::is_same_v<LayoutPolicy, raft::layout_f_contiguous>,
                "The serializer only supports row-major and column-major layouts");
  using obj_t = raft::host_mdspan<ElementType, Extents, LayoutPolicy, AccessorPolicy>;
  // Check if given dtype and fortran_order are compatible with the mdspan
  const auto expected_dtype = get_numpy_dtype<ElementType>();
  const bool expected_fortran_order = std::is_same_v<LayoutPolicy, raft::layout_f_contiguous>;
  header_t header = read_header(is);
  RAFT_EXPECTS(header.dtype == expected_dtype,
               "Expected dtype %s but got %s instead",
               header.dtype.to_string().c_str(),
               expected_dtype.to_string().c_str());
  RAFT_EXPECTS(header.fortran_order == expected_fortran_order,
               "Wrong matrix layout; expected %s but got a different layout",
               (expected_fortran_order ? "Fortran layout" : "C layout"));
  // Check if dimensions are correct
  RAFT_EXPECTS(obj.rank() == header.shape.size(),
               "Incorrect rank: expected %zu but got %zu",
               obj.rank(),
               header.shape.size());
  for (typename obj_t::rank_type i = 0; i < obj.rank(); ++i) {
    RAFT_EXPECTS(static_cast<ndarray_len_t>(obj.extent(i)) == header.shape[i],
                 "Incorrect dimension: expected %zu but got %zu",
                 static_cast<ndarray_len_t>(obj.extent(i)),
                 header.shape[i]);
  }
  // For contiguous layouts, size() == product of dimensions
  is.read(reinterpret_cast<char*>(obj.data_handle()), obj.size() * sizeof(ElementType));
  RAFT_EXPECTS(is.good(), "Error while reading mdspan content");
}
// Deserialize a scalar previously written by serialize_scalar: validates the
// dtype and the zero-dimensional shape before reading sizeof(T) raw bytes.
template <typename T>
inline T deserialize_scalar(std::istream& is)
{
  // Check if dtype is correct
  const auto expected_dtype = get_numpy_dtype<T>();
  header_t header = read_header(is);
  RAFT_EXPECTS(header.dtype == expected_dtype,
               "Expected dtype %s but got %s instead",
               header.dtype.to_string().c_str(),
               expected_dtype.to_string().c_str());
  // Check if dimensions are correct; shape should be ()
  RAFT_EXPECTS(header.shape.empty(), "Incorrect rank: expected 0 but got %zu", header.shape.size());
  T value;
  is.read(reinterpret_cast<char*>(&value), sizeof(T));
  RAFT_EXPECTS(is.good(), "Error while deserializing scalar");
  return value;
}
} // end namespace numpy_serializer
} // end namespace detail
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/logger.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in future releases." \
" Please use the <raft/core/logger.hpp> version instead.")
#include <raft/core/logger.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/copy.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdio>
#include <raft/core/cuda_support.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resource/stream_view.hpp>
#include <raft/core/resources.hpp>
#include <type_traits>
#ifndef RAFT_DISABLE_CUDA
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#ifdef __CUDACC__
#include <raft/util/cuda_dev_essentials.cuh>
#endif
#endif
namespace raft {
namespace detail {
// Primary template: fallback selected when B is false or the two types are
// not both mdspans. No copy codepath is available in that case.
template <bool B,
          typename DstType = void,
          typename SrcType = void,
          typename T = void,
          typename = void>
struct mdspan_copyable : std::false_type {
  auto static constexpr const custom_kernel_allowed = false;
  auto static constexpr const custom_kernel_not_allowed = false;
};
/*
 * A helper struct used to determine whether one mdspan type can be copied to
 * another and if so how
 *
 * The members below are evaluated purely at compile time (except
 * check_for_unique_dst) and together select one of the copy codepaths:
 * std::copy, SIMD, raft copy (cudaMemcpy-style), cuBLAS geam, or a custom
 * device kernel.
 */
template <typename DstType, typename SrcType, typename T>
struct mdspan_copyable<true,
                       DstType,
                       SrcType,
                       T,
                       std::enable_if_t<std::conjunction_v<
                         std::bool_constant<is_mdspan_v<std::remove_reference_t<DstType>>>,
                         std::bool_constant<is_mdspan_v<std::remove_reference_t<SrcType>>>>>> {
  using dst_type = std::remove_reference_t<DstType>;
  using src_type = std::remove_reference_t<SrcType>;
  // Extents properties
  using dst_extents_type = typename dst_type::extents_type;
  using src_extents_type = typename src_type::extents_type;
  // Use the wider of the two index types for any intermediate indexing.
  using index_type =
    std::conditional_t<(std::numeric_limits<typename dst_extents_type::index_type>::max() >
                        std::numeric_limits<typename src_extents_type::index_type>::max()),
                       typename dst_extents_type::index_type,
                       typename src_extents_type::index_type>;
  // Dtype properties
  using dst_value_type = typename dst_type::value_type;
  using src_value_type = typename src_type::value_type;
  using dst_element_type = typename dst_type::element_type;
  using src_element_type = typename src_type::element_type;
  auto static constexpr const same_dtype = std::is_same_v<dst_value_type, src_value_type>;
  // "Compatible" means element-wise assignment compiles, even across dtypes.
  auto static constexpr const compatible_dtype =
    std::is_assignable_v<typename dst_type::reference, typename src_type::reference>;
  auto static constexpr const dst_float = std::is_same_v<dst_value_type, float>;
  auto static constexpr const src_float = std::is_same_v<src_value_type, float>;
  auto static constexpr const dst_double = std::is_same_v<dst_value_type, double>;
  auto static constexpr const src_double = std::is_same_v<src_value_type, double>;
  auto static constexpr const both_float = dst_float && src_float;
  auto static constexpr const both_double = dst_double && src_double;
  auto static constexpr const both_float_or_both_double = both_float || both_double;
  // Ranks
  auto static constexpr const dst_rank = dst_extents_type::rank();
  auto static constexpr const src_rank = src_extents_type::rank();
  auto static constexpr const compatible_rank = (dst_rank == src_rank);
  auto static constexpr const has_vector_rank = (dst_rank == 1);
  auto static constexpr const has_matrix_rank = (dst_rank == 2);
  // Layout properties
  using dst_layout_type = typename dst_type::layout_type;
  using src_layout_type = typename src_type::layout_type;
  auto static constexpr const same_layout = std::is_same_v<dst_layout_type, src_layout_type>;
  // Runtime guard: parallel copies write each destination element exactly
  // once, which requires a unique (non-aliasing) destination mapping.
  auto static check_for_unique_dst(dst_type dst)
  {
    if constexpr (!dst_type::is_always_unique()) {
      RAFT_EXPECTS(dst.is_unique(), "Destination mdspan must be unique for parallelized copies");
    }
  }
  auto static constexpr const src_contiguous =
    std::disjunction_v<std::is_same<src_layout_type, layout_c_contiguous>,
                       std::is_same<src_layout_type, layout_f_contiguous>>;
  auto static constexpr const dst_contiguous =
    std::disjunction_v<std::is_same<dst_layout_type, layout_c_contiguous>,
                       std::is_same<dst_layout_type, layout_f_contiguous>>;
  auto static constexpr const both_contiguous = src_contiguous && dst_contiguous;
  // Rank-1 contiguous spans have identical memory order regardless of layout
  // tag, so they count as "same underlying layout" too.
  auto static constexpr const same_underlying_layout =
    std::disjunction_v<std::bool_constant<same_layout>,
                       std::bool_constant<has_vector_rank && both_contiguous>>;
  // Layout for intermediate tile if copying through custom kernel
  using tile_layout_type =
    std::conditional_t<src_contiguous,
                       src_layout_type,
                       std::conditional_t<dst_contiguous, dst_layout_type, layout_c_contiguous>>;
  // Accessibility
  auto static constexpr const dst_device_accessible = is_device_mdspan_v<dst_type>;
  auto static constexpr const src_device_accessible = is_device_mdspan_v<src_type>;
  auto static constexpr const both_device_accessible =
    dst_device_accessible && src_device_accessible;
  auto static constexpr const dst_host_accessible = is_host_mdspan_v<dst_type>;
  auto static constexpr const src_host_accessible = is_host_mdspan_v<src_type>;
  auto static constexpr const both_host_accessible = dst_host_accessible && src_host_accessible;
  // Allowed copy codepaths
  auto static constexpr const can_use_host = both_host_accessible;
#if (defined(__AVX__) || defined(__SSE__) || defined(__ARM_NEON))
  // TODO(wphicks): Following should be only necessary restrictions. Test if
  // perf actually improves once fully implemented.
  // auto static constexpr const can_use_simd = can_use_host && both_contiguous &&
  // both_float_or_both_double;
  auto static constexpr const can_use_simd =
    can_use_host && both_contiguous && both_float && has_matrix_rank;
#else
  auto static constexpr const can_use_simd = false;
#endif
  // std::copy works only when no layout permutation or dtype conversion is
  // needed beyond plain element assignment.
  auto static constexpr const can_use_std_copy =
    std::conjunction_v<std::bool_constant<can_use_host>,
                       std::bool_constant<compatible_dtype>,
                       std::bool_constant<both_contiguous>,
                       std::bool_constant<same_underlying_layout>>;
  auto static constexpr const can_use_raft_copy =
    std::conjunction_v<std::bool_constant<CUDA_ENABLED>,
                       std::bool_constant<same_dtype>,
                       std::bool_constant<both_contiguous>,
                       std::bool_constant<same_underlying_layout>>;
  // Do we need intermediate storage on device in order to perform
  // non-trivial layout or dtype conversions after copying source from host or
  // before copying converted results back to host?
  auto static constexpr const requires_intermediate =
    !both_host_accessible && !both_device_accessible && !can_use_raft_copy;
  auto static constexpr const use_intermediate_dst =
    std::conjunction_v<std::bool_constant<requires_intermediate>,
                       std::bool_constant<src_device_accessible>>;
  auto static constexpr const use_intermediate_src =
    std::conjunction_v<std::bool_constant<requires_intermediate>,
                       std::bool_constant<!use_intermediate_dst>>;
  auto static constexpr const can_use_device =
    std::conjunction_v<std::bool_constant<CUDA_ENABLED>,
                       std::disjunction<std::bool_constant<both_device_accessible>,
                                        std::bool_constant<requires_intermediate>,
                                        std::bool_constant<can_use_raft_copy>>>;
  // cuBLAS handles the 2D float/double transpose-copy case.
  auto static constexpr const can_use_cublas =
    std::conjunction_v<std::bool_constant<can_use_device>,
                       std::bool_constant<compatible_dtype>,
                       std::bool_constant<both_contiguous>,
                       std::bool_constant<!same_underlying_layout>,
                       std::bool_constant<has_matrix_rank>,
                       std::bool_constant<both_float_or_both_double>>;
  // A custom kernel is the device-side fallback when neither raft copy nor
  // cuBLAS applies.
  auto static constexpr const custom_kernel_allowed =
    std::conjunction_v<std::bool_constant<can_use_device>,
                       std::bool_constant<!(can_use_raft_copy || can_use_cublas)>>;
  auto static constexpr const custom_kernel_not_allowed = !custom_kernel_allowed;
  auto static constexpr const custom_kernel_required =
    std::conjunction_v<std::bool_constant<!can_use_host>,
                       std::bool_constant<!(can_use_raft_copy || can_use_cublas)>>;
  // Viable overload?
  auto static constexpr const value =
    std::conjunction_v<std::bool_constant<is_mdspan_v<src_type>>,
                       std::bool_constant<is_mdspan_v<dst_type>>,
                       std::bool_constant<can_use_host || can_use_device>>;
  using type = std::enable_if_t<value, T>;
};
/** SFINAE helper: enables an overload only when SrcType can be copied into
 * DstType by some supported path. */
template <typename DstType, typename SrcType, typename T = void>
using mdspan_copyable_t = typename mdspan_copyable<true, DstType, SrcType, T>::type;
/** True iff copying SrcType into DstType is supported by at least one path
 * (host copy, raft::copy, cuBLAS, or the custom kernel). */
template <typename DstType, typename SrcType>
auto static constexpr const mdspan_copyable_v =
  mdspan_copyable<true, DstType, SrcType, void>::value;
/** True iff the copy may be performed with the custom device kernel. */
template <typename DstType, typename SrcType>
auto static constexpr const mdspan_copyable_with_kernel_v =
  mdspan_copyable<true, DstType, SrcType, void>::custom_kernel_allowed;
/** True iff the copy does not use the custom device kernel. */
template <typename DstType, typename SrcType>
auto static constexpr const mdspan_copyable_not_with_kernel_v =
  mdspan_copyable<true, DstType, SrcType, void>::custom_kernel_not_allowed;
/** SFINAE helper: enables an overload only for kernel-based copies. */
template <typename DstType, typename SrcType, typename T = void>
using mdspan_copyable_with_kernel_t =
  std::enable_if_t<mdspan_copyable_with_kernel_v<DstType, SrcType>, T>;
/** SFINAE helper: enables an overload only for non-kernel copies. */
template <typename DstType, typename SrcType, typename T = void>
using mdspan_copyable_not_with_kernel_t =
  std::enable_if_t<mdspan_copyable_not_with_kernel_v<DstType, SrcType>, T>;
#ifdef __CUDACC__
// Edge length (in elements) of the square shared-memory tile used by the
// copy kernel; each block processes one tile of
// mdspan_copy_tile_dim x mdspan_copy_tile_dim elements at a time.
auto static constexpr const mdspan_copy_tile_dim = 32;
auto static constexpr const mdspan_copy_tile_elems = mdspan_copy_tile_dim * mdspan_copy_tile_dim;

// Helper struct to work around lack of CUDA-native std::apply
template <typename IdxType, IdxType... Idx>
struct index_sequence {};

// Recursively builds index_sequence<IdxType, 0, 1, ..., N-1>.
template <typename IdxType, IdxType N, IdxType... Idx>
struct make_index_sequence
  : std::conditional_t<N == IdxType{},
                       index_sequence<IdxType, Idx...>,
                       make_index_sequence<IdxType, N - IdxType{1}, N - IdxType{1}, Idx...>> {};
/* template <typename LambdaT, typename ContainerT, typename IdxT, IdxT... Idx>
__host__ __device__ decltype(auto) apply(LambdaT&& lambda, ContainerT&& args, index_sequence<IdxT,
Idx...>)
{
return lambda(args[Idx]...);
}
template <typename LambdaT, typename ContainerT, typename IdxT, IdxT size>
__host__ __device__ decltype(auto) apply(LambdaT&& lambda, ContainerT&& args)
{
return apply(std::forward<LambdaT>(lambda), std::forward<ContainerT>(args),
make_index_sequence<IdxT, size>{});
} */
/*
* Given an mdspan and an array of indices, return a reference to the
* indicated element.
*/
/* Expand the index array into a parameter pack and invoke the mdspan's
 * multidimensional operator() to obtain a reference to the element. */
template <typename MdspanType, typename IdxType, IdxType... Idx>
__device__ decltype(auto) get_mdspan_elem(MdspanType md,
                                          IdxType const* indices,
                                          index_sequence<IdxType, Idx...>)
{
  return md(indices[Idx]...);
}

/* Convenience overload: builds the index_sequence matching the mdspan's
 * rank before dispatching to the pack-expansion overload above. */
template <typename MdspanType, typename IdxType>
__device__ decltype(auto) get_mdspan_elem(MdspanType md, IdxType const* indices)
{
  return get_mdspan_elem(
    md, indices, make_index_sequence<IdxType, MdspanType::extents_type::rank()>{});
}
/* Advance old_indices forward by the number of mdspan elements specified
* by increment. Store the result in indices. Return true if the new
* indices are valid for the input mdspan.
*/
template <typename MdspanType, typename IdxType, typename IncrType>
__device__ auto increment_indices(IdxType* indices,
                                  MdspanType const& md,
                                  IdxType const* old_indices,
                                  IdxType const* index_strides,
                                  IncrType increment)
{
  // Fold old_indices into a single flat element offset (in units of the
  // given index_strides) and add the requested increment on top.
#pragma unroll
  for (auto i = typename MdspanType::extents_type::rank_type{}; i < md.rank(); ++i) {
    increment += index_strides[i] * old_indices[i];
  }
  // Re-expand the flat offset back into per-dimension indices.
#pragma unroll
  for (auto i = typename MdspanType::extents_type::rank_type{}; i < md.rank(); ++i) {
    // Iterate through dimensions in order from slowest to fastest varying for
    // layout_right and layout_left. Otherwise, just iterate through dimensions
    // in order.
    //
    // TODO(wphicks): It is possible to always iterate through dimensions in
    // the slowest to fastest order. Consider this or at minimum expanding to
    // padded layouts.
    auto const real_index = [](auto ind) {
      if constexpr (std::is_same_v<typename MdspanType::layout_type, layout_f_contiguous>) {
        return MdspanType::rank() - ind - 1;
      } else {
        return ind;
      }
    }(i);

    auto cur_index = IdxType{};
    // Peel off as many whole strides of this dimension as fit in the
    // remaining offset, without running past the dimension's extent.
    while (cur_index < md.extent(real_index) - 1 && increment >= index_strides[real_index]) {
      increment -= index_strides[real_index];
      ++cur_index;
    }
    indices[real_index] = cur_index;
  }
  // A zero remainder means the advanced position still lies inside the
  // mdspan; any leftover indicates we walked off the end.
  return increment == IdxType{};
}
/*
* WARNING: This kernel _must_ be launched with mdspan_copy_tile_dim x
* mdspan_copy_tile_dim threads per block. This restriction allows for
* additional optimizations at the expense of generalized launch
* parameters.
*/
template <typename DstType, typename SrcType>
RAFT_KERNEL mdspan_copy_kernel(DstType dst, SrcType src)
{
  using config = mdspan_copyable<true, DstType, SrcType>;

  // An intermediate storage location for the data to be copied.
  // The +1 column pad avoids shared-memory bank conflicts on transposed
  // accesses.
  __shared__ typename config::dst_value_type tile[mdspan_copy_tile_dim][mdspan_copy_tile_dim + 1];

  // Compute the cumulative product of extents in order from fastest to
  // slowest varying extent
  typename config::index_type index_strides[config::dst_rank];
  auto cur_stride = typename config::index_type{1};
#pragma unroll
  for (auto i = typename SrcType::extents_type::rank_type{}; i < config::src_rank; ++i) {
    // Iterate through dimensions in order from fastest to slowest varying
    auto const real_index = [](auto ind) {
      if constexpr (std::is_same_v<typename config::src_layout_type, layout_c_contiguous>) {
        return config::src_rank - ind - 1;
      } else {
        return ind;
      }
    }(i);

    index_strides[real_index] = cur_stride;
    cur_stride *= src.extent(real_index);
  }

  // The index of the first element in the mdspan which will be copied via
  // the current tile for this block.
  typename config::index_type tile_offset[config::dst_rank] = {0};
  typename config::index_type cur_indices[config::dst_rank];
  auto valid_tile = increment_indices(
    tile_offset, src, tile_offset, index_strides, blockIdx.x * mdspan_copy_tile_elems);

  while (valid_tile) {
    // Swap x/y roles for f-contiguous sources so reads are coalesced.
    auto tile_read_x = std::is_same_v<typename config::src_layout_type, layout_f_contiguous>
                         ? threadIdx.x
                         : threadIdx.y;
    auto tile_read_y = std::is_same_v<typename config::src_layout_type, layout_f_contiguous>
                         ? threadIdx.y
                         : threadIdx.x;
    auto valid_index = increment_indices(cur_indices,
                                         src,
                                         tile_offset,
                                         index_strides,
                                         tile_read_x * mdspan_copy_tile_dim + tile_read_y);
    if constexpr (config::same_underlying_layout || !config::dst_contiguous) {
      // No transpose needed: read and write through the same indices.
      if (valid_index) {
        tile[tile_read_x][tile_read_y] = get_mdspan_elem(src, cur_indices);
        get_mdspan_elem(dst, cur_indices) = tile[tile_read_x][tile_read_y];
      }
    } else {
      // Transposing path: stage the tile in shared memory, then write it
      // back with swapped thread roles so the destination writes coalesce.
      if (valid_index) { tile[tile_read_x][tile_read_y] = get_mdspan_elem(src, cur_indices); }
      __syncthreads();
      valid_index = increment_indices(cur_indices,
                                      src,
                                      tile_offset,
                                      index_strides,
                                      tile_read_y * mdspan_copy_tile_dim + tile_read_x);
      if (valid_index) { get_mdspan_elem(dst, cur_indices) = tile[tile_read_y][tile_read_x]; }
      __syncthreads();
    }
    // NOTE(review): this grid-stride advance uses blockDim.x (== 32) rather
    // than gridDim.x. It appears correct only because the launch site caps
    // the grid at 32 blocks and otherwise launches exactly one tile per
    // block — confirm before changing launch parameters.
    valid_tile = increment_indices(
      tile_offset, src, tile_offset, index_strides, blockDim.x * mdspan_copy_tile_elems);
  }
}
#endif
/**
 * @brief Copy every element of src into dst, converting between layouts,
 * value types, and memory spaces as required.
 *
 * Dispatches at compile time to the cheapest viable strategy: a device-side
 * intermediate buffer, raft::copy, a cuBLAS geam transposing copy, the
 * custom device kernel, std::copy, or an element-wise host loop.
 *
 * @param res resources used to obtain the CUDA stream / cuBLAS handle
 * @param dst destination mdspan; extents must match src exactly
 * @param src source mdspan
 */
template <typename DstType, typename SrcType>
mdspan_copyable_t<DstType, SrcType> copy(resources const& res, DstType&& dst, SrcType&& src)
{
  using config = mdspan_copyable<true, DstType, SrcType>;
  // Shapes must match exactly; layout, dtype, and memory type may differ.
  for (auto i = std::size_t{}; i < config::src_rank; ++i) {
    RAFT_EXPECTS(src.extent(i) == dst.extent(i), "Must copy between mdspans of the same shape");
  }

  if constexpr (config::use_intermediate_src) {
#ifndef RAFT_DISABLE_CUDA
    // Copy to intermediate source on device, then perform necessary
    // changes in layout on device, directly into final destination
    using mdarray_t = device_mdarray<typename config::src_value_type,
                                     typename config::src_extents_type,
                                     typename config::src_layout_type>;
    auto intermediate = mdarray_t(res,
                                  typename mdarray_t::mapping_type{src.extents()},
                                  typename mdarray_t::container_policy_type{});
    detail::copy(res, intermediate.view(), src);
    detail::copy(res, dst, intermediate.view());
#else
    // Not possible to reach this due to enable_ifs. Included for safety.
    throw(raft::non_cuda_build_error("Copying to device in non-CUDA build"));
#endif
  } else if constexpr (config::use_intermediate_dst) {
#ifndef RAFT_DISABLE_CUDA
    // Perform necessary changes in layout on device, then copy to final
    // destination on host
    using mdarray_t = device_mdarray<typename config::dst_value_type,
                                     typename config::dst_extents_type,
                                     typename config::dst_layout_type>;
    auto intermediate = mdarray_t(res,
                                  typename mdarray_t::mapping_type{dst.extents()},
                                  typename mdarray_t::container_policy_type{});
    detail::copy(res, intermediate.view(), src);
    detail::copy(res, dst, intermediate.view());
#else
    throw(raft::non_cuda_build_error("Copying from device in non-CUDA build"));
#endif
  } else if constexpr (config::can_use_raft_copy) {
#ifndef RAFT_DISABLE_CUDA
    // Same dtype, same contiguous layout: a flat element-count copy on the
    // handle's stream suffices.
    raft::copy(dst.data_handle(), src.data_handle(), dst.size(), resource::get_cuda_stream(res));
#else
    // Not possible to reach this due to enable_ifs. Included for safety.
    throw(raft::non_cuda_build_error("Copying to from or on device in non-CUDA build"));
#endif
  } else if constexpr (config::can_use_cublas) {
#ifndef RAFT_DISABLE_CUDA
    // geam with alpha=1, beta=0 performs a transposing copy between the two
    // 2D layouts.
    auto constexpr const alpha = typename std::remove_reference_t<DstType>::value_type{1};
    auto constexpr const beta = typename std::remove_reference_t<DstType>::value_type{0};
    if constexpr (std::is_same_v<typename config::dst_layout_type, layout_c_contiguous>) {
      CUBLAS_TRY(linalg::detail::cublasgeam(resource::get_cublas_handle(res),
                                            CUBLAS_OP_T,
                                            CUBLAS_OP_N,
                                            dst.extent(1),
                                            dst.extent(0),
                                            &alpha,
                                            src.data_handle(),
                                            src.extent(0),
                                            &beta,
                                            dst.data_handle(),
                                            dst.extent(1),
                                            dst.data_handle(),
                                            dst.extent(1),
                                            resource::get_cuda_stream(res)));
    } else {
      CUBLAS_TRY(linalg::detail::cublasgeam(resource::get_cublas_handle(res),
                                            CUBLAS_OP_T,
                                            CUBLAS_OP_N,
                                            dst.extent(0),
                                            dst.extent(1),
                                            &alpha,
                                            src.data_handle(),
                                            src.extent(1),
                                            &beta,
                                            dst.data_handle(),
                                            dst.extent(0),
                                            dst.data_handle(),
                                            dst.extent(0),
                                            resource::get_cuda_stream(res)));
    }
#else
    // Not possible to reach this due to enable_ifs. Included for safety.
    throw(raft::non_cuda_build_error("Copying to from or on device in non-CUDA build"));
#endif
  } else if constexpr (config::custom_kernel_allowed) {
#ifdef __CUDACC__
    config::check_for_unique_dst(dst);
    auto const blocks = std::min(
      // This maximum is somewhat arbitrary. Could query the device to see
      // how many blocks we could reasonably allow, but this is probably
      // sufficient considering that this kernel will likely overlap with
      // real computations for most use cases.
      typename config::index_type{32},
      raft::ceildiv(typename config::index_type(dst.size()),
                    typename config::index_type(mdspan_copy_tile_elems)));
    // The kernel requires exactly tile_dim x tile_dim threads per block.
    auto constexpr const threads = dim3{mdspan_copy_tile_dim, mdspan_copy_tile_dim, 1};
    mdspan_copy_kernel<<<blocks, threads, 0, resource::get_cuda_stream(res)>>>(dst, src);
#else
    // Should never actually reach this because of enable_ifs. Included for
    // safety.
    RAFT_FAIL(
      "raft::copy called in a way that requires custom kernel. Please use "
      "raft/core/copy.cuh and include the header in a .cu file");
#endif
  } else if constexpr (config::can_use_std_copy) {
    // Both host-accessible, compatible dtype, same contiguous layout.
    std::copy(src.data_handle(), src.data_handle() + dst.size(), dst.data_handle());
  } else {
    // Host-side element-wise fallback for mismatched layouts/dtypes.
    // TODO(wphicks): Make the following cache-oblivious and add SIMD support
    auto indices = std::array<typename config::index_type, config::dst_rank>{};
    for (auto i = std::size_t{}; i < dst.size(); ++i) {
      if (i != 0) {
        if constexpr (std::is_same_v<typename config::src_layout_type, layout_c_contiguous>) {
          // For layout_right/layout_c_contiguous, we iterate over the
          // rightmost extent fastest
          auto dim = config::src_rank - 1;
          while ((++indices[dim]) == src.extent(dim)) {
            indices[dim] = typename config::index_type{};
            --dim;
          }
        } else {
          // For layout_left/layout_f_contiguous (and currently all other
          // layouts), we iterate over the leftmost extent fastest. The
          // cache-oblivious implementation should work through dimensions in
          // order of increasing stride.
          auto dim = std::size_t{};
          while ((++indices[dim]) == src.extent(dim)) {
            indices[dim] = typename config::index_type{};
            ++dim;
          }
        }
      }
      std::apply(dst, indices) = std::apply(src, indices);
    }
  }
}
} // namespace detail
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/nvtx.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/cuda_stream_view.hpp>
#ifdef NVTX_ENABLED
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <mutex>
#include <nvtx3/nvToolsExt.h>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>
namespace raft::common::nvtx::detail {
/**
* @brief An internal struct to store associated state with the color
* generator
*/
struct color_gen_state {
  /** collection of all tagged colors generated so far */
  static inline std::unordered_map<std::string, uint32_t> all_colors_;
  /** mutex for accessing the above map; every read or write of all_colors_
   * must hold this lock */
  static inline std::mutex map_mutex_;

  /** saturation */
  static inline constexpr float kS = 0.9f;
  /** value */
  static inline constexpr float kV = 0.85f;
  /** golden ratio */
  static inline constexpr float kPhi = 1.61803f;
  /** inverse golden ratio (used to spread successive hues evenly) */
  static inline constexpr float kInvPhi = 1.f / kPhi;
};
// all h, s, v are in range [0, 1]
// Ref: http://en.wikipedia.org/wiki/HSL_and_HSV#Converting_to_RGB
/**
 * @brief Convert an HSV color to a packed 32-bit ARGB value (alpha = 0xff).
 *
 * @param h hue in [0, 1] (values outside are treated as 0)
 * @param s saturation in [0, 1]
 * @param v value/brightness in [0, 1]
 * @return 0xAARRGGBB with AA fixed at 0xff
 */
inline auto hsv2rgb(float h, float s, float v) -> uint32_t
{
  uint32_t out = 0xff000000u;
  // Zero saturation means a pure grey at brightness v (the previous code
  // returned black here, discarding v entirely).
  if (s <= 0.0f) {
    auto const grey = uint32_t(v * 255.f);
    return out | (grey << 16) | (grey << 8) | grey;
  }
  // convert hue from [0, 1] range to [0, 360]
  float h_deg = h * 360.f;
  if (0.f > h_deg || h_deg >= 360.f) h_deg = 0.f;
  h_deg /= 60.f;
  int h_range = static_cast<int>(h_deg);
  float h_mod = h_deg - h_range;
  float x = v * (1.f - s);
  float y = v * (1.f - (s * h_mod));
  float z = v * (1.f - (s * (1.f - h_mod)));
  float r, g, b;
  switch (h_range) {
    case 0:
      r = v;
      g = z;
      b = x;
      break;
    case 1:
      r = y;
      g = v;
      b = x;
      break;
    case 2:
      r = x;
      g = v;
      b = z;
      break;
    case 3:
      r = x;
      g = y;
      b = v;
      break;
    case 4:
      r = z;
      g = x;
      b = v;
      break;
    case 5:
    default:
      r = v;
      g = x;
      b = y;
      break;
  }
  // Scale with 255 (not 256): a channel value of exactly 1.f must map to
  // 0xff; multiplying by 256 produced 0x100 and overflowed into the
  // neighboring byte (e.g. pure red corrupted the alpha channel).
  out |= (uint32_t(r * 255.f) << 16);
  out |= (uint32_t(g * 255.f) << 8);
  out |= uint32_t(b * 255.f);
  return out;
}
/**
* @brief Helper method to generate 'visually distinct' colors.
* Inspired from https://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically/
* However, if an associated tag is passed, it will look up in its history for
* any generated color against this tag and if found, just returns it, else
* generates a new color, assigns a tag to it and stores it for future usage.
* Such a thing is very useful for nvtx markers where the ranges associated
* with a specific tag should ideally get the same color for the purpose of
* visualizing it on nsight-systems timeline.
* @param tag look for any previously generated colors with this tag or
* associate the currently generated color with it
* @return returns 32b RGB integer with alpha channel set of 0xff
*/
inline auto generate_next_color(const std::string& tag) -> uint32_t
{
  // Serialize all access to the shared tag -> color cache.
  std::lock_guard<std::mutex> lk(color_gen_state::map_mutex_);
  auto const tagged = !tag.empty();
  if (tagged) {
    // Reuse the color previously assigned to this tag, if any.
    auto const pos = color_gen_state::all_colors_.find(tag);
    if (pos != color_gen_state::all_colors_.end()) { return pos->second; }
  }
  // Pick a random hue, then rotate by the inverse golden ratio (mod 1) so
  // successive colors stay visually distinct.
  auto hue = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
  hue += color_gen_state::kInvPhi;
  if (hue >= 1.f) { hue -= 1.f; }
  auto const color = hsv2rgb(hue, color_gen_state::kS, color_gen_state::kV);
  if (tagged) { color_gen_state::all_colors_[tag] = color; }
  return color;
}
/* Primary template: selected only when Domain lacks a static `name` member;
 * instantiating it fires the static_assert with a helpful message. */
template <typename Domain, typename = Domain>
struct domain_store {
  /* If `Domain::name` does not exist, this default instance is used and throws the error. */
  static_assert(sizeof(Domain) != sizeof(Domain),
                "Type used to identify a domain must contain a static member 'char const* name'");
  // Never reached; present only so both templates expose the same interface.
  static inline auto value() -> const nvtxDomainHandle_t { return nullptr; }
};

/* Specialization selected when Domain provides a static `char const* name`;
 * creates the NVTX domain once and caches the handle. */
template <typename Domain>
struct domain_store<
  Domain,
  /* Check if there exists `Domain::name` */
  std::enable_if_t<
    std::is_same<char const*, typename std::decay<decltype(Domain::name)>::type>::value,
    Domain>> {
  static inline auto value() -> const nvtxDomainHandle_t
  {
    // NB: static modifier ensures the domain is created only once
    static const nvtxDomainHandle_t kValue = nvtxDomainCreateA(Domain::name);
    return kValue;
  }
};
/* Open an NVTX range named `name` within Domain. The range color is derived
 * deterministically from the name, so equal names share a color on the
 * profiler timeline. Must be balanced by a pop_range<Domain>() call. */
template <typename Domain>
inline void push_range_name(const char* name)
{
  nvtxEventAttributes_t event_attrib = {0};
  event_attrib.version = NVTX_VERSION;
  event_attrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
  event_attrib.colorType = NVTX_COLOR_ARGB;
  event_attrib.color = generate_next_color(name);
  event_attrib.messageType = NVTX_MESSAGE_TYPE_ASCII;
  event_attrib.message.ascii = name;
  nvtxDomainRangePushEx(domain_store<Domain>::value(), &event_attrib);
}
/* Open an NVTX range whose name is printf-formatted from `format`/`args`.
 * With no args, `format` is used verbatim (no printf parsing). */
template <typename Domain, typename... Args>
inline void push_range(const char* format, Args... args)
{
  if constexpr (sizeof...(args) > 0) {
    // Dry run to size the buffer, then format for real.
    int length = std::snprintf(nullptr, 0, format, args...);
    assert(length >= 0);
    std::vector<char> buf(length + 1);
    std::snprintf(buf.data(), length + 1, format, args...);
    push_range_name<Domain>(buf.data());
  } else {
    push_range_name<Domain>(format);
  }
}
/* Close the most recently opened NVTX range in Domain. */
template <typename Domain>
inline void pop_range()
{
  nvtxDomainRangePop(domain_store<Domain>::value());
}
} // namespace raft::common::nvtx::detail
#else // NVTX_ENABLED
namespace raft::common::nvtx::detail {

/* No-op stub used when NVTX support is compiled out. Parameters are left
 * unnamed to avoid -Wunused-parameter warnings in consuming code. */
template <typename Domain, typename... Args>
inline void push_range(const char*, Args...)
{
}

/* No-op stub used when NVTX support is compiled out. */
template <typename Domain>
inline void pop_range()
{
}

}  // namespace raft::common::nvtx::detail
#endif // NVTX_ENABLED
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/core | rapidsai_public_repos/raft/cpp/include/raft/core/detail/macros.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// Define _RAFT_HAS_CUDA (to the CUDA compiler version) only when compiling
// with nvcc or another CUDA-aware compiler.
#ifndef _RAFT_HAS_CUDA
#if defined(__CUDACC__)
#define _RAFT_HAS_CUDA __CUDACC__
#endif
#endif

// Evaluate `condition` only in CUDA builds; in host-only builds the macro
// collapses to a fixed constant so the surrounding expression still compiles.
#if defined(_RAFT_HAS_CUDA)
#define CUDA_CONDITION_ELSE_TRUE(condition) condition
#define CUDA_CONDITION_ELSE_FALSE(condition) condition
#else
#define CUDA_CONDITION_ELSE_TRUE(condition) true
#define CUDA_CONDITION_ELSE_FALSE(condition) false
#endif

// Host/device/forceinline qualifiers that expand to their CUDA keywords in
// CUDA builds and to nothing (or plain `inline`) otherwise.
#ifndef _RAFT_HOST_DEVICE
#if defined(_RAFT_HAS_CUDA)
#define _RAFT_DEVICE __device__
#define _RAFT_HOST __host__
#define _RAFT_FORCEINLINE __forceinline__
#else
#define _RAFT_DEVICE
#define _RAFT_HOST
#define _RAFT_FORCEINLINE inline
#endif
#endif
#define _RAFT_HOST_DEVICE _RAFT_HOST _RAFT_DEVICE

// Annotation for functions callable from both host and device, always
// inlined.
#ifndef RAFT_INLINE_FUNCTION
#define RAFT_INLINE_FUNCTION _RAFT_HOST_DEVICE _RAFT_FORCEINLINE
#endif

// Annotation for device-only functions, always inlined.
#ifndef RAFT_DEVICE_INLINE_FUNCTION
#define RAFT_DEVICE_INLINE_FUNCTION _RAFT_DEVICE _RAFT_FORCEINLINE
#endif
// The RAFT_INLINE_CONDITIONAL is a conditional inline specifier that removes
// the inline specification when RAFT_COMPILED is defined.
//
// When RAFT_COMPILED is not defined, functions may be defined in multiple
// translation units and we do not want that to lead to linker errors.
//
// When RAFT_COMPILED is defined, this serves two purposes:
//
// 1. It triggers a multiple definition error message when memory_pool-inl.hpp
// (for instance) is accidentally included in multiple translation units.
//
// 2. We function definitions to be non-inline, because non-inline functions
// symbols are always exported in the object symbol table. For inline functions,
// the compiler may elide the external symbol, which results in linker errors.
#ifdef RAFT_COMPILED
#define RAFT_INLINE_CONDITIONAL
#else
#define RAFT_INLINE_CONDITIONAL inline
#endif // RAFT_COMPILED
// The RAFT_WEAK_FUNCTION specificies that:
//
// 1. A function may be defined in multiple translation units (like inline)
//
// 2. Must still emit an external symbol (unlike inline). This enables declaring
// a function signature in an `-ext` header and defining it in a source file.
//
// From
// https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#Common-Function-Attributes:
//
// "The weak attribute causes a declaration of an external symbol to be emitted
// as a weak symbol rather than a global."
#define RAFT_WEAK_FUNCTION __attribute__((weak))
// The RAFT_HIDDEN_FUNCTION specificies that the function will be hidden
// and therefore not callable by consumers of raft when compiled as
// a shared library.
//
// Hidden visibility also ensures that the linker doesn't de-duplicate the
// symbol across multiple `.so`. This allows multiple libraries to embed raft
// without issue
#define RAFT_HIDDEN_FUNCTION __attribute__((visibility("hidden")))
// The RAFT_KERNEL specificies that a kernel has hidden visibility
//
// Raft needs to ensure that the visibility of its __global__ function
// templates have hidden visibility ( default is weak visibility).
//
// When kernls have weak visibility it means that if two dynamic libraries
// both contain identical instantiations of a RAFT template, then the linker
// will discard one of the two instantiations and use only one of them.
//
// Do to unique requirements of how the CUDA works this de-deduplication
// can lead to the wrong kernels being called ( SM version being wrong ),
// silently no kernel being called at all, or cuda runtime errors being
// thrown.
//
// https://github.com/rapidsai/raft/issues/1722
#if defined(__CUDACC_RDC__)
#define RAFT_KERNEL RAFT_HIDDEN_FUNCTION __global__ void
#elif defined(_RAFT_HAS_CUDA)
#define RAFT_KERNEL static __global__ void
#else
#define RAFT_KERNEL static void
#endif
/**
* Some macro magic to remove optional parentheses of a macro argument.
* See https://stackoverflow.com/a/62984543
*/
#ifndef RAFT_DEPAREN_MAGICRAFT_DEPAREN_H1
// RAFT_DEPAREN((x)) and RAFT_DEPAREN(x) both expand to x. When X is
// parenthesized, `RAFT_DEPAREN_H1 X` invokes H1 and absorbs the parentheses;
// H3 then pastes the RAFT_DEPAREN_MAGIC prefix onto the leftover marker,
// which the final object-like macro maps back to nothing.
#define RAFT_DEPAREN(X) RAFT_DEPAREN_H2(RAFT_DEPAREN_H1 X)
#define RAFT_DEPAREN_H1(...) RAFT_DEPAREN_H1 __VA_ARGS__
#define RAFT_DEPAREN_H2(...) RAFT_DEPAREN_H3(__VA_ARGS__)
#define RAFT_DEPAREN_H3(...) RAFT_DEPAREN_MAGIC##__VA_ARGS__
#define RAFT_DEPAREN_MAGICRAFT_DEPAREN_H1
#endif

// Stringify the argument after macro expansion (the two-level indirection
// forces expansion before the # operator is applied).
#ifndef RAFT_STRINGIFY
#define RAFT_STRINGIFY_DETAIL(...) #__VA_ARGS__
#define RAFT_STRINGIFY(...) RAFT_STRINGIFY_DETAIL(__VA_ARGS__)
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/comms/std_comms.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/comms/comms.hpp>
#include <raft/comms/detail/std_comms.hpp>
#include <raft/core/resource/comms.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>

#include <nccl.h>
#include <ucp/api/ucp.h>

#include <cstring>
#include <iostream>
#include <memory>
namespace raft {
namespace comms {
using std_comms = detail::std_comms;
/**
* @defgroup std_comms_factory std_comms Factory functions
* @{
*/
/**
* Factory function to construct a RAFT NCCL communicator and inject it into a
* RAFT handle.
*
* @param handle raft::resources for injecting the comms
* @param nccl_comm initialized NCCL communicator to use for collectives
* @param num_ranks number of ranks in communicator clique
* @param rank rank of local instance
*
* @code{.cpp}
* #include <raft/comms/std_comms.hpp>
* #include <raft/core/device_mdarray.hpp>
*
* ncclComm_t nccl_comm;
* raft::raft::resources handle;
*
* build_comms_nccl_only(&handle, nccl_comm, 5, 0);
* ...
* const auto& comm = resource::get_comms(handle);
* auto gather_data = raft::make_device_vector<float>(handle, comm.get_size());
* ...
* comm.allgather((gather_data.data_handle())[comm.get_rank()],
* gather_data.data_handle(),
* 1,
* resource::get_cuda_stream(handle));
*
* comm.sync_stream(resource::get_cuda_stream(handle));
* @endcode
*/
// `inline` is required here: this function is defined in a header, and
// without it every translation unit that includes the header emits its own
// external definition, producing multiple-definition link errors (the
// equivalent factory in mpi_comms.hpp is already `inline`).
inline void build_comms_nccl_only(resources* handle, ncclComm_t nccl_comm, int num_ranks, int rank)
{
  cudaStream_t stream = resource::get_cuda_stream(*handle);

  // Wrap the caller's NCCL communicator; ownership of nccl_comm stays with
  // the caller.
  auto communicator = std::make_shared<comms_t>(
    std::unique_ptr<comms_iface>(new raft::comms::std_comms(nccl_comm, num_ranks, rank, stream)));
  resource::set_comms(*handle, communicator);
}
/**
* Factory function to construct a RAFT NCCL+UCX and inject it into a RAFT
* handle.
*
* @param handle raft::resources for injecting the comms
* @param nccl_comm initialized NCCL communicator to use for collectives
* @param ucp_worker of local process
* Note: This is purposefully left as void* so that the ucp_worker_h
* doesn't need to be exposed through the cython layer
* @param eps array of ucp_ep_h instances.
* Note: This is purposefully left as void* so that
* the ucp_ep_h doesn't need to be exposed through the cython layer.
* @param num_ranks number of ranks in communicator clique
* @param rank rank of local instance
*
* @code{.cpp}
* #include <raft/comms/std_comms.hpp>
* #include <raft/core/device_mdarray.hpp>
*
* ncclComm_t nccl_comm;
* raft::raft::resources handle;
* ucp_worker_h ucp_worker;
* ucp_ep_h *ucp_endpoints_arr;
*
* build_comms_nccl_ucx(&handle, nccl_comm, &ucp_worker, ucp_endpoints_arr, 5, 0);
* ...
* const auto& comm = resource::get_comms(handle);
* auto gather_data = raft::make_device_vector<float>(handle, comm.get_size());
* ...
* comm.allgather((gather_data.data_handle())[comm.get_rank()],
* gather_data.data_handle(),
* 1,
* resource::get_cuda_stream(handle));
*
* comm.sync_stream(resource::get_cuda_stream(handle));
* @endcode
*/
// `inline` added: this definition lives in a header; without it, including
// the header from more than one translation unit violates the ODR and
// causes multiple-definition link errors.
inline void build_comms_nccl_ucx(
  resources* handle, ncclComm_t nccl_comm, void* ucp_worker, void* eps, int num_ranks, int rank)
{
  // NOTE(review): the endpoint array allocated below is referenced by a
  // shared_ptr<ucp_ep_h*> that destroys only the pointer cell, never the
  // array itself (`delete[]` is not called anywhere visible here). This
  // looks like a leak — confirm whether detail::std_comms assumes ownership
  // before changing the allocation strategy.
  auto eps_sp = std::make_shared<ucp_ep_h*>(new ucp_ep_h[num_ranks]);

  // `eps` arrives as an opaque array of pointer-sized integers (to keep
  // ucp_ep_h out of the cython-facing signature); translate each entry.
  auto size_t_ep_arr = reinterpret_cast<size_t*>(eps);
  for (int i = 0; i < num_ranks; i++) {
    size_t ptr = size_t_ep_arr[i];
    auto ucp_ep_v = *eps_sp;  // the endpoint array itself
    if (ptr != 0) {
      ucp_ep_v[i] = reinterpret_cast<ucp_ep_h>(ptr);
    } else {
      ucp_ep_v[i] = nullptr;
    }
  }

  cudaStream_t stream = resource::get_cuda_stream(*handle);
  auto communicator =
    std::make_shared<comms_t>(std::unique_ptr<comms_iface>(new raft::comms::std_comms(
      nccl_comm, (ucp_worker_h)ucp_worker, eps_sp, num_ranks, rank, stream)));
  resource::set_comms(*handle, communicator);
}
/**
* @}
*/
// Reconstruct an ncclUniqueId from a raw byte buffer (e.g. one broadcast
// over the wire). `uniqueId` must hold at least NCCL_UNIQUE_ID_BYTES bytes.
inline void nccl_unique_id_from_char(ncclUniqueId* id, char* uniqueId)
{
  memcpy(id->internal, uniqueId, NCCL_UNIQUE_ID_BYTES);
}
// Generate a fresh NCCL unique id and copy its raw bytes into `uid`, which
// must have room for NCCL_UNIQUE_ID_BYTES bytes.
// NOTE(review): the return code of ncclGetUniqueId is ignored — consider
// checking it.
inline void get_nccl_unique_id(char* uid)
{
  ncclUniqueId id;
  ncclGetUniqueId(&id);
  memcpy(uid, id.internal, NCCL_UNIQUE_ID_BYTES);
}
}; // namespace comms
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/comms/mpi_comms.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/comms/comms.hpp>
#include <raft/comms/detail/mpi_comms.hpp>
#include <raft/core/resource/comms.hpp>
#include <raft/core/resource/cuda_stream.hpp>
namespace raft {
namespace comms {
using mpi_comms = detail::mpi_comms;
/**
* @defgroup mpi_comms_factory MPI Comms Factory Functions
* @{
*/
/**
* Given a properly initialized MPI_Comm, construct an instance of RAFT's
* MPI Communicator and inject it into the given RAFT handle instance
* @param handle raft handle for managing expensive resources
* @param comm an initialized MPI communicator
*
* @code{.cpp}
* #include <raft/comms/mpi_comms.hpp>
* #include <raft/core/device_mdarray.hpp>
*
* MPI_Comm mpi_comm;
* raft::raft::resources handle;
*
* initialize_mpi_comms(&handle, mpi_comm);
* ...
* const auto& comm = resource::get_comms(handle);
* auto gather_data = raft::make_device_vector<float>(handle, comm.get_size());
* ...
* comm.allgather((gather_data.data_handle())[comm.get_rank()],
* gather_data.data_handle(),
* 1,
* resource::get_cuda_stream(handle));
*
* comm.sync_stream(resource::get_cuda_stream(handle));
* @endcode
*/
inline void initialize_mpi_comms(resources* handle, MPI_Comm comm)
{
  // The `false` argument passed to mpi_comms means the wrapper does not own
  // `comm`; the caller remains responsible for freeing the MPI communicator
  // after the handle is destroyed.
  auto communicator = std::make_shared<comms_t>(
    std::unique_ptr<comms_iface>(new mpi_comms(comm, false, resource::get_cuda_stream(*handle))));
  resource::set_comms(*handle, communicator);
};
/**
* @}
*/
}; // namespace comms
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/comms/comms_test.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/comms/comms.hpp>
#include <raft/comms/detail/test.hpp>
#include <raft/core/resources.hpp>
namespace raft {
namespace comms {
/**
* @brief A simple sanity check that NCCL is able to perform a collective operation
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
// `inline` keeps this header-defined function ODR-safe when the header is
// included from multiple translation units.
inline bool test_collective_allreduce(raft::resources const& handle, int root)
{
  return detail::test_collective_allreduce(handle, root);
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective operation
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
// `inline` keeps this header-defined function ODR-safe when the header is
// included from multiple translation units.
inline bool test_collective_broadcast(raft::resources const& handle, int root)
{
  return detail::test_collective_broadcast(handle, root);
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective reduce
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
// `inline` keeps this header-defined function ODR-safe when the header is
// included from multiple translation units.
inline bool test_collective_reduce(raft::resources const& handle, int root)
{
  return detail::test_collective_reduce(handle, root);
}
/**
 * @brief A simple sanity check that NCCL is able to perform a collective allgather
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] root the root rank id
 * @return true if the collective completed successfully
 */
inline bool test_collective_allgather(raft::resources const& handle, int root)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_collective_allgather(handle, root);
}
/**
 * @brief A simple sanity check that NCCL is able to perform a collective gather
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] root the root rank id
 * @return true if the collective completed successfully
 */
inline bool test_collective_gather(raft::resources const& handle, int root)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_collective_gather(handle, root);
}
/**
 * @brief A simple sanity check that NCCL is able to perform a collective gatherv
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] root the root rank id
 * @return true if the collective completed successfully
 */
inline bool test_collective_gatherv(raft::resources const& handle, int root)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_collective_gatherv(handle, root);
}
/**
 * @brief A simple sanity check that NCCL is able to perform a collective reducescatter
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] root the root rank id
 * @return true if the collective completed successfully
 */
inline bool test_collective_reducescatter(raft::resources const& handle, int root)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_collective_reducescatter(handle, root);
}
/**
 * @brief A simple sanity check that UCX is able to send messages between all ranks
 *
 * @param[in] h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] numTrials number of iterations of all-to-all messaging to perform
 * @return true if all point-to-point exchanges completed successfully
 */
inline bool test_pointToPoint_simple_send_recv(raft::resources const& h, int numTrials)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_pointToPoint_simple_send_recv(h, numTrials);
}
/**
 * @brief A simple sanity check that the device is able to send OR receive.
 *
 * @param[in] h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] numTrials number of iterations of send or receive messaging to perform
 * @return true if all transfers completed successfully
 */
inline bool test_pointToPoint_device_send_or_recv(raft::resources const& h, int numTrials)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_pointToPoint_device_send_or_recv(h, numTrials);
}
/**
 * @brief A simple sanity check that the device is able to send and receive at the same time.
 *
 * @param[in] h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] numTrials number of iterations of send or receive messaging to perform
 * @return true if all transfers completed successfully
 */
inline bool test_pointToPoint_device_sendrecv(raft::resources const& h, int numTrials)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_pointToPoint_device_sendrecv(h, numTrials);
}
/**
 * @brief A simple sanity check that the device is able to perform multiple concurrent
 * sends and receives.
 *
 * @param[in] h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] numTrials number of iterations of send or receive messaging to perform
 * @return true if all transfers completed successfully
 */
inline bool test_pointToPoint_device_multicast_sendrecv(raft::resources const& h, int numTrials)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_pointToPoint_device_multicast_sendrecv(h, numTrials);
}
/**
 * @brief A simple test that the comms can be split into separate subcommunicators
 *
 * @param[in] h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param[in] n_colors number of different colors to test
 * @return true if the split communicators passed the sanity check
 */
inline bool test_commsplit(raft::resources const& h, int n_colors)
{
  // `inline` avoids multiple-definition link errors for this header-defined function.
  return detail::test_commsplit(h, n_colors);
}
} // namespace comms
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/comms/comms.hpp | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use core/comms.hpp instead.
*/
#pragma once
#include <raft/core/comms.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/comms | rapidsai_public_repos/raft/cpp/include/raft/comms/detail/util.hpp | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/interruptible.hpp>
#include <nccl.h>
#include <raft/core/error.hpp>
#include <string>
/**
 * @brief Error checking macro for NCCL runtime API functions.
 *
 * Invokes a NCCL runtime API function call, if the call does not return ncclSuccess, throws an
 * exception detailing the NCCL error that occurred
 *
 * Note: the macro body deliberately ends without a semicolon so the standard
 * `do { } while (0)` idiom holds — the caller supplies the `;`, making the
 * macro behave as a single statement (safe inside `if`/`else` without braces).
 */
#define RAFT_NCCL_TRY(call)                        \
  do {                                             \
    ncclResult_t const status = (call);            \
    if (ncclSuccess != status) {                   \
      std::string msg{};                           \
      SET_ERROR_MSG(msg,                           \
                    "NCCL error encountered at: ", \
                    "call='%s', Reason=%d:%s",     \
                    #call,                         \
                    status,                        \
                    ncclGetErrorString(status));   \
      throw raft::logic_error(msg);                \
    }                                              \
  } while (0)
// FIXME: Remove after consumer rename
#ifndef NCCL_TRY
#define NCCL_TRY(call) RAFT_NCCL_TRY(call)
#endif
// Best-effort NCCL error check: prints the failure to stdout instead of
// throwing. Intended for cleanup paths (e.g. destructors) where throwing
// would be unsafe.
#define RAFT_NCCL_TRY_NO_THROW(call)                                                   \
  do {                                                                                 \
    ncclResult_t status = call;                                                        \
    if (ncclSuccess != status) {                                                       \
      printf("NCCL call='%s' failed. Reason:%s\n", #call, ncclGetErrorString(status)); \
    }                                                                                  \
  } while (0)
// FIXME: Remove after consumer rename
#ifndef NCCL_TRY_NO_THROW
#define NCCL_TRY_NO_THROW(call) RAFT_NCCL_TRY_NO_THROW(call)
#endif
namespace raft {
namespace comms {
namespace detail {
/**
 * @brief Size in bytes of the host representation of a comms datatype.
 *
 * @param datatype the comms datatype tag
 * @return size in bytes of the corresponding C++ type
 * @throws const char* literal for an unrecognized datatype value
 */
constexpr size_t get_datatype_size(const datatype_t datatype)
{
  if (datatype == datatype_t::CHAR) { return sizeof(char); }
  if (datatype == datatype_t::UINT8) { return sizeof(uint8_t); }
  if (datatype == datatype_t::INT32) { return sizeof(int); }
  if (datatype == datatype_t::UINT32) { return sizeof(unsigned int); }
  if (datatype == datatype_t::INT64) { return sizeof(int64_t); }
  if (datatype == datatype_t::UINT64) { return sizeof(uint64_t); }
  if (datatype == datatype_t::FLOAT32) { return sizeof(float); }
  if (datatype == datatype_t::FLOAT64) { return sizeof(double); }
  throw "Unsupported datatype";
}
/**
 * @brief Map a comms datatype tag to the corresponding NCCL datatype.
 *
 * @param datatype the comms datatype tag
 * @return the matching ncclDataType_t
 * @throws const char* literal for an unrecognized datatype value
 */
constexpr ncclDataType_t get_nccl_datatype(const datatype_t datatype)
{
  if (datatype == datatype_t::CHAR) { return ncclChar; }
  if (datatype == datatype_t::UINT8) { return ncclUint8; }
  if (datatype == datatype_t::INT32) { return ncclInt; }
  if (datatype == datatype_t::UINT32) { return ncclUint32; }
  if (datatype == datatype_t::INT64) { return ncclInt64; }
  if (datatype == datatype_t::UINT64) { return ncclUint64; }
  if (datatype == datatype_t::FLOAT32) { return ncclFloat; }
  if (datatype == datatype_t::FLOAT64) { return ncclDouble; }
  throw "Unsupported datatype";
}
/**
 * @brief Map a comms reduction-op tag to the corresponding NCCL reduction op.
 *
 * @param op the comms reduction operation tag
 * @return the matching ncclRedOp_t
 * @throws const char* literal for an unrecognized op value
 */
constexpr ncclRedOp_t get_nccl_op(const op_t op)
{
  switch (op) {
    case op_t::SUM: return ncclSum;
    case op_t::PROD: return ncclProd;
    case op_t::MIN: return ncclMin;
    case op_t::MAX: return ncclMax;
    // Fixed error message: this function maps ops, not datatypes.
    default: throw "Unsupported operation";
  }
}
/**
 * @brief Busy-wait until `stream` completes, while monitoring the NCCL
 * communicator for asynchronous errors and cooperative cancellation.
 *
 * @param comm   NCCL communicator whose async error state is polled each iteration
 * @param stream CUDA stream to wait on
 * @return status_t::SUCCESS when the stream completed cleanly,
 *         status_t::ERROR if querying the stream or the async error state failed,
 *         status_t::ABORT if ncclCommAbort itself failed
 */
inline status_t nccl_sync_stream(ncclComm_t comm, cudaStream_t stream)
{
  cudaError_t cudaErr;
  ncclResult_t ncclErr, ncclAsyncErr;
  while (1) {
    cudaErr = cudaStreamQuery(stream);
    if (cudaErr == cudaSuccess) return status_t::SUCCESS;
    if (cudaErr != cudaErrorNotReady) {
      // An error occurred querying the status of the stream_
      return status_t::ERROR;
    }
    ncclErr = ncclCommGetAsyncError(comm, &ncclAsyncErr);
    if (ncclErr != ncclSuccess) {
      // An error occurred retrieving the asynchronous error
      return status_t::ERROR;
    }
    // Abort on an async NCCL error, or when this thread was cancelled via
    // raft::interruptible (yield_no_throw returns false on cancellation).
    if (ncclAsyncErr != ncclSuccess || !interruptible::yield_no_throw()) {
      // An asynchronous error happened. Stop the operation and destroy
      // the communicator
      ncclErr = ncclCommAbort(comm);
      if (ncclErr != ncclSuccess)
        // Caller may abort with an exception or try to re-create a new communicator.
        return status_t::ABORT;
      // TODO: shouldn't we place status_t::ERROR above under the condition, and
      // status_t::ABORT below here (i.e. after successful ncclCommAbort)?
      // NOTE(review): after a successful abort we fall through and keep polling;
      // presumably the next cudaStreamQuery surfaces an error — confirm.
    }
    // Let other threads (including NCCL threads) use the CPU.
    std::this_thread::yield();
  }
}
}; // namespace detail
}; // namespace comms
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/comms | rapidsai_public_repos/raft/cpp/include/raft/comms/detail/std_comms.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/comms/comms.hpp>
#include <raft/comms/detail/ucp_helper.hpp>
#include <raft/comms/detail/util.hpp>
#include <raft/core/resources.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <raft/core/error.hpp>
#include <raft/util/cudart_utils.hpp>
#include <thrust/iterator/zip_iterator.h>
#include <cuda_runtime.h>
#include <ucp/api/ucp.h>
#include <ucp/api/ucp_def.h>
#include <nccl.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <exception>
#include <memory>
#include <stdlib.h>
#include <thread>
#include <time.h>
namespace raft {
namespace comms {
namespace detail {
/**
 * @brief comms_iface implementation backed by NCCL for collective operations
 * and UCX/UCP for point-to-point messaging (isend/irecv/waitall).
 *
 * This class does not own the NCCL communicator or the UCP worker/endpoints;
 * their lifetime is managed by the caller that constructed this object.
 * All collectives are ordered on the stream supplied at construction.
 */
class std_comms : public comms_iface {
 public:
  std_comms() = delete;
  /**
   * @brief Constructor for collective + point-to-point operation.
   * @param nccl_comm initialized nccl comm
   * @param ucp_worker initialized ucp_worker instance
   * @param eps shared pointer to array of ucp endpoints
   * @param num_ranks number of ranks in the cluster
   * @param rank rank of the current worker
   * @param stream cuda stream for synchronizing and ordering collective operations
   * @param subcomms_ucp use ucp for subcommunicators
   */
  std_comms(ncclComm_t nccl_comm,
            ucp_worker_h ucp_worker,
            std::shared_ptr<ucp_ep_h*> eps,
            int num_ranks,
            int rank,
            rmm::cuda_stream_view stream,
            bool subcomms_ucp = true)
    : nccl_comm_(nccl_comm),
      stream_(stream),
      status_(stream),
      num_ranks_(num_ranks),
      rank_(rank),
      subcomms_ucp_(subcomms_ucp),
      ucp_worker_(ucp_worker),
      ucp_eps_(eps),
      next_request_id_(0)
  {
    initialize();
  };
  /**
   * @brief constructor for collective-only operation
   * @param nccl_comm initialized nccl communicator
   * @param num_ranks size of the cluster
   * @param rank rank of the current worker
   * @param stream stream for ordering collective operations
   */
  std_comms(const ncclComm_t nccl_comm, int num_ranks, int rank, rmm::cuda_stream_view stream)
    : nccl_comm_(nccl_comm),
      stream_(stream),
      status_(stream),
      num_ranks_(num_ranks),
      rank_(rank),
      subcomms_ucp_(false)
  {
    initialize();
  };
  // Zero the device-side status scalar used by barrier()'s allreduce and cache
  // its device pointer.
  void initialize()
  {
    status_.set_value_to_zero_async(stream_);
    buf_ = status_.data();
  }
  // NOTE(review): the NCCL communicator and UCP resources are intentionally
  // not destroyed here — this class does not own them.
  ~std_comms()
  {
    requests_in_flight_.clear();
    free_requests_.clear();
  }
  int get_size() const { return num_ranks_; }
  int get_rank() const { return rank_; }
  /**
   * @brief Split this communicator into subcommunicators by `color`, ordered by `key`.
   *
   * Colors and keys are allgathered across all ranks; the rank with key == 0 in
   * each color group generates the NCCL unique id, which the other members of
   * the group pick up from the allgathered id array.
   *
   * @param color ranks sharing a color end up in the same subcommunicator
   * @param key ordering (new rank id) within the subcommunicator
   * @return a new collective-only std_comms for this rank's color group
   */
  std::unique_ptr<comms_iface> comm_split(int color, int key) const
  {
    rmm::device_uvector<int> d_colors(get_size(), stream_);
    rmm::device_uvector<int> d_keys(get_size(), stream_);
    update_device(d_colors.data() + get_rank(), &color, 1, stream_);
    update_device(d_keys.data() + get_rank(), &key, 1, stream_);
    allgather(d_colors.data() + get_rank(), d_colors.data(), 1, datatype_t::INT32, stream_);
    allgather(d_keys.data() + get_rank(), d_keys.data(), 1, datatype_t::INT32, stream_);
    this->sync_stream(stream_);
    std::vector<int> h_colors(get_size());
    std::vector<int> h_keys(get_size());
    update_host(h_colors.data(), d_colors.data(), get_size(), stream_);
    update_host(h_keys.data(), d_keys.data(), get_size(), stream_);
    this->sync_stream(stream_);
    ncclComm_t nccl_comm;
    // Create a structure to allgather...
    ncclUniqueId id{};
    rmm::device_uvector<ncclUniqueId> d_nccl_ids(get_size(), stream_);
    if (key == 0) { RAFT_NCCL_TRY(ncclGetUniqueId(&id)); }
    update_device(d_nccl_ids.data() + get_rank(), &id, 1, stream_);
    allgather(d_nccl_ids.data() + get_rank(),
              d_nccl_ids.data(),
              sizeof(ncclUniqueId),
              datatype_t::UINT8,
              stream_);
    // Locate the first rank with our color; NOTE(review): this assumes that
    // rank is the one that generated the unique id (i.e. has key == 0) —
    // appears to rely on keys being ordered within a color group; confirm.
    auto offset =
      std::distance(thrust::make_zip_iterator(h_colors.begin(), h_keys.begin()),
                    std::find_if(thrust::make_zip_iterator(h_colors.begin(), h_keys.begin()),
                                 thrust::make_zip_iterator(h_colors.end(), h_keys.end()),
                                 [color](auto tuple) { return thrust::get<0>(tuple) == color; }));
    auto subcomm_size = std::count(h_colors.begin(), h_colors.end(), color);
    update_host(&id, d_nccl_ids.data() + offset, 1, stream_);
    this->sync_stream(stream_);
    RAFT_NCCL_TRY(ncclCommInitRank(&nccl_comm, subcomm_size, id, key));
    return std::unique_ptr<comms_iface>(new std_comms(nccl_comm, subcomm_size, key, stream_));
  }
  // Synchronize all ranks via a dummy device allreduce followed by a stream sync.
  void barrier() const
  {
    allreduce(buf_, buf_, 1, datatype_t::INT32, op_t::SUM, stream_);
    ASSERT(sync_stream(stream_) == status_t::SUCCESS,
           "ERROR: syncStream failed. This can be caused by a failed rank_.");
  }
  // Hand out a request id, reusing previously-freed ids before minting new ones.
  void get_request_id(request_t* req) const
  {
    request_t req_id;
    if (this->free_requests_.empty())
      req_id = this->next_request_id_++;
    else {
      auto it = this->free_requests_.begin();
      req_id  = *it;
      this->free_requests_.erase(it);
    }
    *req = req_id;
  }
  // Non-blocking tagged send over UCP. The malloc'd ucp_request is tracked in
  // requests_in_flight_ and released by waitall() via free_ucp_request.
  void isend(const void* buf, size_t size, int dest, int tag, request_t* request) const
  {
    ASSERT(ucp_worker_ != nullptr, "ERROR: UCX comms not initialized on communicator.");
    get_request_id(request);
    ucp_ep_h ep_ptr = (*ucp_eps_)[dest];
    ucp_request* ucp_req = (ucp_request*)malloc(sizeof(ucp_request));
    this->ucp_handler_.ucp_isend(ucp_req, ep_ptr, buf, size, tag, default_tag_mask, get_rank());
    requests_in_flight_.insert(std::make_pair(*request, ucp_req));
  }
  // Non-blocking tagged receive over UCP; lifecycle mirrors isend().
  void irecv(void* buf, size_t size, int source, int tag, request_t* request) const
  {
    ASSERT(ucp_worker_ != nullptr, "ERROR: UCX comms not initialized on communicator.");
    get_request_id(request);
    ucp_ep_h ep_ptr = (*ucp_eps_)[source];
    ucp_tag_t tag_mask = default_tag_mask;
    ucp_request* ucp_req = (ucp_request*)malloc(sizeof(ucp_request));
    ucp_handler_.ucp_irecv(ucp_req, ucp_worker_, ep_ptr, buf, size, tag, tag_mask, source);
    requests_in_flight_.insert(std::make_pair(*request, ucp_req));
  }
  // Block until the given requests complete, progressing the UCP worker and
  // timing out if no progress is made for 10 consecutive seconds.
  void waitall(int count, request_t array_of_requests[]) const
  {
    ASSERT(ucp_worker_ != nullptr, "ERROR: UCX comms not initialized on communicator.");
    std::vector<ucp_request*> requests;
    requests.reserve(count);
    time_t start = time(NULL);
    for (int i = 0; i < count; ++i) {
      auto req_it = requests_in_flight_.find(array_of_requests[i]);
      ASSERT(requests_in_flight_.end() != req_it,
             "ERROR: waitall on invalid request: %d",
             array_of_requests[i]);
      requests.push_back(req_it->second);
      free_requests_.insert(req_it->first);
      requests_in_flight_.erase(req_it);
    }
    while (requests.size() > 0) {
      time_t now = time(NULL);
      // Timeout if we have not gotten progress or completed any requests
      // in 10 or more seconds.
      ASSERT(now - start < 10, "Timed out waiting for requests.");
      for (std::vector<ucp_request*>::iterator it = requests.begin(); it != requests.end();) {
        bool restart = false;  // resets the timeout when any progress was made
        // Causes UCP to progress through the send/recv message queue
        while (ucp_worker_progress(ucp_worker_) != 0) {
          restart = true;
        }
        auto req = *it;
        // If the message needs release, we know it will be sent/received
        // asynchronously, so we will need to track and verify its state
        if (req->needs_release) {
          ASSERT(UCS_PTR_IS_PTR(req->req), "UCX Request Error. Request is not valid UCX pointer");
          ASSERT(!UCS_PTR_IS_ERR(req->req), "UCX Request Error: %d\n", UCS_PTR_STATUS(req->req));
          ASSERT(req->req->completed == 1 || req->req->completed == 0,
                 "request->completed not a valid value: %d\n",
                 req->req->completed);
        }
        // If a message was sent synchronously (eg. completed before
        // `isend`/`irecv` completed) or an asynchronous message
        // is complete, we can go ahead and clean it up.
        if (!req->needs_release || req->req->completed == 1) {
          restart = true;
          // perform cleanup
          ucp_handler_.free_ucp_request(req);
          // remove from pending requests
          it = requests.erase(it);
        } else {
          ++it;
        }
        // if any progress was made, reset the timeout start time
        if (restart) { start = time(NULL); }
      }
    }
  }
  void allreduce(const void* sendbuff,
                 void* recvbuff,
                 size_t count,
                 datatype_t datatype,
                 op_t op,
                 cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclAllReduce(
      sendbuff, recvbuff, count, get_nccl_datatype(datatype), get_nccl_op(op), nccl_comm_, stream));
  }
  // In-place broadcast (send and receive buffers are the same).
  void bcast(void* buff, size_t count, datatype_t datatype, int root, cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(
      ncclBroadcast(buff, buff, count, get_nccl_datatype(datatype), root, nccl_comm_, stream));
  }
  void bcast(const void* sendbuff,
             void* recvbuff,
             size_t count,
             datatype_t datatype,
             int root,
             cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclBroadcast(
      sendbuff, recvbuff, count, get_nccl_datatype(datatype), root, nccl_comm_, stream));
  }
  void reduce(const void* sendbuff,
              void* recvbuff,
              size_t count,
              datatype_t datatype,
              op_t op,
              int root,
              cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclReduce(sendbuff,
                             recvbuff,
                             count,
                             get_nccl_datatype(datatype),
                             get_nccl_op(op),
                             root,
                             nccl_comm_,
                             stream));
  }
  void allgather(const void* sendbuff,
                 void* recvbuff,
                 size_t sendcount,
                 datatype_t datatype,
                 cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclAllGather(
      sendbuff, recvbuff, sendcount, get_nccl_datatype(datatype), nccl_comm_, stream));
  }
  // Variable-count allgather emulated with one grouped broadcast per rank,
  // since NCCL has no native allgatherv.
  void allgatherv(const void* sendbuf,
                  void* recvbuf,
                  const size_t* recvcounts,
                  const size_t* displs,
                  datatype_t datatype,
                  cudaStream_t stream) const
  {
    // From: "An Empirical Evaluation of Allgatherv on Multi-GPU Systems" -
    // https://arxiv.org/pdf/1812.05964.pdf Listing 1 on page 4.
    RAFT_EXPECTS(num_ranks_ <= 2048,
                 "# NCCL operations between ncclGroupStart & ncclGroupEnd shouldn't exceed 2048.");
    RAFT_NCCL_TRY(ncclGroupStart());
    for (int root = 0; root < num_ranks_; ++root) {
      size_t dtype_size = get_datatype_size(datatype);
      RAFT_NCCL_TRY(ncclBroadcast(sendbuf,
                                  static_cast<char*>(recvbuf) + displs[root] * dtype_size,
                                  recvcounts[root],
                                  get_nccl_datatype(datatype),
                                  root,
                                  nccl_comm_,
                                  stream));
    }
    RAFT_NCCL_TRY(ncclGroupEnd());
  }
  // Gather emulated with grouped ncclSend/ncclRecv; root receives from all ranks.
  void gather(const void* sendbuff,
              void* recvbuff,
              size_t sendcount,
              datatype_t datatype,
              int root,
              cudaStream_t stream) const
  {
    size_t dtype_size = get_datatype_size(datatype);
    RAFT_NCCL_TRY(ncclGroupStart());
    if (get_rank() == root) {
      for (int r = 0; r < get_size(); ++r) {
        RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + sendcount * r * dtype_size,
                               sendcount,
                               get_nccl_datatype(datatype),
                               r,
                               nccl_comm_,
                               stream));
      }
    }
    RAFT_NCCL_TRY(
      ncclSend(sendbuff, sendcount, get_nccl_datatype(datatype), root, nccl_comm_, stream));
    RAFT_NCCL_TRY(ncclGroupEnd());
  }
  // Variable-count gather; per-rank receive counts and displacements are in elements.
  void gatherv(const void* sendbuff,
               void* recvbuff,
               size_t sendcount,
               const size_t* recvcounts,
               const size_t* displs,
               datatype_t datatype,
               int root,
               cudaStream_t stream) const
  {
    size_t dtype_size = get_datatype_size(datatype);
    RAFT_NCCL_TRY(ncclGroupStart());
    if (get_rank() == root) {
      for (int r = 0; r < get_size(); ++r) {
        RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + displs[r] * dtype_size,
                               recvcounts[r],
                               get_nccl_datatype(datatype),
                               r,
                               nccl_comm_,
                               stream));
      }
    }
    RAFT_NCCL_TRY(
      ncclSend(sendbuff, sendcount, get_nccl_datatype(datatype), root, nccl_comm_, stream));
    RAFT_NCCL_TRY(ncclGroupEnd());
  }
  void reducescatter(const void* sendbuff,
                     void* recvbuff,
                     size_t recvcount,
                     datatype_t datatype,
                     op_t op,
                     cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclReduceScatter(sendbuff,
                                    recvbuff,
                                    recvcount,
                                    get_nccl_datatype(datatype),
                                    get_nccl_op(op),
                                    nccl_comm_,
                                    stream));
  }
  // Wait for the stream while monitoring the NCCL comm for async errors.
  status_t sync_stream(cudaStream_t stream) const { return nccl_sync_stream(nccl_comm_, stream); }
  // if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
  void device_send(const void* buf, size_t size, int dest, cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclSend(buf, size, ncclUint8, dest, nccl_comm_, stream));
  }
  // if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
  void device_recv(void* buf, size_t size, int source, cudaStream_t stream) const
  {
    RAFT_NCCL_TRY(ncclRecv(buf, size, ncclUint8, source, nccl_comm_, stream));
  }
  void device_sendrecv(const void* sendbuf,
                       size_t sendsize,
                       int dest,
                       void* recvbuf,
                       size_t recvsize,
                       int source,
                       cudaStream_t stream) const
  {
    // ncclSend/ncclRecv pair needs to be inside ncclGroupStart/ncclGroupEnd to avoid deadlock
    RAFT_NCCL_TRY(ncclGroupStart());
    RAFT_NCCL_TRY(ncclSend(sendbuf, sendsize, ncclUint8, dest, nccl_comm_, stream));
    RAFT_NCCL_TRY(ncclRecv(recvbuf, recvsize, ncclUint8, source, nccl_comm_, stream));
    RAFT_NCCL_TRY(ncclGroupEnd());
  }
  // Multiple sends/receives in one NCCL group; sizes and offsets are in bytes.
  void device_multicast_sendrecv(const void* sendbuf,
                                 std::vector<size_t> const& sendsizes,
                                 std::vector<size_t> const& sendoffsets,
                                 std::vector<int> const& dests,
                                 void* recvbuf,
                                 std::vector<size_t> const& recvsizes,
                                 std::vector<size_t> const& recvoffsets,
                                 std::vector<int> const& sources,
                                 cudaStream_t stream) const
  {
    // ncclSend/ncclRecv pair needs to be inside ncclGroupStart/ncclGroupEnd to avoid deadlock
    RAFT_NCCL_TRY(ncclGroupStart());
    for (size_t i = 0; i < sendsizes.size(); ++i) {
      RAFT_NCCL_TRY(ncclSend(static_cast<const char*>(sendbuf) + sendoffsets[i],
                             sendsizes[i],
                             ncclUint8,
                             dests[i],
                             nccl_comm_,
                             stream));
    }
    for (size_t i = 0; i < recvsizes.size(); ++i) {
      RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuf) + recvoffsets[i],
                             recvsizes[i],
                             ncclUint8,
                             sources[i],
                             nccl_comm_,
                             stream));
    }
    RAFT_NCCL_TRY(ncclGroupEnd());
  }
  void group_start() const { RAFT_NCCL_TRY(ncclGroupStart()); }
  void group_end() const { RAFT_NCCL_TRY(ncclGroupEnd()); }
 private:
  ncclComm_t nccl_comm_;            // non-owned NCCL communicator
  cudaStream_t stream_;             // stream ordering all collectives
  rmm::device_scalar<int32_t> status_;  // device scalar backing barrier()
  int32_t* buf_;                    // device pointer into status_
  int num_ranks_;
  int rank_;
  bool subcomms_ucp_;
  comms_ucp_handler ucp_handler_;
  ucp_worker_h ucp_worker_;         // non-owned; nullptr for collective-only comms
  std::shared_ptr<ucp_ep_h*> ucp_eps_;
  // Request bookkeeping is mutable because isend/irecv/waitall are const.
  mutable request_t next_request_id_;
  mutable std::unordered_map<request_t, struct ucp_request*> requests_in_flight_;
  mutable std::unordered_set<request_t> free_requests_;
};
} // namespace detail
} // end namespace comms
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/comms | rapidsai_public_repos/raft/cpp/include/raft/comms/detail/mpi_comms.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdio>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <mpi.h>
#include <nccl.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/detail/util.hpp>
#include <raft/core/error.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
// Error-checking wrapper for MPI calls: on failure, formats the MPI error
// string and throws via RAFT_EXPECTS (the condition is already known false
// inside the branch, so RAFT_EXPECTS always throws there).
#define RAFT_MPI_TRY(call)                                                                \
  do {                                                                                    \
    int status = call;                                                                    \
    if (MPI_SUCCESS != status) {                                                          \
      int mpi_error_string_length = 0;                                                    \
      char mpi_error_string[MPI_MAX_ERROR_STRING];                                        \
      MPI_Error_string(status, mpi_error_string, &mpi_error_string_length);               \
      RAFT_EXPECTS(                                                                       \
        MPI_SUCCESS == status, "ERROR: MPI call='%s'. Reason:%s\n", #call, mpi_error_string); \
    }                                                                                     \
  } while (0)
// FIXME: Remove after consumer rename
#ifndef MPI_TRY
#define MPI_TRY(call) RAFT_MPI_TRY(call)
#endif
// Best-effort MPI error check: prints the failure (with file/line) to stdout
// instead of throwing. Intended for cleanup paths such as destructors.
#define RAFT_MPI_TRY_NO_THROW(call)                                         \
  do {                                                                      \
    int status = call;                                                      \
    if (MPI_SUCCESS != status) {                                            \
      int mpi_error_string_length = 0;                                      \
      char mpi_error_string[MPI_MAX_ERROR_STRING];                          \
      MPI_Error_string(status, mpi_error_string, &mpi_error_string_length); \
      printf("MPI call='%s' at file=%s line=%d failed with %s ",            \
             #call,                                                         \
             __FILE__,                                                      \
             __LINE__,                                                      \
             mpi_error_string);                                             \
    }                                                                       \
  } while (0)
// FIXME: Remove after consumer rename
#ifndef MPI_TRY_NO_THROW
#define MPI_TRY_NO_THROW(call) RAFT_MPI_TRY_NO_THROW(call)
#endif
namespace raft {
namespace comms {
namespace detail {
/**
 * @brief Map a comms datatype tag to the corresponding MPI datatype.
 *
 * @param datatype the comms datatype tag
 * @return the matching MPI_Datatype; MPI_DOUBLE for unrecognized values
 *         (that path exists only to silence compiler warnings)
 */
constexpr MPI_Datatype get_mpi_datatype(const datatype_t datatype)
{
  if (datatype == datatype_t::CHAR) { return MPI_CHAR; }
  if (datatype == datatype_t::UINT8) { return MPI_UNSIGNED_CHAR; }
  if (datatype == datatype_t::INT32) { return MPI_INT; }
  if (datatype == datatype_t::UINT32) { return MPI_UNSIGNED; }
  if (datatype == datatype_t::INT64) { return MPI_LONG_LONG; }
  if (datatype == datatype_t::UINT64) { return MPI_UNSIGNED_LONG_LONG; }
  if (datatype == datatype_t::FLOAT32) { return MPI_FLOAT; }
  if (datatype == datatype_t::FLOAT64) { return MPI_DOUBLE; }
  // Execution should never reach here. This takes care of compiler warning.
  return MPI_DOUBLE;
}
/**
 * @brief Map a comms reduction-op tag to the corresponding MPI reduction op.
 *
 * @param op the comms reduction operation tag
 * @return the matching MPI_Op; MPI_MAX for unrecognized values
 *         (that path exists only to silence compiler warnings)
 */
constexpr MPI_Op get_mpi_op(const op_t op)
{
  if (op == op_t::SUM) { return MPI_SUM; }
  if (op == op_t::PROD) { return MPI_PROD; }
  if (op == op_t::MIN) { return MPI_MIN; }
  if (op == op_t::MAX) { return MPI_MAX; }
  // Execution should never reach here. This takes care of compiler warning.
  return MPI_MAX;
}
class mpi_comms : public comms_iface {
public:
mpi_comms(MPI_Comm comm, const bool owns_mpi_comm, rmm::cuda_stream_view stream)
: owns_mpi_comm_(owns_mpi_comm),
mpi_comm_(comm),
size_(0),
rank_(1),
status_(stream),
next_request_id_(0),
stream_(stream)
{
int mpi_is_initialized = 0;
RAFT_MPI_TRY(MPI_Initialized(&mpi_is_initialized));
RAFT_EXPECTS(mpi_is_initialized, "ERROR: MPI is not initialized!");
RAFT_MPI_TRY(MPI_Comm_size(mpi_comm_, &size_));
RAFT_MPI_TRY(MPI_Comm_rank(mpi_comm_, &rank_));
// get NCCL unique ID at rank 0 and broadcast it to all others
ncclUniqueId id;
if (0 == rank_) RAFT_NCCL_TRY(ncclGetUniqueId(&id));
RAFT_MPI_TRY(MPI_Bcast((void*)&id, sizeof(id), MPI_BYTE, 0, mpi_comm_));
// initializing NCCL
RAFT_NCCL_TRY(ncclCommInitRank(&nccl_comm_, size_, id, rank_));
initialize();
}
void initialize()
{
status_.set_value_to_zero_async(stream_);
buf_ = status_.data();
}
virtual ~mpi_comms()
{
// finalizing NCCL
RAFT_NCCL_TRY_NO_THROW(ncclCommDestroy(nccl_comm_));
if (owns_mpi_comm_) { RAFT_MPI_TRY_NO_THROW(MPI_Comm_free(&mpi_comm_)); }
}
int get_size() const { return size_; }
int get_rank() const { return rank_; }
std::unique_ptr<comms_iface> comm_split(int color, int key) const
{
MPI_Comm new_comm;
RAFT_MPI_TRY(MPI_Comm_split(mpi_comm_, color, key, &new_comm));
return std::unique_ptr<comms_iface>(new mpi_comms(new_comm, true, stream_));
}
void barrier() const
{
allreduce(buf_, buf_, 1, datatype_t::INT32, op_t::SUM, stream_);
ASSERT(sync_stream(stream_) == status_t::SUCCESS,
"ERROR: syncStream failed. This can be caused by a failed rank_.");
}
void isend(const void* buf, size_t size, int dest, int tag, request_t* request) const
{
MPI_Request mpi_req;
request_t req_id;
if (free_requests_.empty()) {
req_id = next_request_id_++;
} else {
auto it = free_requests_.begin();
req_id = *it;
free_requests_.erase(it);
}
RAFT_MPI_TRY(MPI_Isend(buf, size, MPI_BYTE, dest, tag, mpi_comm_, &mpi_req));
requests_in_flight_.insert(std::make_pair(req_id, mpi_req));
*request = req_id;
}
void irecv(void* buf, size_t size, int source, int tag, request_t* request) const
{
MPI_Request mpi_req;
request_t req_id;
if (free_requests_.empty()) {
req_id = next_request_id_++;
} else {
auto it = free_requests_.begin();
req_id = *it;
free_requests_.erase(it);
}
RAFT_MPI_TRY(MPI_Irecv(buf, size, MPI_BYTE, source, tag, mpi_comm_, &mpi_req));
requests_in_flight_.insert(std::make_pair(req_id, mpi_req));
*request = req_id;
}
void waitall(int count, request_t array_of_requests[]) const
{
std::vector<MPI_Request> requests;
requests.reserve(count);
for (int i = 0; i < count; ++i) {
auto req_it = requests_in_flight_.find(array_of_requests[i]);
RAFT_EXPECTS(requests_in_flight_.end() != req_it,
"ERROR: waitall on invalid request: %d",
array_of_requests[i]);
requests.push_back(req_it->second);
free_requests_.insert(req_it->first);
requests_in_flight_.erase(req_it);
}
RAFT_MPI_TRY(MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE));
}
void allreduce(const void* sendbuff,
void* recvbuff,
size_t count,
datatype_t datatype,
op_t op,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclAllReduce(
sendbuff, recvbuff, count, get_nccl_datatype(datatype), get_nccl_op(op), nccl_comm_, stream));
}
void bcast(void* buff, size_t count, datatype_t datatype, int root, cudaStream_t stream) const
{
RAFT_NCCL_TRY(
ncclBroadcast(buff, buff, count, get_nccl_datatype(datatype), root, nccl_comm_, stream));
}
void bcast(const void* sendbuff,
void* recvbuff,
size_t count,
datatype_t datatype,
int root,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclBroadcast(
sendbuff, recvbuff, count, get_nccl_datatype(datatype), root, nccl_comm_, stream));
}
void reduce(const void* sendbuff,
void* recvbuff,
size_t count,
datatype_t datatype,
op_t op,
int root,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclReduce(sendbuff,
recvbuff,
count,
get_nccl_datatype(datatype),
get_nccl_op(op),
root,
nccl_comm_,
stream));
}
void allgather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
datatype_t datatype,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclAllGather(
sendbuff, recvbuff, sendcount, get_nccl_datatype(datatype), nccl_comm_, stream));
}
void allgatherv(const void* sendbuf,
void* recvbuf,
const size_t* recvcounts,
const size_t* displs,
datatype_t datatype,
cudaStream_t stream) const
{
RAFT_EXPECTS(size_ <= 2048,
"# NCCL operations between ncclGroupStart & ncclGroupEnd shouldn't exceed 2048.");
// From: "An Empirical Evaluation of Allgatherv on Multi-GPU Systems" -
// https://arxiv.org/pdf/1812.05964.pdf Listing 1 on page 4.
RAFT_NCCL_TRY(ncclGroupStart());
for (int root = 0; root < size_; ++root) {
RAFT_NCCL_TRY(
ncclBroadcast(sendbuf,
static_cast<char*>(recvbuf) + displs[root] * get_datatype_size(datatype),
recvcounts[root],
get_nccl_datatype(datatype),
root,
nccl_comm_,
stream));
}
RAFT_NCCL_TRY(ncclGroupEnd());
}
void gather(const void* sendbuff,
void* recvbuff,
size_t sendcount,
datatype_t datatype,
int root,
cudaStream_t stream) const
{
size_t dtype_size = get_datatype_size(datatype);
RAFT_NCCL_TRY(ncclGroupStart());
if (get_rank() == root) {
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + sendcount * r * dtype_size,
sendcount,
get_nccl_datatype(datatype),
r,
nccl_comm_,
stream));
}
}
RAFT_NCCL_TRY(
ncclSend(sendbuff, sendcount, get_nccl_datatype(datatype), root, nccl_comm_, stream));
RAFT_NCCL_TRY(ncclGroupEnd());
}
void gatherv(const void* sendbuff,
void* recvbuff,
size_t sendcount,
const size_t* recvcounts,
const size_t* displs,
datatype_t datatype,
int root,
cudaStream_t stream) const
{
size_t dtype_size = get_datatype_size(datatype);
RAFT_NCCL_TRY(ncclGroupStart());
if (get_rank() == root) {
for (int r = 0; r < get_size(); ++r) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuff) + displs[r] * dtype_size,
recvcounts[r],
get_nccl_datatype(datatype),
r,
nccl_comm_,
stream));
}
}
RAFT_NCCL_TRY(
ncclSend(sendbuff, sendcount, get_nccl_datatype(datatype), root, nccl_comm_, stream));
RAFT_NCCL_TRY(ncclGroupEnd());
}
void reducescatter(const void* sendbuff,
void* recvbuff,
size_t recvcount,
datatype_t datatype,
op_t op,
cudaStream_t stream) const
{
RAFT_NCCL_TRY(ncclReduceScatter(sendbuff,
recvbuff,
recvcount,
get_nccl_datatype(datatype),
get_nccl_op(op),
nccl_comm_,
stream));
}
// Block until `stream` completes, reporting any asynchronous NCCL/CUDA
// failure on this communicator as a status code instead of throwing.
status_t sync_stream(cudaStream_t stream) const { return nccl_sync_stream(nccl_comm_, stream); }
// Point-to-point send of `size` bytes (transferred as ncclUint8).
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void device_send(const void* buf, size_t size, int dest, cudaStream_t stream) const
{
  RAFT_NCCL_TRY(ncclSend(buf, size, ncclUint8, dest, nccl_comm_, stream));
}
// Point-to-point receive of `size` bytes (transferred as ncclUint8).
// if a thread is sending & receiving at the same time, use device_sendrecv to avoid deadlock
void device_recv(void* buf, size_t size, int source, cudaStream_t stream) const
{
  RAFT_NCCL_TRY(ncclRecv(buf, size, ncclUint8, source, nccl_comm_, stream));
}
// Simultaneously send `sendsize` bytes to `dest` and receive `recvsize`
// bytes from `source` (both transferred as ncclUint8) on `stream`.
void device_sendrecv(const void* sendbuf,
                     size_t sendsize,
                     int dest,
                     void* recvbuf,
                     size_t recvsize,
                     int source,
                     cudaStream_t stream) const
{
  // ncclSend/ncclRecv pair needs to be inside ncclGroupStart/ncclGroupEnd to avoid deadlock
  RAFT_NCCL_TRY(ncclGroupStart());
  RAFT_NCCL_TRY(ncclSend(sendbuf, sendsize, ncclUint8, dest, nccl_comm_, stream));
  RAFT_NCCL_TRY(ncclRecv(recvbuf, recvsize, ncclUint8, source, nccl_comm_, stream));
  RAFT_NCCL_TRY(ncclGroupEnd());
}
void device_multicast_sendrecv(const void* sendbuf,
std::vector<size_t> const& sendsizes,
std::vector<size_t> const& sendoffsets,
std::vector<int> const& dests,
void* recvbuf,
std::vector<size_t> const& recvsizes,
std::vector<size_t> const& recvoffsets,
std::vector<int> const& sources,
cudaStream_t stream) const
{
// ncclSend/ncclRecv pair needs to be inside ncclGroupStart/ncclGroupEnd to avoid deadlock
RAFT_NCCL_TRY(ncclGroupStart());
for (size_t i = 0; i < sendsizes.size(); ++i) {
RAFT_NCCL_TRY(ncclSend(static_cast<const char*>(sendbuf) + sendoffsets[i],
sendsizes[i],
ncclUint8,
dests[i],
nccl_comm_,
stream));
}
for (size_t i = 0; i < recvsizes.size(); ++i) {
RAFT_NCCL_TRY(ncclRecv(static_cast<char*>(recvbuf) + recvoffsets[i],
recvsizes[i],
ncclUint8,
sources[i],
nccl_comm_,
stream));
}
RAFT_NCCL_TRY(ncclGroupEnd());
}
// Begin aggregating subsequent NCCL calls into one group operation.
void group_start() const { RAFT_NCCL_TRY(ncclGroupStart()); }
// End the current NCCL group, allowing the aggregated calls to proceed.
void group_end() const { RAFT_NCCL_TRY(ncclGroupEnd()); }
private:
bool owns_mpi_comm_;  // if true, this instance is responsible for freeing mpi_comm_
MPI_Comm mpi_comm_;   // underlying MPI communicator (used for host-side ops / bootstrap)
cudaStream_t stream_;
rmm::device_scalar<int32_t> status_;  // device-side scratch; usage not visible here — see ctor
int32_t* buf_;                        // presumably points at status_ storage — TODO confirm
ncclComm_t nccl_comm_;                // NCCL communicator used by all device collectives above
int size_;                            // number of ranks in the communicator
int rank_;                            // this process's rank
// Bookkeeping for asynchronous MPI point-to-point requests (isend/irecv):
// monotonically growing ids, plus maps of in-flight and recyclable ids.
mutable request_t next_request_id_;
mutable std::unordered_map<request_t, MPI_Request> requests_in_flight_;
mutable std::unordered_set<request_t> free_requests_;
};
} // end namespace detail
}; // end namespace comms
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/comms | rapidsai_public_repos/raft/cpp/include/raft/comms/detail/test.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/comms/comms.hpp>
#include <raft/core/resource/comms.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <iostream>
#include <numeric>
#include <iostream>
#include <numeric>
namespace raft {
namespace comms {
namespace detail {
/**
* @brief A simple sanity check that NCCL is able to perform a collective operation
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
/**
 * @brief A simple sanity check that NCCL is able to perform a collective allreduce.
 *
 * Every rank contributes the value 1; after a SUM allreduce each rank must
 * observe the communicator size.
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *                   initialized comms instance.
 * @param[in] root   the root rank id (unused here; kept for a uniform test signature)
 * @return true when the reduced value equals the communicator size
 */
bool test_collective_allreduce(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  int const send = 1;
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_scalar<int> temp_d(stream);
  // Copy the full int: the previous code passed a byte count of 1, leaving
  // the upper 3 bytes of the device scalar uninitialized (every sibling
  // test in this file uses sizeof(int)).
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(temp_d.data(), &send, sizeof(int), cudaMemcpyHostToDevice, stream));
  communicator.allreduce(temp_d.data(), temp_d.data(), 1, op_t::SUM, stream);
  int temp_h = 0;
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(&temp_h, temp_d.data(), sizeof(int), cudaMemcpyDeviceToHost, stream));
  resource::sync_stream(handle, stream);
  communicator.barrier();
  std::cout << "Clique size: " << communicator.get_size() << std::endl;
  std::cout << "final_size: " << temp_h << std::endl;
  return temp_h == communicator.get_size();
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective operation
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
/**
 * @brief A simple sanity check that NCCL is able to perform a collective broadcast.
 *
 * The root rank seeds a device scalar with its own id and broadcasts it;
 * every rank then verifies it received the root id.
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *                   initialized comms instance.
 * @param[in] root the root rank id
 */
bool test_collective_broadcast(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  int const send = root;
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_scalar<int> bcast_d(stream);
  // Only the root initializes the device buffer; bcast fills in the rest.
  bool const is_root = (communicator.get_rank() == root);
  if (is_root) {
    RAFT_CUDA_TRY(
      cudaMemcpyAsync(bcast_d.data(), &send, sizeof(int), cudaMemcpyHostToDevice, stream));
  }
  communicator.bcast(bcast_d.data(), 1, root, stream);
  communicator.sync_stream(stream);
  int received = -1;  // Verify more than one byte is being sent
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(&received, bcast_d.data(), sizeof(int), cudaMemcpyDeviceToHost, stream));
  resource::sync_stream(handle, stream);
  communicator.barrier();
  std::cout << "Clique size: " << communicator.get_size() << std::endl;
  std::cout << "final_size: " << received << std::endl;
  return received == root;
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective reduce
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
/**
 * @brief A simple sanity check that NCCL is able to perform a collective reduce.
 *
 * Every rank contributes `root`; after a SUM reduce the root must observe
 * root * size. Non-root ranks trivially succeed.
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *                   initialized comms instance.
 * @param[in] root the root rank id
 */
bool test_collective_reduce(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  int const contribution = root;
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_scalar<int> scratch_d(stream);
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(scratch_d.data(), &contribution, sizeof(int), cudaMemcpyHostToDevice, stream));
  communicator.reduce(scratch_d.data(), scratch_d.data(), 1, op_t::SUM, root, stream);
  communicator.sync_stream(stream);
  int reduced_h = -1;  // Verify more than one byte is being sent
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(&reduced_h, scratch_d.data(), sizeof(int), cudaMemcpyDeviceToHost, stream));
  resource::sync_stream(handle, stream);
  communicator.barrier();
  std::cout << "Clique size: " << communicator.get_size() << std::endl;
  std::cout << "final_size: " << reduced_h << std::endl;
  // Only the root holds the reduction result; everyone else passes.
  if (communicator.get_rank() != root) { return true; }
  return reduced_h == root * communicator.get_size();
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective allgather
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
/**
 * @brief A simple sanity check that NCCL is able to perform a collective allgather.
 *
 * Each rank contributes its own id; after the allgather every rank must
 * hold the full [0, size) sequence.
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *                   initialized comms instance.
 * @param[in] root   the root rank id (unused here; kept for a uniform test signature)
 */
bool test_collective_allgather(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  int const send = communicator.get_rank();
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_scalar<int> temp_d(stream);
  rmm::device_uvector<int> recv_d(communicator.get_size(), stream);
  RAFT_CUDA_TRY(cudaMemcpyAsync(temp_d.data(), &send, sizeof(int), cudaMemcpyHostToDevice, stream));
  communicator.allgather(temp_d.data(), recv_d.data(), 1, stream);
  communicator.sync_stream(stream);
  // std::vector replaces the previous variable-length array, which is a
  // non-standard compiler extension in C++.
  std::vector<int> temp_h(communicator.get_size(), 0);
  RAFT_CUDA_TRY(cudaMemcpyAsync(temp_h.data(),
                                recv_d.data(),
                                sizeof(int) * communicator.get_size(),
                                cudaMemcpyDeviceToHost,
                                stream));
  resource::sync_stream(handle, stream);
  communicator.barrier();
  std::cout << "Clique size: " << communicator.get_size() << std::endl;
  // (The previous code streamed the array itself, printing only a pointer.)
  std::cout << "final_size: " << temp_h.size() << std::endl;
  for (int i = 0; i < communicator.get_size(); i++) {
    if (temp_h[i] != i) return false;
  }
  return true;
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective gather
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
// Each rank sends its own id to the root; the root verifies it received
// the full [0, size) sequence. Non-root ranks trivially succeed.
bool test_collective_gather(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  int const send = communicator.get_rank();
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_scalar<int> temp_d(stream);
  // Only the root needs a receive buffer; other ranks allocate nothing.
  rmm::device_uvector<int> recv_d(communicator.get_rank() == root ? communicator.get_size() : 0,
                                  stream);
  RAFT_CUDA_TRY(cudaMemcpyAsync(temp_d.data(), &send, sizeof(int), cudaMemcpyHostToDevice, stream));
  communicator.gather(temp_d.data(), recv_d.data(), 1, root, stream);
  communicator.sync_stream(stream);
  if (communicator.get_rank() == root) {
    std::vector<int> temp_h(communicator.get_size(), 0);
    RAFT_CUDA_TRY(cudaMemcpyAsync(
      temp_h.data(), recv_d.data(), sizeof(int) * temp_h.size(), cudaMemcpyDeviceToHost, stream));
    resource::sync_stream(handle, stream);
    // Gathered chunk r must equal rank r's id.
    for (int i = 0; i < communicator.get_size(); i++) {
      if (temp_h[i] != i) return false;
    }
  }
  return true;
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective gatherv
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
// Rank r sends (r + 1) copies of its own id; the root gathers the
// variable-sized chunks at the corresponding displacements and checks
// that chunk r contains only the value r.
bool test_collective_gatherv(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  // sendcounts = {1, 2, ..., size}; rank r contributes sendcounts[r] items.
  std::vector<size_t> sendcounts(communicator.get_size());
  std::iota(sendcounts.begin(), sendcounts.end(), size_t{1});
  // displacements[r] = prefix sum of counts = element offset of rank r's chunk;
  // displacements.back() is the total gathered size.
  std::vector<size_t> displacements(communicator.get_size() + 1, 0);
  std::partial_sum(sendcounts.begin(), sendcounts.end(), displacements.begin() + 1);
  // This rank's payload: its chunk size, filled with its rank id.
  std::vector<int> sends(
    displacements[communicator.get_rank() + 1] - displacements[communicator.get_rank()],
    communicator.get_rank());
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_uvector<int> temp_d(sends.size(), stream);
  // Only the root needs a receive buffer (and the counts/displacements below).
  rmm::device_uvector<int> recv_d(communicator.get_rank() == root ? displacements.back() : 0,
                                  stream);
  RAFT_CUDA_TRY(cudaMemcpyAsync(
    temp_d.data(), sends.data(), sends.size() * sizeof(int), cudaMemcpyHostToDevice, stream));
  communicator.gatherv(
    temp_d.data(),
    recv_d.data(),
    temp_d.size(),
    communicator.get_rank() == root ? sendcounts.data() : static_cast<size_t*>(nullptr),
    communicator.get_rank() == root ? displacements.data() : static_cast<size_t*>(nullptr),
    root,
    stream);
  communicator.sync_stream(stream);
  if (communicator.get_rank() == root) {
    std::vector<int> temp_h(displacements.back(), 0);
    RAFT_CUDA_TRY(cudaMemcpyAsync(temp_h.data(),
                                  recv_d.data(),
                                  sizeof(int) * displacements.back(),
                                  cudaMemcpyDeviceToHost,
                                  stream));
    resource::sync_stream(handle, stream);
    // Chunk i (elements [displacements[i], displacements[i+1])) must all equal i.
    for (int i = 0; i < communicator.get_size(); i++) {
      if (std::count_if(temp_h.begin() + displacements[i],
                        temp_h.begin() + displacements[i + 1],
                        [i](auto val) { return val != i; }) != 0) {
        return false;
      }
    }
  }
  return true;
}
/**
* @brief A simple sanity check that NCCL is able to perform a collective reducescatter
*
* @param[in] handle the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] root the root rank id
*/
/**
 * @brief A simple sanity check that NCCL is able to perform a collective reducescatter.
 *
 * Every rank contributes a vector of ones (one element per rank); after a
 * SUM reduce-scatter each rank's single chunk must equal the communicator
 * size.
 *
 * @param[in] handle the raft handle to use. This is expected to already have an
 *                   initialized comms instance.
 * @param[in] root the root rank id (unused here; kept for a uniform test signature)
 */
bool test_collective_reducescatter(raft::resources const& handle, int root)
{
  comms_t const& communicator = resource::get_comms(handle);
  std::vector<int> host_ones(communicator.get_size(), 1);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  rmm::device_uvector<int> send_d(host_ones.size(), stream);
  rmm::device_scalar<int> chunk_d(stream);
  RAFT_CUDA_TRY(cudaMemcpyAsync(send_d.data(),
                                host_ones.data(),
                                host_ones.size() * sizeof(int),
                                cudaMemcpyHostToDevice,
                                stream));
  communicator.reducescatter(send_d.data(), chunk_d.data(), 1, op_t::SUM, stream);
  communicator.sync_stream(stream);
  int chunk_h = -1;  // Verify more than one byte is being sent
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(&chunk_h, chunk_d.data(), sizeof(int), cudaMemcpyDeviceToHost, stream));
  resource::sync_stream(handle, stream);
  communicator.barrier();
  std::cout << "Clique size: " << communicator.get_size() << std::endl;
  std::cout << "final_size: " << chunk_h << std::endl;
  return chunk_h == communicator.get_size();
}
/**
* A simple sanity check that UCX is able to send messages between all ranks
*
* @param[in] h the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param[in] numTrials number of iterations of all-to-all messaging to perform
*/
// Every rank posts one irecv from and one isend to every other rank, waits
// for all requests, then each rank in turn prints what it received. A rank
// fails if any slot is still the -1 sentinel (i.e. a message never arrived).
bool test_pointToPoint_simple_send_recv(raft::resources const& h, int numTrials)
{
  comms_t const& communicator = resource::get_comms(h);
  int const rank = communicator.get_rank();
  bool ret = true;
  for (int i = 0; i < numTrials; i++) {
    // One slot per peer, initialized to a sentinel so missing messages show up.
    std::vector<int> received_data((communicator.get_size() - 1), -1);
    std::vector<request_t> requests;
    requests.resize(2 * (communicator.get_size() - 1));
    int request_idx = 0;
    // post receives
    for (int r = 0; r < communicator.get_size(); ++r) {
      if (r != rank) {
        communicator.irecv(
          received_data.data() + request_idx, 1, r, 0, requests.data() + request_idx);
        ++request_idx;
      }
    }
    // post sends (each message is this rank's id)
    for (int r = 0; r < communicator.get_size(); ++r) {
      if (r != rank) {
        communicator.isend(&rank, 1, r, 0, requests.data() + request_idx);
        ++request_idx;
      }
    }
    communicator.waitall(requests.size(), requests.data());
    communicator.barrier();
    if (communicator.get_rank() == 0) {
      std::cout << "=========================" << std::endl;
      std::cout << "Trial " << i << std::endl;
    }
    // Print in rank order. NOTE(review): the barrier inside the element loop
    // below is only executed by the currently-printing rank; barrier counts
    // still balance over the whole outer loop (2*size - 1 per rank), but the
    // pattern is fragile — confirm it is intentional.
    for (int printrank = 0; printrank < communicator.get_size(); ++printrank) {
      if (communicator.get_rank() == printrank) {
        std::cout << "Rank " << communicator.get_rank() << " received: [";
        for (size_t i = 0; i < received_data.size(); i++) {
          auto rec = received_data[i];
          std::cout << rec;
          if (rec == -1) ret = false;
          communicator.barrier();
          if (i < received_data.size() - 1) std::cout << ", ";
        }
        std::cout << "]" << std::endl;
      }
      communicator.barrier();
    }
    if (communicator.get_rank() == 0) std::cout << "=========================" << std::endl;
  }
  return ret;
}
/**
* A simple sanity check that device is able to send OR receive.
*
* @param h the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param numTrials number of iterations of send or receive messaging to perform
*/
// Even ranks send their id to rank+1; odd ranks receive from rank-1 and
// verify the received value. An even rank with no higher neighbor simply
// does nothing for that trial.
bool test_pointToPoint_device_send_or_recv(raft::resources const& h, int numTrials)
{
  comms_t const& communicator = resource::get_comms(h);
  int const rank = communicator.get_rank();
  cudaStream_t stream = resource::get_cuda_stream(h);
  bool ret = true;
  for (int i = 0; i < numTrials; i++) {
    if (communicator.get_rank() == 0) {
      std::cout << "=========================" << std::endl;
      std::cout << "Trial " << i << std::endl;
    }
    // Even ranks are senders, odd ranks are receivers.
    bool sender = (rank % 2) == 0 ? true : false;
    rmm::device_scalar<int> received_data(-1, stream);
    rmm::device_scalar<int> sent_data(rank, stream);
    if (sender) {
      if (rank + 1 < communicator.get_size()) {
        communicator.device_send(sent_data.data(), 1, rank + 1, stream);
      }
    } else {
      // Every odd rank always has a lower even neighbor to receive from.
      communicator.device_recv(received_data.data(), 1, rank - 1, stream);
    }
    communicator.sync_stream(stream);
    if (!sender && received_data.value(stream) != rank - 1) { ret = false; }
    if (communicator.get_rank() == 0) { std::cout << "=========================" << std::endl; }
  }
  return ret;
}
/**
* A simple sanity check that device is able to send and receive at the same time.
*
* @param h the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param numTrials number of iterations of send or receive messaging to perform
*/
/**
 * A simple sanity check that device is able to send and receive at the same time.
 *
 * Adjacent ranks are paired (0<->1, 2<->3, ...) and exchange their ids via
 * device_sendrecv; each participant verifies it received its partner's id.
 *
 * @param h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param numTrials number of iterations of send or receive messaging to perform
 */
bool test_pointToPoint_device_sendrecv(raft::resources const& h, int numTrials)
{
  comms_t const& communicator = resource::get_comms(h);
  int const rank = communicator.get_rank();
  cudaStream_t stream = resource::get_cuda_stream(h);
  bool ret = true;
  for (int i = 0; i < numTrials; i++) {
    if (communicator.get_rank() == 0) {
      std::cout << "=========================" << std::endl;
      std::cout << "Trial " << i << std::endl;
    }
    rmm::device_scalar<int> received_data(-1, stream);
    rmm::device_scalar<int> sent_data(rank, stream);
    // With an odd communicator size the last even rank has no partner; it
    // must skip both the exchange and the check below (previously its
    // received_data stayed -1 and the trial was reported as a failure).
    bool const has_partner = (rank % 2 == 1) || (rank + 1 < communicator.get_size());
    if (rank % 2 == 0) {
      if (has_partner) {
        communicator.device_sendrecv(
          sent_data.data(), 1, rank + 1, received_data.data(), 1, rank + 1, stream);
      }
    } else {
      communicator.device_sendrecv(
        sent_data.data(), 1, rank - 1, received_data.data(), 1, rank - 1, stream);
    }
    communicator.sync_stream(stream);
    if (has_partner &&
        (((rank % 2 == 0) && (received_data.value(stream) != rank + 1)) ||
         ((rank % 2 == 1) && (received_data.value(stream) != rank - 1)))) {
      ret = false;
    }
    if (communicator.get_rank() == 0) { std::cout << "=========================" << std::endl; }
  }
  return ret;
}
/**
* A simple sanity check that device is able to perform multiple concurrent sends and receives.
*
* @param h the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param numTrials number of iterations of send or receive messaging to perform
*/
/**
 * A simple sanity check that device is able to perform multiple concurrent sends and receives.
 *
 * Every rank multicasts its own id (one byte-sized message per peer) to all
 * ranks, and concurrently receives one message from every rank; slot i of
 * the receive buffer must end up holding i.
 *
 * @param h the raft handle to use. This is expected to already have an
 *        initialized comms instance.
 * @param numTrials number of iterations of send or receive messaging to perform
 */
bool test_pointToPoint_device_multicast_sendrecv(raft::resources const& h, int numTrials)
{
  comms_t const& communicator = resource::get_comms(h);
  int const rank = communicator.get_rank();
  cudaStream_t stream = resource::get_cuda_stream(h);
  int const clique_size = communicator.get_size();
  bool ret = true;
  for (int trial = 0; trial < numTrials; trial++) {
    if (communicator.get_rank() == 0) {
      std::cout << "=========================" << std::endl;
      std::cout << "Trial " << trial << std::endl;
    }
    rmm::device_uvector<int> received_data(clique_size, stream);
    rmm::device_scalar<int> sent_data(rank, stream);
    // Send the same single element (offset 0) to every rank...
    std::vector<size_t> sendsizes(clique_size, 1);
    std::vector<size_t> sendoffsets(clique_size, 0);
    std::vector<int> dests(clique_size);
    std::iota(dests.begin(), dests.end(), int{0});
    // ...and receive one element from every rank into slot `source`.
    std::vector<size_t> recvsizes(clique_size, 1);
    std::vector<size_t> recvoffsets(clique_size);
    std::iota(recvoffsets.begin(), recvoffsets.end(), size_t{0});
    std::vector<int> srcs(clique_size);
    std::iota(srcs.begin(), srcs.end(), int{0});
    communicator.device_multicast_sendrecv(sent_data.data(),
                                           sendsizes,
                                           sendoffsets,
                                           dests,
                                           received_data.data(),
                                           recvsizes,
                                           recvoffsets,
                                           srcs,
                                           stream);
    communicator.sync_stream(stream);
    std::vector<int> h_received_data(clique_size);
    raft::update_host(h_received_data.data(), received_data.data(), received_data.size(), stream);
    resource::sync_stream(h, stream);
    for (int r = 0; r < clique_size; ++r) {
      if (h_received_data[r] != r) { ret = false; }
    }
    if (communicator.get_rank() == 0) { std::cout << "=========================" << std::endl; }
  }
  return ret;
}
/**
* A simple test that the comms can be split into 2 separate subcommunicators
*
* @param h the raft handle to use. This is expected to already have an
* initialized comms instance.
* @param n_colors number of different colors to test
*/
// Split the communicator into n_colors subcommunicators (round-robin by
// rank) and run the allreduce sanity check on the resulting sub-clique.
bool test_commsplit(raft::resources const& h, int n_colors)
{
  comms_t const& communicator = resource::get_comms(h);
  int const rank = communicator.get_rank();
  int const size = communicator.get_size();
  // Cannot have more colors than ranks.
  if (n_colors > size) n_colors = size;
  // first we need to assign to a color, then assign the rank within the color
  int color = rank % n_colors;
  int key = rank / n_colors;
  // Build a fresh handle owning the split communicator, then reuse the
  // allreduce test on it.
  auto stream_pool = std::make_shared<rmm::cuda_stream_pool>(1);
  handle_t new_handle(rmm::cuda_stream_default, stream_pool);
  auto shared_comm = std::make_shared<comms_t>(communicator.comm_split(color, key));
  new_handle.set_comms(shared_comm);
  return test_collective_allreduce(new_handle, 0);
}
} // namespace detail
} // namespace comms
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/comms | rapidsai_public_repos/raft/cpp/include/raft/comms/detail/ucp_helper.hpp | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cudart_utils.hpp>
#include <stdio.h>
#include <ucp/api/ucp.h>
#include <ucp/api/ucp_def.h>
namespace raft {
namespace comms {
namespace detail {
/**
* Standard UCX request object that will be passed
* around asynchronously. This object is really
* opaque and the comms layer only cares that it
* has been completed. Because raft comms do not
* initialize the ucx application context, it doesn't
* own this object and thus it's important not to
* modify this struct.
*/
struct ucx_context {
  int completed;  // set to 1 by the send/recv callbacks when the operation finishes
};
/**
 * Wraps the `ucx_context` request and adds a few
 * other fields for trace logging and cleanup.
 */
class ucp_request {
 public:
  struct ucx_context* req;      // UCX-owned request handle (may be null/completed)
  bool needs_release = true;    // whether ucp_request_free must be called on req
  int other_rank = -1;          // peer rank, for trace logging
  bool is_send_request = false; // true for isend, false for irecv
};
// by default, match the whole tag
static const ucp_tag_t default_tag_mask = (ucp_tag_t)-1;
/**
 * @brief Asynchronous send callback sets request to completed
 *
 * Invoked by UCX when a tag send finishes; the request pointer is the
 * ucx_context allocated by UCX for this operation.
 */
static void send_callback(void* request, ucs_status_t status)
{
  struct ucx_context* context = (struct ucx_context*)request;
  context->completed = 1;
}
/**
 * @brief Asynchronous recv callback sets request to completed
 *
 * Invoked by UCX when a tag receive finishes.
 */
static void recv_callback(void* request, ucs_status_t status, ucp_tag_recv_info_t* info)
{
  struct ucx_context* context = (struct ucx_context*)request;
  context->completed = 1;
}
/**
* Helper class for interacting with ucp.
*/
/**
 * Helper class for interacting with ucp.
 *
 * Thin stateless wrapper around UCX tag-matched send/receive; all state
 * lives in the caller-provided ucp_request objects.
 */
class comms_ucp_handler {
 private:
  // Combine (rank, tag) into a single UCX message tag.
  // NOTE(review): shifting by 31 leaves only one bit for `tag` in a 32-bit
  // value — tags > 1 collide. Confirm whether a smaller shift was intended.
  ucp_tag_t build_message_tag(int rank, int tag) const
  {
    // keeping the rank in the lower bits enables debugging.
    return ((uint32_t)tag << 31) | (uint32_t)rank;
  }
 public:
  /**
   * @brief Frees any memory underlying the given ucp request object
   *
   * Releases the UCX-owned context (resetting its completed flag first)
   * when the request was left in flight, then frees the wrapper itself,
   * which is assumed to have been allocated with malloc.
   */
  void free_ucp_request(ucp_request* request) const
  {
    if (request->needs_release) {
      request->req->completed = 0;
      ucp_request_free(request->req);
    }
    free(request);
  }
  /**
   * @brief Asynchronously send data to the given endpoint using the given tag
   *
   * @param req      wrapper to populate with the resulting UCX request state
   * @param ep_ptr   destination endpoint
   * @param buf      bytes to send
   * @param size     number of bytes
   * @param tag      user tag combined with `rank` into the UCX tag
   * @param tag_mask unused here (sends carry the full tag)
   * @param rank     receiving rank, encoded into the tag and logged
   */
  void ucp_isend(ucp_request* req,
                 ucp_ep_h ep_ptr,
                 const void* buf,
                 size_t size,
                 int tag,
                 ucp_tag_t tag_mask,
                 int rank) const
  {
    ucp_tag_t ucp_tag = build_message_tag(rank, tag);
    // Non-blocking send: returns an error pointer, UCS_OK (completed
    // inline, nullptr request), or an in-flight request pointer.
    ucs_status_ptr_t send_result =
      ucp_tag_send_nb(ep_ptr, buf, size, ucp_dt_make_contig(1), ucp_tag, send_callback);
    struct ucx_context* ucp_req = (struct ucx_context*)send_result;
    if (UCS_PTR_IS_ERR(send_result)) {
      ASSERT(!UCS_PTR_IS_ERR(send_result),
             "unable to send UCX data message (%d)\n",
             UCS_PTR_STATUS(send_result));
      /**
       * If the request didn't fail, but it's not OK, it is in flight.
       * Expect the handler to be invoked
       */
    } else if (UCS_PTR_STATUS(send_result) != UCS_OK) {
      /**
       * If the request is OK, it's already been completed and we don't need to wait on it.
       * The request will be a nullptr, however, so we need to create a new request
       * and set it to completed to make the "waitall()" function work properly.
       */
      // NOTE(review): despite the comment above, this branch handles the
      // in-flight case (status != UCS_OK) and only flags needs_release; no
      // replacement request is created here — confirm against waitall().
      req->needs_release = true;
    } else {
      // Completed inline (UCS_OK): UCX returned no request to release.
      req->needs_release = false;
    }
    req->other_rank = rank;
    req->is_send_request = true;
    req->req = ucp_req;
  }
  /**
   * @brief Asynchronously receive data from given endpoint with the given tag.
   *
   * Posts a tag-matched receive on `worker`; the resulting UCX request is
   * always released later via free_ucp_request (needs_release = true).
   */
  void ucp_irecv(ucp_request* req,
                 ucp_worker_h worker,
                 ucp_ep_h ep_ptr,
                 void* buf,
                 size_t size,
                 int tag,
                 ucp_tag_t tag_mask,
                 int sender_rank) const
  {
    ucp_tag_t ucp_tag = build_message_tag(sender_rank, tag);
    ucs_status_ptr_t recv_result =
      ucp_tag_recv_nb(worker, buf, size, ucp_dt_make_contig(1), ucp_tag, tag_mask, recv_callback);
    struct ucx_context* ucp_req = (struct ucx_context*)recv_result;
    req->req = ucp_req;
    req->needs_release = true;
    req->is_send_request = false;
    req->other_rank = sender_rank;
    ASSERT(!UCS_PTR_IS_ERR(recv_result),
           "unable to receive UCX data message (%d)\n",
           UCS_PTR_STATUS(recv_result));
  }
};
} // end namespace detail
} // end namespace comms
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ivf_pq_types.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this file is deprecated and will be removed in a future release.
 * Please use raft/neighbors/ivf_pq_types.hpp instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/ivf_pq_types.hpp>
namespace raft::spatial::knn::ivf_pq {
using raft::neighbors::ivf_pq::codebook_gen;
using raft::neighbors::ivf_pq::index;
using raft::neighbors::ivf_pq::index_params;
using raft::neighbors::ivf_pq::search_params;
} // namespace raft::spatial::knn::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ann_common.h | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma message( \
__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the other approximate KNN implementations defined in spatial/knn/*.")
#pragma once
#include "detail/processing.hpp"
#include "ivf_flat_types.hpp"
#include <raft/neighbors/ivf_pq_types.hpp>
#include <raft/distance/distance_types.hpp>
namespace raft {
namespace spatial {
namespace knn {
// Legacy container bundling an ANN index (IVF-Flat for float/uint8/int8, or
// IVF-PQ) together with its metric configuration and preprocessing state.
struct knnIndex {
  raft::distance::DistanceType metric;
  float metricArg;  // metric parameter (e.g. p for Minkowski) — see distance API
  int nprobe;       // number of IVF lists probed at search time
  std::unique_ptr<MetricProcessor<float>> metric_processor;
  // At most one of the following index slots is populated, depending on the
  // element type the index was built with.
  std::unique_ptr<const ivf_flat::index<float, int64_t>> ivf_flat_float_;
  std::unique_ptr<const ivf_flat::index<uint8_t, int64_t>> ivf_flat_uint8_t_;
  std::unique_ptr<const ivf_flat::index<int8_t, int64_t>> ivf_flat_int8_t_;
  std::unique_ptr<const raft::neighbors::ivf_pq::index<int64_t>> ivf_pq;
  int device;  // CUDA device the index lives on
  // Typed accessor selecting the matching ivf_flat_* member (specialized below).
  template <typename T, typename IdxT>
  auto ivf_flat() -> std::unique_ptr<const ivf_flat::index<T, IdxT>>&;
};
// Specialization mapping <float, int64_t> onto ivf_flat_float_.
template <>
inline auto knnIndex::ivf_flat<float, int64_t>()
  -> std::unique_ptr<const ivf_flat::index<float, int64_t>>&
{
  return ivf_flat_float_;
}
// Specialization mapping <uint8_t, int64_t> onto ivf_flat_uint8_t_.
template <>
inline auto knnIndex::ivf_flat<uint8_t, int64_t>()
  -> std::unique_ptr<const ivf_flat::index<uint8_t, int64_t>>&
{
  return ivf_flat_uint8_t_;
}
// Specialization mapping <int8_t, int64_t> onto ivf_flat_int8_t_.
template <>
inline auto knnIndex::ivf_flat<int8_t, int64_t>()
  -> std::unique_ptr<const ivf_flat::index<int8_t, int64_t>>&
{
  return ivf_flat_int8_t_;
}
// Polymorphic base for legacy ANN build parameters.
struct knnIndexParam {
  virtual ~knnIndexParam() {}
};
// Shared IVF parameters: number of inverted lists and lists probed per query.
struct IVFParam : knnIndexParam {
  int nlist;
  int nprobe;
};
// IVF-Flat adds nothing beyond the base IVF parameters.
struct IVFFlatParam : IVFParam {};
// IVF-PQ parameters: sub-quantizer count, bits per code, and table option.
struct IVFPQParam : IVFParam {
  int M;       // number of product-quantizer sub-spaces
  int n_bits;  // bits per PQ code
  bool usePrecomputedTables;
};
// Translate legacy IVFFlatParam build settings into the modern
// ivf_flat::index_params. Note: only nlist is carried over; nprobe is a
// search-time parameter and is not part of the build params.
inline auto from_legacy_index_params(const IVFFlatParam& legacy,
                                     raft::distance::DistanceType metric,
                                     float metric_arg)
{
  ivf_flat::index_params params;
  params.metric = metric;
  params.metric_arg = metric_arg;
  params.n_lists = legacy.nlist;
  return params;
}
}; // namespace knn
}; // namespace spatial
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ivf_flat.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this file is deprecated and will be removed in a future release.
 * Please use raft/neighbors/ivf_flat.cuh instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/ivf_flat.cuh>
namespace raft::spatial::knn::ivf_flat {
using raft::neighbors::ivf_flat::build;
using raft::neighbors::ivf_flat::extend;
using raft::neighbors::ivf_flat::search;
}; // namespace raft::spatial::knn::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/epsilon_neighborhood.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * DISCLAIMER: this file is deprecated and will be removed in a future release.
 * Please use raft/neighbors/epsilon_neighborhood.cuh instead.
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/epsilon_neighborhood.cuh>
namespace raft::spatial::knn {
using raft::neighbors::epsilon_neighborhood::eps_neighbors_l2sq;
using raft::neighbors::epsilon_neighborhood::epsUnexpL2SqNeighborhood;
} // namespace raft::spatial::knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/knn.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/matrix/detail/select_radix.cuh>
#include <raft/matrix/detail/select_warpsort.cuh>
#include <raft/neighbors/detail/knn_brute_force.cuh>
#include <raft/neighbors/detail/selection_faiss.cuh>
namespace raft::spatial::knn {
/**
 * Performs a k-select across row partitioned index/distance
 * matrices formatted like the following:
 * row1: k0, k1, k2
 * row2: k0, k1, k2
 * row3: k0, k1, k2
 * row1: k0, k1, k2
 * row2: k0, k1, k2
 * row3: k0, k1, k2
 *
 * etc...
 *
 * @tparam idx_t index/payload type selected alongside the keys
 * @tparam value_t type of the keys (distances) being compared
 * @param in_keys row-major input keys: n_parts partitions of n_samples rows, k columns each
 * @param in_values row-major input values (indices) matching in_keys
 * @param out_keys merged output keys: n_samples rows, k columns
 * @param out_values merged output values: n_samples rows, k columns
 * @param n_samples number of rows per partition
 * @param n_parts number of row partitions to merge
 * @param k number of elements selected per row
 * @param stream CUDA stream to enqueue the selection on
 * @param translations per-partition index offsets (see
 *        raft::neighbors::detail::knn_merge_parts for exact semantics)
 */
template <typename idx_t = int64_t, typename value_t = float>
inline void knn_merge_parts(const value_t* in_keys,
                            const idx_t* in_values,
                            value_t* out_keys,
                            idx_t* out_values,
                            size_t n_samples,
                            int n_parts,
                            int k,
                            cudaStream_t stream,
                            idx_t* translations)
{
  // Backward-compatibility shim: the implementation moved to raft::neighbors::detail.
  raft::neighbors::detail::knn_merge_parts(
    in_keys, in_values, out_keys, out_values, n_samples, n_parts, k, stream, translations);
}
/** Choose an implementation for the select-top-k (used by the deprecated `select_k` below). */
enum class SelectKAlgo {
  /** Adapted from the faiss project. Result: sorted (not stable). */
  FAISS,
  /** Incomplete series of radix sort passes, comparing 8 bits per pass. Result: unsorted. */
  RADIX_8_BITS,
  /** Incomplete series of radix sort passes, comparing 11 bits per pass. Result: unsorted. */
  RADIX_11_BITS,
  /** Filtering with a bitonic-sort-based priority queue. Result: sorted (not stable). */
  WARP_SORT
};
/**
* Select k smallest or largest key/values from each row in the input data.
*
* If you think of the input data `in_keys` as a row-major matrix with input_len columns and
* n_inputs rows, then this function selects k smallest/largest values in each row and fills
* in the row-major matrix `out_keys` of size (n_inputs, k).
*
* Note, depending on the selected algorithm, the values within rows of `out_keys` are not
* necessarily sorted. See the `SelectKAlgo` enumeration for more details.
*
* Note: This call is deprecated, please use `raft/matrix/select_k.cuh`
*
* @tparam idx_t
* the payload type (what is being selected together with the keys).
* @tparam value_t
* the type of the keys (what is being compared).
*
* @param[in] in_keys
* contiguous device array of inputs of size (input_len * n_inputs);
* these are compared and selected.
* @param[in] in_values
* contiguous device array of inputs of size (input_len * n_inputs);
* typically, these are indices of the corresponding in_keys.
* You can pass `NULL` as an argument here; this would imply `in_values` is a homogeneous array
* of indices from `0` to `input_len - 1` for every input and reduce the usage of memory
* bandwidth.
* @param[in] n_inputs
* number of input rows, i.e. the batch size.
* @param[in] input_len
* length of a single input array (row); also sometimes referred as n_cols.
* Invariant: input_len >= k.
* @param[out] out_keys
* contiguous device array of outputs of size (k * n_inputs);
* the k smallest/largest values from each row of the `in_keys`.
* @param[out] out_values
* contiguous device array of outputs of size (k * n_inputs);
* the payload selected together with `out_keys`.
* @param[in] select_min
* whether to select k smallest (true) or largest (false) keys.
* @param[in] k
* the number of outputs to select in each input row.
* @param[in] stream
* @param[in] algo
* the implementation of the algorithm
*/
template <typename idx_t = int, typename value_t = float>
[[deprecated("Use function `select_k` from `raft/matrix/select_k.cuh`")]] inline void select_k(
  const value_t* in_keys,
  const idx_t* in_values,
  size_t n_inputs,
  size_t input_len,
  value_t* out_keys,
  idx_t* out_values,
  bool select_min,
  int k,
  cudaStream_t stream,
  SelectKAlgo algo = SelectKAlgo::FAISS)
{
  // Annotate the NVTX profiling range with direction, k, problem shape, and algorithm id.
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("select-%s-%d (%zu, %zu) algo-%d",
                                                            select_min ? "min" : "max",
                                                            k,
                                                            n_inputs,
                                                            input_len,
                                                            int(algo));
  ASSERT(size_t(input_len) >= size_t(k),
         "Size of the input (input_len = %zu) must be not smaller than the selection (k = %zu).",
         size_t(input_len),
         size_t(k));
  // Dispatch to one of the backend implementations; note the differing argument orders
  // and the extra boolean taken by the radix variants.
  switch (algo) {
    case SelectKAlgo::FAISS:  // sorted (not stable) result
      neighbors::detail::select_k(
        in_keys, in_values, n_inputs, input_len, out_keys, out_values, select_min, k, stream);
      break;
    case SelectKAlgo::RADIX_8_BITS:  // 8 bits per pass; unsorted result
      matrix::detail::select::radix::select_k<value_t, idx_t, 8, 512>(
        in_keys, in_values, n_inputs, input_len, k, out_keys, out_values, select_min, true, stream);
      break;
    case SelectKAlgo::RADIX_11_BITS:  // 11 bits per pass; unsorted result
      matrix::detail::select::radix::select_k<value_t, idx_t, 11, 512>(
        in_keys, in_values, n_inputs, input_len, k, out_keys, out_values, select_min, true, stream);
      break;
    case SelectKAlgo::WARP_SORT:  // bitonic priority queue; sorted (not stable) result
      matrix::detail::select::warpsort::select_k<value_t, idx_t>(
        in_keys, in_values, n_inputs, input_len, k, out_keys, out_values, select_min, stream);
      break;
    default: ASSERT(false, "Unknown algorithm (id = %d)", int(algo));
  }
}
/**
* @brief Flat C++ API function to perform a brute force knn on
* a series of input arrays and combine the results into a single
* output array for indexes and distances.
*
* @param[in] handle the cuml handle to use
* @param[in] input vector of pointers to the input arrays
* @param[in] sizes vector of sizes of input arrays
* @param[in] D the dimensionality of the arrays
* @param[in] search_items array of items to search of dimensionality D
* @param[in] n number of rows in search_items
* @param[out] res_I the resulting index array of size n * k
* @param[out] res_D the resulting distance array of size n * k
* @param[in] k the number of nearest neighbors to return
* @param[in] rowMajorIndex are the index arrays in row-major order?
* @param[in] rowMajorQuery are the query arrays in row-major order?
* @param[in] metric distance metric to use. Euclidean (L2) is used by
* default
* @param[in] metric_arg the value of `p` for Minkowski (l-p) distances. This
* is ignored if the metric_type is not Minkowski.
* @param[in] translations starting offsets for partitions. should be the same size
* as input vector.
*/
template <typename idx_t = std::int64_t, typename value_t = float, typename value_int = int>
void brute_force_knn(raft::resources const& handle,
                     std::vector<value_t*>& input,
                     std::vector<value_int>& sizes,
                     value_int D,
                     value_t* search_items,
                     value_int n,
                     idx_t* res_I,
                     value_t* res_D,
                     value_int k,
                     bool rowMajorIndex = true,
                     bool rowMajorQuery = true,
                     std::vector<idx_t>* translations = nullptr,
                     distance::DistanceType metric = distance::DistanceType::L2Unexpanded,
                     float metric_arg = 2.0f)
{
  // Each entry in `input` is a separate index partition; `sizes` gives its row count.
  ASSERT(input.size() == sizes.size(), "input and sizes vectors must be the same size");
  // Backward-compatibility shim: forward to the raft::neighbors implementation.
  raft::neighbors::detail::brute_force_knn_impl(handle,
                                                input,
                                                sizes,
                                                D,
                                                search_items,
                                                n,
                                                res_I,
                                                res_D,
                                                k,
                                                rowMajorIndex,
                                                rowMajorQuery,
                                                translations,
                                                metric,
                                                metric_arg);
}
} // namespace raft::spatial::knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ann_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/distance_types.hpp>
namespace raft::spatial::knn {
/** The base for approximate KNN index structures. */
struct index {};
/** The base for KNN index parameters. */
struct index_params {
  /** Distance type. */
  raft::distance::DistanceType metric = distance::DistanceType::L2Expanded;
  /** The argument used by some distance metrics (e.g. the `p` of Minkowski). */
  float metric_arg = 2.0f;
  /**
   * Whether to add the dataset content to the index, i.e.:
   *
   * - `true` means the index is filled with the dataset vectors and ready to search after calling
   * `build`.
   * - `false` means `build` only trains the underlying model (e.g. quantizer or clustering), but
   * the index is left empty; you'd need to call `extend` on the index afterwards to populate it.
   */
  bool add_data_on_build = true;
};
/** The base for KNN search parameters. */
struct search_params {};
}; // namespace raft::spatial::knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ivf_pq.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use <raft/neighbors/ivf_pq.cuh> instead.
*/
/**
* DISCLAIMER: this file is deprecated: use raft/neighbors/ivf_pq.cuh instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/ivf_pq.cuh>
namespace raft::spatial::knn::ivf_pq {
// Backward-compatibility aliases; the IVF-PQ API now lives in raft::neighbors::ivf_pq.
using raft::neighbors::ivf_pq::build;
using raft::neighbors::ivf_pq::extend;
using raft::neighbors::ivf_pq::search;
} // namespace raft::spatial::knn::ivf_pq
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ball_cover.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use <raft/neighbors/ball_cover.cuh> instead.
*/
/**
* DISCLAIMER: this file is deprecated: use raft/neighbors/ball_cover.cuh instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/ball_cover.cuh>
#include <raft/spatial/knn/ball_cover_types.hpp>
namespace raft::spatial::knn {
/** @brief Deprecated shim: builds a random ball cover index by forwarding to
 *  raft::neighbors::ball_cover::build_index. */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void rbc_build_index(raft::resources const& handle,
                     BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index)
{
  raft::neighbors::ball_cover::build_index(handle, index);
}
/** @brief Deprecated shim: all-points kNN query on a ball cover index; forwards to
 *  raft::neighbors::ball_cover::all_knn_query. */
template <typename idx_t, typename value_t, typename int_t, typename matrix_idx_t>
void rbc_all_knn_query(raft::resources const& handle,
                       BallCoverIndex<idx_t, value_t, int_t, matrix_idx_t>& index,
                       int_t k,
                       idx_t* inds,
                       value_t* dists,
                       bool perform_post_filtering = true,
                       float weight = 1.0)
{
  raft::neighbors::ball_cover::all_knn_query(
    handle, index, k, inds, dists, perform_post_filtering, weight);
}
/** @brief Deprecated shim: kNN query for `n_query_pts` points against a ball cover index;
 *  forwards to raft::neighbors::ball_cover::knn_query. */
template <typename idx_t, typename value_t, typename int_t>
void rbc_knn_query(raft::resources const& handle,
                   const BallCoverIndex<idx_t, value_t, int_t>& index,
                   int_t k,
                   const value_t* query,
                   int_t n_query_pts,
                   idx_t* inds,
                   value_t* dists,
                   bool perform_post_filtering = true,
                   float weight = 1.0)
{
  raft::neighbors::ball_cover::knn_query(
    handle, index, k, query, n_query_pts, inds, dists, perform_post_filtering, weight);
}
} // namespace raft::spatial::knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/specializations.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ann.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_common.h"
#include "detail/ann_quantized.cuh"
#include <raft/core/nvtx.hpp>
namespace raft::spatial::knn {
/**
* @brief Flat C++ API function to build an approximate nearest neighbors index
* from an index array and a set of parameters.
*
* @param[in] handle RAFT handle
* @param[out] index index to be built
* @param[in] params parametrization of the index to be built
* @param[in] metric distance metric to use. Euclidean (L2) is used by default
* @param[in] metricArg metric argument
* @param[in] index_array the index array to build the index with
* @param[in] n number of rows in the index array
* @param[in] D the dimensionality of the index array
*/
template <typename T = float, typename value_idx = int>
[[deprecated("Consider using new-style raft::spatial::knn::*::build functions")]] inline void
approx_knn_build_index(raft::resources& handle,
                       raft::spatial::knn::knnIndex* index,
                       knnIndexParam* params,
                       raft::distance::DistanceType metric,
                       float metricArg,
                       T* index_array,
                       value_idx n,
                       value_idx D)
{
  // `value_idx` is a template parameter, so the NVTX label casts to size_t and uses
  // %zu; a fixed "%u" specifier would mismatch the vararg width for e.g. int64_t
  // indices (undefined behavior in printf-style formatting).
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "legacy approx_knn_build_index(n_rows = %zu, dim = %zu)", size_t(n), size_t(D));
  // Forward to the detail implementation (see detail/ann_quantized.cuh).
  detail::approx_knn_build_index(handle, index, params, metric, metricArg, index_array, n, D);
}
/**
* @brief Flat C++ API function to perform an approximate nearest neighbors
* search from previously built index and a query array
*
* @param[in] handle RAFT handle
* @param[out] distances distances of the nearest neighbors toward
* their query point
* @param[out] indices indices of the nearest neighbors
* @param[in] index index to perform a search with
* @param[in] k the number of nearest neighbors to search for
* @param[in] query_array the query to perform a search with
* @param[in] n number of rows in the query array
*/
template <typename T = float, typename value_idx = int>
[[deprecated("Consider using new-style raft::spatial::knn::*::search functions")]] inline void
approx_knn_search(raft::resources& handle,
                  float* distances,
                  int64_t* indices,
                  raft::spatial::knn::knnIndex* index,
                  value_idx k,
                  T* query_array,
                  value_idx n)
{
  // Cast the templated `value_idx` arguments to size_t and format with %zu; the
  // previous "%u" specifier was mismatched for signed/wide index types (UB in
  // printf-style formatting).
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "legacy approx_knn_search(k = %zu, n_queries = %zu)", size_t(k), size_t(n));
  // Forward to the detail implementation (see detail/ann_quantized.cuh).
  detail::approx_knn_search(handle, distances, indices, index, k, query_array, n);
}
} // namespace raft::spatial::knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ball_cover_types.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use <raft/neighbors/ball_cover_types.hpp> instead.
*/
/**
* DISCLAIMER: this file is deprecated: use raft/neighbors/ball_cover_types.hpp instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/ball_cover_types.hpp>
namespace raft::spatial::knn {
// Backward-compatibility alias; the type now lives in raft::neighbors::ball_cover.
using raft::neighbors::ball_cover::BallCoverIndex;
} // namespace raft::spatial::knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/ivf_flat_types.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use <raft/neighbors/ivf_flat_types.hpp> instead.
*/
/**
* DISCLAIMER: this file is deprecated: use raft/neighbors/ivf_flat_types.hpp instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft::neighbors version instead.")
#include <raft/neighbors/ivf_flat_types.hpp>
namespace raft::spatial::knn::ivf_flat {
// Backward-compatibility aliases; the IVF-flat types now live in raft::neighbors::ivf_flat.
using raft::neighbors::ivf_flat::index;
using raft::neighbors::ivf_flat::index_params;
using raft::neighbors::ivf_flat::kIndexGroupSize;
using raft::neighbors::ivf_flat::search_params;
}; // namespace raft::spatial::knn::ivf_flat
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/common.hpp | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use the ann_types.hpp version instead.
*/
#pragma once
#include <raft/spatial/knn/ann_types.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/fused_l2_knn-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef> // size_t
#include <cstdint> // uint32_t
#include <raft/distance/distance_types.hpp> // DistanceType
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#if defined(RAFT_EXPLICIT_INSTANTIATE_ONLY)
namespace raft::spatial::knn::detail {
/**
 * @brief Fused L2-distance + k-selection brute-force kNN (declaration only).
 *
 * Declared with RAFT_EXPLICIT for the explicit-instantiation-only build mode; the
 * definition is compiled separately (see the extern template instantiations below
 * for the supported type combinations).
 *
 * @tparam usePrevTopKs when true, presumably seeds the selection with previously
 *         computed top-k results — confirm against the -inl.cuh definition.
 */
template <typename value_idx, typename value_t, bool usePrevTopKs = false>
void fusedL2Knn(size_t D,
                value_idx* out_inds,
                value_t* out_dists,
                const value_t* index,
                const value_t* query,
                size_t n_index_rows,
                size_t n_query_rows,
                int k,
                bool rowMajorIndex,
                bool rowMajorQuery,
                cudaStream_t stream,
                raft::distance::DistanceType metric,
                const value_t* index_norms = NULL,
                const value_t* query_norms = NULL) RAFT_EXPLICIT;
} // namespace raft::spatial::knn::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_spatial_knn_detail_fusedL2Knn(Mvalue_idx, Mvalue_t, MusePrevTopKs) \
extern template void \
raft::spatial::knn::detail::fusedL2Knn<Mvalue_idx, Mvalue_t, MusePrevTopKs>( \
size_t D, \
Mvalue_idx * out_inds, \
Mvalue_t * out_dists, \
const Mvalue_t* index, \
const Mvalue_t* query, \
size_t n_index_rows, \
size_t n_query_rows, \
int k, \
bool rowMajorIndex, \
bool rowMajorQuery, \
cudaStream_t stream, \
raft::distance::DistanceType metric, \
const Mvalue_t* index_norms, \
const Mvalue_t* query_norms);
// Extern-template declarations: suppress implicit instantiation in consumers; the
// definitions for these combinations are provided elsewhere in the library.
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int32_t, float, false);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int64_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(int64_t, float, false);
// These are used by brute_force_knn:
instantiate_raft_spatial_knn_detail_fusedL2Knn(uint32_t, float, true);
instantiate_raft_spatial_knn_detail_fusedL2Knn(uint32_t, float, false);
#undef instantiate_raft_spatial_knn_detail_fusedL2Knn
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/haversine_distance.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/pow2_utils.cuh>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/detail/faiss_select/Select.cuh>
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
/**
 * Haversine (great-circle) distance between two points given in radians:
 * 2 * asin(sqrt(sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2))).
 * Here (x1, x2) is one point's (lat, lon) and (y1, y2) the other's.
 */
template <typename value_t>
DI value_t compute_haversine(value_t x1, value_t y1, value_t x2, value_t y2)
{
  const value_t half_dlat = raft::sin(0.5 * (x1 - y1));
  const value_t half_dlon = raft::sin(0.5 * (x2 - y2));
  const value_t a = half_dlat * half_dlat + raft::cos(x1) * raft::cos(y1) * half_dlon * half_dlon;
  return 2 * raft::asin(raft::sqrt(a));
}
/**
* @tparam value_idx data type of indices
* @tparam value_t data type of values and distances
* @tparam warp_q
* @tparam thread_q
* @tparam tpb
* @param[out] out_inds output indices
* @param[out] out_dists output distances
* @param[in] index index array
* @param[in] query query array
* @param[in] n_index_rows number of rows in index array
* @param[in] k number of closest neighbors to return
*/
template <typename value_idx, typename value_t, int warp_q = 1024, int thread_q = 8, int tpb = 128>
RAFT_KERNEL haversine_knn_kernel(value_idx* out_inds,
                                 value_t* out_dists,
                                 const value_t* index,
                                 const value_t* query,
                                 size_t n_index_rows,
                                 int k)
{
  constexpr int kNumWarps = tpb / WarpSize;
  // Shared-memory staging for the block-wide k-selection (distances and indices).
  __shared__ value_t smemK[kNumWarps * warp_q];
  __shared__ value_idx smemV[kNumWarps * warp_q];
  using namespace raft::neighbors::detail::faiss_select;
  // Min-heap-like block selector initialized with sentinel max values.
  BlockSelect<value_t, value_idx, false, Comparator<value_t>, warp_q, thread_q, tpb> heap(
    std::numeric_limits<value_t>::max(), std::numeric_limits<value_idx>::max(), smemK, smemV, k);
  // Grid is exactly sized to rows available
  int limit = Pow2<WarpSize>::roundDown(n_index_rows);
  // One block per query point; each point is a (lat, lon) pair in radians.
  const value_t* query_ptr = query + (blockIdx.x * 2);
  value_t x1 = query_ptr[0];
  value_t x2 = query_ptr[1];
  int i = threadIdx.x;
  // Full-warp portion: every lane has a valid candidate, so the bulk add() path is safe.
  for (; i < limit; i += tpb) {
    const value_t* idx_ptr = index + (i * 2);
    value_t y1 = idx_ptr[0];
    value_t y2 = idx_ptr[1];
    value_t dist = compute_haversine(x1, y1, x2, y2);
    heap.add(dist, i);
  }
  // Handle last remainder fraction of a warp of elements
  if (i < n_index_rows) {
    const value_t* idx_ptr = index + (i * 2);
    value_t y1 = idx_ptr[0];
    value_t y2 = idx_ptr[1];
    value_t dist = compute_haversine(x1, y1, x2, y2);
    heap.addThreadQ(dist, i);
  }
  heap.reduce();
  // After reduce(), the first k entries of shared memory hold this block's result.
  for (int i = threadIdx.x; i < k; i += tpb) {
    out_dists[blockIdx.x * k + i] = smemK[i];
    out_inds[blockIdx.x * k + i] = smemV[i];
  }
}
/**
* Conmpute the k-nearest neighbors using the Haversine
* (great circle arc) distance. Input is assumed to have
* 2 dimensions (latitude, longitude) in radians.
* @tparam value_idx
* @tparam value_t
* @param[out] out_inds output indices array on device (size n_query_rows * k)
* @param[out] out_dists output dists array on device (size n_query_rows * k)
* @param[in] index input index array on device (size n_index_rows * 2)
* @param[in] query input query array on device (size n_query_rows * 2)
* @param[in] n_index_rows number of rows in index array
* @param[in] n_query_rows number of rows in query array
* @param[in] k number of closest neighbors to return
* @param[in] stream stream to order kernel launch
*/
template <typename value_idx, typename value_t>
void haversine_knn(value_idx* out_inds,
value_t* out_dists,
const value_t* index,
const value_t* query,
size_t n_index_rows,
size_t n_query_rows,
int k,
cudaStream_t stream)
{
haversine_knn_kernel<<<n_query_rows, 128, 0, stream>>>(
out_inds, out_dists, index, query, n_index_rows, k);
}
} // namespace detail
} // namespace knn
} // namespace spatial
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/processing.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft {
namespace spatial {
namespace knn {
/**
* @brief A virtual class defining pre- and post-processing
* for metrics. This class will temporarily modify its given
* state in `preprocess()` and undo those modifications in
* `postprocess()`
*/
template <typename math_t>
class MetricProcessor {
 public:
  // Transform `data` in place before distance computation (no-op by default).
  virtual void preprocess(math_t* data) {}
  // Undo the modifications applied by preprocess() (no-op by default).
  virtual void revert(math_t* data) {}
  // Final adjustment of results after the computation (no-op by default).
  virtual void postprocess(math_t* data) {}
  // Inform the processor of the query count `k` before processing (no-op by default).
  virtual void set_num_queries(int k) {}
  virtual ~MetricProcessor() = default;
};
} // namespace knn
} // namespace spatial
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/fused_l2_knn-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <limits>
#include <raft/linalg/norm.cuh>
#include <raft/neighbors/detail/faiss_select/Select.cuh>
// TODO: Need to hide the PairwiseDistance class impl and expose to public API
#include "processing.cuh"
#include <raft/core/operators.hpp>
#include <raft/distance/detail/distance.cuh>
#include <raft/distance/detail/distance_ops/l2_exp.cuh>
#include <raft/distance/detail/distance_ops/l2_unexp.cuh>
#include <raft/distance/detail/pairwise_distance_base.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
// Reload every per-row warp-queue register set (warpK = distances, warpV = indices)
// from the shared-memory dump `shDumpKV` for all rows this thread's warp covers.
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT>
DI void loadAllWarpQShmem(myWarpSelect** heapArr,
                          Pair* shDumpKV,
                          const IdxT m,
                          const unsigned int numOfNN)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
    const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
    if (rowId < m) {
#pragma unroll
      for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
        // Each register j holds the queue slot at lane-strided position j*warpSize + lane.
        const int idx = j * warpSize + lid;
        if (idx < numOfNN) {
          Pair KVPair = shDumpKV[rowId * numOfNN + idx];
          heapArr[i]->warpV[j] = KVPair.key;
          heapArr[i]->warpK[j] = KVPair.value;
        }
      }
    }
  }
}
// Load a single row's warp-queue registers from the shared-memory dump `shDumpKV`
// (Pair.key -> warpV index register, Pair.value -> warpK distance register).
template <typename Policy, typename Pair, typename myWarpSelect>
DI void loadWarpQShmem(myWarpSelect* heapArr,
                       Pair* shDumpKV,
                       const int rowId,
                       const unsigned int numOfNN)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
    const int idx = j * warpSize + lid;
    if (idx < numOfNN) {
      Pair KVPair = shDumpKV[rowId * numOfNN + idx];
      heapArr->warpV[j] = KVPair.key;
      heapArr->warpK[j] = KVPair.value;
    }
  }
}
// Inverse of loadWarpQShmem: dump one row's warp-queue registers into shared memory
// as (index, distance) pairs at lane-strided positions.
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT>
DI void storeWarpQShmem(myWarpSelect* heapArr,
                        Pair* shDumpKV,
                        const IdxT rowId,
                        const unsigned int numOfNN)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
    const int idx = j * warpSize + lid;
    if (idx < numOfNN) {
      Pair otherKV = Pair(heapArr->warpV[j], heapArr->warpK[j]);
      shDumpKV[rowId * numOfNN + idx] = otherKV;
    }
  }
}
// Write the final top-k results for all rows covered by this warp from the queue
// registers to the global output arrays (distances and indices).
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT, typename OutT>
DI void storeWarpQGmem(myWarpSelect** heapArr,
                       volatile OutT* out_dists,
                       volatile IdxT* out_inds,
                       const IdxT m,
                       const unsigned int numOfNN,
                       const IdxT starty)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
    const auto gmemRowId = starty + i * Policy::AccThRows;
    if (gmemRowId < m) {
#pragma unroll
      for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
        const auto idx = j * warpSize + lid;
        if (idx < numOfNN) {
          // size_t cast avoids 32-bit overflow of the flattened output offset.
          out_dists[std::size_t(gmemRowId) * numOfNN + idx] = heapArr[i]->warpK[j];
          out_inds[std::size_t(gmemRowId) * numOfNN + idx] = (IdxT)heapArr[i]->warpV[j];
        }
      }
    }
  }
}
// Seed the warp-queue registers from previously computed top-k results in global
// memory (used when usePrevTopKs is enabled), then refresh each queue's warpKTop.
template <typename Policy, typename Pair, typename myWarpSelect, typename IdxT, typename OutT>
DI void loadPrevTopKsGmemWarpQ(myWarpSelect** heapArr,
                               volatile OutT* out_dists,
                               volatile IdxT* out_inds,
                               const IdxT m,
                               const unsigned int numOfNN,
                               const IdxT starty)
{
  const int lid = raft::laneId();
#pragma unroll
  for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
    const auto gmemRowId = starty + i * Policy::AccThRows;
    if (gmemRowId < m) {
#pragma unroll
      for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
        const auto idx = j * warpSize + lid;
        if (idx < numOfNN) {
          heapArr[i]->warpK[j] = out_dists[std::size_t(gmemRowId) * numOfNN + idx];
          heapArr[i]->warpV[j] = (uint32_t)out_inds[std::size_t(gmemRowId) * numOfNN + idx];
        }
      }
      // Broadcast the current worst kept distance from the lane holding the queue's
      // top slot (heapArr[i]->kLane) into warpKTop for fast threshold checks.
      static constexpr auto kLaneWarpKTop = myWarpSelect::kNumWarpQRegisters - 1;
      heapArr[i]->warpKTop = raft::shfl(heapArr[i]->warpK[kLaneWarpKTop], heapArr[i]->kLane);
    }
  }
}
// Insert candidates allWarpTopKs[rowId][startId..finalNumVals) into the sorted,
// lane-distributed warp queue, shifting existing entries down by one lane via
// warp shuffles so the queue stays sorted across the warp.
// NOTE(review): the fixed row stride of 256 must match the allWarpTopKs layout
// used by the caller — confirm against the fusedL2kNN kernel.
template <typename Pair, int NumWarpQRegs, typename myWarpSelect>
DI void updateSortedWarpQ(
  myWarpSelect& heapArr, Pair* allWarpTopKs, int rowId, int finalNumVals, int startId = 0)
{
  constexpr uint32_t mask = 0xffffffffu;
  const int lid = raft::laneId();
  // calculate srcLane such that tid 0 -> 31, 1 -> 0,... 31 -> 30.
  // warp around 0 to 31 required for NN > 32
  const auto srcLane = (warpSize + (lid - 1)) & (warpSize - 1);
  for (int k = startId; k < finalNumVals; k++) {
    Pair KVPair = allWarpTopKs[rowId * (256) + k];
#pragma unroll
    for (int i = 0; i < NumWarpQRegs; i++) {
      // Lanes whose kept distance is worse than the candidate must shift down.
      unsigned activeLanes = __ballot_sync(mask, KVPair.value < heapArr->warpK[i]);
      if (activeLanes) {
        Pair tempKV;
        tempKV.value = raft::shfl(heapArr->warpK[i], srcLane);
        tempKV.key = raft::shfl(heapArr->warpV[i], srcLane);
        const auto firstActiveLane = __ffs(activeLanes) - 1;
        if (firstActiveLane == lid) {
          // Insertion point: this lane takes the new candidate.
          heapArr->warpK[i] = KVPair.value;
          heapArr->warpV[i] = KVPair.key;
        } else if (lid > firstActiveLane) {
          // Lanes past the insertion point take their predecessor's entry.
          heapArr->warpK[i] = tempKV.value;
          heapArr->warpV[i] = tempKV.key;
        }
        if (i == 0 && NumWarpQRegs > 1) {
          // The entry pushed out of register 0 cascades into register 1.
          heapArr->warpK[1] = __shfl_up_sync(mask, heapArr->warpK[1], 1);
          heapArr->warpV[1] = __shfl_up_sync(mask, heapArr->warpV[1], 1);
          if (lid == 0) {
            heapArr->warpK[1] = tempKV.value;
            heapArr->warpV[1] = tempKV.key;
          }
          break;
        }
      }
    }
  }
}
/**
 * @brief Fused L2-distance + k-selection kernel.
 *
 * Computes pairwise L2 distances between x (m rows) and y (n rows) via the
 * tiled PairwiseDistances machinery and, instead of writing the full distance
 * matrix, keeps only the numOfNN smallest distances (and their column
 * indices) per row using faiss_select::WarpSelect queues.
 *
 * When the launch uses several blocks along x (gridDim.x > 1), each block
 * computes partial top-k results for its column range; blockIdx.x == 0 then
 * merges everyone's partial results through the out_dists/out_inds buffers,
 * serialized per row-tile by the `mutexes` array (see state machine comment
 * in rowEpilog_lambda below).
 *
 * @param x       query matrix [m x k]
 * @param y       index matrix [n x k]
 * @param _xn,_yn optional precomputed row norms (used by the expanded-L2 op)
 * @param numOfNN number of neighbors to keep per query row (<= NumWarpQ)
 * @param mutexes one int per row-tile for inter-block synchronization
 * @param out_dists/out_inds  output [m x numOfNN]; also used as the exchange
 *                buffer for the multi-block merge, hence volatile.
 */
template <typename DataT,
          typename OutT,
          typename IdxT,
          typename Policy,
          typename OpT,
          typename FinalLambda,
          int NumWarpQ,
          int NumThreadQ,
          bool usePrevTopKs = false,
          bool isRowMajor   = true>
__launch_bounds__(Policy::Nthreads, 2) RAFT_KERNEL fusedL2kNN(const DataT* x,
                                                              const DataT* y,
                                                              const DataT* _xn,
                                                              const DataT* _yn,
                                                              const IdxT m,
                                                              const IdxT n,
                                                              const IdxT k,
                                                              const IdxT lda,
                                                              const IdxT ldb,
                                                              const IdxT ldd,
                                                              OpT distance_op,
                                                              FinalLambda fin_op,
                                                              unsigned int numOfNN,
                                                              volatile int* mutexes,
                                                              volatile OutT* out_dists,
                                                              volatile IdxT* out_inds)
{
  using AccT = typename OpT::AccT;
  extern __shared__ char smem[];

  typedef cub::KeyValuePair<uint32_t, AccT> Pair;
  // Sentinel fillers for empty queue slots: worst possible distance/key.
  constexpr auto identity = std::numeric_limits<AccT>::max();
  constexpr auto keyMax   = std::numeric_limits<uint32_t>::max();
  constexpr auto Dir      = false;  // select smallest distances
  using namespace raft::neighbors::detail::faiss_select;
  typedef WarpSelect<AccT, uint32_t, Dir, Comparator<AccT>, NumWarpQ, NumThreadQ, 32> myWarpSelect;

  // Runs once per grid-stride row tile, after all column tiles are done.
  // Performs the inter-block merge of partial top-k results.
  auto rowEpilog_lambda =
    [m, n, &distance_op, numOfNN, out_dists, out_inds, mutexes] __device__(IdxT gridStrideY) {
      if (gridDim.x == 1) { return; }  // single block along x: nothing to merge

      // Use ::template to disambiguate (See:
      // https://en.cppreference.com/w/cpp/language/dependent_name)
      int smem_offset = OpT::template shared_mem_size<Policy>();
      Pair* shDumpKV  = (Pair*)(&smem[smem_offset]);

      const int lid     = threadIdx.x % warpSize;
      const IdxT starty = gridStrideY + (threadIdx.x / Policy::AccThCols);

      //  0 -> consumer done consuming the buffer.
      // -1 -> consumer started consuming the buffer
      // -2 -> producer done filling the buffer
      //  1 -> prod acquired to fill the buffer
      if (blockIdx.x == 0) {
        // Consumer block: folds every producer's partial top-k into its own.
        auto cta_processed = 0;
        myWarpSelect heapArr1(identity, keyMax, numOfNN);
        myWarpSelect heapArr2(identity, keyMax, numOfNN);
        myWarpSelect* heapArr[] = {&heapArr1, &heapArr2};
        __syncwarp();

        loadAllWarpQShmem<Policy, Pair>(heapArr, &shDumpKV[0], m, numOfNN);

        while (cta_processed < gridDim.x - 1) {
          if (threadIdx.x == 0) {
            // Wait for some producer to finish filling the buffer (-2),
            // then mark it as being consumed (-1).
            while (atomicCAS((int*)&mutexes[gridStrideY / Policy::Mblk], -2, -1) != -2)
              ;
          }
          __threadfence();
          __syncthreads();

#pragma unroll
          for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
            const auto rowId = starty + i * Policy::AccThRows;
            if (rowId < m) {
#pragma unroll
              for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
                Pair otherKV;
                otherKV.value  = identity;
                otherKV.key    = keyMax;
                const auto idx = j * warpSize + lid;
                if (idx < numOfNN) {
                  // Stage the producer's results into shared memory so they
                  // survive the buffer release below.
                  otherKV.value          = out_dists[rowId * numOfNN + idx];
                  otherKV.key            = (uint32_t)out_inds[rowId * numOfNN + idx];
                  const auto shMemRowId  = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
                  shDumpKV[shMemRowId * numOfNN + idx] = otherKV;
                }
              }
            }
          }
          __threadfence();
          __syncthreads();

          // Release the buffer (0) so the next producer can fill it.
          if (threadIdx.x == 0) { atomicExch((int*)&mutexes[gridStrideY / Policy::Mblk], 0); }
          __threadfence();

          // Perform merging of otherKV with topk's across warp.
#pragma unroll
          for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
            const auto rowId = starty + i * Policy::AccThRows;
            if (rowId < m) {
#pragma unroll
              for (int j = 0; j < myWarpSelect::kNumWarpQRegisters; ++j) {
                Pair otherKV;
                otherKV.value  = identity;
                otherKV.key    = keyMax;
                const auto idx = j * warpSize + lid;
                if (idx < numOfNN) {
                  const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
                  otherKV               = shDumpKV[shMemRowId * numOfNN + idx];
                }
                heapArr[i]->add(otherKV.value, otherKV.key);
              }
            }
          }
          cta_processed++;
        }
#pragma unroll
        for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
          const auto rowId = starty + i * Policy::AccThRows;
          if (rowId < m) {
            bool needSort = (heapArr[i]->numVals > 0);
            needSort      = __any_sync(0xffffffff, needSort);
            if (needSort) { heapArr[i]->reduce(); }
          }
        }
        // Final merged top-k for this row tile goes to global memory.
        storeWarpQGmem<Policy, Pair>(heapArr, out_dists, out_inds, m, numOfNN, starty);
      } else {
        // Producer block: publish this block's partial top-k via the output
        // buffers, serialized by the mutex.
        if (threadIdx.x == 0) {
          // Wait until the buffer is free (0), then claim it (1).
          while (atomicCAS((int*)&mutexes[gridStrideY / Policy::Mblk], 0, 1) != 0)
            ;
        }
        __threadfence();
        __syncthreads();

#pragma unroll
        for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
          const auto rowId = starty + i * Policy::AccThRows;
          if (rowId < m) {
            for (int idx = lid; idx < numOfNN; idx += warpSize) {
              const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
              Pair KVPair           = shDumpKV[shMemRowId * numOfNN + idx];
              out_dists[rowId * numOfNN + idx] = KVPair.value;
              out_inds[rowId * numOfNN + idx]  = (IdxT)KVPair.key;
            }
          }
        }
        __threadfence();
        __syncthreads();

        // Mark the buffer as filled (-2) for the consumer.
        if (threadIdx.x == 0) { atomicExch((int*)&mutexes[gridStrideY / Policy::Mblk], -2); }
        __threadfence();
      }
    };

  // epilogue operation lambda for final value calculation
  // Runs once per (row tile, column tile) with the tile's accumulated
  // distances in `acc`; feeds them into the warp-select queues.
  auto epilog_lambda =
    [&distance_op, numOfNN, m, n, ldd, out_dists, out_inds, keyMax, identity] __device__(
      AccT acc[Policy::AccRowsPerTh][Policy::AccColsPerTh],
      DataT * regxn,
      DataT * regyn,
      IdxT gridStrideX,
      IdxT gridStrideY) {
      // Use ::template to disambiguate (See:
      // https://en.cppreference.com/w/cpp/language/dependent_name)
      int smem_offset = OpT::template shared_mem_size<Policy>();
      Pair* shDumpKV  = (Pair*)(&smem[smem_offset]);

      constexpr uint32_t mask = 0xffffffffu;
      const IdxT starty       = gridStrideY + (threadIdx.x / Policy::AccThCols);
      const IdxT startx       = gridStrideX + (threadIdx.x % Policy::AccThCols);
      const int lid           = raft::laneId();

      myWarpSelect heapArr1(identity, keyMax, numOfNN);
      myWarpSelect heapArr2(identity, keyMax, numOfNN);
      myWarpSelect* heapArr[] = {&heapArr1, &heapArr2};
      if (usePrevTopKs) {
        // Seed the queues from a previous call's results, but only on the
        // first column tile of this block.
        if (gridStrideX == blockIdx.x * Policy::Nblk) {
          loadPrevTopKsGmemWarpQ<Policy, Pair>(heapArr, out_dists, out_inds, m, numOfNN, starty);
        }
      }

      if (gridStrideX > blockIdx.x * Policy::Nblk) {
        // Not the first column tile: queues already hold sorted results in
        // shared memory. Read back the current k-th best (last slot) ...
#pragma unroll
        for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
          const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
          Pair tempKV      = shDumpKV[(rowId * numOfNN) + numOfNN - 1];
          heapArr[i]->warpKTop = tempKV.value;
        }

        // total vals can atmost be 256, (32*8)
        int numValsWarpTopK[Policy::AccRowsPerTh];
        int anyWarpTopKs = 0;
        // ... and count, per row, how many of this tile's distances beat it.
#pragma unroll
        for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
          const auto rowId   = starty + i * Policy::AccThRows;
          numValsWarpTopK[i] = 0;
          if (rowId < m) {
#pragma unroll
            for (int j = 0; j < Policy::AccColsPerTh; ++j) {
              const auto colId = startx + j * Policy::AccThCols;
              if (colId < ldd) {
                if (acc[i][j] < heapArr[i]->warpKTop) { numValsWarpTopK[i]++; }
              }
            }
            anyWarpTopKs += numValsWarpTopK[i];
          }
        }
        anyWarpTopKs = __syncthreads_or(anyWarpTopKs > 0);
        if (anyWarpTopKs) {
          Pair* allWarpTopKs = (Pair*)(&smem[0]);
          uint32_t needScanSort[Policy::AccRowsPerTh];

#pragma unroll
          for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
            const auto gmemRowId = starty + i * Policy::AccThRows;
            needScanSort[i]      = 0;
            if (gmemRowId < m) {
              int myVals      = numValsWarpTopK[i];
              needScanSort[i] = __ballot_sync(mask, myVals > 0);
              if (needScanSort[i]) {
                // Warp-wide inclusive prefix sum of candidate counts
                // (Hillis-Steele via shfl_up), to assign write offsets.
#pragma unroll
                for (unsigned int k = 1; k <= 16; k *= 2) {
                  const unsigned int n = __shfl_up_sync(mask, numValsWarpTopK[i], k);
                  if (lid >= k) { numValsWarpTopK[i] += n; }
                }
              }
              // As each thread will know its total vals to write.
              // we only store its starting location.
              numValsWarpTopK[i] -= myVals;
            }

            if (needScanSort[i]) {
              const auto rowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
              if (gmemRowId < m) {
                if (needScanSort[i] & ((uint32_t)1 << lid)) {
                  // Dump this lane's qualifying candidates at its offset.
#pragma unroll
                  for (int j = 0; j < Policy::AccColsPerTh; ++j) {
                    const auto colId = startx + j * Policy::AccThCols;
                    if (colId < ldd) {
                      if (acc[i][j] < heapArr[i]->warpKTop) {
                        Pair otherKV = {colId, acc[i][j]};
                        allWarpTopKs[rowId * (256) + numValsWarpTopK[i]] = otherKV;
                        numValsWarpTopK[i]++;
                      }
                    }
                  }
                }
                __syncwarp();
                // Lane 31 holds the total candidate count after the scan.
                const int finalNumVals = raft::shfl(numValsWarpTopK[i], 31);
                loadWarpQShmem<Policy, Pair>(heapArr[i], &shDumpKV[0], rowId, numOfNN);
                updateSortedWarpQ<Pair, myWarpSelect::kNumWarpQRegisters>(
                  heapArr[i], &allWarpTopKs[0], rowId, finalNumVals);
              }
            }
          }
          __syncthreads();
#pragma unroll
          for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
            if (needScanSort[i]) {
              const auto rowId     = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
              const auto gmemRowId = starty + i * Policy::AccThRows;
              if (gmemRowId < m) {
                storeWarpQShmem<Policy, Pair>(heapArr[i], shDumpKV, rowId, numOfNN);
              }
            }
          }
        }
      } else {
        // First column tile for this block: push every in-range distance
        // through the warp-select queues, sort, and stage in shared memory.
#pragma unroll
        for (int i = 0; i < Policy::AccRowsPerTh; ++i) {
          const auto gmemRowId  = starty + i * Policy::AccThRows;
          const auto shMemRowId = (threadIdx.x / Policy::AccThCols) + i * Policy::AccThRows;
          if (gmemRowId < m) {
#pragma unroll
            for (int j = 0; j < Policy::AccColsPerTh; ++j) {
              const auto colId = startx + j * Policy::AccThCols;
              Pair otherKV     = {keyMax, identity};
              if (colId < ldd) {
                otherKV.value = acc[i][j];
                otherKV.key   = colId;
              }
              heapArr[i]->add(otherKV.value, otherKV.key);
            }

            bool needSort = (heapArr[i]->numVals > 0);
            needSort      = __any_sync(mask, needSort);
            if (needSort) { heapArr[i]->reduce(); }
            storeWarpQShmem<Policy, Pair>(heapArr[i], shDumpKV, shMemRowId, numOfNN);
          }
        }
      }

      if (((gridStrideX + Policy::Nblk * gridDim.x) >= n) && gridDim.x == 1) {
        // This is last iteration of grid stride X
        loadAllWarpQShmem<Policy, Pair>(heapArr, &shDumpKV[0], m, numOfNN);
        storeWarpQGmem<Policy, Pair>(heapArr, out_dists, out_inds, m, numOfNN, starty);
      }
    };

  // Distance matrix is never materialized: write_out == false and the
  // output pointer below is nullptr.
  constexpr bool write_out = false;
  raft::distance::detail::PairwiseDistances<DataT,
                                            OutT,
                                            IdxT,
                                            Policy,
                                            OpT,
                                            decltype(epilog_lambda),
                                            FinalLambda,
                                            decltype(rowEpilog_lambda),
                                            isRowMajor,
                                            write_out>
    obj(x,
        y,
        m,
        n,
        k,
        lda,
        ldb,
        ldd,
        _xn,
        _yn,
        nullptr,  // output ptr, can be null as write_out == false.
        smem,
        distance_op,
        epilog_lambda,
        fin_op,
        rowEpilog_lambda);
  obj.run();
}
/**
 * @brief Launch the unexpanded-L2 fused kNN kernel for a fixed vector length.
 *
 * Selects the 32- or 64-wide warp-queue instantiation based on numOfNN,
 * sizes shared memory, and launches. Follows a two-call workspace protocol:
 * if a multi-block launch is needed and `workspace`/`worksize` are too small,
 * the required size is written to `worksize` and the function returns without
 * launching — the caller is expected to allocate and call again.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2UnexpKnnImpl(const DataT* x,
                         const DataT* y,
                         IdxT m,
                         IdxT n,
                         IdxT k,
                         IdxT lda,
                         IdxT ldb,
                         IdxT ldd,
                         bool sqrt,
                         OutT* out_dists,
                         IdxT* out_inds,
                         IdxT numOfNN,
                         cudaStream_t stream,
                         void* workspace,
                         size_t& worksize)
{
  typedef typename raft::linalg::Policy2x8<DataT, 1>::Policy RowPolicy;
  typedef typename raft::linalg::Policy4x4<DataT, VecLen>::ColPolicy ColPolicy;

  // NOTE(review): the condition is hard-coded `true`, so RowPolicy is always
  // chosen; the ColPolicy branch is dead, consistent with the row-major-only
  // ASSERT below.
  typedef typename std::conditional<true, RowPolicy, ColPolicy>::type KPolicy;

  ASSERT(isRowMajor, "Only Row major inputs are allowed");

  dim3 blk(KPolicy::Nthreads);
  // Accumulation operation lambda
  typedef cub::KeyValuePair<uint32_t, AccT> Pair;

  raft::distance::detail::ops::l2_unexp_distance_op<DataT, AccT, IdxT> distance_op{sqrt};
  raft::identity_op fin_op{};

  if constexpr (isRowMajor) {
    // Two kernel instantiations: warp-queue depth 32 (thread queue 2) and
    // 64 (thread queue 3); pick the smallest that fits numOfNN.
    constexpr auto fusedL2UnexpKnn32RowMajor = fusedL2kNN<DataT,
                                                          OutT,
                                                          IdxT,
                                                          KPolicy,
                                                          decltype(distance_op),
                                                          decltype(fin_op),
                                                          32,
                                                          2,
                                                          usePrevTopKs,
                                                          isRowMajor>;
    constexpr auto fusedL2UnexpKnn64RowMajor = fusedL2kNN<DataT,
                                                          OutT,
                                                          IdxT,
                                                          KPolicy,
                                                          decltype(distance_op),
                                                          decltype(fin_op),
                                                          64,
                                                          3,
                                                          usePrevTopKs,
                                                          isRowMajor>;

    auto fusedL2UnexpKnnRowMajor = fusedL2UnexpKnn32RowMajor;
    if (numOfNN <= 32) {
      fusedL2UnexpKnnRowMajor = fusedL2UnexpKnn32RowMajor;
    } else if (numOfNN <= 64) {
      fusedL2UnexpKnnRowMajor = fusedL2UnexpKnn64RowMajor;
    } else {
      ASSERT(numOfNN <= 64, "fusedL2kNN: num of nearest neighbors must be <= 64");
    }

    // Shared memory = distance-op staging area + per-row key/value dump.
    const auto sharedMemSize =
      distance_op.template shared_mem_size<KPolicy>() + KPolicy::Mblk * numOfNN * sizeof(Pair);

    dim3 grid = raft::distance::detail::launchConfigGenerator<KPolicy>(
      m, n, sharedMemSize, fusedL2UnexpKnnRowMajor);

    if (grid.x > 1) {
      // Multi-block along x: need one mutex per row tile for the merge.
      const auto numMutexes = raft::ceildiv<int>(m, KPolicy::Mblk);
      if (workspace == nullptr || worksize < (sizeof(int32_t) * numMutexes)) {
        // Workspace-size query path: report requirement and bail out.
        worksize = sizeof(int32_t) * numMutexes;
        return;
      } else {
        // Mutexes must start in the "free" (0) state.
        RAFT_CUDA_TRY(cudaMemsetAsync(workspace, 0, sizeof(int32_t) * numMutexes, stream));
      }
    }

    fusedL2UnexpKnnRowMajor<<<grid, blk, sharedMemSize, stream>>>(x,
                                                                 y,
                                                                 nullptr,
                                                                 nullptr,
                                                                 m,
                                                                 n,
                                                                 k,
                                                                 lda,
                                                                 ldb,
                                                                 ldd,
                                                                 distance_op,
                                                                 fin_op,
                                                                 (uint32_t)numOfNN,
                                                                 (int*)workspace,
                                                                 out_dists,
                                                                 out_inds);
  } else {
    // Column-major path intentionally unimplemented (see ASSERT above).
  }

  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Dispatch fusedL2UnexpKnnImpl with the widest legal vector length.
 *
 * The vector length (in elements) is the largest of 16/8/1 bytes such that
 * the element size divides the alignment and both matrices' row strides are
 * multiples of it. All arguments are forwarded unchanged.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2UnexpKnn(IdxT m,
                     IdxT n,
                     IdxT k,
                     IdxT lda,
                     IdxT ldb,
                     IdxT ldd,
                     const DataT* x,
                     const DataT* y,
                     bool sqrt,
                     OutT* out_dists,
                     IdxT* out_inds,
                     IdxT numOfNN,
                     cudaStream_t stream,
                     void* workspace,
                     size_t& worksize)
{
  // Row strides in bytes determine whether vectorized loads are legal.
  const size_t rowBytesX = sizeof(DataT) * lda;
  const size_t rowBytesY = sizeof(DataT) * ldb;
  const auto aligned_to  = [&](size_t alignment) {
    return (alignment % sizeof(DataT) == 0) && (rowBytesX % alignment == 0) &&
           (rowBytesY % alignment == 0);
  };

  if (aligned_to(16)) {
    fusedL2UnexpKnnImpl<DataT, AccT, OutT, IdxT, 16 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x,
      y,
      m,
      n,
      k,
      lda,
      ldb,
      ldd,
      sqrt,
      out_dists,
      out_inds,
      numOfNN,
      stream,
      workspace,
      worksize);
  } else if (aligned_to(8)) {
    fusedL2UnexpKnnImpl<DataT, AccT, OutT, IdxT, 8 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x,
      y,
      m,
      n,
      k,
      lda,
      ldb,
      ldd,
      sqrt,
      out_dists,
      out_inds,
      numOfNN,
      stream,
      workspace,
      worksize);
  } else {
    // Unaligned rows: fall back to scalar loads.
    fusedL2UnexpKnnImpl<DataT, AccT, OutT, IdxT, 1, usePrevTopKs, isRowMajor>(x,
                                                                              y,
                                                                              m,
                                                                              n,
                                                                              k,
                                                                              lda,
                                                                              ldb,
                                                                              ldd,
                                                                              sqrt,
                                                                              out_dists,
                                                                              out_inds,
                                                                              numOfNN,
                                                                              stream,
                                                                              workspace,
                                                                              worksize);
  }
}
/**
 * @brief Launch the expanded-L2 fused kNN kernel for a fixed vector length.
 *
 * Like fusedL2UnexpKnnImpl, but the expanded form needs row norms of x and y;
 * they are computed into `workspace` when not supplied by the caller. Also
 * follows the two-call workspace protocol: on a multi-block launch with an
 * insufficient workspace, the required size is written to `worksize` and the
 * function returns without launching.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          int VecLen,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2ExpKnnImpl(const DataT* x,
                       const DataT* y,
                       const DataT* xn,
                       const DataT* yn,
                       IdxT m,
                       IdxT n,
                       IdxT k,
                       IdxT lda,
                       IdxT ldb,
                       IdxT ldd,
                       bool sqrt,
                       OutT* out_dists,
                       IdxT* out_inds,
                       IdxT numOfNN,
                       cudaStream_t stream,
                       void* workspace,
                       size_t& worksize)
{
  typedef typename raft::linalg::Policy2x8<DataT, 1>::Policy RowPolicy;
  typedef typename raft::linalg::Policy4x4<DataT, VecLen>::ColPolicy ColPolicy;

  // NOTE(review): condition is hard-coded `true` — RowPolicy always wins,
  // consistent with the row-major-only ASSERT below.
  typedef typename std::conditional<true, RowPolicy, ColPolicy>::type KPolicy;

  ASSERT(isRowMajor, "Only Row major inputs are allowed");

  // Workspace must at least hold the norm vectors (m and, if y differs
  // from x, also n entries).
  // NOTE(review): this check is in units of sizeof(AccT) while `normsSize`
  // below uses sizeof(DataT) — confirm these are intended to agree.
  ASSERT(!(((x != y) && (worksize < (m + n) * sizeof(AccT))) || (worksize < m * sizeof(AccT))),
         "workspace size error");
  ASSERT(workspace != nullptr, "workspace is null");

  dim3 blk(KPolicy::Nthreads);
  typedef cub::KeyValuePair<uint32_t, AccT> Pair;

  raft::distance::detail::ops::l2_exp_distance_op<DataT, AccT, IdxT> distance_op{sqrt};
  raft::identity_op fin_op{};

  if constexpr (isRowMajor) {
    // Warp-queue depth 32 vs 64 instantiations; choose by numOfNN.
    constexpr auto fusedL2ExpKnn32RowMajor = fusedL2kNN<DataT,
                                                        OutT,
                                                        IdxT,
                                                        KPolicy,
                                                        decltype(distance_op),
                                                        decltype(fin_op),
                                                        32,
                                                        2,
                                                        usePrevTopKs,
                                                        isRowMajor>;
    constexpr auto fusedL2ExpKnn64RowMajor = fusedL2kNN<DataT,
                                                        OutT,
                                                        IdxT,
                                                        KPolicy,
                                                        decltype(distance_op),
                                                        decltype(fin_op),
                                                        64,
                                                        3,
                                                        usePrevTopKs,
                                                        isRowMajor>;

    auto fusedL2ExpKnnRowMajor = fusedL2ExpKnn32RowMajor;
    if (numOfNN <= 32) {
      fusedL2ExpKnnRowMajor = fusedL2ExpKnn32RowMajor;
    } else if (numOfNN <= 64) {
      fusedL2ExpKnnRowMajor = fusedL2ExpKnn64RowMajor;
    } else {
      ASSERT(numOfNN <= 64, "fusedL2kNN: num of nearest neighbors must be <= 64");
    }

    // Shared memory = distance-op staging area + per-row key/value dump.
    const auto sharedMemSize =
      distance_op.template shared_mem_size<KPolicy>() + (KPolicy::Mblk * numOfNN * sizeof(Pair));
    dim3 grid = raft::distance::detail::launchConfigGenerator<KPolicy>(
      m, n, sharedMemSize, fusedL2ExpKnnRowMajor);
    int32_t* mutexes = nullptr;
    if (grid.x > 1) {
      // Multi-block: workspace layout is [norms | mutexes].
      const auto numMutexes   = raft::ceildiv<int>(m, KPolicy::Mblk);
      const auto normsSize    = (x != y) ? (m + n) * sizeof(DataT) : n * sizeof(DataT);
      const auto requiredSize = sizeof(int32_t) * numMutexes + normsSize;
      if (worksize < requiredSize) {
        // Workspace-size query path: report requirement and bail out.
        worksize = requiredSize;
        return;
      } else {
        mutexes = (int32_t*)((char*)workspace + normsSize);
        // Mutexes must start in the "free" (0) state.
        RAFT_CUDA_TRY(cudaMemsetAsync(mutexes, 0, sizeof(int32_t) * numMutexes, stream));
      }
    }

    // calculate norms if they haven't been passed in
    if (!xn) {
      DataT* xn_ = (DataT*)workspace;
      // Advance workspace so yn (if needed) lands after xn's m entries.
      workspace  = xn_ + m;
      raft::linalg::rowNorm(
        xn_, x, k, m, raft::linalg::L2Norm, isRowMajor, stream, raft::identity_op{});
      xn = xn_;
    }
    if (!yn) {
      if (x == y) {
        // Same matrix: reuse the x norms.
        yn = xn;
      } else {
        DataT* yn_ = (DataT*)(workspace);
        raft::linalg::rowNorm(
          yn_, y, k, n, raft::linalg::L2Norm, isRowMajor, stream, raft::identity_op{});
        yn = yn_;
      }
    }

    fusedL2ExpKnnRowMajor<<<grid, blk, sharedMemSize, stream>>>(x,
                                                               y,
                                                               xn,
                                                               yn,
                                                               m,
                                                               n,
                                                               k,
                                                               lda,
                                                               ldb,
                                                               ldd,
                                                               distance_op,
                                                               fin_op,
                                                               (uint32_t)numOfNN,
                                                               mutexes,
                                                               out_dists,
                                                               out_inds);
  } else {
    // Column-major path intentionally unimplemented (see ASSERT above).
  }

  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Dispatch fusedL2ExpKnnImpl with the widest legal vector length.
 *
 * The vector length (in elements) is the largest of 16/8/1 bytes such that
 * the element size divides the alignment and both matrices' row strides are
 * multiples of it. All arguments are forwarded unchanged.
 */
template <typename DataT,
          typename AccT,
          typename OutT,
          typename IdxT,
          bool usePrevTopKs,
          bool isRowMajor>
void fusedL2ExpKnn(IdxT m,
                   IdxT n,
                   IdxT k,
                   IdxT lda,
                   IdxT ldb,
                   IdxT ldd,
                   const DataT* x,
                   const DataT* y,
                   const DataT* xn,
                   const DataT* yn,
                   bool sqrt,
                   OutT* out_dists,
                   IdxT* out_inds,
                   IdxT numOfNN,
                   cudaStream_t stream,
                   void* workspace,
                   size_t& worksize)
{
  // Row strides in bytes determine whether vectorized loads are legal.
  const size_t rowBytesX = sizeof(DataT) * lda;
  const size_t rowBytesY = sizeof(DataT) * ldb;
  const auto aligned_to  = [&](size_t alignment) {
    return (alignment % sizeof(DataT) == 0) && (rowBytesX % alignment == 0) &&
           (rowBytesY % alignment == 0);
  };

  if (aligned_to(16)) {
    fusedL2ExpKnnImpl<DataT, AccT, OutT, IdxT, 16 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x,
      y,
      xn,
      yn,
      m,
      n,
      k,
      lda,
      ldb,
      ldd,
      sqrt,
      out_dists,
      out_inds,
      numOfNN,
      stream,
      workspace,
      worksize);
  } else if (aligned_to(8)) {
    fusedL2ExpKnnImpl<DataT, AccT, OutT, IdxT, 8 / sizeof(DataT), usePrevTopKs, isRowMajor>(
      x,
      y,
      xn,
      yn,
      m,
      n,
      k,
      lda,
      ldb,
      ldd,
      sqrt,
      out_dists,
      out_inds,
      numOfNN,
      stream,
      workspace,
      worksize);
  } else {
    // Unaligned rows: fall back to scalar loads.
    fusedL2ExpKnnImpl<DataT, AccT, OutT, IdxT, 1, usePrevTopKs, isRowMajor>(x,
                                                                            y,
                                                                            xn,
                                                                            yn,
                                                                            m,
                                                                            n,
                                                                            k,
                                                                            lda,
                                                                            ldb,
                                                                            ldd,
                                                                            sqrt,
                                                                            out_dists,
                                                                            out_inds,
                                                                            numOfNN,
                                                                            stream,
                                                                            workspace,
                                                                            worksize);
  }
}
/**
 * Compute the k-nearest neighbors using L2 expanded/unexpanded distance.
 * @tparam value_idx index type
 * @tparam value_t data/distance type
 * @tparam usePrevTopKs if true, seed the selection with results already
 *         present in out_dists/out_inds (useful for batched/tiled calls)
 * @param[in] D number of columns (features) in index and query arrays
 * @param[out] out_inds output indices array on device (size n_query_rows * k)
 * @param[out] out_dists output dists array on device (size n_query_rows * k)
 * @param[in] index input index array on device (size n_index_rows * D)
 * @param[in] query input query array on device (size n_query_rows * D)
 * @param[in] n_index_rows number of rows in index array
 * @param[in] n_query_rows number of rows in query array
 * @param[in] k number of closest neighbors to return
 * @param[in] rowMajorIndex are the index arrays in row-major layout?
 * @param[in] rowMajorQuery are the query array in row-major layout?
 * @param[in] stream stream to order kernel launch
 * @param[in] metric distance metric; only the L2 variants are supported
 * @param[in] index_norms optional precomputed L2 row norms of `index`
 * @param[in] query_norms optional precomputed L2 row norms of `query`
 */
template <typename value_idx, typename value_t, bool usePrevTopKs = false>
void fusedL2Knn(size_t D,
                value_idx* out_inds,
                value_t* out_dists,
                const value_t* index,
                const value_t* query,
                size_t n_index_rows,
                size_t n_query_rows,
                int k,
                bool rowMajorIndex,
                bool rowMajorQuery,
                cudaStream_t stream,
                raft::distance::DistanceType metric,
                const value_t* index_norms = NULL,
                const value_t* query_norms = NULL)
{
  // Validate the input data
  ASSERT(k > 0, "l2Knn: k must be > 0");
  ASSERT(D > 0, "l2Knn: D must be > 0");
  ASSERT(n_index_rows > 0, "l2Knn: n_index_rows must be > 0");
  ASSERT(index, "l2Knn: index must be provided (passed null)");
  ASSERT(n_query_rows > 0, "l2Knn: n_query_rows must be > 0");
  ASSERT(query, "l2Knn: query must be provided (passed null)");
  ASSERT(out_dists, "l2Knn: out_dists must be provided (passed null)");
  ASSERT(out_inds, "l2Knn: out_inds must be provided (passed null)");

  // Currently we only support same layout for x & y inputs.
  ASSERT(rowMajorIndex == rowMajorQuery,
         "l2Knn: rowMajorIndex and rowMajorQuery should have same layout");
  // TODO: Add support for column major layout
  ASSERT(rowMajorIndex == true, "l2Knn: only rowMajor inputs are supported for now.");

  // Even for L2 Sqrt distance case we use non-sqrt version as FAISS bfKNN only support
  // non-sqrt metric & some tests in RAFT/cuML (like Linkage) fails if we use L2 sqrt.
  constexpr bool sqrt = false;

  size_t worksize = 0, tempWorksize = 0;
  rmm::device_uvector<char> workspace(worksize, stream);
  value_idx lda = D, ldb = D, ldd = n_index_rows;

  switch (metric) {
    case raft::distance::DistanceType::L2SqrtExpanded:
    case raft::distance::DistanceType::L2Expanded:
      // Expanded form needs a workspace for norms (+ possibly mutexes).
      tempWorksize = raft::distance::detail::
        getWorkspaceSize<raft::distance::DistanceType::L2Expanded, float, float, float, value_idx>(
          query, index, n_query_rows, n_index_rows, D);
      worksize = tempWorksize;
      workspace.resize(worksize, stream);
      // First call may only report a larger required worksize (two-call
      // protocol); in that case resize and call again below.
      fusedL2ExpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                             n_index_rows,
                                                                             D,
                                                                             lda,
                                                                             ldb,
                                                                             ldd,
                                                                             query,
                                                                             index,
                                                                             query_norms,
                                                                             index_norms,
                                                                             sqrt,
                                                                             out_dists,
                                                                             out_inds,
                                                                             k,
                                                                             stream,
                                                                             workspace.data(),
                                                                             worksize);
      if (worksize > tempWorksize) {
        workspace.resize(worksize, stream);
        fusedL2ExpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                               n_index_rows,
                                                                               D,
                                                                               lda,
                                                                               ldb,
                                                                               ldd,
                                                                               query,
                                                                               index,
                                                                               query_norms,
                                                                               index_norms,
                                                                               sqrt,
                                                                               out_dists,
                                                                               out_inds,
                                                                               k,
                                                                               stream,
                                                                               workspace.data(),
                                                                               worksize);
      }
      break;
    case raft::distance::DistanceType::L2Unexpanded:
    case raft::distance::DistanceType::L2SqrtUnexpanded:
      // Unexpanded form needs a workspace only for mutexes on multi-block
      // launches; same two-call protocol as above.
      fusedL2UnexpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                               n_index_rows,
                                                                               D,
                                                                               lda,
                                                                               ldb,
                                                                               ldd,
                                                                               query,
                                                                               index,
                                                                               sqrt,
                                                                               out_dists,
                                                                               out_inds,
                                                                               k,
                                                                               stream,
                                                                               workspace.data(),
                                                                               worksize);
      if (worksize) {
        workspace.resize(worksize, stream);
        fusedL2UnexpKnn<value_t, value_t, value_t, value_idx, usePrevTopKs, true>(n_query_rows,
                                                                                 n_index_rows,
                                                                                 D,
                                                                                 lda,
                                                                                 ldb,
                                                                                 ldd,
                                                                                 query,
                                                                                 index,
                                                                                 sqrt,
                                                                                 out_dists,
                                                                                 out_inds,
                                                                                 k,
                                                                                 stream,
                                                                                 workspace.data(),
                                                                                 worksize);
      }
      break;
    default: printf("only L2 distance metric is supported\n"); break;
  };
}
} // namespace detail
} // namespace knn
} // namespace spatial
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/fused_l2_knn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "fused_l2_knn-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "fused_l2_knn-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/epsilon_neighborhood.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/contractions.cuh>
#include <raft/util/device_utils.cuh>
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
/**
 * @brief Device-side worker computing an epsilon-neighborhood adjacency
 * matrix under unexpanded squared-L2 distance.
 *
 * Built on the tiled Contractions_NT machinery: each block computes a
 * Mblk x Nblk tile of pairwise squared distances between x and y, writes
 * `adj[i][j] = (dist2(x_i, y_j) <= eps)`, and (optionally) accumulates
 * per-column vertex degrees plus a global edge count into `vd`.
 */
template <typename DataT,
          typename IdxT,
          typename Policy,
          typename BaseClass = raft::linalg::Contractions_NT<DataT, IdxT, Policy>>
struct EpsUnexpL2SqNeighborhood : public BaseClass {
 private:
  typedef Policy P;

  bool* adj;   // output adjacency matrix [m x n], row-major
  DataT eps;   // squared-distance threshold
  IdxT* vd;    // optional vertex degrees [n + 1]; nullptr to skip

  char* smem;  // for final reductions

  DataT acc[P::AccRowsPerTh][P::AccColsPerTh];  // per-thread distance tile

 public:
  DI EpsUnexpL2SqNeighborhood(bool* _adj,
                              IdxT* _vd,
                              const DataT* _x,
                              const DataT* _y,
                              IdxT _m,
                              IdxT _n,
                              IdxT _k,
                              DataT _eps,
                              char* _smem)
    : BaseClass(_x, _y, _m, _n, _k, _smem), adj(_adj), eps(_eps), vd(_vd), smem(_smem)
  {
  }

  DI void run()
  {
    prolog();
    loop();
    epilog();
  }

 private:
  // Zero the accumulators and stage the first k-block of x/y into smem.
  DI void prolog()
  {
    this->ldgXY(IdxT(blockIdx.x) * P::Mblk, IdxT(blockIdx.y) * P::Nblk, 0);
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        acc[i][j] = BaseClass::Zero;
      }
    }
    this->stsXY();
    __syncthreads();
    this->switch_write_buffer();
  }

  // Double-buffered main loop over the k dimension: load the next k-block
  // while accumulating distances from the previous one.
  DI void loop()
  {
    for (int kidx = P::Kblk; kidx < this->k; kidx += P::Kblk) {
      this->ldgXY(IdxT(blockIdx.x) * P::Mblk, IdxT(blockIdx.y) * P::Nblk, kidx);
      accumulate();  // on the previous k-block
      this->stsXY();
      __syncthreads();
      this->switch_write_buffer();
      this->switch_read_buffer();
    }
    accumulate();  // last iteration
  }

  // Threshold the accumulated distances, write adjacency bits, and fold the
  // per-thread neighbor counts into vertex degrees.
  DI void epilog()
  {
    IdxT startx = blockIdx.x * P::Mblk + this->accrowid;
    IdxT starty = blockIdx.y * P::Nblk + this->acccolid;
    auto lid    = raft::laneId();
    IdxT sums[P::AccColsPerTh];
#pragma unroll
    for (int j = 0; j < P::AccColsPerTh; ++j) {
      sums[j] = 0;
    }
#pragma unroll
    for (int i = 0; i < P::AccRowsPerTh; ++i) {
      auto xid = startx + i * P::AccThRows;
#pragma unroll
      for (int j = 0; j < P::AccColsPerTh; ++j) {
        auto yid      = starty + j * P::AccThCols;
        auto is_neigh = acc[i][j] <= eps;
        ///@todo: fix uncoalesced writes using shared mem
        if (xid < this->m && yid < this->n) {
          adj[xid * this->n + yid] = is_neigh;
          sums[j] += is_neigh;
        }
      }
    }
    // perform reduction of adjacency values to compute vertex degrees
    if (vd != nullptr) { updateVertexDegree(sums); }
  }

  // Accumulate squared differences for the current k-block from smem.
  DI void accumulate()
  {
#pragma unroll
    for (int ki = 0; ki < P::Kblk; ki += P::Veclen) {
      this->ldsXY(ki);
#pragma unroll
      for (int i = 0; i < P::AccRowsPerTh; ++i) {
#pragma unroll
        for (int j = 0; j < P::AccColsPerTh; ++j) {
#pragma unroll
          for (int v = 0; v < P::Veclen; ++v) {
            auto diff = this->regx[i][v] - this->regy[j][v];
            acc[i][j] += diff * diff;
          }
        }
      }
    }
  }

  // Block-reduce per-column neighbor counts and atomically add them (and the
  // grand total) into the global vertex-degree array.
  DI void updateVertexDegree(IdxT (&sums)[P::AccColsPerTh])
  {
    __syncthreads();  // so that we can safely reuse smem
    int gid       = threadIdx.x / P::AccThCols;
    int lid       = threadIdx.x % P::AccThCols;
    auto cidx     = IdxT(blockIdx.y) * P::Nblk + lid;
    IdxT totalSum = 0;
    // update the individual vertex degrees
#pragma unroll
    for (int i = 0; i < P::AccColsPerTh; ++i) {
      sums[i]  = batchedBlockReduce<IdxT, P::AccThCols>(sums[i], smem);
      auto cid = cidx + i * P::AccThCols;
      if (gid == 0 && cid < this->n) {
        atomicUpdate(cid, sums[i]);
        totalSum += sums[i];
      }
      __syncthreads();  // for safe smem reuse
    }
    // update the total edge count
    totalSum = raft::blockReduce<IdxT>(totalSum, smem);
    if (threadIdx.x == 0) { atomicUpdate(this->n, totalSum); }
  }

  // Width-dispatched atomic add into vd (no native atomicAdd for all IdxT).
  DI void atomicUpdate(IdxT addrId, IdxT val)
  {
    if (sizeof(IdxT) == 4) {
      raft::myAtomicAdd<unsigned>((unsigned*)(vd + addrId), val);
    } else if (sizeof(IdxT) == 8) {
      raft::myAtomicAdd<unsigned long long>((unsigned long long*)(vd + addrId), val);
    }
  }
};  // struct EpsUnexpL2SqNeighborhood
// Thin kernel wrapper: construct the per-block neighborhood worker over the
// dynamic shared-memory buffer and run it.
template <typename DataT, typename IdxT, typename Policy>
__launch_bounds__(Policy::Nthreads, 2) RAFT_KERNEL epsUnexpL2SqNeighKernel(
  bool* adj, IdxT* vd, const DataT* x, const DataT* y, IdxT m, IdxT n, IdxT k, DataT eps)
{
  extern __shared__ char smem[];
  EpsUnexpL2SqNeighborhood<DataT, IdxT, Policy> obj(adj, vd, x, y, m, n, k, eps, smem);
  obj.run();
}
// Launch epsUnexpL2SqNeighKernel with the Policy4x4 tiling for the given
// vector length; one block per (Mblk x Nblk) output tile.
template <typename DataT, typename IdxT, int VecLen>
void epsUnexpL2SqNeighImpl(bool* adj,
                           IdxT* vd,
                           const DataT* x,
                           const DataT* y,
                           IdxT m,
                           IdxT n,
                           IdxT k,
                           DataT eps,
                           cudaStream_t stream)
{
  typedef typename raft::linalg::Policy4x4<DataT, VecLen>::Policy Policy;

  dim3 grid(raft::ceildiv<int>(m, Policy::Mblk), raft::ceildiv<int>(n, Policy::Nblk));
  dim3 blk(Policy::Nthreads);

  epsUnexpL2SqNeighKernel<DataT, IdxT, Policy>
    <<<grid, blk, Policy::SmemSize, stream>>>(adj, vd, x, y, m, n, k, eps);
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Computes epsilon neighborhood for the L2-Squared distance metric
 *
 * @tparam DataT IO and math type
 * @tparam IdxT Index type
 *
 * @param[out] adj adjacency matrix [row-major] [on device] [dim = m x n]
 * @param[out] vd vertex degree array [on device] [len = n + 1]
 *                `vd + n` stores the total number of edges in the adjacency
 *                matrix. Pass a nullptr if you don't need this info.
 * @param[in] x first matrix [row-major] [on device] [dim = m x k]
 * @param[in] y second matrix [row-major] [on device] [dim = n x k]
 * @param[in] m number of rows in x
 * @param[in] n number of rows in y
 * @param[in] k number of columns in x and y
 * @param[in] eps defines epsilon neighborhood radius (should be passed as
 *                squared as we compute L2-squared distance in this method)
 * @param[in] stream cuda stream
 */
template <typename DataT, typename IdxT>
void epsUnexpL2SqNeighborhood(bool* adj,
                              IdxT* vd,
                              const DataT* x,
                              const DataT* y,
                              IdxT m,
                              IdxT n,
                              IdxT k,
                              DataT eps,
                              cudaStream_t stream)
{
  // Pick the widest vector load (in elements) for which a row of k elements
  // is aligned; fall back to scalar loads otherwise.
  size_t bytes = sizeof(DataT) * k;
  if (16 % sizeof(DataT) == 0 && bytes % 16 == 0) {
    epsUnexpL2SqNeighImpl<DataT, IdxT, 16 / sizeof(DataT)>(adj, vd, x, y, m, n, k, eps, stream);
  } else if (8 % sizeof(DataT) == 0 && bytes % 8 == 0) {
    epsUnexpL2SqNeighImpl<DataT, IdxT, 8 / sizeof(DataT)>(adj, vd, x, y, m, n, k, eps, stream);
  } else {
    epsUnexpL2SqNeighImpl<DataT, IdxT, 1>(adj, vd, x, y, m, n, k, eps, stream);
  }
}
} // namespace detail
} // namespace knn
} // namespace spatial
} // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ann_utils.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/logger.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/integer_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <memory>
#include <optional>
#include <cuda_fp16.hpp>
namespace raft::spatial::knn::detail::utils {
/** Whether pointers are accessible on the device or on the host. */
enum class pointer_residency {
  /** Some of the pointers are on the device, some on the host. */
  mixed,
  /** All pointers accessible from both the device and the host. */
  host_and_device,
  /** All pointers are host accessible. */
  host_only,
  /** All pointers are device accessible. */
  device_only
};
// Recursive helper counting, over a pack of pointers, how many are
// device-accessible and how many are host-accessible, as queried via
// cudaPointerGetAttributes. Returns (device_count, host_count).
template <typename... Types>
struct pointer_residency_count {};

// Base case: empty pack — nothing on either side.
template <>
struct pointer_residency_count<> {
  static inline auto run() -> std::tuple<int, int> { return std::make_tuple(0, 0); }
};

template <typename Type, typename... Types>
struct pointer_residency_count<Type, Types...> {
  static inline auto run(const Type* ptr, const Types*... ptrs) -> std::tuple<int, int>
  {
    auto [on_device, on_host] = pointer_residency_count<Types...>::run(ptrs...);
    cudaPointerAttributes attr;
    RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, ptr));
    switch (attr.type) {
      // Unregistered memory is plain host memory.
      case cudaMemoryTypeUnregistered: return std::make_tuple(on_device, on_host + 1);
      case cudaMemoryTypeHost:
        // Pinned host memory counts as device-accessible only when the
        // device pointer maps to the same address.
        return std::make_tuple(on_device + int(attr.devicePointer == ptr), on_host + 1);
      case cudaMemoryTypeDevice: return std::make_tuple(on_device + 1, on_host);
      // Managed (unified) memory is accessible from both sides.
      case cudaMemoryTypeManaged: return std::make_tuple(on_device + 1, on_host + 1);
      default: return std::make_tuple(on_device, on_host);
    }
  }
};
/**
 * Check if all argument pointers reside on the host, on the device, on both,
 * or neither uniformly (mixed).
 */
template <typename... Types>
auto check_pointer_residency(const Types*... ptrs) -> pointer_residency
{
  const auto counts          = pointer_residency_count<Types...>::run(ptrs...);
  const int device_count     = std::get<0>(counts);
  const int host_count       = std::get<1>(counts);
  constexpr int kNumPointers = sizeof...(Types);
  if (device_count == kNumPointers) {
    // Everything is device-accessible; it may additionally be host-visible.
    return host_count == kNumPointers ? pointer_residency::host_and_device
                                      : pointer_residency::device_only;
  }
  return host_count == kNumPointers ? pointer_residency::host_only : pointer_residency::mixed;
}
/** RAII helper to access the host data from gpu when necessary. */
template <typename PtrT, typename Action>
struct with_mapped_memory_t {
  /**
   * If `ptr` is already device-accessible, use it directly; otherwise page-lock and map the
   * host range [ptr, ptr + size) so the device can access it (unregistered in the destructor).
   */
  with_mapped_memory_t(PtrT ptr, size_t size, Action action) : action_(action)
  {
    // A null input stays null: operator() then invokes the action with nullptr.
    if (ptr == nullptr) { return; }
    switch (utils::check_pointer_residency(ptr)) {
      case utils::pointer_residency::device_only:
      case utils::pointer_residency::host_and_device: {
        // Already device-accessible: no registration needed.
        dev_ptr_ = (void*)ptr;  // NOLINT
      } break;
      default: {
        // Host-only (or mixed) memory: pin + map it and obtain the device-side alias.
        host_ptr_ = (void*)ptr;  // NOLINT
        RAFT_CUDA_TRY(cudaHostRegister(host_ptr_, size, choose_flags(ptr)));
        RAFT_CUDA_TRY(cudaHostGetDevicePointer(&dev_ptr_, host_ptr_, 0));
      } break;
    }
  }

  ~with_mapped_memory_t()
  {
    // Only unregister what we registered ourselves; errors are ignored in the destructor.
    if (host_ptr_ != nullptr) { cudaHostUnregister(host_ptr_); }
  }

  /** Invoke the stored action with a device-accessible version of the original pointer. */
  auto operator()() { return action_((PtrT)dev_ptr_); }  // NOLINT

 private:
  Action action_;
  void* host_ptr_ = nullptr;  // non-null only when this object registered the memory
  void* dev_ptr_  = nullptr;  // device-accessible alias of the input pointer (or nullptr)

  /** Registration flags for read-only (const) input: mark read-only when supported. */
  template <typename T>
  static auto choose_flags(const T*) -> unsigned int
  {
    int dev_id, readonly_supported;
    RAFT_CUDA_TRY(cudaGetDevice(&dev_id));
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(
      &readonly_supported, cudaDevAttrHostRegisterReadOnlySupported, dev_id));
    if (readonly_supported) {
      return cudaHostRegisterMapped | cudaHostRegisterReadOnly;
    } else {
      return cudaHostRegisterMapped;
    }
  }
  /** Registration flags for mutable input: plain mapped registration. */
  template <typename T>
  static auto choose_flags(T*) -> unsigned int
  {
    return cudaHostRegisterMapped;
  }
};
/**
 * Per-type configuration used by `mapping`: `value_t` is the accumulator type associated with
 * `T`, and `kDivisor` is the scaling factor applied when converting between types
 * (floating-point types are unscaled; 8-bit integral types are scaled by their value range).
 */
template <typename T>
struct config {};

template <>
struct config<double> {
  using value_t                    = double;
  static constexpr double kDivisor = 1.0;
};
template <>
struct config<float> {
  using value_t                    = float;
  static constexpr double kDivisor = 1.0;
};
template <>
struct config<half> {
  using value_t                    = half;
  static constexpr double kDivisor = 1.0;
};
template <>
struct config<uint8_t> {
  using value_t                    = uint32_t;
  static constexpr double kDivisor = 256.0;  // 2^8: full range of uint8_t
};
template <>
struct config<int8_t> {
  using value_t                    = int32_t;
  static constexpr double kDivisor = 128.0;  // 2^7: magnitude range of int8_t
};
/**
 * @brief Converting values between the types taking into account scaling factors
 * for the integral types.
 *
 * @tparam T target type of the mapping.
 */
template <typename T>
struct mapping {
  /**
   * @brief Cast and possibly scale a value of the source type `S` to the target type `T`.
   *
   * The scale factor is the ratio of the types' `config::kDivisor`s. The arithmetic is done
   * in the source type if it is floating-point, otherwise in the target type if that is
   * floating-point, otherwise (integral-to-integral) via `float`.
   *
   * @tparam S source type
   * @param x source value
   * @{
   */
  /** Identity overload: same source and target type, no scaling. */
  template <typename S>
  HDI constexpr auto operator()(const S& x) const -> std::enable_if_t<std::is_same_v<S, T>, T>
  {
    return x;
  };

  template <typename S>
  HDI constexpr auto operator()(const S& x) const -> std::enable_if_t<!std::is_same_v<S, T>, T>
  {
    constexpr double kMult = config<T>::kDivisor / config<S>::kDivisor;
    if constexpr (std::is_floating_point_v<S>) { return static_cast<T>(x * static_cast<S>(kMult)); }
    if constexpr (std::is_floating_point_v<T>) { return static_cast<T>(x) * static_cast<T>(kMult); }
    return static_cast<T>(static_cast<float>(x) * static_cast<float>(kMult));
  };
  /** @} */
};
/**
 * @brief Zero out the first `n_elems` elements of type `T` pointed to by `ptr`.
 *
 * @param[out] ptr host or device pointer (must be entirely host- or device-resident)
 * @param[in] n_elems number of elements of type `T` to set to zero
 * @param[in] stream orders the device memset; synchronized before a host-side memset
 */
template <typename T, typename IdxT>
inline void memzero(T* ptr, IdxT n_elems, rmm::cuda_stream_view stream)
{
  switch (check_pointer_residency(ptr)) {
    case pointer_residency::host_and_device:
    case pointer_residency::device_only: {
      RAFT_CUDA_TRY(cudaMemsetAsync(ptr, 0, n_elems * sizeof(T), stream));
    } break;
    case pointer_residency::host_only: {
      // Make sure prior device work touching this memory is done before writing on the host.
      stream.synchronize();
      ::memset(ptr, 0, n_elems * sizeof(T));
    } break;
    default: RAFT_FAIL("memset: unreachable code");
  }
}
/**
 * Kernel: `c[i * len_b + j] = a[i] + b[j]` for the row-major output matrix [len_a, len_b].
 * A nullptr input vector is treated as all-zeros. One thread per output element.
 */
template <typename T, typename IdxT>
RAFT_KERNEL outer_add_kernel(const T* a, IdxT len_a, const T* b, IdxT len_b, T* c)
{
  IdxT gid = threadIdx.x + blockDim.x * static_cast<IdxT>(blockIdx.x);
  IdxT i   = gid / len_b;
  IdxT j   = gid % len_b;
  // Guard against the tail threads of the last block.
  if (i >= len_a) return;
  c[gid] = (a == nullptr ? T(0) : a[i]) + (b == nullptr ? T(0) : b[j]);
}
/**
 * Kernel for `block_copy`: each thread handles one input element at flat index `i`.
 * It binary-searches which block `i` falls into (offsets are in rows, scaled by `n_mult`
 * elements per row) and writes the element at the same relative position within that
 * block's output location.
 */
template <typename T, typename IdxT>
RAFT_KERNEL block_copy_kernel(const IdxT* in_offsets,
                              const IdxT* out_offsets,
                              IdxT n_blocks,
                              const T* in_data,
                              T* out_data,
                              IdxT n_mult)
{
  IdxT i = static_cast<IdxT>(blockDim.x) * static_cast<IdxT>(blockIdx.x) + threadIdx.x;
  // find the source offset using the binary search.
  uint32_t l     = 0;
  uint32_t r     = n_blocks;
  IdxT in_offset = 0;
  // in_offsets has n_blocks + 1 entries; the last entry is the total input size in rows.
  if (in_offsets[r] * n_mult <= i) return;
  while (l + 1 < r) {
    uint32_t c = (l + r) >> 1;
    IdxT o     = in_offsets[c] * n_mult;
    if (o <= i) {
      // Block c starts at or before i: narrow the search to [c, r).
      l         = c;
      in_offset = o;
    } else {
      r = c;
    }
  }
  // copy the data
  out_data[out_offsets[l] * n_mult - in_offset + i] = in_data[i];
}
/**
 * Copy chunks of data from one array to another at given offsets.
 *
 * Both offset arrays have `n_blocks + 1` entries; the last entry of `in_offsets` is the total
 * input size in rows, which is read back to the host (synchronizing `stream`) to size the
 * kernel launch.
 *
 * @tparam T element type
 * @tparam IdxT index type
 *
 * @param[in] in_offsets device array of per-block start offsets in the input, in rows
 * @param[in] out_offsets device array of per-block start offsets in the output, in rows
 * @param n_blocks size of the offset arrays minus one.
 * @param[in] in_data
 * @param[out] out_data
 * @param n_mult constant multiplier for offset values (such as e.g. `dim`)
 * @param stream
 */
template <typename T, typename IdxT>
void block_copy(const IdxT* in_offsets,
                const IdxT* out_offsets,
                IdxT n_blocks,
                const T* in_data,
                T* out_data,
                IdxT n_mult,
                rmm::cuda_stream_view stream)
{
  // Fetch the total input size (last offset entry) to compute the grid size on the host.
  IdxT in_size;
  update_host(&in_size, in_offsets + n_blocks, 1, stream);
  stream.synchronize();
  dim3 threads(128, 1, 1);
  dim3 blocks(ceildiv<IdxT>(in_size * n_mult, threads.x), 1, 1);
  block_copy_kernel<<<blocks, threads, 0, stream>>>(
    in_offsets, out_offsets, n_blocks, in_data, out_data, n_mult);
}
/**
* @brief Fill matrix `c` with all combinations of sums of vectors `a` and `b`.
*
* NB: device-only function
*
* @tparam T element type
* @tparam IdxT index type
*
* @param[in] a device pointer to a vector [len_a]
* @param len_a number of elements in `a`
* @param[in] b device pointer to a vector [len_b]
* @param len_b number of elements in `b`
* @param[out] c row-major matrix [len_a, len_b]
* @param stream
*/
template <typename T, typename IdxT>
void outer_add(const T* a, IdxT len_a, const T* b, IdxT len_b, T* c, rmm::cuda_stream_view stream)
{
dim3 threads(128, 1, 1);
dim3 blocks(ceildiv<IdxT>(len_a * len_b, threads.x), 1, 1);
outer_add_kernel<<<blocks, threads, 0, stream>>>(a, len_a, b, len_b, c);
}
/**
 * Kernel for `copy_selected`: one thread per output element. Row `i_dst` of the output is
 * gathered from row `row_ids[i_dst]` of the source, converting each element with `mapping<T>`.
 */
template <typename T, typename S, typename IdxT, typename LabelT>
RAFT_KERNEL copy_selected_kernel(
  IdxT n_rows, IdxT n_cols, const S* src, const LabelT* row_ids, IdxT ld_src, T* dst, IdxT ld_dst)
{
  IdxT gid   = threadIdx.x + blockDim.x * static_cast<IdxT>(blockIdx.x);
  IdxT j     = gid % n_cols;
  IdxT i_dst = gid / n_cols;
  if (i_dst >= n_rows) return;
  auto i_src              = static_cast<IdxT>(row_ids[i_dst]);
  dst[ld_dst * i_dst + j] = mapping<T>{}(src[ld_src * i_src + j]);
}
/**
 * @brief Copy selected rows of a matrix while mapping the data from the source to the target
 * type.
 *
 * All three pointers (`src`, `dst`, `row_ids`) must reside on the same side (all host or
 * all device); the copy runs as a kernel or as a host loop accordingly.
 *
 * @tparam T target type
 * @tparam S source type
 * @tparam IdxT index type
 * @tparam LabelT label type
 *
 * @param n_rows
 * @param n_cols
 * @param[in] src input matrix [..., ld_src]
 * @param[in] row_ids selection of rows to be copied [n_rows]
 * @param ld_src number of cols in the input (ld_src >= n_cols)
 * @param[out] dst output matrix [n_rows, ld_dst]
 * @param ld_dst number of cols in the output (ld_dst >= n_cols)
 * @param stream
 */
template <typename T, typename S, typename IdxT, typename LabelT>
void copy_selected(IdxT n_rows,
                   IdxT n_cols,
                   const S* src,
                   const LabelT* row_ids,
                   IdxT ld_src,
                   T* dst,
                   IdxT ld_dst,
                   rmm::cuda_stream_view stream)
{
  switch (check_pointer_residency(src, dst, row_ids)) {
    case pointer_residency::host_and_device:
    case pointer_residency::device_only: {
      // Device path: one thread per output element.
      IdxT block_dim = 128;
      IdxT grid_dim  = ceildiv(n_rows * n_cols, block_dim);
      copy_selected_kernel<T, S>
        <<<grid_dim, block_dim, 0, stream>>>(n_rows, n_cols, src, row_ids, ld_src, dst, ld_dst);
    } break;
    case pointer_residency::host_only: {
      // Host path: wait for outstanding device work on these buffers before touching them.
      stream.synchronize();
      for (IdxT i_dst = 0; i_dst < n_rows; i_dst++) {
        auto i_src = static_cast<IdxT>(row_ids[i_dst]);
        for (IdxT j = 0; j < n_cols; j++) {
          dst[ld_dst * i_dst + j] = mapping<T>{}(src[ld_src * i_src + j]);
        }
      }
      stream.synchronize();
    } break;
    default: RAFT_FAIL("All pointers must reside on the same side, host or device.");
  }
}
/**
 * A batch input iterator over the data source.
 * Given an input pointer, it decides whether the current device has the access to the data and
 * gives it back to the user in batches. Three scenarios are possible:
 *
 *   1. if `source == nullptr`: then `batch.data() == nullptr`
 *   2. if `source` is accessible from the device, `batch.data()` points directly at the source at
 *      the proper offsets on each iteration.
 *   3. if `source` is not accessible from the device, `batch.data()` points to an intermediate
 *      buffer; the corresponding data is copied in the given `stream` on every iterator
 *      dereference (i.e. batches can be skipped). Dereferencing the same batch two times in a row
 *      does not force the copy.
 *
 * In all three scenarios, the number of iterations, batch offsets and sizes are the same.
 *
 * The iterator can be reused. If the number of iterations is one, at most one copy will ever be
 * invoked (i.e. small datasets are not reloaded multiple times).
 */
template <typename T>
struct batch_load_iterator {
  using size_type = size_t;

  /** A single batch of data residing in device memory. */
  struct batch {
    /** Logical width of a single row in a batch, in elements of type `T`. */
    [[nodiscard]] auto row_width() const -> size_type { return row_width_; }
    /** Logical offset of the batch, in rows (`row_width()`) */
    [[nodiscard]] auto offset() const -> size_type { return pos_.value_or(0) * batch_size_; }
    /** Logical size of the batch, in rows (`row_width()`) */
    [[nodiscard]] auto size() const -> size_type { return batch_len_; }
    /** Device-accessible pointer to the batch data (nullptr if the source is nullptr). */
    [[nodiscard]] auto data() const -> const T* { return const_cast<const T*>(dev_ptr_); }
    /** Whether this batch copies the data (i.e. the source is inaccessible from the device). */
    [[nodiscard]] auto does_copy() const -> bool { return needs_copy_; }

   private:
    batch(const T* source,
          size_type n_rows,
          size_type row_width,
          size_type batch_size,
          rmm::cuda_stream_view stream,
          rmm::mr::device_memory_resource* mr)
      // NB: initializers are listed in member declaration order — that is the order in which
      // they actually execute. `batch_len_` starts at zero so `size()` is well-defined even
      // before the first `load()`.
      : stream_(stream),
        buf_(0, stream, mr),
        source_(source),
        n_rows_(n_rows),
        row_width_(row_width),
        batch_size_(std::min(batch_size, n_rows)),
        n_iters_(raft::div_rounding_up_safe(n_rows, batch_size)),
        needs_copy_(false),
        pos_(std::nullopt),
        batch_len_(0),
        dev_ptr_(nullptr)
    {
      if (source_ == nullptr) { return; }
      // Ask the CUDA runtime whether the source already has a device-side address.
      cudaPointerAttributes attr;
      RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, source_));
      dev_ptr_ = reinterpret_cast<T*>(attr.devicePointer);
      if (dev_ptr_ == nullptr) {
        // Not device-accessible: stage batches through an intermediate device buffer.
        buf_.resize(row_width_ * batch_size_, stream);
        dev_ptr_    = buf_.data();
        needs_copy_ = true;
      }
    }
    rmm::cuda_stream_view stream_;
    rmm::device_uvector<T> buf_;  // staging buffer; stays empty when no copy is needed
    const T* source_;
    size_type n_rows_;
    size_type row_width_;
    size_type batch_size_;
    size_type n_iters_;
    bool needs_copy_;
    std::optional<size_type> pos_;  // nullopt until the first load()
    size_type batch_len_;           // rows in the currently loaded batch
    T* dev_ptr_;
    friend class batch_load_iterator<T>;

    /**
     * Changes the state of the batch to point at the `pos` index.
     * If necessary, copies the data from the source in the registered stream.
     */
    void load(const size_type& pos)
    {
      // No-op if the data is already loaded, or it's the end of the input.
      if (pos == pos_ || pos >= n_iters_) { return; }
      pos_.emplace(pos);
      // The last batch may be shorter than batch_size_.
      batch_len_ = std::min(batch_size_, n_rows_ - std::min(offset(), n_rows_));
      if (source_ == nullptr) { return; }
      if (needs_copy_) {
        if (size() > 0) {
          RAFT_LOG_TRACE("batch_load_iterator::copy(offset = %zu, size = %zu, row_width = %zu)",
                         size_t(offset()),
                         size_t(size()),
                         size_t(row_width()));
          copy(dev_ptr_, source_ + offset() * row_width(), size() * row_width(), stream_);
        }
      } else {
        // Zero-copy: just slide the pointer along the device-accessible source.
        dev_ptr_ = const_cast<T*>(source_) + offset() * row_width();
      }
    }
  };
  using value_type = batch;
  using reference  = const value_type&;
  using pointer    = const value_type*;

  /**
   * Create a batch iterator over the data `source`.
   *
   * For convenience, the data `source` is read in logical units of size `row_width`; batch sizes
   * and offsets are calculated in logical rows. Hence, can interpret the data as a contiguous
   * row-major matrix of size [n_rows, row_width], and the batches are the sub-matrices of size
   * [x<=batch_size, n_rows].
   *
   * @param source the input data -- host, device, or nullptr.
   * @param n_rows the size of the input in logical rows.
   * @param row_width the size of the logical row in the elements of type `T`.
   * @param batch_size the desired size of the batch.
   * @param stream the ordering for the host->device copies, if applicable.
   * @param mr a custom memory resource for the intermediate buffer, if applicable.
   */
  batch_load_iterator(const T* source,
                      size_type n_rows,
                      size_type row_width,
                      size_type batch_size,
                      rmm::cuda_stream_view stream,
                      rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
    : cur_batch_(new batch(source, n_rows, row_width, batch_size, stream, mr)), cur_pos_(0)
  {
  }
  /**
   * Whether this iterator copies the data on every iteration
   * (i.e. the source is inaccessible from the device).
   */
  [[nodiscard]] auto does_copy() const -> bool { return cur_batch_->does_copy(); }
  /** Reset the iterator position to `begin()` */
  void reset() { cur_pos_ = 0; }
  /** Reset the iterator position to `end()` */
  void reset_to_end() { cur_pos_ = cur_batch_->n_iters_; }
  [[nodiscard]] auto begin() const -> const batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    x.reset();
    return x;
  }
  [[nodiscard]] auto end() const -> const batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    x.reset_to_end();
    return x;
  }
  // Dereference lazily loads the batch (and triggers the copy in scenario 3).
  [[nodiscard]] auto operator*() const -> reference
  {
    cur_batch_->load(cur_pos_);
    return *cur_batch_;
  }
  [[nodiscard]] auto operator->() const -> pointer
  {
    cur_batch_->load(cur_pos_);
    return cur_batch_.get();
  }
  // NB: copies of an iterator share the batch state; equality compares the shared state + pos.
  friend auto operator==(const batch_load_iterator<T>& x, const batch_load_iterator<T>& y) -> bool
  {
    return x.cur_batch_ == y.cur_batch_ && x.cur_pos_ == y.cur_pos_;
  };
  friend auto operator!=(const batch_load_iterator<T>& x, const batch_load_iterator<T>& y) -> bool
  {
    return x.cur_batch_ != y.cur_batch_ || x.cur_pos_ != y.cur_pos_;
  };
  auto operator++() -> batch_load_iterator<T>&
  {
    ++cur_pos_;
    return *this;
  }
  auto operator++(int) -> batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    ++cur_pos_;
    return x;
  }
  auto operator--() -> batch_load_iterator<T>&
  {
    --cur_pos_;
    return *this;
  }
  auto operator--(int) -> batch_load_iterator<T>
  {
    batch_load_iterator<T> x(*this);
    --cur_pos_;
    return x;
  }

 private:
  std::shared_ptr<value_type> cur_batch_;
  size_type cur_pos_;
};
} // namespace raft::spatial::knn::detail::utils
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ball_cover.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include "../ball_cover_types.hpp"
#include "ball_cover/common.cuh"
#include "ball_cover/registers.cuh"
#include "haversine_distance.cuh"
#include <cstdint>
#include <limits.h>
#include <raft/util/cuda_utils.cuh>
#include <raft/neighbors/detail/faiss_select/key_value_block_select.cuh>
#include <raft/matrix/copy.cuh>
#include <raft/neighbors/brute_force.cuh>
#include <raft/random/rng.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
/**
 * Given a set of points in row-major order which are to be
 * used as a set of index points, uniformly samples a subset
 * of points to be used as landmarks.
 * @tparam value_idx
 * @tparam value_t
 * @param handle raft resources (stream / thrust policy)
 * @param index ball cover index; `get_R()` is filled with `n_landmarks` sampled rows of X
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void sample_landmarks(raft::resources const& handle,
                      BallCoverIndex<value_idx, value_t, value_int>& index)
{
  // Scratch buffers: sampled column ids, uniform weights, and (unused) sampled indices.
  rmm::device_uvector<value_idx> R_1nn_cols2(index.n_landmarks, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_t> R_1nn_ones(index.m, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_idx> R_indices(index.n_landmarks, resource::get_cuda_stream(handle));
  // Candidate pool is simply [0, m).
  thrust::sequence(resource::get_thrust_policy(handle),
                   index.get_R_1nn_cols().data_handle(),
                   index.get_R_1nn_cols().data_handle() + index.m,
                   (value_idx)0);
  // Uniform weights => uniform sampling.
  thrust::fill(resource::get_thrust_policy(handle),
               R_1nn_ones.data(),
               R_1nn_ones.data() + R_1nn_ones.size(),
               1.0);
  thrust::fill(resource::get_thrust_policy(handle),
               R_indices.data(),
               R_indices.data() + R_indices.size(),
               0.0);
  /**
   * 1. Randomly sample sqrt(n) points from X
   */
  // NOTE(review): fixed seed (12345) makes landmark selection deterministic across runs.
  raft::random::RngState rng_state(12345);
  raft::random::sampleWithoutReplacement(handle,
                                         rng_state,
                                         R_indices.data(),
                                         R_1nn_cols2.data(),
                                         index.get_R_1nn_cols().data_handle(),
                                         R_1nn_ones.data(),
                                         (value_idx)index.n_landmarks,
                                         (value_idx)index.m);
  // index.get_X() returns the wrong indextype (uint32_t where we need value_idx), so need to
  // create new device_matrix_view here
  auto x = index.get_X();
  auto r = index.get_R();
  // Gather the sampled rows of X into the landmark matrix R.
  raft::matrix::copy_rows<value_t, value_idx>(
    handle,
    make_device_matrix_view<const value_t, value_idx>(x.data_handle(), x.extent(0), x.extent(1)),
    make_device_matrix_view<value_t, value_idx>(r.data_handle(), r.extent(0), r.extent(1)),
    make_device_vector_view(R_1nn_cols2.data(), index.n_landmarks));
}
/**
 * Constructs a 1-nn index mapping each landmark to their closest points.
 * @tparam value_idx
 * @tparam value_t
 * @param handle raft resources
 * @param R_knn_inds_ptr device array [m * k] of each point's k closest landmark ids
 * @param R_knn_dists_ptr device array [m * k] of the corresponding distances
 * @param k stride of the two arrays above (only column 0, the 1-nn, is used)
 * @param index ball cover index; fills R_1nn_dists, sorts R_1nn_cols, and builds R_indptr
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void construct_landmark_1nn(raft::resources const& handle,
                            const value_idx* R_knn_inds_ptr,
                            const value_t* R_knn_dists_ptr,
                            value_int k,
                            BallCoverIndex<value_idx, value_t, value_int>& index)
{
  rmm::device_uvector<value_idx> R_1nn_inds(index.m, resource::get_cuda_stream(handle));
  thrust::fill(resource::get_thrust_policy(handle),
               R_1nn_inds.data(),
               R_1nn_inds.data() + index.m,
               std::numeric_limits<value_idx>::max());
  value_idx* R_1nn_inds_ptr = R_1nn_inds.data();
  value_t* R_1nn_dists_ptr  = index.get_R_1nn_dists().data_handle();
  // Slice out column 0 (the single closest landmark) for every point.
  auto idxs = thrust::make_counting_iterator<value_idx>(0);
  thrust::for_each(
    resource::get_thrust_policy(handle), idxs, idxs + index.m, [=] __device__(value_idx i) {
      R_1nn_inds_ptr[i]  = R_knn_inds_ptr[i * k];
      R_1nn_dists_ptr[i] = R_knn_dists_ptr[i * k];
    });
  auto keys = thrust::make_zip_iterator(
    thrust::make_tuple(R_1nn_inds.data(), index.get_R_1nn_dists().data_handle()));
  // group neighborhoods for each reference landmark and sort each group by distance
  thrust::sort_by_key(resource::get_thrust_policy(handle),
                      keys,
                      keys + index.m,
                      index.get_R_1nn_cols().data_handle(),
                      NNComp());
  // convert to CSR for fast lookup
  raft::sparse::convert::sorted_coo_to_csr(R_1nn_inds.data(),
                                           index.m,
                                           index.get_R_indptr().data_handle(),
                                           index.n_landmarks + 1,
                                           resource::get_cuda_stream(handle));
}
/**
 * Computes the k closest landmarks to a set of query points
 * via a brute-force knn against the landmark matrix R.
 * @tparam value_idx
 * @tparam value_t
 * @tparam value_int
 * @param handle raft resources
 * @param index trained-or-in-training ball cover index (provides R and the metric)
 * @param query_pts device matrix [n_query_pts, index.n] of query points
 * @param n_query_pts number of query rows
 * @param k number of closest landmarks to return per query
 * @param[out] R_knn_inds device array [n_query_pts * k] of landmark ids
 * @param[out] R_knn_dists device array [n_query_pts * k] of distances to those landmarks
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void k_closest_landmarks(raft::resources const& handle,
                         const BallCoverIndex<value_idx, value_t, value_int>& index,
                         const value_t* query_pts,
                         value_int n_query_pts,
                         value_int k,
                         value_idx* R_knn_inds,
                         value_t* R_knn_dists)
{
  // The landmark matrix is the single "database" input to the brute-force search.
  std::vector<raft::device_matrix_view<const value_t, value_int>> inputs = {index.get_R()};
  raft::neighbors::brute_force::knn<value_idx, value_t, value_int>(
    handle,
    inputs,
    make_device_matrix_view(query_pts, n_query_pts, inputs[0].extent(1)),
    make_device_matrix_view(R_knn_inds, n_query_pts, k),
    make_device_matrix_view(R_knn_dists, n_query_pts, k),
    index.get_metric());
}
/**
 * Uses the sorted data points in the 1-nn landmark index to compute
 * an array of radii for each landmark: since each landmark's member distances are
 * sorted ascending, the last entry of its CSR row is the landmark's radius.
 * @tparam value_idx
 * @tparam value_t
 * @param handle raft resources
 * @param index ball cover index with R_indptr / R_1nn_dists built; fills R_radius
 */
template <typename value_idx, typename value_t, typename value_int = std::uint32_t>
void compute_landmark_radii(raft::resources const& handle,
                            BallCoverIndex<value_idx, value_t, value_int>& index)
{
  const value_idx* landmark_indptr = index.get_R_indptr().data_handle();
  const value_t* sorted_1nn_dists  = index.get_R_1nn_dists().data_handle();
  value_t* landmark_radii          = index.get_R_radius().data_handle();
  auto landmark_ids = thrust::make_counting_iterator<value_idx>(0);
  thrust::for_each(resource::get_thrust_policy(handle),
                   landmark_ids,
                   landmark_ids + index.n_landmarks,
                   [=] __device__(value_idx landmark) {
                     // Distances within a row are sorted, so the last one is the max.
                     value_idx last_member    = landmark_indptr[landmark + 1] - 1;
                     landmark_radii[landmark] = sorted_1nn_dists[last_member];
                   });
}
/**
 * 4. Perform k-select over original KNN, using L_r to filter distances
 *
 * a. Map 1 row to each warp/block
 * b. Add closest k R points to heap
 * c. Iterate through batches of R, having each thread in the warp load a set
 * of distances y from R (only if d(q, r) < 3 * distance to closest r) and
 * marking the distance to be computed between x, y only
 * if knn[k].distance >= d(x_i, R_k) + d(R_k, y)
 *
 * The two branches below are identical except for the compile-time dimensionality
 * template argument (2 vs 3); only 2d and 3d inputs are supported.
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          typename dist_func>
void perform_rbc_query(raft::resources const& handle,
                       const BallCoverIndex<value_idx, value_t, value_int>& index,
                       const value_t* query,
                       value_int n_query_pts,
                       std::uint32_t k,
                       const value_idx* R_knn_inds,
                       const value_t* R_knn_dists,
                       dist_func dfunc,
                       value_idx* inds,
                       value_t* dists,
                       value_int* dists_counter,
                       value_int* post_dists_counter,
                       float weight                = 1.0,
                       bool perform_post_filtering = true)
{
  // initialize output inds and dists
  thrust::fill(resource::get_thrust_policy(handle),
               inds,
               inds + (k * n_query_pts),
               std::numeric_limits<value_idx>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               dists,
               dists + (k * n_query_pts),
               std::numeric_limits<value_t>::max());
  if (index.n == 2) {
    // Compute nearest k for each neighborhood in each closest R
    rbc_low_dim_pass_one<value_idx, value_t, value_int, 2>(handle,
                                                           index,
                                                           query,
                                                           n_query_pts,
                                                           k,
                                                           R_knn_inds,
                                                           R_knn_dists,
                                                           dfunc,
                                                           inds,
                                                           dists,
                                                           weight,
                                                           dists_counter);
    if (perform_post_filtering) {
      // Second pass: re-check candidates the first pass may have pruned too aggressively.
      rbc_low_dim_pass_two<value_idx, value_t, value_int, 2>(handle,
                                                             index,
                                                             query,
                                                             n_query_pts,
                                                             k,
                                                             R_knn_inds,
                                                             R_knn_dists,
                                                             dfunc,
                                                             inds,
                                                             dists,
                                                             weight,
                                                             post_dists_counter);
    }
  } else if (index.n == 3) {
    // Compute nearest k for each neighborhood in each closest R
    rbc_low_dim_pass_one<value_idx, value_t, value_int, 3>(handle,
                                                           index,
                                                           query,
                                                           n_query_pts,
                                                           k,
                                                           R_knn_inds,
                                                           R_knn_dists,
                                                           dfunc,
                                                           inds,
                                                           dists,
                                                           weight,
                                                           dists_counter);
    if (perform_post_filtering) {
      rbc_low_dim_pass_two<value_idx, value_t, value_int, 3>(handle,
                                                             index,
                                                             query,
                                                             n_query_pts,
                                                             k,
                                                             R_knn_inds,
                                                             R_knn_dists,
                                                             dfunc,
                                                             inds,
                                                             dists,
                                                             weight,
                                                             post_dists_counter);
    }
  }
}
/**
 * Similar to a ball tree, the random ball cover algorithm
 * uses the triangle inequality to prune distance computations
 * in any metric space with a guarantee of sqrt(n) * c^{3/2}
 * where `c` is an expansion constant based on the distance
 * metric.
 *
 * This function variant performs an all nearest neighbors
 * query which is useful for algorithms that need to perform
 * A * A.T.
 *
 * NOTE(review): `dfunc` is not referenced in this function body — presumably kept for
 * interface symmetry with the query entry points; confirm before removing.
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          typename value_int = std::uint32_t,
          typename distance_func>
void rbc_build_index(raft::resources const& handle,
                     BallCoverIndex<value_idx, value_t, value_int>& index,
                     distance_func dfunc)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  ASSERT(!index.is_index_trained(), "index cannot be previously trained");
  rmm::device_uvector<value_idx> R_knn_inds(index.m, resource::get_cuda_stream(handle));
  // Initialize the uvectors
  thrust::fill(resource::get_thrust_policy(handle),
               R_knn_inds.begin(),
               R_knn_inds.end(),
               std::numeric_limits<value_idx>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               index.get_R_closest_landmark_dists().data_handle(),
               index.get_R_closest_landmark_dists().data_handle() + index.m,
               std::numeric_limits<value_t>::max());
  /**
   * 1. Randomly sample sqrt(n) points from X
   */
  sample_landmarks<value_idx, value_t>(handle, index);
  /**
   * 2. Perform knn = bfknn(X, R, k)
   */
  // Only the single closest landmark per point is needed for building.
  value_int k = 1;
  k_closest_landmarks(handle,
                      index,
                      index.get_X().data_handle(),
                      index.m,
                      k,
                      R_knn_inds.data(),
                      index.get_R_closest_landmark_dists().data_handle());
  /**
   * 3. Create L_r = knn[:,0].T (CSR)
   *
   * Slice closest neighboring R
   * Secondary sort by (R_knn_inds, R_knn_dists)
   */
  construct_landmark_1nn(
    handle, R_knn_inds.data(), index.get_R_closest_landmark_dists().data_handle(), k, index);
  /**
   * Compute radius of each R for filtering: p(q, r) <= p(q, q_r) + radius(r)
   * (need to take the
   */
  compute_landmark_radii(handle, index);
}
/**
 * Performs an all neighbors knn query (e.g. index == query).
 * Builds the ball cover index (landmarks, 1-nn CSR, radii) as a side effect,
 * then queries every indexed point against it.
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          typename value_int = std::uint32_t,
          typename distance_func>
void rbc_all_knn_query(raft::resources const& handle,
                       BallCoverIndex<value_idx, value_t, value_int>& index,
                       value_int k,
                       value_idx* inds,
                       value_t* dists,
                       distance_func dfunc,
                       // approximate nn options
                       bool perform_post_filtering = true,
                       float weight                = 1.0)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  ASSERT(index.n_landmarks >= k, "number of landmark samples must be >= k");
  ASSERT(!index.is_index_trained(), "index cannot be previously trained");
  rmm::device_uvector<value_idx> R_knn_inds(k * index.m, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_t> R_knn_dists(k * index.m, resource::get_cuda_stream(handle));
  // Initialize the uvectors
  thrust::fill(resource::get_thrust_policy(handle),
               R_knn_inds.begin(),
               R_knn_inds.end(),
               std::numeric_limits<value_idx>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               R_knn_dists.begin(),
               R_knn_dists.end(),
               std::numeric_limits<value_t>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               inds,
               inds + (k * index.m),
               std::numeric_limits<value_idx>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               dists,
               dists + (k * index.m),
               std::numeric_limits<value_t>::max());
  // For debugging / verification. Remove before releasing
  rmm::device_uvector<value_int> dists_counter(index.m, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_int> post_dists_counter(index.m, resource::get_cuda_stream(handle));
  // Build phase: sample landmarks, find each point's k closest landmarks,
  // group points by their closest landmark, compute landmark radii.
  sample_landmarks<value_idx, value_t>(handle, index);
  k_closest_landmarks(
    handle, index, index.get_X().data_handle(), index.m, k, R_knn_inds.data(), R_knn_dists.data());
  construct_landmark_1nn(handle, R_knn_inds.data(), R_knn_dists.data(), k, index);
  compute_landmark_radii(handle, index);
  // Query phase: every indexed point is also a query point.
  perform_rbc_query(handle,
                    index,
                    index.get_X().data_handle(),
                    index.m,
                    k,
                    R_knn_inds.data(),
                    R_knn_dists.data(),
                    dfunc,
                    inds,
                    dists,
                    dists_counter.data(),
                    post_dists_counter.data(),
                    weight,
                    perform_post_filtering);
}
/**
 * Performs a knn query against an index. This assumes the index has
 * already been built (see `rbc_build_index`).
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          typename value_int = std::uint32_t,
          typename distance_func>
void rbc_knn_query(raft::resources const& handle,
                   const BallCoverIndex<value_idx, value_t, value_int>& index,
                   value_int k,
                   const value_t* query,
                   value_int n_query_pts,
                   value_idx* inds,
                   value_t* dists,
                   distance_func dfunc,
                   // approximate nn options
                   bool perform_post_filtering = true,
                   float weight                = 1.0)
{
  ASSERT(index.n <= 3, "only 2d and 3d vectors are supported in current implementation");
  ASSERT(index.n_landmarks >= k, "number of landmark samples must be >= k");
  ASSERT(index.is_index_trained(), "index must be previously trained");
  rmm::device_uvector<value_idx> R_knn_inds(k * n_query_pts, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_t> R_knn_dists(k * n_query_pts, resource::get_cuda_stream(handle));
  // Initialize the uvectors
  thrust::fill(resource::get_thrust_policy(handle),
               R_knn_inds.begin(),
               R_knn_inds.end(),
               std::numeric_limits<value_idx>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               R_knn_dists.begin(),
               R_knn_dists.end(),
               std::numeric_limits<value_t>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               inds,
               inds + (k * n_query_pts),
               std::numeric_limits<value_idx>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               dists,
               dists + (k * n_query_pts),
               std::numeric_limits<value_t>::max());
  // Find each query's k closest landmarks to seed the filtered search.
  k_closest_landmarks(handle, index, query, n_query_pts, k, R_knn_inds.data(), R_knn_dists.data());
  // For debugging / verification. Remove before releasing
  rmm::device_uvector<value_int> dists_counter(index.m, resource::get_cuda_stream(handle));
  rmm::device_uvector<value_int> post_dists_counter(index.m, resource::get_cuda_stream(handle));
  thrust::fill(resource::get_thrust_policy(handle),
               post_dists_counter.data(),
               post_dists_counter.data() + post_dists_counter.size(),
               0);
  thrust::fill(resource::get_thrust_policy(handle),
               dists_counter.data(),
               dists_counter.data() + dists_counter.size(),
               0);
  perform_rbc_query(handle,
                    index,
                    query,
                    n_query_pts,
                    k,
                    R_knn_inds.data(),
                    R_knn_dists.data(),
                    dfunc,
                    inds,
                    dists,
                    dists_counter.data(),
                    post_dists_counter.data(),
                    weight,
                    perform_post_filtering);
}
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ann_quantized.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../ann_common.h"
#include "../ivf_flat.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include "processing.cuh"
#include <raft/core/operators.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/label/classlabels.cuh>
#include <raft/neighbors/ivf_pq.cuh>
#include <raft/core/device_mdspan.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <thrust/iterator/transform_iterator.h>
namespace raft::spatial::knn::detail {
/**
 * Build an approximate-nn index (IVF-Flat or IVF-PQ, chosen by the dynamic type of `params`)
 * over a row-major device matrix [n, D], storing the result in `index`.
 *
 * @param handle raft resources
 * @param[out] index wrapper receiving the built index, metric, and metric processor
 * @param params legacy parameter object; must be an IVFFlatParam or IVFPQParam
 * @param metric distance metric (cosine/correlation are translated to inner product for float)
 * @param metricArg metric argument (e.g. p for Minkowski)
 * @param[in] index_array device pointer to the training data [n, D]
 * @param n number of rows
 * @param D number of columns
 */
template <typename T = float, typename IntType = int>
void approx_knn_build_index(raft::resources const& handle,
                            knnIndex* index,
                            knnIndexParam* params,
                            raft::distance::DistanceType metric,
                            float metricArg,
                            T* index_array,
                            IntType n,
                            IntType D)
{
  auto stream      = resource::get_cuda_stream(handle);
  index->metric    = metric;
  index->metricArg = metricArg;
  if (dynamic_cast<const IVFParam*>(params)) {
    index->nprobe = dynamic_cast<const IVFParam*>(params)->nprobe;
  }
  // Decide index flavor by the dynamic type of the legacy params object.
  auto ivf_ft_pams = dynamic_cast<IVFFlatParam*>(params);
  auto ivf_pq_pams = dynamic_cast<IVFPQParam*>(params);
  if constexpr (std::is_same_v<T, float>) {
    index->metric_processor = create_processor<float>(metric, n, D, 0, false, stream);
    // For cosine/correlation distance, the metric processor translates distance
    // to inner product via pre/post processing - pass the translated metric to
    // ANN index
    if (metric == raft::distance::DistanceType::CosineExpanded ||
        metric == raft::distance::DistanceType::CorrelationExpanded) {
      metric = index->metric = raft::distance::DistanceType::InnerProduct;
    }
  }
  // NB: preprocess mutates index_array in place; it is reverted below after building.
  if constexpr (std::is_same_v<T, float>) { index->metric_processor->preprocess(index_array); }
  if (ivf_ft_pams) {
    auto new_params                = from_legacy_index_params(*ivf_ft_pams, metric, metricArg);
    index->ivf_flat<T, int64_t>() = std::make_unique<const ivf_flat::index<T, int64_t>>(
      ivf_flat::build(handle, new_params, index_array, int64_t(n), D));
  } else if (ivf_pq_pams) {
    neighbors::ivf_pq::index_params params;
    params.metric     = metric;
    params.metric_arg = metricArg;
    params.n_lists    = ivf_pq_pams->nlist;
    params.pq_bits    = ivf_pq_pams->n_bits;
    params.pq_dim     = ivf_pq_pams->M;
    // TODO: handle ivf_pq_pams.usePrecomputedTables ?
    auto index_view = raft::make_device_matrix_view<const T, int64_t>(index_array, n, D);
    index->ivf_pq   = std::make_unique<const neighbors::ivf_pq::index<int64_t>>(
      neighbors::ivf_pq::build(handle, params, index_view));
  } else {
    RAFT_FAIL("Unrecognized index type.");
  }
  if constexpr (std::is_same_v<T, float>) { index->metric_processor->revert(index_array); }
}
/**
 * @brief Query a previously built approximate nearest-neighbors index.
 *
 * Dispatches to whichever backend (IVF-Flat or IVF-PQ) the index holds, then
 * post-processes the distances so callers see values in the originally
 * requested metric.
 *
 * @param handle      raft resources (provides the CUDA stream)
 * @param distances   device output, [n, k] distances
 * @param indices     device output, [n, k] neighbor indices
 * @param index       trained index produced by approx_knn_build_index
 * @param k           number of neighbors per query
 * @param query_array device pointer to the query rows
 * @param n           number of query rows
 */
template <typename T = float, typename IntType = int>
void approx_knn_search(raft::resources const& handle,
                       float* distances,
                       int64_t* indices,
                       knnIndex* index,
                       IntType k,
                       T* query_array,
                       IntType n)
{
  constexpr bool kIsFloat = std::is_same_v<T, float>;

  // Apply the same in-place metric translation that was used at build time.
  if constexpr (kIsFloat) {
    index->metric_processor->preprocess(query_array);
    index->metric_processor->set_num_queries(k);
  }

  // Dispatch to whichever backend index was populated at build time.
  if (index->ivf_flat<T, int64_t>()) {
    ivf_flat::search_params search_pams;
    search_pams.n_probes = index->nprobe;
    ivf_flat::search(
      handle, search_pams, *(index->ivf_flat<T, int64_t>()), query_array, n, k, indices, distances);
  } else if (index->ivf_pq) {
    neighbors::ivf_pq::search_params search_pams;
    search_pams.n_probes = index->nprobe;
    auto query_view =
      raft::make_device_matrix_view<const T, uint32_t>(query_array, n, index->ivf_pq->dim());
    auto indices_view   = raft::make_device_matrix_view<int64_t, uint32_t>(indices, n, k);
    auto distances_view = raft::make_device_matrix_view<float, uint32_t>(distances, n, k);
    neighbors::ivf_pq::search(
      handle, search_pams, *index->ivf_pq, query_view, indices_view, distances_view);
  } else {
    RAFT_FAIL("The model is not trained");
  }

  // Undo the in-place changes to the query buffer.
  if constexpr (kIsFloat) { index->metric_processor->revert(query_array); }

  // The backends return distances raised to a power for these metrics; apply
  // the final root so the reported distances are the real ones.
  const bool needs_root = index->metric == raft::distance::DistanceType::L2SqrtExpanded ||
                          index->metric == raft::distance::DistanceType::L2SqrtUnexpanded ||
                          index->metric == raft::distance::DistanceType::LpUnexpanded;
  if (needs_root) {
    float exponent = 0.5;  // standard l2
    if (index->metric == raft::distance::DistanceType::LpUnexpanded) {
      exponent = 1.0 / index->metricArg;
    }
    raft::linalg::unaryOp<float>(distances,
                                 distances,
                                 n * k,
                                 raft::pow_const_op<float>(exponent),
                                 resource::get_cuda_stream(handle));
  }

  if constexpr (kIsFloat) { index->metric_processor->postprocess(distances); }
}
} // namespace raft::spatial::knn::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/processing.cuh | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "processing.hpp"
#include <raft/core/operators.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/stats/mean.cuh>
#include <raft/stats/mean_center.cuh>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace spatial {
namespace knn {
/**
 * @brief Metric processor that maps cosine distance onto inner product.
 *
 * `preprocess` L2-normalizes each row in place so that an inner-product
 * search on the normalized data ranks neighbors by cosine similarity;
 * `revert` undoes the normalization and `postprocess` converts similarities
 * to distances (1 - sim). These methods override virtuals declared in
 * MetricProcessor (they are invoked polymorphically through a
 * MetricProcessor<math_t>*), so they are now marked `override`.
 */
template <typename math_t>
class CosineMetricProcessor : public MetricProcessor<math_t> {
 protected:
  int k_;                                // number of neighbors per query (used by postprocess)
  bool row_major_;                       // layout of the [n_rows_, n_cols_] matrix
  size_t n_rows_;
  size_t n_cols_;
  cudaStream_t stream_;
  rmm::device_uvector<math_t> colsums_;  // per-row L2 norms, saved so revert() can undo

 public:
  CosineMetricProcessor(size_t n_rows, size_t n_cols, int k, bool row_major, cudaStream_t stream)
    : stream_(stream),
      colsums_(n_rows, stream),
      n_cols_(n_cols),
      n_rows_(n_rows),
      row_major_(row_major),
      k_(k)
  {
  }

  // Divide each row by its L2 norm (norms are kept in colsums_ for revert()).
  void preprocess(math_t* data) override
  {
    raft::linalg::rowNorm(colsums_.data(),
                          data,
                          n_cols_,
                          n_rows_,
                          raft::linalg::NormType::L2Norm,
                          row_major_,
                          stream_,
                          raft::sqrt_op{});

    raft::linalg::matrixVectorOp(
      data, data, colsums_.data(), n_cols_, n_rows_, row_major_, false, raft::div_op{}, stream_);
  }

  // Multiply the saved norms back in, restoring the original data.
  void revert(math_t* data) override
  {
    raft::linalg::matrixVectorOp(
      data, data, colsums_.data(), n_cols_, n_rows_, row_major_, false, raft::mul_op{}, stream_);
  }

  // Convert inner-product similarities to cosine distances: d = 1 - sim.
  void postprocess(math_t* data) override
  {
    raft::linalg::unaryOp(
      data, data, k_ * n_rows_, [] __device__(math_t in) { return 1 - in; }, stream_);
  }

  void set_num_queries(int k) override { k_ = k; }

  ~CosineMetricProcessor() = default;
};
/**
 * @brief Metric processor for correlation distance.
 *
 * Correlation distance is cosine distance on mean-centered rows: `preprocess`
 * subtracts each row's mean and then delegates to the cosine processor's
 * L2-normalization; `revert` undoes both steps in reverse order. The virtual
 * methods now carry `override` to match the base interface.
 */
template <typename math_t>
class CorrelationMetricProcessor : public CosineMetricProcessor<math_t> {
  using cosine = CosineMetricProcessor<math_t>;

 public:
  CorrelationMetricProcessor(
    size_t n_rows, size_t n_cols, int k, bool row_major, cudaStream_t stream)
    : CosineMetricProcessor<math_t>(n_rows, n_cols, k, row_major, stream), means_(n_rows, stream)
  {
  }

  // Center each row (subtract the row mean), then L2-normalize via the base.
  void preprocess(math_t* data) override
  {
    math_t normalizer_const = 1.0 / (math_t)cosine::n_cols_;

    // Row sums -> means_ (scaled by 1/n_cols below).
    raft::linalg::reduce(means_.data(),
                         data,
                         cosine::n_cols_,
                         cosine::n_rows_,
                         (math_t)0.0,
                         cosine::row_major_,
                         true,
                         cosine::stream_);

    raft::linalg::unaryOp(means_.data(),
                          means_.data(),
                          cosine::n_rows_,
                          raft::mul_const_op<math_t>(normalizer_const),
                          cosine::stream_);

    raft::stats::meanCenter(data,
                            data,
                            means_.data(),
                            cosine::n_cols_,
                            cosine::n_rows_,
                            cosine::row_major_,
                            false,
                            cosine::stream_);

    CosineMetricProcessor<math_t>::preprocess(data);
  }

  // Undo normalization first, then add the saved row means back.
  void revert(math_t* data) override
  {
    CosineMetricProcessor<math_t>::revert(data);

    raft::stats::meanAdd(data,
                         data,
                         means_.data(),
                         cosine::n_cols_,
                         cosine::n_rows_,
                         cosine::row_major_,
                         false,
                         cosine::stream_);
  }

  void postprocess(math_t* data) override { CosineMetricProcessor<math_t>::postprocess(data); }

  ~CorrelationMetricProcessor() = default;

  // Per-row means saved by preprocess() so revert() can restore the data.
  rmm::device_uvector<math_t> means_;
};
/**
 * @brief No-op metric processor used for metrics that need no translation.
 *
 * All hooks are empty; the data and distances pass through untouched. The
 * methods are marked `override` to keep them in sync with the base interface.
 */
template <typename math_t>
class DefaultMetricProcessor : public MetricProcessor<math_t> {
 public:
  void preprocess(math_t* data) override {}
  void revert(math_t* data) override {}
  void postprocess(math_t* data) override {}

  ~DefaultMetricProcessor() = default;
};
/**
 * @brief Factory for the metric processor matching a distance type.
 *
 * Cosine/correlation metrics get processors that translate them to inner
 * product; every other metric gets a pass-through processor. Returning
 * `std::make_unique` directly removes the raw `new` / raw-pointer window of
 * the original (exception-safe, no leak if a constructor throws).
 *
 * @param metric        requested distance metric
 * @param n             number of rows in the data the processor will touch
 * @param D             number of columns
 * @param k             neighbors per query (used by postprocess; may be reset
 *                      later via set_num_queries)
 * @param rowMajorQuery layout of the query matrix
 * @param userStream    CUDA stream all processor work is enqueued on
 * @return owning pointer to the processor
 */
template <typename math_t>
inline std::unique_ptr<MetricProcessor<math_t>> create_processor(
  distance::DistanceType metric, int n, int D, int k, bool rowMajorQuery, cudaStream_t userStream)
{
  switch (metric) {
    case distance::DistanceType::CosineExpanded:
      return std::make_unique<CosineMetricProcessor<math_t>>(n, D, k, rowMajorQuery, userStream);
    case distance::DistanceType::CorrelationExpanded:
      return std::make_unique<CorrelationMetricProcessor<math_t>>(
        n, D, k, rowMajorQuery, userStream);
    default: return std::make_unique<DefaultMetricProcessor<math_t>>();
  }
}
// Explicit instantiations for the only element type currently used (float);
// keeps the heavy template bodies out of downstream translation units.
template class MetricProcessor<float>;
template class CosineMetricProcessor<float>;
template class CorrelationMetricProcessor<float>;
template class DefaultMetricProcessor<float>;

}  // namespace knn
}  // namespace spatial
}  // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ball_cover/registers-ext.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../../ball_cover_types.hpp" // BallCoverIndex
#include "registers_types.cuh" // DistFunc
#include <cstdint> // uint32_t
#include <raft/util/raft_explicit.hpp> //RAFT_EXPLICIT
#if defined(RAFT_EXPLICIT_INSTANTIATE_ONLY)
namespace raft::spatial::knn::detail {
// Declaration-only prototypes (RAFT_EXPLICIT): definitions live in
// registers-inl.cuh and are compiled into libraft; the extern-template
// macros below pin the supported instantiations.

// Pass one of the low-dimensional (2D/3D) random ball cover kNN: computes a
// first candidate set of k neighbors per query from the closest landmarks.
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int dims           = 2,
          typename dist_func>
void rbc_low_dim_pass_one(raft::resources const& handle,
                          const BallCoverIndex<value_idx, value_t, value_int>& index,
                          const value_t* query,
                          const value_int n_query_rows,
                          value_int k,
                          const value_idx* R_knn_inds,
                          const value_t* R_knn_dists,
                          dist_func& dfunc,
                          value_idx* inds,
                          value_t* dists,
                          float weight,
                          value_int* dists_counter)
  RAFT_EXPLICIT;

// Pass two: post-filtering stage that re-examines landmarks which might still
// hold closer neighbors and refines the pass-one results in place.
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int dims           = 2,
          typename dist_func>
void rbc_low_dim_pass_two(raft::resources const& handle,
                          const BallCoverIndex<value_idx, value_t, value_int>& index,
                          const value_t* query,
                          const value_int n_query_rows,
                          value_int k,
                          const value_idx* R_knn_inds,
                          const value_t* R_knn_dists,
                          dist_func& dfunc,
                          value_idx* inds,
                          value_t* dists,
                          float weight,
                          value_int* post_dists_counter)
  RAFT_EXPLICIT;
}; // namespace raft::spatial::knn::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
// Declares an `extern template` for one instantiation of rbc_low_dim_pass_one
// so downstream TUs link against the precompiled kernel instead of
// recompiling it.
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(                            \
  Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func)                                       \
  extern template void                                                                       \
  raft::spatial::knn::detail::rbc_low_dim_pass_one<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
    raft::resources const& handle,                                                           \
    const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index,                           \
    const Mvalue_t* query,                                                                   \
    const Mvalue_int n_query_rows,                                                           \
    Mvalue_int k,                                                                            \
    const Mvalue_idx* R_knn_inds,                                                            \
    const Mvalue_t* R_knn_dists,                                                             \
    Mdist_func<Mvalue_t, Mvalue_int>& dfunc,                                                 \
    Mvalue_idx* inds,                                                                        \
    Mvalue_t* dists,                                                                         \
    float weight,                                                                            \
    Mvalue_int* dists_counter)

// Same for rbc_low_dim_pass_two.
#define instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(                            \
  Mvalue_idx, Mvalue_t, Mvalue_int, Mdims, Mdist_func)                                       \
  extern template void                                                                       \
  raft::spatial::knn::detail::rbc_low_dim_pass_two<Mvalue_idx, Mvalue_t, Mvalue_int, Mdims>( \
    raft::resources const& handle,                                                           \
    const BallCoverIndex<Mvalue_idx, Mvalue_t, Mvalue_int>& index,                           \
    const Mvalue_t* query,                                                                   \
    const Mvalue_int n_query_rows,                                                           \
    Mvalue_int k,                                                                            \
    const Mvalue_idx* R_knn_inds,                                                            \
    const Mvalue_t* R_knn_dists,                                                             \
    Mdist_func<Mvalue_t, Mvalue_int>& dfunc,                                                 \
    Mvalue_idx* inds,                                                                        \
    Mvalue_t* dists,                                                                         \
    float weight,                                                                            \
    Mvalue_int* dists_counter)

// Supported combinations: int64 indices, float data, 2 or 3 dimensions, and
// the three distance functors (haversine, euclidean, generic).
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 2, raft::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 3, raft::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 2, raft::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 3, raft::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 2, raft::spatial::knn::detail::DistFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one(
  std::int64_t, float, std::uint32_t, 3, raft::spatial::knn::detail::DistFunc);

instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 2, raft::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 3, raft::spatial::knn::detail::HaversineFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 2, raft::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 3, raft::spatial::knn::detail::EuclideanFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 2, raft::spatial::knn::detail::DistFunc);
instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two(
  std::int64_t, float, std::uint32_t, 3, raft::spatial::knn::detail::DistFunc);

#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_two
#undef instantiate_raft_spatial_knn_detail_rbc_low_dim_pass_one
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ball_cover/registers.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once

// Header-only builds compile the full kernel definitions inline.
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "registers-inl.cuh"
#endif

// Precompiled (libraft) builds link against the extern template
// instantiations declared here instead.
#ifdef RAFT_COMPILED
#include "registers-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ball_cover/registers-inl.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "common.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include "../../ball_cover_types.hpp"
#include "../haversine_distance.cuh"
#include "registers_types.cuh" // DistFunc
#include <cstdint>
#include <limits.h>
#include <raft/neighbors/detail/faiss_select/key_value_block_select.cuh>
#include <raft/util/cuda_utils.cuh>
#include <thrust/fill.h>
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
/**
 * To find exact neighbors, we perform a post-processing stage
 * that filters out those points which might have neighbors outside
 * of their k closest landmarks. This is usually a very small portion
 * of the total points.
 *
 * One block per query point; the per-query landmark bitset is built in
 * dynamic shared memory (`bitset_size` words, passed at launch) and then
 * written out to `output`.
 *
 * @tparam value_idx index type
 * @tparam value_t   data/distance type
 * @tparam value_int size/count type
 * @tparam col_q     register-array capacity for one query row (must be
 *                   >= n_cols; intended for 2D/3D data)
 * @tparam tpb       threads per block
 * @param X            query points, row-major [n_queries, n_cols]
 * @param n_cols       number of columns (dimensions)
 * @param R_knn_inds   [n_queries, k] indices of each query's closest landmarks
 * @param R_knn_dists  [n_queries, k] distances to those landmarks
 * @param R_radius     radius of each landmark's ball
 * @param landmarks    landmark coordinates, row-major [n_landmarks, n_cols]
 * @param n_landmarks  number of landmarks
 * @param bitset_size  words (uint32) per query in the output bitset
 * @param k            neighbors per query
 * @param dfunc        distance functor
 * @param output       [n_queries, bitset_size] bitset; a set bit means the
 *                     landmark survived filtering (candidate for pass two)
 * @param weight       approximation knob; 1.0 keeps the exact pruning bound
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int col_q          = 2,
          int tpb            = 32,
          typename distance_func>
RAFT_KERNEL perform_post_filter_registers(const value_t* X,
                                          value_int n_cols,
                                          const value_idx* R_knn_inds,
                                          const value_t* R_knn_dists,
                                          const value_t* R_radius,
                                          const value_t* landmarks,
                                          int n_landmarks,
                                          value_int bitset_size,
                                          value_int k,
                                          distance_func dfunc,
                                          std::uint32_t* output,
                                          float weight = 1.0)
{
  // allocate array of size n_landmarks / 32 ints (dynamic shared memory,
  // sized by the launch configuration)
  extern __shared__ std::uint32_t shared_mem[];

  // Start with all bits on
  for (value_int i = threadIdx.x; i < bitset_size; i += tpb) {
    shared_mem[i] = 0xffffffff;
  }

  __syncthreads();

  // TODO: Would it be faster to use L1 for this?
  // Copy this block's query row into registers (col_q must cover n_cols).
  value_t local_x_ptr[col_q];
  for (value_int j = 0; j < n_cols; ++j) {
    local_x_ptr[j] = X[n_cols * blockIdx.x + j];
  }

  // Distance to the k-th (furthest of the k closest) landmark.
  value_t closest_R_dist = R_knn_dists[blockIdx.x * k + (k - 1)];

  // zero out bits for closest k landmarks (already handled in pass one)
  for (value_int j = threadIdx.x; j < k; j += tpb) {
    _zero_bit(shared_mem, (std::uint32_t)R_knn_inds[blockIdx.x * k + j]);
  }

  __syncthreads();

  // Discard any landmarks where p(q, r) > p(q, r_q) + radius(r)
  // That is, the distance between the current point and the current
  // landmark is > the distance between the current point and
  // its closest landmark + the radius of the current landmark.
  for (value_int l = threadIdx.x; l < n_landmarks; l += tpb) {
    // compute p(q, r)
    value_t dist = dfunc(local_x_ptr, landmarks + (n_cols * l), n_cols);
    if (dist > weight * (closest_R_dist + R_radius[l]) || dist > 3 * closest_R_dist) {
      _zero_bit(shared_mem, l);
    }
  }

  __syncthreads();

  /**
   * Output bitset
   */
  for (value_int l = threadIdx.x; l < bitset_size; l += tpb) {
    output[blockIdx.x * bitset_size + l] = shared_mem[l];
  }
}
/**
 * Second-pass kernel: for each query (one block per query), re-seeds a
 * block-wide k-select heap with the pass-one results and then scans the
 * members of every landmark that survived the post-filter bitset, adding any
 * point whose triangle-inequality lower bound could beat the current k-th
 * distance. Final top-k is written back over `knn_inds`/`knn_dists`.
 *
 * @tparam value_idx   index type
 * @tparam value_t     data/distance type
 * @tparam value_int   size/count type
 * @tparam bitset_type word type of the landmark bitset
 * @tparam warp_q      number of registers to use per warp
 * @tparam thread_q    number of registers to use within each thread
 * @tparam tpb         number of threads per block
 * @tparam col_q       register-array capacity per point (must be >= n_cols)
 * @param X_index      indexed (database) points, row-major
 * @param X            query points, row-major
 * @param n_cols       number of columns (dimensions)
 * @param bitset       [n_queries, bitset_size] surviving-landmark bitset
 * @param bitset_size  words per query in the bitset
 * @param R_closest_landmark_dists  distance from each indexed point to its
 *                     closest landmark
 * @param R_indptr     CSR offsets: points belonging to each landmark
 * @param R_1nn_inds   CSR column indices (point ids per landmark)
 * @param R_1nn_dists  distance of each such point to its landmark
 * @param knn_inds     in/out [n_queries, k] neighbor indices
 * @param knn_dists    in/out [n_queries, k] neighbor distances
 * @param n_landmarks  number of landmarks
 * @param k            neighbors per query
 * @param dfunc        distance functor
 * @param dist_counter instrumentation counter (not updated in this kernel)
 */
template <typename value_idx,
          typename value_t,
          typename value_int   = std::uint32_t,
          typename bitset_type = std::uint32_t,
          typename dist_func,
          int warp_q   = 32,
          int thread_q = 2,
          int tpb      = 128,
          int col_q    = 2>
RAFT_KERNEL compute_final_dists_registers(const value_t* X_index,
                                          const value_t* X,
                                          const value_int n_cols,
                                          bitset_type* bitset,
                                          value_int bitset_size,
                                          const value_t* R_closest_landmark_dists,
                                          const value_idx* R_indptr,
                                          const value_idx* R_1nn_inds,
                                          const value_t* R_1nn_dists,
                                          value_idx* knn_inds,
                                          value_t* knn_dists,
                                          value_int n_landmarks,
                                          value_int k,
                                          dist_func dfunc,
                                          value_int* dist_counter)
{
  static constexpr int kNumWarps = tpb / WarpSize;

  __shared__ value_t shared_memK[kNumWarps * warp_q];
  __shared__ KeyValuePair<value_t, value_idx> shared_memV[kNumWarps * warp_q];

  // Cache this block's query row in registers.
  const value_t* x_ptr = X + (n_cols * blockIdx.x);
  value_t local_x_ptr[col_q];
  for (value_int j = 0; j < n_cols; ++j) {
    local_x_ptr[j] = x_ptr[j];
  }

  using namespace raft::neighbors::detail::faiss_select;
  KeyValueBlockSelect<value_t, value_idx, false, Comparator<value_t>, warp_q, thread_q, tpb> heap(
    std::numeric_limits<value_t>::max(),
    std::numeric_limits<value_t>::max(),
    -1,
    shared_memK,
    shared_memV,
    k);

  // Seed the heap with the pass-one top-k (bulk adds for the warp-aligned
  // prefix, per-thread adds for the remainder).
  const value_int n_k = Pow2<WarpSize>::roundDown(k);
  value_int i         = threadIdx.x;
  for (; i < n_k; i += tpb) {
    value_idx ind = knn_inds[blockIdx.x * k + i];
    heap.add(knn_dists[blockIdx.x * k + i], R_closest_landmark_dists[ind], ind);
  }

  if (i < k) {
    value_idx ind = knn_inds[blockIdx.x * k + i];
    heap.addThreadQ(knn_dists[blockIdx.x * k + i], R_closest_landmark_dists[ind], ind);
  }

  heap.checkThreadQ();

  for (value_int cur_R_ind = 0; cur_R_ind < n_landmarks; ++cur_R_ind) {
    // if cur R overlaps cur point's closest R, it could be a
    // candidate
    if (_get_val(bitset + (blockIdx.x * bitset_size), cur_R_ind)) {
      value_idx R_start_offset = R_indptr[cur_R_ind];
      value_idx R_stop_offset  = R_indptr[cur_R_ind + 1];
      value_idx R_size         = R_stop_offset - R_start_offset;

      // Loop through R's neighborhood in parallel

      // Round R_size to the nearest warp threads so they can
      // all be computing in parallel.
      const value_int limit = Pow2<WarpSize>::roundDown(R_size);

      i = threadIdx.x;
      for (; i < limit; i += tpb) {
        value_idx cur_candidate_ind = R_1nn_inds[R_start_offset + i];
        value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];

        // Triangle-inequality lower bound on d(query, candidate); 0 when the
        // bound is degenerate (division by zero / nan / inf).
        value_t z = heap.warpKTopRDist == 0.00 ? 0.0
                                               : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                    abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                  heap.warpKTop * cur_candidate_dist) /
                                                   heap.warpKTopRDist;
        z = isnan(z) || isinf(z) ? 0.0 : z;

        // If lower bound on distance could possibly be in
        // the closest k neighbors, compute it and add to k-select
        value_t dist = std::numeric_limits<value_t>::max();
        if (z <= heap.warpKTop) {
          const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
          value_t local_y_ptr[col_q];
          for (value_int j = 0; j < n_cols; ++j) {
            local_y_ptr[j] = y_ptr[j];
          }
          dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        }
        heap.add(dist, cur_candidate_dist, cur_candidate_ind);
      }

      // second round guarantees to be only a single warp.
      if (i < R_size) {
        value_idx cur_candidate_ind = R_1nn_inds[R_start_offset + i];
        value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];

        value_t z = heap.warpKTopRDist == 0.00 ? 0.0
                                               : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                    abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                  heap.warpKTop * cur_candidate_dist) /
                                                   heap.warpKTopRDist;
        z = isnan(z) || isinf(z) ? 0.0 : z;

        // If lower bound on distance could possibly be in
        // the closest k neighbors, compute it and add to k-select
        value_t dist = std::numeric_limits<value_t>::max();
        if (z <= heap.warpKTop) {
          const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
          value_t local_y_ptr[col_q];
          for (value_int j = 0; j < n_cols; ++j) {
            local_y_ptr[j] = y_ptr[j];
          }
          dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        }
        heap.addThreadQ(dist, cur_candidate_dist, cur_candidate_ind);
      }
      heap.checkThreadQ();
    }
  }

  heap.reduce();

  // Write the refined top-k back over the pass-one results.
  for (value_int i = threadIdx.x; i < k; i += tpb) {
    knn_dists[blockIdx.x * k + i] = shared_memK[i];
    knn_inds[blockIdx.x * k + i]  = shared_memV[i].value;
  }
}
/**
 * Random ball cover kernel for low-dimensional data (n_cols must fit in the
 * col_q register array, i.e. 2 or 3 dims). One block per query: walks the
 * query's k closest landmarks (closest first), prunes landmarks via Cayton's
 * bounds, and maintains the running top-k in a block-wide k-select heap.
 *
 * @tparam value_idx  index type
 * @tparam value_t    data/distance type
 * @tparam warp_q     number of registers to use per warp
 * @tparam thread_q   number of registers to use within each thread
 * @tparam tpb        threads per block
 * @tparam col_q      register-array capacity per point (>= n_cols)
 * @tparam value_int  size/count type
 * @param X_index     indexed (database) points, row-major
 * @param X           query points, row-major
 * @param n_cols      number of columns (2 or 3)
 * @param R_knn_inds  [n_queries, k] each query's closest landmark indices
 * @param R_knn_dists [n_queries, k] distances to those landmarks
 * @param m           number of indexed points (unused here)
 * @param k           neighbors per query
 * @param R_indptr    CSR offsets: points belonging to each landmark
 * @param R_1nn_cols  CSR column indices (point ids per landmark)
 * @param R_1nn_dists distance of each such point to its landmark
 * @param out_inds    output [n_queries, k] neighbor indices
 * @param out_dists   output [n_queries, k] neighbor distances
 * @param dist_counter instrumentation counter (not written by this kernel)
 * @param R_radius    radius of each landmark's ball
 * @param dfunc       distance functor
 * @param weight      approximation knob; 1.0 keeps the exact pruning bound
 */
template <typename value_idx = std::int64_t,
          typename value_t,
          int warp_q         = 32,
          int thread_q       = 2,
          int tpb            = 128,
          int col_q          = 2,
          typename value_int = std::uint32_t,
          typename distance_func>
RAFT_KERNEL block_rbc_kernel_registers(const value_t* X_index,
                                       const value_t* X,
                                       value_int n_cols,  // n_cols should be 2 or 3 dims
                                       const value_idx* R_knn_inds,
                                       const value_t* R_knn_dists,
                                       value_int m,
                                       value_int k,
                                       const value_idx* R_indptr,
                                       const value_idx* R_1nn_cols,
                                       const value_t* R_1nn_dists,
                                       value_idx* out_inds,
                                       value_t* out_dists,
                                       value_int* dist_counter,
                                       const value_t* R_radius,
                                       distance_func dfunc,
                                       float weight = 1.0)
{
  static constexpr value_int kNumWarps = tpb / WarpSize;

  __shared__ value_t shared_memK[kNumWarps * warp_q];
  __shared__ KeyValuePair<value_t, value_idx> shared_memV[kNumWarps * warp_q];

  // TODO: Separate kernels for different widths:
  // 1. Very small (between 3 and 32) just use registers for columns of "blockIdx.x"
  // 2. Can fit comfortably in shared memory (32 to a few thousand?)
  // 3. Load each time individually.
  const value_t* x_ptr = X + (n_cols * blockIdx.x);

  // Use registers only for 2d or 3d
  value_t local_x_ptr[col_q];
  for (value_int i = 0; i < n_cols; ++i) {
    local_x_ptr[i] = x_ptr[i];
  }

  // Each warp works on 1 R
  using namespace raft::neighbors::detail::faiss_select;
  KeyValueBlockSelect<value_t, value_idx, false, Comparator<value_t>, warp_q, thread_q, tpb> heap(
    std::numeric_limits<value_t>::max(),
    std::numeric_limits<value_t>::max(),
    -1,
    shared_memK,
    shared_memV,
    k);

  // Distance to the query's k-th closest landmark (used for pruning bounds).
  value_t min_R_dist         = R_knn_dists[blockIdx.x * k + (k - 1)];
  value_int n_dists_computed = 0;

  /**
   * First add distances for k closest neighbors of R
   * to the heap
   */

  // Start iterating through elements of each set from closest R elements,
  // determining if the distance could even potentially be in the heap.
  for (value_int cur_k = 0; cur_k < k; ++cur_k) {
    // index and distance to current blockIdx.x's closest landmark
    value_t cur_R_dist  = R_knn_dists[blockIdx.x * k + cur_k];
    value_idx cur_R_ind = R_knn_inds[blockIdx.x * k + cur_k];

    // Equation (2) in Cayton's paper- prune out R's which are > 3 * p(q, r_q)
    if (cur_R_dist > weight * (min_R_dist + R_radius[cur_R_ind])) continue;
    // Landmarks are visited closest-first, so once this bound trips every
    // remaining landmark is also prunable: stop entirely.
    if (cur_R_dist > 3 * min_R_dist) return;

    // The whole warp should iterate through the elements in the current R
    value_idx R_start_offset = R_indptr[cur_R_ind];
    value_idx R_stop_offset  = R_indptr[cur_R_ind + 1];

    value_idx R_size = R_stop_offset - R_start_offset;

    value_int limit = Pow2<WarpSize>::roundDown(R_size);
    value_int i     = threadIdx.x;
    for (; i < limit; i += tpb) {
      // Index and distance of current candidate's nearest landmark
      value_idx cur_candidate_ind = R_1nn_cols[R_start_offset + i];
      value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];

      // Take 2 landmarks l_1 and l_2 where l_1 is the furthest point in the heap
      // and l_2 is the current landmark R. s is the current data point and
      // t is the new candidate data point. We know that:
      // d(s, t) cannot possibly be any smaller than | d(s, l_1) - d(l_1, l_2) | * | d(l_1, l_2) -
      // d(l_2, t) | - d(s, l_1) * d(l_2, t)

      // Therefore, if d(s, t) >= d(s, l_1) from the computation above, we know that the distance to
      // the candidate point cannot possibly be in the nearest neighbors. However, if d(s, t) < d(s,
      // l_1) then we should compute the distance because it's possible it could be smaller.
      //
      value_t z = heap.warpKTopRDist == 0.00 ? 0.0
                                             : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                  abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                heap.warpKTop * cur_candidate_dist) /
                                                 heap.warpKTopRDist;

      z            = isnan(z) || isinf(z) ? 0.0 : z;
      value_t dist = std::numeric_limits<value_t>::max();
      if (z <= heap.warpKTop) {
        const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
        value_t local_y_ptr[col_q];
        for (value_int j = 0; j < n_cols; ++j) {
          local_y_ptr[j] = y_ptr[j];
        }
        dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        ++n_dists_computed;
      }

      heap.add(dist, cur_candidate_dist, cur_candidate_ind);
    }

    // Tail: at most one warp's worth of remaining candidates.
    if (i < R_size) {
      value_idx cur_candidate_ind = R_1nn_cols[R_start_offset + i];
      value_t cur_candidate_dist  = R_1nn_dists[R_start_offset + i];
      value_t z                   = heap.warpKTopRDist == 0.0 ? 0.0
                                                              : (abs(heap.warpKTop - heap.warpKTopRDist) *
                                                                   abs(heap.warpKTopRDist - cur_candidate_dist) -
                                                                 heap.warpKTop * cur_candidate_dist) /
                                                                  heap.warpKTopRDist;

      z            = isnan(z) || isinf(z) ? 0.0 : z;
      value_t dist = std::numeric_limits<value_t>::max();
      if (z <= heap.warpKTop) {
        const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind);
        value_t local_y_ptr[col_q];
        for (value_int j = 0; j < n_cols; ++j) {
          local_y_ptr[j] = y_ptr[j];
        }
        dist = dfunc(local_x_ptr, local_y_ptr, n_cols);
        ++n_dists_computed;
      }

      heap.addThreadQ(dist, cur_candidate_dist, cur_candidate_ind);
    }

    heap.checkThreadQ();
  }

  heap.reduce();

  for (int i = threadIdx.x; i < k; i += tpb) {
    out_dists[blockIdx.x * k + i] = shared_memK[i];
    out_inds[blockIdx.x * k + i]  = shared_memV[i].value;
  }
}
/**
 * @brief Pass one of the low-dimensional random ball cover kNN.
 *
 * Launches block_rbc_kernel_registers with warp_q/thread_q/tpb tuned per k
 * bucket (one block per query row). Values of k above 1024 are silently
 * ignored, matching the original behavior.
 *
 * Bug fix: the k <= 64 branch previously hard-coded the col_q template
 * argument to 2 instead of `dims`, unlike every other branch. For dims == 3
 * that made the kernel's per-point register arrays one element too small
 * while n_cols == 3, overrunning them for 32 < k <= 64.
 *
 * @param handle        raft resources (provides the CUDA stream)
 * @param index         built ball cover index
 * @param query         device pointer to query rows, row-major [n_query_rows, index.n]
 * @param n_query_rows  number of query rows
 * @param k             neighbors per query (<= 1024)
 * @param R_knn_inds    [n_query_rows, k] closest-landmark indices per query
 * @param R_knn_dists   [n_query_rows, k] distances to those landmarks
 * @param dfunc         distance functor
 * @param inds          output [n_query_rows, k] neighbor indices
 * @param dists         output [n_query_rows, k] neighbor distances
 * @param weight        approximation knob; 1.0 keeps the exact pruning bound
 * @param dists_counter instrumentation counter passed through to the kernel
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int dims           = 2,
          typename dist_func>
void rbc_low_dim_pass_one(raft::resources const& handle,
                          const BallCoverIndex<value_idx, value_t, value_int>& index,
                          const value_t* query,
                          const value_int n_query_rows,
                          value_int k,
                          const value_idx* R_knn_inds,
                          const value_t* R_knn_dists,
                          dist_func& dfunc,
                          value_idx* inds,
                          value_t* dists,
                          float weight,
                          value_int* dists_counter)
{
  if (k <= 32)
    block_rbc_kernel_registers<value_idx, value_t, 32, 2, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 64)
    // col_q fixed: was hard-coded `2`, now `dims` like every other branch.
    block_rbc_kernel_registers<value_idx, value_t, 64, 3, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 128)
    block_rbc_kernel_registers<value_idx, value_t, 128, 3, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 256)
    block_rbc_kernel_registers<value_idx, value_t, 256, 4, 128, dims, value_int>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 512)
    // Larger k buckets drop to 64 threads/block to stay within register and
    // shared-memory limits.
    block_rbc_kernel_registers<value_idx, value_t, 512, 8, 64, dims, value_int>
      <<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
  else if (k <= 1024)
    block_rbc_kernel_registers<value_idx, value_t, 1024, 8, 64, dims, value_int>
      <<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        R_knn_inds,
        R_knn_dists,
        index.m,
        k,
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        dists_counter,
        index.get_R_radius().data_handle(),
        dfunc,
        weight);
}
/**
 * @brief Second pass of the low-dimensional random ball cover (RBC) search.
 *
 * First builds, per query row, a packed bitset marking which landmarks may
 * still contain a neighbor closer than the current k-th distance
 * (perform_post_filter_registers), then recomputes/fixes up the final
 * distances against the surviving landmarks' points
 * (compute_final_dists_registers). The k-dispatch chain selects compile-time
 * warp-select parameters (warp_q, thread_q, threads-per-block).
 *
 * @param handle             raft resources (stream, thrust policy)
 * @param index              ball cover index (landmarks, 1-nn postings, radii)
 * @param query              device array of query points (n_query_rows x index.n)
 * @param n_query_rows       number of query rows
 * @param k                  number of neighbors to return (must be <= 1024)
 * @param R_knn_inds         indices of each query's closest landmarks
 * @param R_knn_dists        distances to each query's closest landmarks
 * @param dfunc              distance functor
 * @param inds               [inout] output neighbor indices
 * @param dists              [inout] output neighbor distances
 * @param weight             triangle-inequality relaxation weight
 * @param post_dists_counter [out] counter of distances computed in this pass
 */
template <typename value_idx,
          typename value_t,
          typename value_int = std::uint32_t,
          int dims           = 2,
          typename dist_func>
void rbc_low_dim_pass_two(raft::resources const& handle,
                          const BallCoverIndex<value_idx, value_t, value_int>& index,
                          const value_t* query,
                          const value_int n_query_rows,
                          value_int k,
                          const value_idx* R_knn_inds,
                          const value_t* R_knn_dists,
                          dist_func& dfunc,
                          value_idx* inds,
                          value_t* dists,
                          float weight,
                          value_int* post_dists_counter)
{
  // One bit per landmark, packed into 32-bit words. Integer ceiling division
  // replaces the previous ceil(n_landmarks / 32.0), avoiding the
  // int -> double -> int round-trip.
  const value_int bitset_size = (index.n_landmarks + 31) / 32;

  rmm::device_uvector<std::uint32_t> bitset(bitset_size * n_query_rows,
                                            resource::get_cuda_stream(handle));
  thrust::fill(
    resource::get_thrust_policy(handle), bitset.data(), bitset.data() + bitset.size(), 0);

  // Mark, per query row, the landmarks whose ball can still hold a closer
  // neighbor than the current k-th distance (shared memory holds one bitset).
  perform_post_filter_registers<value_idx, value_t, value_int, dims, 128>
    <<<n_query_rows, 128, bitset_size * sizeof(std::uint32_t), resource::get_cuda_stream(handle)>>>(
      query,
      index.n,
      R_knn_inds,
      R_knn_dists,
      index.get_R_radius().data_handle(),
      index.get_R().data_handle(),
      index.n_landmarks,
      bitset_size,
      k,
      dfunc,
      bitset.data(),
      weight);

  // Dispatch on k to choose compile-time warp-select parameters.
  // NOTE(review): k > 1024 silently launches nothing here; presumably
  // validated upstream -- confirm at call sites.
  if (k <= 32)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  32,
                                  2,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 64)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  64,
                                  3,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 128)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  128,
                                  3,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 256)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  256,
                                  4,
                                  128,
                                  dims>
      <<<n_query_rows, 128, 0, resource::get_cuda_stream(handle)>>>(
        index.get_X().data_handle(),
        query,
        index.n,
        bitset.data(),
        bitset_size,
        index.get_R_closest_landmark_dists().data_handle(),
        index.get_R_indptr().data_handle(),
        index.get_R_1nn_cols().data_handle(),
        index.get_R_1nn_dists().data_handle(),
        inds,
        dists,
        index.n_landmarks,
        k,
        dfunc,
        post_dists_counter);
  else if (k <= 512)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  512,
                                  8,
                                  64,
                                  dims><<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
      index.get_X().data_handle(),
      query,
      index.n,
      bitset.data(),
      bitset_size,
      index.get_R_closest_landmark_dists().data_handle(),
      index.get_R_indptr().data_handle(),
      index.get_R_1nn_cols().data_handle(),
      index.get_R_1nn_dists().data_handle(),
      inds,
      dists,
      index.n_landmarks,
      k,
      dfunc,
      post_dists_counter);
  else if (k <= 1024)
    compute_final_dists_registers<value_idx,
                                  value_t,
                                  value_int,
                                  std::uint32_t,
                                  dist_func,
                                  1024,
                                  8,
                                  64,
                                  dims><<<n_query_rows, 64, 0, resource::get_cuda_stream(handle)>>>(
      index.get_X().data_handle(),
      query,
      index.n,
      bitset.data(),
      bitset_size,
      index.get_R_closest_landmark_dists().data_handle(),
      index.get_R_indptr().data_handle(),
      index.get_R_1nn_cols().data_handle(),
      index.get_R_1nn_dists().data_handle(),
      inds,
      dists,
      index.n_landmarks,
      k,
      dfunc,
      post_dists_counter);
}
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ball_cover/common.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../haversine_distance.cuh"
#include "registers_types.cuh"
#include <cstdint>
#include <thrust/functional.h>
#include <thrust/tuple.h>
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
// Comparator for (landmark, distance, ...) tuples used when sorting the
// 1-nn postings: primary key is the sample's reference landmark, secondary
// key is the distance to its closest neighbor.
struct NNComp {
  template <typename one, typename two>
  __host__ __device__ bool operator()(const one& t1, const two& t2)
  {
    const auto lhs_landmark = thrust::get<0>(t1);
    const auto rhs_landmark = thrust::get<0>(t2);

    // Primary ordering: the reference landmark of each sample.
    if (lhs_landmark < rhs_landmark) { return true; }
    if (lhs_landmark > rhs_landmark) { return false; }

    // Tie-break: distance to the closest neighbor.
    return thrust::get<1>(t1) < thrust::get<1>(t2);
  }
};
/**
* Zeros the bit at location h in a one-hot encoded 32-bit int array
*/
/**
 * Atomically zeros the bit at location h in a one-hot encoded 32-bit int
 * array. Uses a CAS loop so concurrent updates to other bits of the same
 * word are not lost.
 *
 * @param arr packed bitset (32 bits per word)
 * @param h   global bit index to clear
 */
__device__ inline void _zero_bit(std::uint32_t* arr, std::uint32_t h)
{
  int bit = h % 32;
  int idx = h / 32;

  // Build the mask on an unsigned type: `~(1 << 31)` would shift into the
  // sign bit of a signed int.
  const std::uint32_t mask = ~(std::uint32_t{1} << bit);

  std::uint32_t assumed;
  std::uint32_t old = arr[idx];
  do {
    assumed = old;
    old     = atomicCAS(arr + idx, assumed, assumed & mask);
  } while (assumed != old);
}
/**
 * Returns whether or not the bit at location h is nonzero in a one-hot
 * encoded 32-bit int array.
 */
/**
 * Returns whether or not the bit at location h is nonzero in a one-hot
 * encoded 32-bit int array.
 *
 * @param arr packed bitset (32 bits per word)
 * @param h   global bit index to test
 */
__device__ inline bool _get_val(std::uint32_t* arr, std::uint32_t h)
{
  int bit = h % 32;
  int idx = h / 32;
  // Unsigned shift: `1 << 31` on a signed int would form the sign bit.
  return (arr[idx] & (std::uint32_t{1} << bit)) != 0;
}
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/detail/ball_cover/registers_types.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../haversine_distance.cuh" // compute_haversine
#include <cstdint> // uint32_t
namespace raft {
namespace spatial {
namespace knn {
namespace detail {
// Base distance functor for the ball cover primitives; concrete metrics
// (haversine, euclidean) override operator().
// NOTE(review): the base implementation returns -1, i.e. invoking it
// directly yields a sentinel rather than a real distance -- presumably an
// "unset metric" marker; confirm callers never rely on the base.
// NOTE(review): combining `virtual` with `__forceinline__` is contradictory
// (a dynamically-dispatched call cannot be force-inlined); left as-is to
// preserve the existing interface.
template <typename value_t, typename value_int = std::uint32_t>
struct DistFunc {
  virtual __device__ __host__ __forceinline__ value_t operator()(const value_t* a,
                                                                 const value_t* b,
                                                                 const value_int n_dims)
  {
    return -1;
  };
};
// Haversine (great-circle) distance functor. Reads only the first two
// coordinates of each point (a[0], a[1], b[0], b[1]); n_dims is ignored,
// so inputs are implicitly assumed to be 2-D.
template <typename value_t, typename value_int = std::uint32_t>
struct HaversineFunc : public DistFunc<value_t, value_int> {
  __device__ __host__ __forceinline__ value_t operator()(const value_t* a,
                                                         const value_t* b,
                                                         const value_int n_dims) override
  {
    return raft::spatial::knn::detail::compute_haversine(a[0], b[0], a[1], b[1]);
  }
};
// Euclidean (L2) distance functor: sqrt of the sum of squared coordinate
// differences over all n_dims dimensions.
template <typename value_t, typename value_int = std::uint32_t>
struct EuclideanFunc : public DistFunc<value_t, value_int> {
  __device__ __host__ __forceinline__ value_t operator()(const value_t* a,
                                                         const value_t* b,
                                                         const value_int n_dims) override
  {
    // Accumulate squared differences, then take a single square root.
    value_t acc = 0;
    for (value_int d = 0; d < n_dims; ++d) {
      const value_t delta = a[d] - b[d];
      acc += delta * delta;
    }
    return raft::sqrt(acc);
  }
};
}; // namespace detail
}; // namespace knn
}; // namespace spatial
}; // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/spatial/knn | rapidsai_public_repos/raft/cpp/include/raft/spatial/knn/specializations/knn.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/cluster_solvers.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CLUSTER_SOLVERS_H
#define __CLUSTER_SOLVERS_H
#pragma once
#include <raft/cluster/kmeans.cuh>
#include <raft/core/resource/thrust_policy.hpp>
#include <utility> // for std::pair
namespace raft {
namespace spectral {
using namespace matrix;
// aggregate of control params for the cluster (k-means) solver:
//
// Control parameters for the spectral clustering (k-means) step.
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct cluster_solver_config_t {
  size_type_t n_clusters;            // number of clusters to produce
  size_type_t maxIter;               // maximum number of k-means iterations
  value_type_t tol;                  // convergence tolerance
  unsigned long long seed{123456};   // RNG seed forwarded to k-means (rng_state.seed)
};
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct kmeans_solver_t {
explicit kmeans_solver_t(
cluster_solver_config_t<index_type_t, value_type_t, size_type_t> const& config)
: config_(config)
{
}
std::pair<value_type_t, index_type_t> solve(raft::resources const& handle,
size_type_t n_obs_vecs,
size_type_t dim,
value_type_t const* __restrict__ obs,
index_type_t* __restrict__ codes) const
{
RAFT_EXPECTS(obs != nullptr, "Null obs buffer.");
RAFT_EXPECTS(codes != nullptr, "Null codes buffer.");
value_type_t residual{};
index_type_t iters{};
raft::cluster::KMeansParams km_params;
km_params.n_clusters = config_.n_clusters;
km_params.tol = config_.tol;
km_params.max_iter = config_.maxIter;
km_params.rng_state.seed = config_.seed;
auto X = raft::make_device_matrix_view<const value_type_t>(obs, n_obs_vecs, dim);
auto labels = raft::make_device_vector_view<index_type_t>(codes, n_obs_vecs);
auto centroids =
raft::make_device_matrix<value_type_t, index_type_t>(handle, config_.n_clusters, dim);
auto weight = raft::make_device_vector<value_type_t, index_type_t>(handle, n_obs_vecs);
thrust::fill(resource::get_thrust_policy(handle),
weight.data_handle(),
weight.data_handle() + n_obs_vecs,
1);
auto sw = std::make_optional((raft::device_vector_view<const value_type_t>)weight.view());
raft::cluster::kmeans_fit_predict<value_type_t, index_type_t>(
handle,
km_params,
X,
sw,
centroids.view(),
labels,
raft::make_host_scalar_view(&residual),
raft::make_host_scalar_view(&iters));
return std::make_pair(residual, iters);
}
auto const& get_config(void) const { return config_; }
private:
cluster_solver_config_t<index_type_t, value_type_t, size_type_t> config_;
};
} // namespace spectral
} // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/eigen_solvers.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __EIGEN_SOLVERS_H
#define __EIGEN_SOLVERS_H
#pragma once
#include <raft/sparse/solver/lanczos.cuh>
#include <raft/spectral/matrix_wrappers.hpp>
namespace raft {
namespace spectral {
// aggregate of control params for Eigen Solver:
//
// Control parameters for the Lanczos eigensolver.
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct eigen_solver_config_t {
  size_type_t n_eigVecs;         // number of eigenpairs to compute
  size_type_t maxIter;           // maximum Lanczos iterations
  size_type_t restartIter;       // iterations between implicit restarts
  value_type_t tol;              // convergence tolerance
  bool reorthogonalize{false};   // full reorthogonalization of Lanczos vectors
  unsigned long long seed{
    1234567};  // CAVEAT: this default value is now common to all instances of using seed in
               // Lanczos; was not the case before: there were places where a default seed = 123456
               // was used; this may trigger slightly different # solver iterations
};
/**
 * Lanczos eigensolver wrapper used by the spectral pipeline. Forwards the
 * stored configuration to the sparse Lanczos routines for the smallest or
 * largest eigenpairs of a sparse matrix.
 */
template <typename index_type_t, typename value_type_t, typename size_type_t = index_type_t>
struct lanczos_solver_t {
  explicit lanczos_solver_t(
    eigen_solver_config_t<index_type_t, value_type_t, size_type_t> const& config)
    : config_(config)
  {
  }

  /**
   * Compute the config_.n_eigVecs smallest eigenpairs of A.
   *
   * @param handle  raft resources
   * @param A       sparse input matrix
   * @param eigVals [out] device buffer of eigenvalues
   * @param eigVecs [out] device buffer of eigenvectors
   * @return number of solver iterations performed
   */
  index_type_t solve_smallest_eigenvectors(
    raft::resources const& handle,
    matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
    value_type_t* __restrict__ eigVals,
    value_type_t* __restrict__ eigVecs) const
  {
    RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
    RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");
    index_type_t n_iters{};
    sparse::solver::computeSmallestEigenvectors(handle,
                                                A,
                                                config_.n_eigVecs,
                                                config_.maxIter,
                                                config_.restartIter,
                                                config_.tol,
                                                config_.reorthogonalize,
                                                n_iters,
                                                eigVals,
                                                eigVecs,
                                                config_.seed);
    return n_iters;
  }

  /**
   * Compute the config_.n_eigVecs largest eigenpairs of A.
   *
   * @param handle  raft resources
   * @param A       sparse input matrix
   * @param eigVals [out] device buffer of eigenvalues
   * @param eigVecs [out] device buffer of eigenvectors
   * @return number of solver iterations performed
   */
  index_type_t solve_largest_eigenvectors(
    raft::resources const& handle,
    matrix::sparse_matrix_t<index_type_t, value_type_t> const& A,
    value_type_t* __restrict__ eigVals,
    value_type_t* __restrict__ eigVecs) const
  {
    RAFT_EXPECTS(eigVals != nullptr, "Null eigVals buffer.");
    RAFT_EXPECTS(eigVecs != nullptr, "Null eigVecs buffer.");
    index_type_t n_iters{};
    sparse::solver::computeLargestEigenvectors(handle,
                                               A,
                                               config_.n_eigVecs,
                                               config_.maxIter,
                                               config_.restartIter,
                                               config_.tol,
                                               config_.reorthogonalize,
                                               n_iters,
                                               eigVals,
                                               eigVecs,
                                               config_.seed);
    return n_iters;
  }

  auto const& get_config(void) const { return config_; }

 private:
  eigen_solver_config_t<index_type_t, value_type_t, size_type_t> config_;
};
} // namespace spectral
} // namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/matrix_wrappers.hpp | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/spectral/detail/matrix_wrappers.hpp>
// =========================================================
// Public aliases for the spectral matrix wrapper types
// =========================================================
namespace raft {
namespace spectral {
namespace matrix {
using size_type = int; // for now; TODO: move it in appropriate header
// specifies type of algorithm used
// for SpMv:
//
using detail::sparse_mv_alg_t;
// Vector "view"-like aggregate for linear algebra purposes
//
using detail::vector_view_t;
using detail::vector_t;
using detail::sparse_matrix_t;
using detail::laplacian_matrix_t;
using detail::modularity_matrix_t;
} // namespace matrix
} // namespace spectral
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/spectral/specializations.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.