repo_id
stringlengths
21
96
file_path
stringlengths
31
155
content
stringlengths
1
92.9M
__index_level_0__
int64
0
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/neighbors/cagra_serialize.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "detail/cagra/cagra_serialize.cuh" namespace raft::neighbors::cagra { /** * \defgroup cagra_serialize CAGRA Serialize * @{ */ /** * Write the index to an output stream * * Experimental, both the API and the serialization format are subject to change. * * @code{.cpp} * #include <raft/core/resources.hpp> * * raft::resources handle; * * // create an output stream * std::ostream os(std::cout.rdbuf()); * // create an index with `auto index = cagra::build(...);` * raft::serialize(handle, os, index); * @endcode * * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] handle the raft handle * @param[in] os output stream * @param[in] index CAGRA index * @param[in] include_dataset Whether or not to write out the dataset to the file. * */ template <typename T, typename IdxT> void serialize(raft::resources const& handle, std::ostream& os, const index<T, IdxT>& index, bool include_dataset = true) { detail::serialize(handle, os, index, include_dataset); } /** * Save the index to file. * * Experimental, both the API and the serialization format are subject to change. 
* * @code{.cpp} * #include <raft/core/resources.hpp> * * raft::resources handle; * * // create a string with a filepath * std::string filename("/path/to/index"); * // create an index with `auto index = cagra::build(...);` * raft::serialize(handle, filename, index); * @endcode * * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] handle the raft handle * @param[in] filename the file name for saving the index * @param[in] index CAGRA index * @param[in] include_dataset Whether or not to write out the dataset to the file. * */ template <typename T, typename IdxT> void serialize(raft::resources const& handle, const std::string& filename, const index<T, IdxT>& index, bool include_dataset = true) { detail::serialize(handle, filename, index, include_dataset); } /** * Write the CAGRA built index as a base layer HNSW index to an output stream * * Experimental, both the API and the serialization format are subject to change. * * @code{.cpp} * #include <raft/core/resources.hpp> * * raft::resources handle; * * // create an output stream * std::ostream os(std::cout.rdbuf()); * // create an index with `auto index = cagra::build(...);` * raft::serialize_to_hnswlib(handle, os, index); * @endcode * * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] handle the raft handle * @param[in] os output stream * @param[in] index CAGRA index * */ template <typename T, typename IdxT> void serialize_to_hnswlib(raft::resources const& handle, std::ostream& os, const index<T, IdxT>& index) { detail::serialize_to_hnswlib<T, IdxT>(handle, os, index); } /** * Write the CAGRA built index as a base layer HNSW index to file * * Experimental, both the API and the serialization format are subject to change. 
* * @code{.cpp} * #include <raft/core/resources.hpp> * * raft::resources handle; * * // create a string with a filepath * std::string filename("/path/to/index"); * // create an index with `auto index = cagra::build(...);` * raft::serialize_to_hnswlib(handle, filename, index); * @endcode * * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] handle the raft handle * @param[in] filename the file name for saving the index * @param[in] index CAGRA index * */ template <typename T, typename IdxT> void serialize_to_hnswlib(raft::resources const& handle, const std::string& filename, const index<T, IdxT>& index) { detail::serialize_to_hnswlib<T, IdxT>(handle, filename, index); } /** * Load index from input stream * * Experimental, both the API and the serialization format are subject to change. * * @code{.cpp} * #include <raft/core/resources.hpp> * * raft::resources handle; * * // create an input stream * std::istream is(std::cin.rdbuf()); * using T = float; // data element type * using IdxT = int; // type of the index * auto index = raft::deserialize<T, IdxT>(handle, is); * @endcode * * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] handle the raft handle * @param[in] is input stream * * @return raft::neighbors::experimental::cagra::index<T, IdxT> */ template <typename T, typename IdxT> index<T, IdxT> deserialize(raft::resources const& handle, std::istream& is) { return detail::deserialize<T, IdxT>(handle, is); } /** * Load index from file. * * Experimental, both the API and the serialization format are subject to change. 
* * @code{.cpp} * #include <raft/core/resources.hpp> * * raft::resources handle; * * // create a string with a filepath * std::string filename("/path/to/index"); * using T = float; // data element type * using IdxT = int; // type of the index * auto index = raft::deserialize<T, IdxT>(handle, filename); * @endcode * * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] handle the raft handle * @param[in] filename the name of the file that stores the index * * @return raft::neighbors::experimental::cagra::index<T, IdxT> */ template <typename T, typename IdxT> index<T, IdxT> deserialize(raft::resources const& handle, const std::string& filename) { return detail::deserialize<T, IdxT>(handle, filename); } /**@}*/ } // namespace raft::neighbors::cagra // TODO: Remove deprecated experimental namespace in 23.12 release namespace raft::neighbors::experimental::cagra { using raft::neighbors::cagra::deserialize; using raft::neighbors::cagra::serialize; } // namespace raft::neighbors::experimental::cagra
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/neighbors/sample_filter.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <cstdint> #include <raft/core/bitset.cuh> namespace raft::neighbors::filtering { /** * @brief Filter an index with a bitset * * @tparam index_t Indexing type */ template <typename bitset_t, typename index_t> struct bitset_filter { // View of the bitset to use as a filter const raft::core::bitset_view<bitset_t, index_t> bitset_view_; bitset_filter(const raft::core::bitset_view<bitset_t, index_t> bitset_for_filtering) : bitset_view_{bitset_for_filtering} { } inline _RAFT_HOST_DEVICE bool operator()( // query index const uint32_t query_ix, // the index of the current sample const uint32_t sample_ix) const { return bitset_view_.test(sample_ix); } }; } // namespace raft::neighbors::filtering
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/neighbors/brute_force-inl.cuh
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/distance/distance_types.hpp> #include <raft/neighbors/brute_force_types.hpp> #include <raft/neighbors/detail/knn_brute_force.cuh> #include <raft/spatial/knn/detail/fused_l2_knn.cuh> namespace raft::neighbors::brute_force { /** * @defgroup brute_force_knn Brute-force K-Nearest Neighbors * @{ */ /** * @brief Performs a k-select across several (contiguous) row-partitioned index/distance * matrices formatted like the following: * * part1row1: k0, k1, k2, k3 * part1row2: k0, k1, k2, k3 * part1row3: k0, k1, k2, k3 * part2row1: k0, k1, k2, k3 * part2row2: k0, k1, k2, k3 * part2row3: k0, k1, k2, k3 * etc... * * The example above shows what an aggregated index/distance matrix * would look like with two partitions when n_samples=3 and k=4. * * When working with extremely large data sets that have been broken * over multiple indexes, such as when computing over multiple GPUs, * the ids will often start at 0 for each local knn index but the * global ids need to be used when merging them together. An optional * translations vector can be supplied to map the starting id of * each partition to its global id so that the final merged knn * is based on the global ids. 
* * Usage example: * @code{.cpp} * #include <raft/core/resources.hpp> * #include <raft/neighbors/brute_force.cuh> * using namespace raft::neighbors; * * raft::resources handle; * ... * compute multiple knn graphs and aggregate row-wise * (see detailed description above) * ... * brute_force::knn_merge_parts(handle, in_keys, in_values, out_keys, out_values, n_samples); * @endcode * * @tparam idx_t * @tparam value_t * * @param[in] handle * @param[in] in_keys matrix of input keys (size n_samples * n_parts * k) * @param[in] in_values matrix of input values (size n_samples * n_parts * k) * @param[out] out_keys matrix of output keys (size n_samples * k) * @param[out] out_values matrix of output values (size n_samples * k) * @param[in] n_samples number of rows in each partition * @param[in] translations optional vector of starting global id mappings for each local partition */ template <typename value_t, typename idx_t> inline void knn_merge_parts( raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, row_major> in_keys, raft::device_matrix_view<const idx_t, idx_t, row_major> in_values, raft::device_matrix_view<value_t, idx_t, row_major> out_keys, raft::device_matrix_view<idx_t, idx_t, row_major> out_values, size_t n_samples, std::optional<raft::device_vector_view<idx_t, idx_t>> translations = std::nullopt) { RAFT_EXPECTS(in_keys.extent(1) == in_values.extent(1) && in_keys.extent(0) == in_values.extent(0), "in_keys and in_values must have the same shape."); RAFT_EXPECTS( out_keys.extent(0) == out_values.extent(0) && out_keys.extent(0) == n_samples, "Number of rows in output keys and val matrices must equal number of rows in search matrix."); RAFT_EXPECTS( out_keys.extent(1) == out_values.extent(1) && out_keys.extent(1) == in_keys.extent(1), "Number of columns in output indices and distances matrices must be equal to k"); idx_t* translations_ptr = nullptr; if (translations.has_value()) { translations_ptr = translations.value().data_handle(); } auto 
n_parts = in_keys.extent(0) / n_samples; detail::knn_merge_parts(in_keys.data_handle(), in_values.data_handle(), out_keys.data_handle(), out_values.data_handle(), n_samples, n_parts, in_keys.extent(1), resource::get_cuda_stream(handle), translations_ptr); } /** * @brief Flat C++ API function to perform a brute force knn on * a series of input arrays and combine the results into a single * output array for indexes and distances. Inputs can be either * row- or column-major but the output matrices will always be in * row-major format. * * Usage example: * @code{.cpp} * #include <raft/core/resources.hpp> * #include <raft/neighbors/brute_force.cuh> * #include <raft/distance/distance_types.hpp> * using namespace raft::neighbors; * * raft::resources handle; * ... * auto metric = raft::distance::DistanceType::L2SqrtExpanded; * brute_force::knn(handle, index, search, indices, distances, metric); * @endcode * * @param[in] handle: the cuml handle to use * @param[in] index: vector of device matrices (each size m_i*d) to be used as the knn index * @param[in] search: matrix (size n*d) to be used for searching the index * @param[out] indices: matrix (size n*k) to store output knn indices * @param[out] distances: matrix (size n*k) to store the output knn distance * @param[in] metric: distance metric to use. Euclidean (L2) is used by default * @param[in] metric_arg: the value of `p` for Minkowski (l-p) distances. This * is ignored if the metric_type is not Minkowski. * @param[in] global_id_offset: optional starting global id mapping for the local partition * (assumes the index contains contiguous ids in the global id space) * @param[in] distance_epilogue: optional epilogue function to run after computing distances. This function takes a triple of the (value, rowid, colid) for each element in the pairwise distances and returns a transformed value back. 
*/ template <typename idx_t, typename value_t, typename matrix_idx, typename index_layout, typename search_layout, typename epilogue_op = raft::identity_op> void knn(raft::resources const& handle, std::vector<raft::device_matrix_view<const value_t, matrix_idx, index_layout>> index, raft::device_matrix_view<const value_t, matrix_idx, search_layout> search, raft::device_matrix_view<idx_t, matrix_idx, row_major> indices, raft::device_matrix_view<value_t, matrix_idx, row_major> distances, distance::DistanceType metric = distance::DistanceType::L2Unexpanded, std::optional<float> metric_arg = std::make_optional<float>(2.0f), std::optional<idx_t> global_id_offset = std::nullopt, epilogue_op distance_epilogue = raft::identity_op()) { RAFT_EXPECTS(index[0].extent(1) == search.extent(1), "Number of dimensions for both index and search matrices must be equal"); RAFT_EXPECTS(indices.extent(0) == distances.extent(0) && distances.extent(0) == search.extent(0), "Number of rows in output indices and distances matrices must equal number of rows " "in search matrix."); RAFT_EXPECTS(indices.extent(1) == distances.extent(1) && distances.extent(1), "Number of columns in output indices and distances matrices must the same"); bool rowMajorIndex = std::is_same_v<index_layout, layout_c_contiguous>; bool rowMajorQuery = std::is_same_v<search_layout, layout_c_contiguous>; std::vector<value_t*> inputs; std::vector<matrix_idx> sizes; for (std::size_t i = 0; i < index.size(); ++i) { inputs.push_back(const_cast<value_t*>(index[i].data_handle())); sizes.push_back(index[i].extent(0)); } std::vector<idx_t> trans; if (global_id_offset.has_value()) { trans.push_back(global_id_offset.value()); } std::vector<idx_t>* trans_arg = global_id_offset.has_value() ? &trans : nullptr; raft::neighbors::detail::brute_force_knn_impl(handle, inputs, sizes, index[0].extent(1), // TODO: This is unfortunate. Need to fix. 
const_cast<value_t*>(search.data_handle()), search.extent(0), indices.data_handle(), distances.data_handle(), indices.extent(1), rowMajorIndex, rowMajorQuery, trans_arg, metric, metric_arg.value_or(2.0f), distance_epilogue); } /** * @brief Compute the k-nearest neighbors using L2 expanded/unexpanded distance. * * This is a specialized function for fusing the k-selection with the distance * computation when k < 64. The value of k will be inferred from the number * of columns in the output matrices. * * Usage example: * @code{.cpp} * #include <raft/core/resources.hpp> * #include <raft/neighbors/brute_force.cuh> * #include <raft/distance/distance_types.hpp> * using namespace raft::neighbors; * * raft::resources handle; * ... * auto metric = raft::distance::DistanceType::L2SqrtExpanded; * brute_force::fused_l2_knn(handle, index, search, indices, distances, metric); * @endcode * @tparam value_t type of values * @tparam idx_t type of indices * @tparam idx_layout layout type of index matrix * @tparam query_layout layout type of query matrix * @param[in] handle raft handle for sharing expensive resources * @param[in] index input index array on device (size m * d) * @param[in] query input query array on device (size n * d) * @param[out] out_inds output indices array on device (size n * k) * @param[out] out_dists output dists array on device (size n * k) * @param[in] metric type of distance computation to perform (must be a variant of L2) */ template <typename value_t, typename idx_t, typename idx_layout, typename query_layout> void fused_l2_knn(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, idx_layout> index, raft::device_matrix_view<const value_t, idx_t, query_layout> query, raft::device_matrix_view<idx_t, idx_t, row_major> out_inds, raft::device_matrix_view<value_t, idx_t, row_major> out_dists, raft::distance::DistanceType metric) { int k = static_cast<int>(out_inds.extent(1)); RAFT_EXPECTS(k <= 64, "For fused k-selection, k must be < 64"); 
RAFT_EXPECTS(out_inds.extent(1) == out_dists.extent(1), "Value of k must match for outputs"); RAFT_EXPECTS(index.extent(1) == query.extent(1), "Number of columns in input matrices must be the same."); RAFT_EXPECTS(metric == distance::DistanceType::L2Expanded || metric == distance::DistanceType::L2Unexpanded || metric == distance::DistanceType::L2SqrtUnexpanded || metric == distance::DistanceType::L2SqrtExpanded, "Distance metric must be L2"); size_t n_index_rows = index.extent(0); size_t n_query_rows = query.extent(0); size_t D = index.extent(1); RAFT_EXPECTS(raft::is_row_or_column_major(index), "Index must be row or column major layout"); RAFT_EXPECTS(raft::is_row_or_column_major(query), "Query must be row or column major layout"); const bool rowMajorIndex = raft::is_row_major(index); const bool rowMajorQuery = raft::is_row_major(query); raft::spatial::knn::detail::fusedL2Knn(D, out_inds.data_handle(), out_dists.data_handle(), index.data_handle(), query.data_handle(), n_index_rows, n_query_rows, k, rowMajorIndex, rowMajorQuery, resource::get_cuda_stream(handle), metric); } /** * @brief Build the index from the dataset for efficient search. * * @tparam T data element type * * @param[in] res * @param[in] dataset a matrix view (host or device) to a row-major matrix [n_rows, dim] * @param[in] metric: distance metric to use. Euclidean (L2) is used by default * @param[in] metric_arg: the value of `p` for Minkowski (l-p) distances. This * is ignored if the metric_type is not Minkowski. 
* * @return the constructed brute force index */ template <typename T, typename Accessor> index<T> build(raft::resources const& res, mdspan<const T, matrix_extent<int64_t>, row_major, Accessor> dataset, raft::distance::DistanceType metric = distance::DistanceType::L2Unexpanded, T metric_arg = 0.0) { // certain distance metrics can benefit by pre-calculating the norms for the index dataset // which lets us avoid calculating these at query time std::optional<device_vector<T, int64_t>> norms; if (metric == raft::distance::DistanceType::L2Expanded || metric == raft::distance::DistanceType::L2SqrtExpanded || metric == raft::distance::DistanceType::CosineExpanded) { norms = make_device_vector<T, int64_t>(res, dataset.extent(0)); // cosine needs the l2norm, where as l2 distances needs the squared norm if (metric == raft::distance::DistanceType::CosineExpanded) { raft::linalg::norm(res, dataset, norms->view(), raft::linalg::NormType::L2Norm, raft::linalg::Apply::ALONG_ROWS, raft::sqrt_op{}); } else { raft::linalg::norm(res, dataset, norms->view(), raft::linalg::NormType::L2Norm, raft::linalg::Apply::ALONG_ROWS); } } return index<T>(res, dataset, std::move(norms), metric, metric_arg); } /** * @brief Brute Force search using the constructed index. 
* * @tparam T data element type * @tparam IdxT type of the indices * * @param[in] res raft resources * @param[in] idx brute force index * @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()] * @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset * [n_queries, k] * @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries, * k] */ template <typename T, typename IdxT> void search(raft::resources const& res, const index<T>& idx, raft::device_matrix_view<const T, int64_t, row_major> queries, raft::device_matrix_view<IdxT, int64_t, row_major> neighbors, raft::device_matrix_view<T, int64_t, row_major> distances) { raft::neighbors::detail::brute_force_search<T, IdxT>(res, idx, queries, neighbors, distances); } /** @} */ // end group brute_force_knn } // namespace raft::neighbors::brute_force
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/refine_host-inl.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/core/host_mdspan.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/neighbors/detail/refine_common.hpp>

#include <algorithm>
// NOTE(review): <vector>, <tuple> and <limits> are used below but not included
// directly here — presumably pulled in transitively; confirm before relying on it.
#include <omp.h>

namespace raft::neighbors::detail {

/**
 * Re-ranks candidate neighbors on the CPU by recomputing their exact distances
 * against the original dataset, then keeps the best `refined_k` per query.
 *
 * @tparam DC        distance computer policy (see distance_comp_l2 / distance_comp_inner below):
 *                   provides per-component `eval` and a final `postprocess`
 * @tparam IdxT      neighbor id type
 * @tparam DataT     dataset/query element type
 * @tparam DistanceT accumulator/output distance type
 * @tparam ExtentsT  extents type of the matrix views
 *
 * @param dataset             [n_rows, dim] original vectors, host memory
 * @param queries             [n_queries, dim] query vectors, host memory
 * @param neighbor_candidates [n_queries, orig_k] candidate ids per query
 * @param indices             [n_queries, refined_k] output: re-ranked neighbor ids
 * @param distances           [n_queries, refined_k] output: refined distances
 *                            (skipped when its data handle is nullptr)
 */
template <typename DC, typename IdxT, typename DataT, typename DistanceT, typename ExtentsT>
[[gnu::optimize(3), gnu::optimize("tree-vectorize")]] void refine_host_impl(
  raft::host_matrix_view<const DataT, ExtentsT, row_major> dataset,
  raft::host_matrix_view<const DataT, ExtentsT, row_major> queries,
  raft::host_matrix_view<const IdxT, ExtentsT, row_major> neighbor_candidates,
  raft::host_matrix_view<IdxT, ExtentsT, row_major> indices,
  raft::host_matrix_view<DistanceT, ExtentsT, row_major> distances)
{
  size_t n_queries = queries.extent(0);
  size_t n_rows    = dataset.extent(0);
  size_t dim       = dataset.extent(1);
  size_t orig_k    = neighbor_candidates.extent(1);
  size_t refined_k = indices.extent(1);

  // NVTX range so this phase shows up in profiler timelines
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "neighbors::refine_host(%zu, %zu -> %zu)", n_queries, orig_k, refined_k);

  // Never spawn more threads than there are queries to process
  auto suggested_n_threads = std::max(1, std::min(omp_get_num_procs(), omp_get_max_threads()));
  if (size_t(suggested_n_threads) > n_queries) { suggested_n_threads = n_queries; }

#pragma omp parallel num_threads(suggested_n_threads)
  {
    // Per-thread scratch buffer of (distance, id) pairs — reused across queries
    std::vector<std::tuple<DistanceT, IdxT>> refined_pairs(orig_k);
    // Queries are strided across threads: thread t handles t, t + T, t + 2T, ...
    for (size_t i = omp_get_thread_num(); i < n_queries; i += omp_get_num_threads()) {
      // Compute the refined distance using original dataset vectors
      const DataT* query = queries.data_handle() + dim * i;
      for (size_t j = 0; j < orig_k; j++) {
        IdxT id            = neighbor_candidates(i, j);
        DistanceT distance = 0.0;
        if (static_cast<size_t>(id) >= n_rows) {
          // Out-of-range candidate id: push it to the end of the ranking
          distance = std::numeric_limits<DistanceT>::max();
        } else {
          const DataT* row = dataset.data_handle() + dim * id;
          for (size_t k = 0; k < dim; k++) {
            distance += DC::template eval<DistanceT>(query[k], row[k]);
          }
        }
        refined_pairs[j] = std::make_tuple(distance, id);
      }
      // Sort the query neighbors by their refined distances (ties broken by id)
      std::sort(refined_pairs.begin(), refined_pairs.end());
      // Store first refined_k neighbors
      for (size_t j = 0; j < refined_k; j++) {
        indices(i, j) = std::get<1>(refined_pairs[j]);
        if (distances.data_handle() != nullptr) {
          // postprocess undoes any monotone transform applied by eval (e.g. negation)
          distances(i, j) = DC::template postprocess(std::get<0>(refined_pairs[j]));
        }
      }
    }
  }
}

// Distance policy for (unexpanded) squared L2: accumulate (a - b)^2, no postprocessing.
struct distance_comp_l2 {
  template <typename DistanceT>
  static inline auto eval(const DistanceT& a, const DistanceT& b) -> DistanceT
  {
    auto d = a - b;
    return d * d;
  }
  template <typename DistanceT>
  static inline auto postprocess(const DistanceT& a) -> DistanceT
  {
    return a;
  }
};

// Distance policy for inner product: accumulate -a*b so that std::sort's
// ascending order ranks larger inner products first; postprocess negates back.
struct distance_comp_inner {
  template <typename DistanceT>
  static inline auto eval(const DistanceT& a, const DistanceT& b) -> DistanceT
  {
    return -a * b;
  }
  template <typename DistanceT>
  static inline auto postprocess(const DistanceT& a) -> DistanceT
  {
    return -a;
  }
};

/**
 * Naive CPU implementation of refine operation
 *
 * All pointers are expected to be accessible on the host.
 *
 * Validates the input extents/metric and dispatches to refine_host_impl with
 * the distance policy matching `metric`.
 *
 * @throws raft::logic_error for metrics other than L2Expanded / InnerProduct
 */
template <typename IdxT, typename DataT, typename DistanceT, typename ExtentsT>
[[gnu::optimize(3), gnu::optimize("tree-vectorize")]] void refine_host(
  raft::host_matrix_view<const DataT, ExtentsT, row_major> dataset,
  raft::host_matrix_view<const DataT, ExtentsT, row_major> queries,
  raft::host_matrix_view<const IdxT, ExtentsT, row_major> neighbor_candidates,
  raft::host_matrix_view<IdxT, ExtentsT, row_major> indices,
  raft::host_matrix_view<DistanceT, ExtentsT, row_major> distances,
  distance::DistanceType metric = distance::DistanceType::L2Unexpanded)
{
  refine_check_input(dataset.extents(),
                     queries.extents(),
                     neighbor_candidates.extents(),
                     indices.extents(),
                     distances.extents(),
                     metric);

  switch (metric) {
    case raft::distance::DistanceType::L2Expanded:
      return refine_host_impl<distance_comp_l2>(
        dataset, queries, neighbor_candidates, indices, distances);
    case raft::distance::DistanceType::InnerProduct:
      return refine_host_impl<distance_comp_inner>(
        dataset, queries, neighbor_candidates, indices, distances);
    default: throw raft::logic_error("Unsupported metric");
  }
}

}  // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/nn_descent.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime.h> #include <mma.h> #include <omp.h> #include <cub/cub.cuh> #include <limits> #include <queue> #include <random> #include <rmm/device_uvector.hpp> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/host_vector.h> #include <thrust/mr/allocator.h> #include <thrust/mr/device_memory_resource.h> #include "../nn_descent_types.hpp" #include <raft/core/device_mdarray.hpp> #include <raft/core/error.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/neighbors/detail/cagra/device_common.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/util/arch.cuh> // raft::util::arch::SM_* #include <raft/util/cuda_dev_essentials.cuh> #include <raft/util/cuda_rt_essentials.hpp> #include <raft/util/cudart_utils.hpp> #include <raft/util/pow2_utils.cuh> namespace raft::neighbors::experimental::nn_descent::detail { using pinned_memory_resource = thrust::universal_host_pinned_memory_resource; template <typename T> using pinned_memory_allocator = thrust::mr::stateless_resource_allocator<T, pinned_memory_resource>; using DistData_t = float; constexpr int DEGREE_ON_DEVICE{32}; constexpr int SEGMENT_SIZE{32}; constexpr int counter_interval{100}; template <typename Index_t> struct InternalID_t; // InternalID_t uses 1 bit for marking (new or old). 
// Specialization for 32-bit ids. The sign bit doubles as the "new/old" flag:
// a non-negative value is a "new" neighbor; an old neighbor n is stored as
// -n - 1 (so id 0 remains representable). id() decodes either form.
template <>
class InternalID_t<int> {
 private:
  using Index_t = int;
  // Raw storage; defaults to "invalid" (max), which reads as a new id >= nrow.
  Index_t id_{std::numeric_limits<Index_t>::max()};

 public:
  // True while the id has not been marked old (sign bit clear).
  __host__ __device__ bool is_new() const { return id_ >= 0; }
  // Mutable access to the raw (flag-encoded) value, e.g. for warp shuffles.
  __host__ __device__ Index_t& id_with_flag() { return id_; }
  // Decoded id regardless of flag state.
  __host__ __device__ Index_t id() const
  {
    if (is_new()) return id_;
    return -id_ - 1;
  }
  // Flip the encoding to "old"; idempotent (negative values are left alone).
  __host__ __device__ void mark_old()
  {
    if (id_ >= 0) id_ = -id_ - 1;
  }
  // Equality compares decoded ids, ignoring the new/old flag.
  __host__ __device__ bool operator==(const InternalID_t<int>& other) const
  {
    return id() == other.id();
  }
};

template <typename Index_t>
struct ResultItem;

// (id, distance) pair used in k-NN lists; id uses the same sign-bit new/old
// encoding as InternalID_t. Ordering is by distance, ties broken by id, so
// items are totally ordered for the bitonic sort / merge steps.
template <>
class ResultItem<int> {
 private:
  using Index_t = int;
  Index_t id_;
  DistData_t dist_;

 public:
  // Default: sentinel item that sorts after every real result.
  __host__ __device__ ResultItem()
    : id_(std::numeric_limits<Index_t>::max()), dist_(std::numeric_limits<DistData_t>::max()){};
  __host__ __device__ ResultItem(const Index_t id_with_flag, const DistData_t dist)
    : id_(id_with_flag), dist_(dist){};
  __host__ __device__ bool is_new() const { return id_ >= 0; }
  __host__ __device__ Index_t& id_with_flag() { return id_; }
  __host__ __device__ Index_t id() const
  {
    if (is_new()) return id_;
    return -id_ - 1;
  }
  __host__ __device__ DistData_t& dist() { return dist_; }

  __host__ __device__ void mark_old()
  {
    if (id_ >= 0) id_ = -id_ - 1;
  }

  // Primary order: distance; ties broken by decoded id for determinism.
  __host__ __device__ bool operator<(const ResultItem<Index_t>& other) const
  {
    if (dist_ == other.dist_) return id() < other.id();
    return dist_ < other.dist_;
  }
  __host__ __device__ bool operator==(const ResultItem<Index_t>& other) const
  {
    return id() == other.id();
  }
  __host__ __device__ bool operator>=(const ResultItem<Index_t>& other) const
  {
    return !(*this < other);
  }
  __host__ __device__ bool operator<=(const ResultItem<Index_t>& other) const
  {
    return (*this == other) || (*this < other);
  }
  __host__ __device__ bool operator>(const ResultItem<Index_t>& other) const
  {
    return !(*this <= other);
  }
  __host__ __device__ bool operator!=(const ResultItem<Index_t>& other) const
  {
    return !(*this == other);
  }
};

// Helper for rounding sizes up to multiples of 32 (warp size alignment).
using align32 = raft::Pow2<32>;

template
<typename T>
// Size of batch `it_now` when splitting `nrow` items into `batch_size` chunks;
// the last batch gets the remainder.
int get_batch_size(const int it_now, const T nrow, const int batch_size)
{
  int it_total = ceildiv(nrow, batch_size);
  return (it_now == it_total - 1) ? nrow - it_now * batch_size : batch_size;
}

// for avoiding bank conflict
// Pads a row width so that consecutive rows do not map to the same shared
// memory banks. NOTE(review): only the T==float branch returns a value; for
// any other T the function falls off the end (UB if instantiated) — confirm
// it is only ever used as skew_dim<float>.
template <typename T>
constexpr __host__ __device__ __forceinline__ int skew_dim(int ndim)
{
  // all "4"s are for alignment
  if constexpr (std::is_same<T, float>::value) {
    ndim = ceildiv(ndim, 4) * 4;
    return ndim + (ndim % 32 == 0) * 4;
  }
}

// One compare-exchange step of a warp-wide bitonic network: exchange with the
// lane at XOR `mask` and keep min or max depending on `dir`.
template <typename T>
__device__ __forceinline__ ResultItem<T> xor_swap(ResultItem<T> x, int mask, int dir)
{
  ResultItem<T> y;
  y.dist() = __shfl_xor_sync(raft::warp_full_mask(), x.dist(), mask, raft::warp_size());
  y.id_with_flag() =
    __shfl_xor_sync(raft::warp_full_mask(), x.id_with_flag(), mask, raft::warp_size());
  return x < y == dir ? y : x;
}

// Same compare-exchange step for plain ints.
__device__ __forceinline__ int xor_swap(int x, int mask, int dir)
{
  int y = __shfl_xor_sync(raft::warp_full_mask(), x, mask, raft::warp_size());
  return x < y == dir ? y : x;
}

// TODO: Move to RAFT utils https://github.com/rapidsai/raft/issues/1827
// Extract the single bit at `pos` from `lane_id` via PTX bfe (bit field extract).
__device__ __forceinline__ uint bfe(uint lane_id, uint pos)
{
  uint res;
  asm("bfe.u32 %0,%1,%2,%3;" : "=r"(res) : "r"(lane_id), "r"(pos), "r"(1));
  return res;
}

// Full 32-element bitonic sort across a warp; each lane holds one element in
// *element_ptr and the sequence of xor_swap stages leaves the warp's elements
// in ascending order by lane.
template <typename T>
__device__ __forceinline__ void warp_bitonic_sort(T* element_ptr, const int lane_id)
{
  static_assert(raft::warp_size() == 32);
  auto& element = *element_ptr;
  element       = xor_swap(element, 0x01, bfe(lane_id, 1) ^ bfe(lane_id, 0));
  element       = xor_swap(element, 0x02, bfe(lane_id, 2) ^ bfe(lane_id, 1));
  element       = xor_swap(element, 0x01, bfe(lane_id, 2) ^ bfe(lane_id, 0));
  element       = xor_swap(element, 0x04, bfe(lane_id, 3) ^ bfe(lane_id, 2));
  element       = xor_swap(element, 0x02, bfe(lane_id, 3) ^ bfe(lane_id, 1));
  element       = xor_swap(element, 0x01, bfe(lane_id, 3) ^ bfe(lane_id, 0));
  element       = xor_swap(element, 0x08, bfe(lane_id, 4) ^ bfe(lane_id, 3));
  element       = xor_swap(element, 0x04, bfe(lane_id, 4) ^ bfe(lane_id, 2));
  element       = xor_swap(element, 0x02, bfe(lane_id, 4) ^ bfe(lane_id, 1));
  element       = xor_swap(element, 0x01, bfe(lane_id, 4) ^ bfe(lane_id, 0));
  element       = xor_swap(element, 0x10, bfe(lane_id, 4));
  element       = xor_swap(element, 0x08, bfe(lane_id, 3));
  element       = xor_swap(element, 0x04, bfe(lane_id, 2));
  element       = xor_swap(element, 0x02, bfe(lane_id, 1));
  element       = xor_swap(element, 0x01, bfe(lane_id, 0));
  return;
}

// Build-time knobs for the NN-descent run.
struct BuildConfig {
  size_t max_dataset_size;
  size_t dataset_dim;
  size_t node_degree{64};
  size_t internal_node_degree{0};
  // If internal_node_degree == 0, the value of node_degree will be assigned to it
  size_t max_iterations{50};
  float termination_threshold{0.0001};
};

// Per-list Bloom filter used to avoid re-sampling neighbors already seen.
// Each list owns num_sets_per_list sets of num_bits_per_set_ bits; a key is
// hashed num_hashs_ times into the set chosen by key % num_sets_per_list_.
template <typename Index_t>
class BloomFilter {
 public:
  BloomFilter(size_t nrow, size_t num_sets_per_list, size_t num_hashs)
    : nrow_(nrow),
      num_sets_per_list_(num_sets_per_list),
      num_hashs_(num_hashs),
      bitsets_(nrow * num_bits_per_set_ * num_sets_per_list)
  {
  }

  // Record `key` in list `list_id`'s filter.
  void add(size_t list_id, Index_t key)
  {
    if (is_cleared) { is_cleared = false; }
    uint32_t hash         = hash_0(key);
    size_t global_set_idx = list_id * num_bits_per_set_ * num_sets_per_list_ +
                            key % num_sets_per_list_ * num_bits_per_set_;
    bitsets_[global_set_idx + hash % num_bits_per_set_] = 1;
    for (size_t i = 1; i < num_hashs_; i++) {
      // Double hashing: step by hash_1(key) for each subsequent probe.
      hash                                                = hash + hash_1(key);
      bitsets_[global_set_idx + hash % num_bits_per_set_] = 1;
    }
  }

  // Membership probe; may return false positives, never false negatives.
  bool check(size_t list_id, Index_t key)
  {
    bool is_present       = true;
    uint32_t hash         = hash_0(key);
    size_t global_set_idx = list_id * num_bits_per_set_ * num_sets_per_list_ +
                            key % num_sets_per_list_ * num_bits_per_set_;
    is_present &= bitsets_[global_set_idx + hash % num_bits_per_set_];
    if (!is_present) return false;
    for (size_t i = 1; i < num_hashs_; i++) {
      hash = hash + hash_1(key);
      is_present &= bitsets_[global_set_idx + hash % num_bits_per_set_];
      if (!is_present) return false;
    }
    return true;
  }

  // Zero all bits; skipped when already clear.
  // NOTE(review): std::vector<bool> packs bits, so the parallel-for below
  // writes shared words from multiple threads — confirm this is benign here
  // (all writes store 0) or switch to a byte-backed container.
  void clear()
  {
    if (is_cleared) return;
#pragma omp parallel for
    for (size_t i = 0; i < nrow_ * num_bits_per_set_ * num_sets_per_list_; i++) {
      bitsets_[i] = 0;
    }
    is_cleared
= true;
  }

 private:
  // LCG-style mix; first probe position.
  uint32_t hash_0(uint32_t value)
  {
    value *= 1103515245;
    value += 12345;
    value ^= value << 13;
    value ^= value >> 17;
    value ^= value << 5;
    return value;
  }

  // Second, independent mix used as the double-hashing stride.
  uint32_t hash_1(uint32_t value)
  {
    value *= 1664525;
    value += 1013904223;
    value ^= value << 13;
    value ^= value >> 17;
    value ^= value << 5;
    return value;
  }

  static constexpr int num_bits_per_set_ = 512;
  bool is_cleared{true};
  std::vector<bool> bitsets_;
  size_t nrow_;
  size_t num_sets_per_list_;
  size_t num_hashs_;
};

// Host-side adjacency state for NN-descent: the full k-NN lists (h_graph /
// h_dists) plus the per-iteration "new"/"old" neighbor samples exchanged with
// the device. Lists are organized in fixed-size segments (segment_size) so a
// neighbor with id n lives in segment n % num_segments.
template <typename Index_t>
struct GnndGraph {
  static constexpr int segment_size = 32;
  // Output adjacency (allocated by the caller; see ctor comment).
  InternalID_t<Index_t>* h_graph;
  size_t nrow;
  size_t node_degree;
  int num_samples;
  int num_segments;

  raft::host_matrix<DistData_t, size_t, raft::row_major> h_dists;

  thrust::host_vector<Index_t, pinned_memory_allocator<Index_t>> h_graph_new;
  thrust::host_vector<int2, pinned_memory_allocator<int2>> h_list_sizes_new;

  thrust::host_vector<Index_t, pinned_memory_allocator<Index_t>> h_graph_old;
  thrust::host_vector<int2, pinned_memory_allocator<int2>> h_list_sizes_old;

  BloomFilter<Index_t> bloom_filter;

  GnndGraph(const GnndGraph&)            = delete;
  GnndGraph& operator=(const GnndGraph&) = delete;
  GnndGraph(const size_t nrow,
            const size_t node_degree,
            const size_t internal_node_degree,
            const size_t num_samples);
  void init_random_graph();
  // TODO: Create a generic bloom filter utility https://github.com/rapidsai/raft/issues/1827
  // Use Bloom filter to sample "new" neighbors for local joining
  void sample_graph_new(InternalID_t<Index_t>* new_neighbors, const size_t width);
  void sample_graph(bool sample_new);
  void update_graph(const InternalID_t<Index_t>* new_neighbors,
                    const DistData_t* new_dists,
                    const size_t width,
                    std::atomic<int64_t>& update_counter);
  void sort_lists();
  void clear();
  ~GnndGraph();
};

// GPU NN-descent driver: owns device buffers, the half-precision copy of the
// dataset, optional L2 norms, and the pinned host staging vectors used to
// shuttle candidate lists between host sampling and device local joins.
template <typename Data_t = float, typename Index_t = int>
class GNND {
 public:
  GNND(raft::resources const& res, const BuildConfig& build_config);
  GNND(const GNND&)            = delete;
  GNND& operator=(const GNND&) = delete;

  // Run NN-descent over `nrow` points in `data`, writing the final
  // neighbor ids into `output_graph` (body defined later in this file).
  void build(Data_t* data, const Index_t nrow, Index_t* output_graph);
  ~GNND()    = default;
  using ID_t = InternalID_t<Index_t>;

 private:
  // Append reverse edges on device and mirror them into pinned host memory.
  void add_reverse_edges(Index_t* graph_ptr,
                         Index_t* h_rev_graph_ptr,
                         Index_t* d_rev_graph_ptr,
                         int2* list_sizes,
                         cudaStream_t stream = 0);
  void local_join(cudaStream_t stream = 0);

  raft::resources const& res;

  BuildConfig build_config_;
  GnndGraph<Index_t> graph_;
  std::atomic<int64_t> update_counter_;

  size_t nrow_;
  size_t ndim_;

  raft::device_matrix<__half, size_t, raft::row_major> d_data_;
  raft::device_vector<DistData_t, size_t> l2_norms_;

  raft::device_matrix<ID_t, size_t, raft::row_major> graph_buffer_;
  raft::device_matrix<DistData_t, size_t, raft::row_major> dists_buffer_;

  // TODO: Investigate using RMM/RAFT types https://github.com/rapidsai/raft/issues/1827
  thrust::host_vector<ID_t, pinned_memory_allocator<ID_t>> graph_host_buffer_;
  thrust::host_vector<DistData_t, pinned_memory_allocator<DistData_t>> dists_host_buffer_;

  raft::device_vector<int, size_t> d_locks_;

  thrust::host_vector<Index_t, pinned_memory_allocator<Index_t>> h_rev_graph_new_;
  thrust::host_vector<Index_t, pinned_memory_allocator<Index_t>> h_graph_old_;
  thrust::host_vector<Index_t, pinned_memory_allocator<Index_t>> h_rev_graph_old_;
  // int2.x is the number of forward edges, int2.y is the number of reverse edges
  raft::device_vector<int2, size_t> d_list_sizes_new_;
  raft::device_vector<int2, size_t> d_list_sizes_old_;
};

// Tile shape used when staging vectors through shared memory.
constexpr int TILE_ROW_WIDTH = 64;
constexpr int TILE_COL_WIDTH = 128;

constexpr int NUM_SAMPLES = 32;
// For now, the max. number of samples is 32, so the sample cache size is fixed
// to 64 (32 * 2).
constexpr int MAX_NUM_BI_SAMPLES        = 64;
// Row stride padded to dodge shared-memory bank conflicts (see skew_dim).
constexpr int SKEWED_MAX_NUM_BI_SAMPLES = skew_dim<float>(MAX_NUM_BI_SAMPLES);
constexpr int BLOCK_SIZE                = 512;
// Tensor-core (wmma) tile dimensions.
constexpr int WMMA_M                    = 16;
constexpr int WMMA_N                    = 16;
constexpr int WMMA_K                    = 16;

// Warp-cooperative copy of one vector from global memory into a (shared)
// buffer, zero-padding [load_dims, padding_dims). For __half with suitable
// alignment a vectorized float2 (8-byte) path is used.
template <typename Data_t>
__device__ __forceinline__ void load_vec(Data_t* vec_buffer,
                                         const Data_t* d_vec,
                                         const int load_dims,
                                         const int padding_dims,
                                         const int lane_id)
{
  if constexpr (std::is_same_v<Data_t, float> or std::is_same_v<Data_t, uint8_t> or
                std::is_same_v<Data_t, int8_t>) {
    constexpr int num_load_elems_per_warp = raft::warp_size();
    for (int step = 0; step < ceildiv(padding_dims, num_load_elems_per_warp); step++) {
      int idx = step * num_load_elems_per_warp + lane_id;
      if (idx < load_dims) {
        vec_buffer[idx] = d_vec[idx];
      } else if (idx < padding_dims) {
        vec_buffer[idx] = 0.0f;
      }
    }
  }

  if constexpr (std::is_same_v<Data_t, __half>) {
    if ((size_t)d_vec % sizeof(float2) == 0 && (size_t)vec_buffer % sizeof(float2) == 0 &&
        load_dims % 4 == 0 && padding_dims % 4 == 0) {
      // Aligned fast path: 4 halves (one float2) per lane per step.
      constexpr int num_load_elems_per_warp = raft::warp_size() * 4;
#pragma unroll
      for (int step = 0; step < ceildiv(padding_dims, num_load_elems_per_warp); step++) {
        int idx_in_vec = step * num_load_elems_per_warp + lane_id * 4;
        if (idx_in_vec + 4 <= load_dims) {
          *(float2*)(vec_buffer + idx_in_vec) = *(float2*)(d_vec + idx_in_vec);
        } else if (idx_in_vec + 4 <= padding_dims) {
          *(float2*)(vec_buffer + idx_in_vec) = float2({0.0f, 0.0f});
        }
      }
    } else {
      // Fallback scalar path (unaligned or dims not multiple of 4).
      constexpr int num_load_elems_per_warp = raft::warp_size();
      for (int step = 0; step < ceildiv(padding_dims, num_load_elems_per_warp); step++) {
        int idx = step * num_load_elems_per_warp + lane_id;
        if (idx < load_dims) {
          vec_buffer[idx] = d_vec[idx];
        } else if (idx < padding_dims) {
          vec_buffer[idx] = 0.0f;
        }
      }
    }
  }
}

// TODO: Replace with RAFT utilities https://github.com/rapidsai/raft/issues/1827
/** Calculate L2 norm, and cast data to __half */
// One block per vector (blockIdx.x). If l2_norms is null the vector is
// L2-normalized in place of storing norms; otherwise the raw values are cast
// and the squared norm is recorded at l2_norms[list_id].
// NOTE(review): l2_norm is accumulated by each warp's lane 0 without atomics;
// this assumes the launch configuration serializes those writes (e.g. a
// single-warp block) — confirm against the launch site.
template <typename Data_t>
RAFT_KERNEL preprocess_data_kernel(const Data_t* input_data,
                                   __half* output_data,
                                   int dim,
                                   DistData_t* l2_norms,
                                   size_t list_offset = 0)
{
  extern __shared__ char buffer[];
  __shared__ float l2_norm;
  Data_t* s_vec  = (Data_t*)buffer;
  size_t list_id = list_offset + blockIdx.x;

  load_vec(s_vec, input_data + blockIdx.x * dim, dim, dim, threadIdx.x % raft::warp_size());
  if (threadIdx.x == 0) { l2_norm = 0; }
  __syncthreads();
  int lane_id = threadIdx.x % raft::warp_size();
  for (int step = 0; step < ceildiv(dim, raft::warp_size()); step++) {
    int idx         = step * raft::warp_size() + lane_id;
    float part_dist = 0;
    if (idx < dim) {
      part_dist = s_vec[idx];
      part_dist = part_dist * part_dist;
    }
    __syncwarp();
    // Warp tree-reduction of the partial squared sums.
    for (int offset = raft::warp_size() >> 1; offset >= 1; offset >>= 1) {
      part_dist += __shfl_down_sync(raft::warp_full_mask(), part_dist, offset);
    }
    if (lane_id == 0) { l2_norm += part_dist; }
    __syncwarp();
  }

  for (int step = 0; step < ceildiv(dim, raft::warp_size()); step++) {
    int idx = step * raft::warp_size() + threadIdx.x;
    if (idx < dim) {
      if (l2_norms == nullptr) {
        // Cosine-style preprocessing: store normalized values as __half.
        output_data[list_id * dim + idx] =
          (float)input_data[(size_t)blockIdx.x * dim + idx] / sqrt(l2_norm);
      } else {
        // L2 preprocessing: keep raw values, record the squared norm once.
        output_data[list_id * dim + idx] = input_data[(size_t)blockIdx.x * dim + idx];
        if (idx == 0) { l2_norms[list_id] = l2_norm; }
      }
    }
  }
}

// One block per list: for each forward edge (u -> v), append u to v's reverse
// list, capped at num_samples entries via atomic bookkeeping in list_sizes.y.
template <typename Index_t>
RAFT_KERNEL add_rev_edges_kernel(const Index_t* graph,
                                 Index_t* rev_graph,
                                 int num_samples,
                                 int2* list_sizes)
{
  size_t list_id = blockIdx.x;
  int2 list_size = list_sizes[list_id];

  for (int idx = threadIdx.x; idx < list_size.x; idx += blockDim.x) {
    // each node has same number (num_samples) of forward and reverse edges
    size_t rev_list_id = graph[list_id * num_samples + idx];
    // there are already num_samples forward edges
    int idx_in_rev_list = atomicAdd(&list_sizes[rev_list_id].y, 1);
    if (idx_in_rev_list >= num_samples) {
      // Over capacity: clamp the counter back to the cap and drop the edge.
      atomicExch(&list_sizes[rev_list_id].y, num_samples);
    } else {
      rev_graph[rev_list_id * num_samples + idx_in_rev_list] = list_id;
    }
  }
}

template <typename Index_t, typename ID_t =
InternalID_t<Index_t>>
// Warp-cooperative insertion of `elem` into node `list_id`'s global k-NN list.
// The list is split into warp_size-wide segments; the target segment is chosen
// by elem.id() % num_segments and protected by a per-segment spin lock so
// concurrent warps do not corrupt the sorted order.
__device__ void insert_to_global_graph(ResultItem<Index_t> elem,
                                       size_t list_id,
                                       ID_t* graph,
                                       DistData_t* dists,
                                       int node_degree,
                                       int* locks)
{
  int tx                 = threadIdx.x;
  int lane_id            = tx % raft::warp_size();
  size_t global_idx_base = list_id * node_degree;
  // Never insert a node into its own neighbor list.
  if (elem.id() == list_id) return;

  const int num_segments = ceildiv(node_degree, raft::warp_size());

  int loop_flag = 0;
  do {
    int segment_id = elem.id() % num_segments;
    // Lane 0 tries to take the segment lock; result is broadcast to the warp.
    if (lane_id == 0) {
      loop_flag = atomicCAS(&locks[list_id * num_segments + segment_id], 0, 1) == 0;
    }

    loop_flag = __shfl_sync(raft::warp_full_mask(), loop_flag, 0);

    if (loop_flag == 1) {
      // Each lane loads one (id, dist) slot of the locked segment.
      ResultItem<Index_t> knn_list_frag;
      int local_idx     = segment_id * raft::warp_size() + lane_id;
      size_t global_idx = global_idx_base + local_idx;
      if (local_idx < node_degree) {
        knn_list_frag.id_with_flag() = graph[global_idx].id_with_flag();
        knn_list_frag.dist()         = dists[global_idx];
      }

      int pos_to_insert = -1;
      // Compare against the previous lane's element to find the unique slot
      // where prev < elem < current holds; -2 marks a duplicate (no insert).
      ResultItem<Index_t> prev_elem;

      prev_elem.id_with_flag() =
        __shfl_up_sync(raft::warp_full_mask(), knn_list_frag.id_with_flag(), 1);
      prev_elem.dist() = __shfl_up_sync(raft::warp_full_mask(), knn_list_frag.dist(), 1);

      if (lane_id == 0) {
        // Sentinel "minus infinity" so an insert at position 0 is possible.
        prev_elem = ResultItem<Index_t>{std::numeric_limits<Index_t>::min(),
                                        std::numeric_limits<DistData_t>::lowest()};
      }
      if (elem > prev_elem && elem < knn_list_frag) {
        pos_to_insert = segment_id * raft::warp_size() + lane_id;
      } else if (elem == prev_elem || elem == knn_list_frag) {
        pos_to_insert = -2;
      }
      // First lane reporting a valid position wins; broadcast it.
      uint mask = __ballot_sync(raft::warp_full_mask(), pos_to_insert >= 0);
      if (mask) {
        uint set_lane_id = __fns(mask, 0, 1);
        pos_to_insert    = __shfl_sync(raft::warp_full_mask(), pos_to_insert, set_lane_id);
      }

      if (pos_to_insert >= 0) {
        // Shift elements at/after the insert position one slot to the right,
        // writing elem into its slot; the segment's last element falls off.
        int local_idx = segment_id * raft::warp_size() + lane_id;
        if (local_idx > pos_to_insert) {
          local_idx++;
        } else if (local_idx == pos_to_insert) {
          graph[global_idx_base + local_idx].id_with_flag() = elem.id_with_flag();
          dists[global_idx_base + local_idx]                = elem.dist();
          local_idx++;
        }
        size_t global_pos = global_idx_base + local_idx;
        if (local_idx < (segment_id + 1) * raft::warp_size() && local_idx < node_degree) {
          graph[global_pos].id_with_flag() = knn_list_frag.id_with_flag();
          dists[global_pos]                = knn_list_frag.dist();
        }
      }
      __threadfence();
      // Release the segment lock.
      if (loop_flag && lane_id == 0) {
        atomicExch(&locks[list_id * num_segments + segment_id], 0);
      }
    }
  } while (!loop_flag);
}

// Warp-cooperative argmin over a row (or column) of the shared distance tile:
// returns the closest neighbor to `id` among `neighbs`, skipping self-matches.
// NOTE(review): the shuffled index is carried in a float (`other_idx`); exact
// for indices < 64 as here, but an int would express the intent better.
template <typename Index_t>
__device__ ResultItem<Index_t> get_min_item(const Index_t id,
                                            const int idx_in_list,
                                            const Index_t* neighbs,
                                            const DistData_t* distances,
                                            const bool find_in_row = true)
{
  int lane_id = threadIdx.x % raft::warp_size();

  static_assert(MAX_NUM_BI_SAMPLES == 64);
  // Two candidates per lane (64 samples / 32 lanes).
  int idx[MAX_NUM_BI_SAMPLES / raft::warp_size()];
  float dist[MAX_NUM_BI_SAMPLES / raft::warp_size()] = {std::numeric_limits<DistData_t>::max(),
                                                        std::numeric_limits<DistData_t>::max()};
  idx[0] = lane_id;
  idx[1] = raft::warp_size() + lane_id;

  if (neighbs[idx[0]] != id) {
    dist[0] = find_in_row ? distances[idx_in_list * SKEWED_MAX_NUM_BI_SAMPLES + lane_id]
                          : distances[idx_in_list + lane_id * SKEWED_MAX_NUM_BI_SAMPLES];
  }

  if (neighbs[idx[1]] != id) {
    dist[1] =
      find_in_row
        ? distances[idx_in_list * SKEWED_MAX_NUM_BI_SAMPLES + raft::warp_size() + lane_id]
        : distances[idx_in_list + (raft::warp_size() + lane_id) * SKEWED_MAX_NUM_BI_SAMPLES];
  }

  if (dist[1] < dist[0]) {
    dist[0] = dist[1];
    idx[0]  = idx[1];
  }
  __syncwarp();
  // Warp tree-reduction keeping the (dist, idx) of the minimum.
  for (int offset = raft::warp_size() >> 1; offset >= 1; offset >>= 1) {
    float other_idx  = __shfl_down_sync(raft::warp_full_mask(), idx[0], offset);
    float other_dist = __shfl_down_sync(raft::warp_full_mask(), dist[0], offset);
    if (other_dist < dist[0]) {
      dist[0] = other_dist;
      idx[0]  = other_idx;
    }
  }

  ResultItem<Index_t> result;
  result.dist()         = __shfl_sync(raft::warp_full_mask(), dist[0], 0);
  result.id_with_flag() = neighbs[__shfl_sync(raft::warp_full_mask(), idx[0], 0)];
  return result;
}

// Executed by exactly one warp (execute_warp_id): bitonic-sorts list_a, then
// binary-searches each element of list_b in it; elements of list_b not found
// are appended after list_a, counting them in unique_counter.
template <typename T>
__device__ __forceinline__ void remove_duplicates(
  T* list_a, int list_a_size, T* list_b, int list_b_size, int& unique_counter, int execute_warp_id)
{
  static_assert(raft::warp_size() == 32);
  if (!(threadIdx.x >= execute_warp_id * raft::warp_size() &&
        threadIdx.x < execute_warp_id * raft::warp_size() + raft::warp_size())) {
    return;
  }
  int lane_id = threadIdx.x % raft::warp_size();
  T elem      = std::numeric_limits<T>::max();
  if (lane_id < list_a_size) { elem = list_a[lane_id]; }
  warp_bitonic_sort(&elem, lane_id);

  if (elem != std::numeric_limits<T>::max()) { list_a[lane_id] = elem; }

  T elem_b = std::numeric_limits<T>::max();

  if (lane_id < list_b_size) { elem_b = list_b[lane_id]; }
  __syncwarp();

  // Per-lane binary search of elem_b within the sorted list_a.
  int idx_l    = 0;
  int idx_r    = list_a_size;
  bool existed = false;
  while (idx_l < idx_r) {
    int idx  = (idx_l + idx_r) / 2;
    int elem = list_a[idx];
    if (elem == elem_b) {
      existed = true;
      break;
    }
    if (elem_b > elem) {
      idx_l = idx + 1;
    } else {
      idx_r = idx;
    }
  }
  if (!existed && elem_b != std::numeric_limits<T>::max()) {
    int idx                  = atomicAdd(&unique_counter, 1);
    list_a[list_a_size + idx] = elem_b;
  }
}

// launch_bounds here denote BLOCK_SIZE = 512 and MIN_BLOCKS_PER_SM = 4
// Per
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications,
// MAX_RESIDENT_THREAD_PER_SM = BLOCK_SIZE * BLOCKS_PER_SM = 2048
// For architectures 750 and 860, the values for MAX_RESIDENT_THREAD_PER_SM
// is 1024 and 1536 respectively, which means the bounds don't work anymore
//
// Core NN-descent local-join kernel (one block per node). Gathers the node's
// sampled "new" and "old" neighbor lists (forward + reverse), deduplicates
// them, computes pairwise new-new and new-old inner products with tensor
// cores (wmma), converts them to distances (negated dot product, or full L2
// via precomputed norms), and pushes each candidate's best match into the
// global k-NN lists. Requires compute capability >= 7.0 (guarded below).
template <typename Index_t, typename ID_t = InternalID_t<Index_t>>
RAFT_KERNEL
#ifdef __CUDA_ARCH__
#if (__CUDA_ARCH__) == 750 || ((__CUDA_ARCH__) == 860)
__launch_bounds__(BLOCK_SIZE)
#else
__launch_bounds__(BLOCK_SIZE, 4)
#endif
#endif
  local_join_kernel(const Index_t* graph_new,
                    const Index_t* rev_graph_new,
                    const int2* sizes_new,
                    const Index_t* graph_old,
                    const Index_t* rev_graph_old,
                    const int2* sizes_old,
                    const int width,
                    const __half* data,
                    const int data_dim,
                    ID_t* graph,
                    DistData_t* dists,
                    int graph_width,
                    int* locks,
                    DistData_t* l2_norms)
{
#if (__CUDA_ARCH__ >= 700)
  using namespace nvcuda;
  __shared__ int s_list[MAX_NUM_BI_SAMPLES * 2];

  constexpr int APAD = 8;
  constexpr int BPAD = 8;
  __shared__ __half s_nv[MAX_NUM_BI_SAMPLES][TILE_COL_WIDTH + APAD];  // New vectors
  __shared__ __half s_ov[MAX_NUM_BI_SAMPLES][TILE_COL_WIDTH + BPAD];  // Old vectors
  static_assert(sizeof(float) * MAX_NUM_BI_SAMPLES * SKEWED_MAX_NUM_BI_SAMPLES <=
                sizeof(__half) * MAX_NUM_BI_SAMPLES * (TILE_COL_WIDTH + BPAD));
  // s_distances: MAX_NUM_BI_SAMPLES x SKEWED_MAX_NUM_BI_SAMPLES, reuse the space of s_ov
  float* s_distances    = (float*)&s_ov[0][0];
  int* s_unique_counter = (int*)&s_ov[0][0];

  if (threadIdx.x == 0) {
    s_unique_counter[0] = 0;
    s_unique_counter[1] = 0;
  }

  Index_t* new_neighbors = s_list;
  Index_t* old_neighbors = s_list + MAX_NUM_BI_SAMPLES;

  size_t list_id        = blockIdx.x;
  int2 list_new_size2   = sizes_new[list_id];
  int list_new_size     = list_new_size2.x + list_new_size2.y;
  int2 list_old_size2   = sizes_old[list_id];
  int list_old_size     = list_old_size2.x + list_old_size2.y;

  if (!list_new_size) return;
  int tx = threadIdx.x;

  // Stage forward then reverse neighbor ids into shared memory.
  if (tx < list_new_size2.x) {
    new_neighbors[tx] = graph_new[list_id * width + tx];
  } else if (tx >= list_new_size2.x && tx < list_new_size) {
    new_neighbors[tx] = rev_graph_new[list_id * width + tx - list_new_size2.x];
  }

  if (tx < list_old_size2.x) {
    old_neighbors[tx] = graph_old[list_id * width + tx];
  } else if (tx >= list_old_size2.x && tx < list_old_size) {
    old_neighbors[tx] = rev_graph_old[list_id * width + tx - list_old_size2.x];
  }

  __syncthreads();

  // Drop reverse-list entries that duplicate forward-list entries
  // (warp 0 handles the new list, warp 1 the old list).
  remove_duplicates(new_neighbors,
                    list_new_size2.x,
                    new_neighbors + list_new_size2.x,
                    list_new_size2.y,
                    s_unique_counter[0],
                    0);

  remove_duplicates(old_neighbors,
                    list_old_size2.x,
                    old_neighbors + list_old_size2.x,
                    list_old_size2.y,
                    s_unique_counter[1],
                    1);
  __syncthreads();
  list_new_size = list_new_size2.x + s_unique_counter[0];
  list_old_size = list_old_size2.x + s_unique_counter[1];

  int warp_id             = threadIdx.x / raft::warp_size();
  int lane_id             = threadIdx.x % raft::warp_size();
  constexpr int num_warps = BLOCK_SIZE / raft::warp_size();

  // 4x4 grid of warps, each computing one 16x16 wmma tile of the 64x64 result.
  int warp_id_y = warp_id / 4;
  int warp_id_x = warp_id % 4;

  wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
  wmma::fill_fragment(c_frag, 0.0);

  // New x New inner products, accumulated over TILE_COL_WIDTH-wide slices.
  for (int step = 0; step < ceildiv(data_dim, TILE_COL_WIDTH); step++) {
    int num_load_elems = (step == ceildiv(data_dim, TILE_COL_WIDTH) - 1)
                           ? data_dim - step * TILE_COL_WIDTH
                           : TILE_COL_WIDTH;
#pragma unroll
    for (int i = 0; i < MAX_NUM_BI_SAMPLES / num_warps; i++) {
      int idx = i * num_warps + warp_id;
      if (idx < list_new_size) {
        size_t neighbor_id = new_neighbors[idx];
        size_t idx_in_data = neighbor_id * data_dim;
        load_vec(s_nv[idx],
                 data + idx_in_data + step * TILE_COL_WIDTH,
                 num_load_elems,
                 TILE_COL_WIDTH,
                 lane_id);
      }
    }
    __syncthreads();

    for (int i = 0; i < TILE_COL_WIDTH / WMMA_K; i++) {
      wmma::load_matrix_sync(a_frag, s_nv[warp_id_y * WMMA_M] + i * WMMA_K, TILE_COL_WIDTH + APAD);
      wmma::load_matrix_sync(b_frag, s_nv[warp_id_x * WMMA_N] + i * WMMA_K, TILE_COL_WIDTH + BPAD);
      wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
      __syncthreads();
    }
  }

  wmma::store_matrix_sync(
    s_distances + warp_id_y * WMMA_M * SKEWED_MAX_NUM_BI_SAMPLES + warp_id_x * WMMA_N,
    c_frag,
    SKEWED_MAX_NUM_BI_SAMPLES,
    wmma::mem_row_major);
  __syncthreads();

  // Convert dot products to distances; out-of-range tiles get +inf.
  for (int i = threadIdx.x; i < MAX_NUM_BI_SAMPLES * SKEWED_MAX_NUM_BI_SAMPLES;
       i += blockDim.x) {
    if (i % SKEWED_MAX_NUM_BI_SAMPLES < list_new_size &&
        i / SKEWED_MAX_NUM_BI_SAMPLES < list_new_size) {
      if (l2_norms == nullptr) {
        // Normalized data: -dot is a monotone proxy for the distance.
        s_distances[i] = -s_distances[i];
      } else {
        // Full squared L2: ||a||^2 + ||b||^2 - 2 a.b
        s_distances[i] = l2_norms[new_neighbors[i % SKEWED_MAX_NUM_BI_SAMPLES]] +
                         l2_norms[new_neighbors[i / SKEWED_MAX_NUM_BI_SAMPLES]] -
                         2.0 * s_distances[i];
      }
    } else {
      s_distances[i] = std::numeric_limits<float>::max();
    }
  }
  __syncthreads();

  // One warp per candidate row: find its best match and push it globally.
  for (int step = 0; step < ceildiv(list_new_size, num_warps); step++) {
    int idx_in_list = step * num_warps + tx / raft::warp_size();
    if (idx_in_list >= list_new_size) continue;
    auto min_elem = get_min_item(s_list[idx_in_list], idx_in_list, new_neighbors, s_distances);
    if (min_elem.id() < gridDim.x) {
      insert_to_global_graph(min_elem, s_list[idx_in_list], graph, dists, graph_width, locks);
    }
  }

  if (!list_old_size) return;

  __syncthreads();

  // New x Old inner products (same tiling; s_nv is reloaded only when the
  // data did not fit in a single column tile).
  wmma::fill_fragment(c_frag, 0.0);
  for (int step = 0; step < ceildiv(data_dim, TILE_COL_WIDTH); step++) {
    int num_load_elems = (step == ceildiv(data_dim, TILE_COL_WIDTH) - 1)
                           ? data_dim - step * TILE_COL_WIDTH
                           : TILE_COL_WIDTH;
    if (TILE_COL_WIDTH < data_dim) {
#pragma unroll
      for (int i = 0; i < MAX_NUM_BI_SAMPLES / num_warps; i++) {
        int idx = i * num_warps + warp_id;
        if (idx < list_new_size) {
          size_t neighbor_id = new_neighbors[idx];
          size_t idx_in_data = neighbor_id * data_dim;
          load_vec(s_nv[idx],
                   data + idx_in_data + step * TILE_COL_WIDTH,
                   num_load_elems,
                   TILE_COL_WIDTH,
                   lane_id);
        }
      }
    }
#pragma unroll
    for (int i = 0; i < MAX_NUM_BI_SAMPLES / num_warps; i++) {
      int idx = i * num_warps + warp_id;
      if (idx < list_old_size) {
        size_t neighbor_id = old_neighbors[idx];
        size_t idx_in_data = neighbor_id * data_dim;
        load_vec(s_ov[idx],
                 data + idx_in_data + step * TILE_COL_WIDTH,
                 num_load_elems,
                 TILE_COL_WIDTH,
                 lane_id);
      }
    }
    __syncthreads();

    for (int i = 0; i < TILE_COL_WIDTH / WMMA_K; i++) {
      wmma::load_matrix_sync(a_frag, s_nv[warp_id_y * WMMA_M] + i * WMMA_K, TILE_COL_WIDTH + APAD);
      wmma::load_matrix_sync(b_frag, s_ov[warp_id_x * WMMA_N] + i * WMMA_K, TILE_COL_WIDTH + BPAD);
      wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
      __syncthreads();
    }
  }

  wmma::store_matrix_sync(
    s_distances + warp_id_y * WMMA_M * SKEWED_MAX_NUM_BI_SAMPLES + warp_id_x * WMMA_N,
    c_frag,
    SKEWED_MAX_NUM_BI_SAMPLES,
    wmma::mem_row_major);
  __syncthreads();

  for (int i = threadIdx.x; i < MAX_NUM_BI_SAMPLES * SKEWED_MAX_NUM_BI_SAMPLES;
       i += blockDim.x) {
    if (i % SKEWED_MAX_NUM_BI_SAMPLES < list_old_size &&
        i / SKEWED_MAX_NUM_BI_SAMPLES < list_new_size) {
      if (l2_norms == nullptr) {
        s_distances[i] = -s_distances[i];
      } else {
        s_distances[i] = l2_norms[old_neighbors[i % SKEWED_MAX_NUM_BI_SAMPLES]] +
                         l2_norms[new_neighbors[i / SKEWED_MAX_NUM_BI_SAMPLES]] -
                         2.0 * s_distances[i];
      }
    } else {
      s_distances[i] = std::numeric_limits<float>::max();
    }
  }
  __syncthreads();

  // Rows 0..63 search old neighbors (by row); rows 64..127 search new
  // neighbors (by column) for each old candidate.
  for (int step = 0; step < ceildiv(MAX_NUM_BI_SAMPLES * 2, num_warps); step++) {
    int idx_in_list = step * num_warps + tx / raft::warp_size();
    if (idx_in_list >= list_new_size && idx_in_list < MAX_NUM_BI_SAMPLES) continue;
    if (idx_in_list >= MAX_NUM_BI_SAMPLES + list_old_size &&
        idx_in_list < MAX_NUM_BI_SAMPLES * 2)
      continue;
    ResultItem<Index_t> min_elem{std::numeric_limits<Index_t>::max(),
                                 std::numeric_limits<DistData_t>::max()};
    if (idx_in_list < MAX_NUM_BI_SAMPLES) {
      auto temp_min_item =
        get_min_item(s_list[idx_in_list], idx_in_list, old_neighbors, s_distances);
      if (temp_min_item.dist() < min_elem.dist()) { min_elem = temp_min_item; }
    } else {
      auto temp_min_item = get_min_item(
        s_list[idx_in_list], idx_in_list - MAX_NUM_BI_SAMPLES, new_neighbors, s_distances, false);
      if (temp_min_item.dist() < min_elem.dist()) { min_elem = temp_min_item; }
    }

    if (min_elem.id() < gridDim.x) {
      insert_to_global_graph(min_elem, s_list[idx_in_list], graph, dists, graph_width, locks);
    }
  }
#endif
}

namespace {
// Host-side sorted insertion of (neighb_id, dist) into a width-long segment.
// Returns the insert position, or `width` when the item was rejected
// (too far, or already present). Shifts the tail right by one on insert.
template <typename Index_t>
int insert_to_ordered_list(InternalID_t<Index_t>* list,
                           DistData_t* dist_list,
                           const int width,
                           const InternalID_t<Index_t> neighb_id,
                           const DistData_t dist)
{
  if (dist > dist_list[width - 1]) { return width; }

  int idx_insert      = width;
  bool position_found = false;
  for (int i = 0; i < width; i++) {
    if (list[i].id() == neighb_id.id()) { return width; }
    if (!position_found && dist_list[i] > dist) {
      idx_insert     = i;
      position_found = true;
    }
  }
  if (idx_insert == width) return idx_insert;

  memmove(list + idx_insert + 1, list + idx_insert, sizeof(*list) * (width - idx_insert - 1));
  memmove(dist_list + idx_insert + 1,
          dist_list + idx_insert,
          sizeof(*dist_list) * (width - idx_insert - 1));

  list[idx_insert]      = neighb_id;
  dist_list[idx_insert] = dist;
  return idx_insert;
};

}  // namespace

// Allocates host-side sampling state; the adjacency itself (h_graph) is
// provided later by the caller to save CPU memory.
template <typename Index_t>
GnndGraph<Index_t>::GnndGraph(const size_t nrow,
                              const size_t node_degree,
                              const size_t internal_node_degree,
                              const size_t num_samples)
  : nrow(nrow),
    node_degree(node_degree),
    num_samples(num_samples),
    bloom_filter(nrow, internal_node_degree / segment_size, 3),
    h_dists{raft::make_host_matrix<DistData_t, size_t, raft::row_major>(nrow, node_degree)},
    h_graph_new(nrow *
num_samples),
    h_list_sizes_new(nrow),
    h_graph_old(nrow * num_samples),
    h_list_sizes_old{nrow}
{
  // node_degree must be a multiple of segment_size;
  assert(node_degree % segment_size == 0);
  assert(internal_node_degree % segment_size == 0);

  num_segments = node_degree / segment_size;
  // To save the CPU memory, graph should be allocated by external function
  h_graph = nullptr;
}

// This is the only operation on the CPU that cannot be overlapped.
// So it should be as fast as possible.
// Fills each node's "new" sample list with up to num_samples neighbors not yet
// seen by the Bloom filter, marking sampled ids old in-place.
template <typename Index_t>
void GnndGraph<Index_t>::sample_graph_new(InternalID_t<Index_t>* new_neighbors, const size_t width)
{
#pragma omp parallel for
  for (size_t i = 0; i < nrow; i++) {
    auto list_new         = h_graph_new.data() + i * num_samples;
    h_list_sizes_new[i].x = 0;
    h_list_sizes_new[i].y = 0;

    for (size_t j = 0; j < width; j++) {
      auto new_neighb_id = new_neighbors[i * width + j].id();
      if ((size_t)new_neighb_id >= nrow) break;
      if (bloom_filter.check(i, new_neighb_id)) { continue; }
      bloom_filter.add(i, new_neighb_id);
      new_neighbors[i * width + j].mark_old();
      list_new[h_list_sizes_new[i].x++] = new_neighb_id;
      if (h_list_sizes_new[i].x == num_samples) break;
    }
  }
}

// Seeds the adjacency: per segment, each node gets segment_size pseudo-random
// neighbors whose id % num_segments matches the segment, all at max distance.
template <typename Index_t>
void GnndGraph<Index_t>::init_random_graph()
{
  for (size_t seg_idx = 0; seg_idx < static_cast<size_t>(num_segments); seg_idx++) {
    // random sequence (range: 0~nrow)
    // segment_x stores neighbors which id % num_segments == x
    std::vector<Index_t> rand_seq(nrow / num_segments);
    std::iota(rand_seq.begin(), rand_seq.end(), 0);
    // Seed by segment index so the init is deterministic per segment.
    auto gen = std::default_random_engine{seg_idx};
    std::shuffle(rand_seq.begin(), rand_seq.end(), gen);

#pragma omp parallel for
    for (size_t i = 0; i < nrow; i++) {
      size_t base_idx      = i * node_degree + seg_idx * segment_size;
      auto h_neighbor_list = h_graph + base_idx;
      auto h_dist_list     = h_dists.data_handle() + base_idx;
      for (size_t j = 0; j < static_cast<size_t>(segment_size); j++) {
        size_t idx = base_idx + j;
        Index_t id = rand_seq[idx % rand_seq.size()] * num_segments + seg_idx;
        if ((size_t)id == i) {
          // Avoid self-edges: pick a shifted entry instead.
          id = rand_seq[(idx + segment_size) % rand_seq.size()] * num_segments + seg_idx;
        }
        h_neighbor_list[j].id_with_flag() = id;
        h_dist_list[j]                    = std::numeric_limits<DistData_t>::max();
      }
    }
  }
}

// Splits each node's list into up to num_samples "old" and (optionally) "new"
// entries, marking sampled new entries old; walks segments round-robin so the
// samples are spread across segments.
template <typename Index_t>
void GnndGraph<Index_t>::sample_graph(bool sample_new)
{
#pragma omp parallel for
  for (size_t i = 0; i < nrow; i++) {
    h_list_sizes_old[i].x = 0;
    h_list_sizes_old[i].y = 0;
    h_list_sizes_new[i].x = 0;
    h_list_sizes_new[i].y = 0;

    auto list     = h_graph + i * node_degree;
    auto list_old = h_graph_old.data() + i * num_samples;
    auto list_new = h_graph_new.data() + i * num_samples;
    for (int j = 0; j < segment_size; j++) {
      for (int k = 0; k < num_segments; k++) {
        auto neighbor = list[k * segment_size + j];
        if ((size_t)neighbor.id() >= nrow) continue;
        if (!neighbor.is_new()) {
          if (h_list_sizes_old[i].x < num_samples) {
            list_old[h_list_sizes_old[i].x++] = neighbor.id();
          }
        } else if (sample_new) {
          if (h_list_sizes_new[i].x < num_samples) {
            list[k * segment_size + j].mark_old();
            list_new[h_list_sizes_new[i].x++] = neighbor.id();
          }
        }
        if (h_list_sizes_old[i].x == num_samples && h_list_sizes_new[i].x == num_samples) {
          break;
        }
      }
      if (h_list_sizes_old[i].x == num_samples && h_list_sizes_new[i].x == num_samples) {
        break;
      }
    }
  }
}

// Merges the device-computed candidate lists back into the segmented host
// lists; update_counter is sampled (every counter_interval-th node) as a
// cheap convergence signal.
template <typename Index_t>
void GnndGraph<Index_t>::update_graph(const InternalID_t<Index_t>* new_neighbors,
                                      const DistData_t* new_dists,
                                      const size_t width,
                                      std::atomic<int64_t>& update_counter)
{
#pragma omp parallel for
  for (size_t i = 0; i < nrow; i++) {
    for (size_t j = 0; j < width; j++) {
      auto new_neighb_id = new_neighbors[i * width + j];
      auto new_dist      = new_dists[i * width + j];
      if (new_dist == std::numeric_limits<DistData_t>::max()) break;
      if ((size_t)new_neighb_id.id() == i) continue;
      // Candidate goes into the segment determined by its id.
      int seg_idx    = new_neighb_id.id() % num_segments;
      auto list      = h_graph + i * node_degree + seg_idx * segment_size;
      auto dist_list = h_dists.data_handle() + i * node_degree + seg_idx * segment_size;
      int insert_pos =
        insert_to_ordered_list(list, dist_list, segment_size, new_neighb_id, new_dist);
      if (i % counter_interval == 0 && insert_pos != segment_size) { update_counter++; }
    }
  }
}

// Final pass: merge the per-segment sorted runs into one globally sorted list
// per node (by distance).
template <typename Index_t>
void GnndGraph<Index_t>::sort_lists()
{
#pragma omp parallel for
  for (size_t i = 0; i < nrow; i++) {
    std::vector<std::pair<DistData_t, Index_t>> new_list;
    for (size_t j = 0; j < node_degree; j++) {
      new_list.emplace_back(h_dists.data_handle()[i * node_degree + j],
                            h_graph[i * node_degree + j].id());
    }
    std::sort(new_list.begin(), new_list.end());
    for (size_t j = 0; j < node_degree; j++) {
      h_graph[i * node_degree + j].id_with_flag() = new_list[j].second;
      h_dists.data_handle()[i * node_degree + j]  = new_list[j].first;
    }
  }
}

// Reset the sampling filter between builds.
template <typename Index_t>
void GnndGraph<Index_t>::clear()
{
  bloom_filter.clear();
}

template <typename Index_t>
GnndGraph<Index_t>::~GnndGraph()
{
  // h_graph is owned externally and must have been detached by now.
  assert(h_graph == nullptr);
}

// Allocates every device/pinned buffer sized for max_dataset_size and
// initializes the device-side lists to "empty" sentinels (max distance,
// max id) and the per-segment locks to unlocked (0).
template <typename Data_t, typename Index_t>
GNND<Data_t, Index_t>::GNND(raft::resources const& res, const BuildConfig& build_config)
  : res(res),
    build_config_(build_config),
    graph_(build_config.max_dataset_size,
           align32::roundUp(build_config.node_degree),
           align32::roundUp(build_config.internal_node_degree ? build_config.internal_node_degree
                                                              : build_config.node_degree),
           NUM_SAMPLES),
    nrow_(build_config.max_dataset_size),
    ndim_(build_config.dataset_dim),
    d_data_{raft::make_device_matrix<__half, size_t, raft::row_major>(
      res, nrow_, build_config.dataset_dim)},
    l2_norms_{raft::make_device_vector<DistData_t, size_t>(res, nrow_)},
    graph_buffer_{
      raft::make_device_matrix<ID_t, size_t, raft::row_major>(res, nrow_, DEGREE_ON_DEVICE)},
    dists_buffer_{
      raft::make_device_matrix<DistData_t, size_t, raft::row_major>(res, nrow_, DEGREE_ON_DEVICE)},
    graph_host_buffer_(nrow_ * DEGREE_ON_DEVICE),
    dists_host_buffer_(nrow_ * DEGREE_ON_DEVICE),
    d_locks_{raft::make_device_vector<int, size_t>(res, nrow_)},
    h_rev_graph_new_(nrow_ * NUM_SAMPLES),
    h_graph_old_(nrow_ * NUM_SAMPLES),
    h_rev_graph_old_(nrow_ * NUM_SAMPLES),
    d_list_sizes_new_{raft::make_device_vector<int2, size_t>(res, nrow_)},
    d_list_sizes_old_{raft::make_device_vector<int2, size_t>(res, nrow_)}
{
  static_assert(NUM_SAMPLES <= 32);

  thrust::fill(thrust::device,
               dists_buffer_.data_handle(),
               dists_buffer_.data_handle() + dists_buffer_.size(),
               std::numeric_limits<float>::max());
  thrust::fill(thrust::device,
               reinterpret_cast<Index_t*>(graph_buffer_.data_handle()),
               reinterpret_cast<Index_t*>(graph_buffer_.data_handle()) + graph_buffer_.size(),
               std::numeric_limits<Index_t>::max());
  thrust::fill(thrust::device, d_locks_.data_handle(), d_locks_.data_handle() + d_locks_.size(), 0);
};

// Builds reverse-edge lists on device, then copies them into pinned host
// memory for the next sampling round.
// NOTE(review): the kernel runs on `stream` while the copy uses the handle's
// stream — confirm the two are the same or ordered at the call sites.
template <typename Data_t, typename Index_t>
void GNND<Data_t, Index_t>::add_reverse_edges(Index_t* graph_ptr,
                                              Index_t* h_rev_graph_ptr,
                                              Index_t* d_rev_graph_ptr,
                                              int2* list_sizes,
                                              cudaStream_t stream)
{
  add_rev_edges_kernel<<<nrow_, raft::warp_size(), 0, stream>>>(
    graph_ptr, d_rev_graph_ptr, NUM_SAMPLES, list_sizes);
  raft::copy(
    h_rev_graph_ptr, d_rev_graph_ptr, nrow_ * NUM_SAMPLES, raft::resource::get_cuda_stream(res));
}

// (Definition continues past this chunk.)
template <typename Data_t, typename Index_t>
void GNND<Data_t, Index_t>::local_join(cudaStream_t stream)
{
thrust::fill(thrust::device.on(stream), dists_buffer_.data_handle(), dists_buffer_.data_handle() + dists_buffer_.size(), std::numeric_limits<float>::max()); local_join_kernel<<<nrow_, BLOCK_SIZE, 0, stream>>>( thrust::raw_pointer_cast(graph_.h_graph_new.data()), thrust::raw_pointer_cast(h_rev_graph_new_.data()), d_list_sizes_new_.data_handle(), thrust::raw_pointer_cast(h_graph_old_.data()), thrust::raw_pointer_cast(h_rev_graph_old_.data()), d_list_sizes_old_.data_handle(), NUM_SAMPLES, d_data_.data_handle(), ndim_, graph_buffer_.data_handle(), dists_buffer_.data_handle(), DEGREE_ON_DEVICE, d_locks_.data_handle(), l2_norms_.data_handle()); } template <typename Data_t, typename Index_t> void GNND<Data_t, Index_t>::build(Data_t* data, const Index_t nrow, Index_t* output_graph) { using input_t = typename std::remove_const<Data_t>::type; cudaStream_t stream = raft::resource::get_cuda_stream(res); nrow_ = nrow; graph_.h_graph = (InternalID_t<Index_t>*)output_graph; cudaPointerAttributes data_ptr_attr; RAFT_CUDA_TRY(cudaPointerGetAttributes(&data_ptr_attr, data)); size_t batch_size = (data_ptr_attr.devicePointer == nullptr) ? 
100000 : nrow_; raft::spatial::knn::detail::utils::batch_load_iterator vec_batches{ data, static_cast<size_t>(nrow_), build_config_.dataset_dim, batch_size, stream}; for (auto const& batch : vec_batches) { preprocess_data_kernel<<< batch.size(), raft::warp_size(), sizeof(Data_t) * ceildiv(build_config_.dataset_dim, static_cast<size_t>(raft::warp_size())) * raft::warp_size(), stream>>>(batch.data(), d_data_.data_handle(), build_config_.dataset_dim, l2_norms_.data_handle(), batch.offset()); } thrust::fill(thrust::device.on(stream), (Index_t*)graph_buffer_.data_handle(), (Index_t*)graph_buffer_.data_handle() + graph_buffer_.size(), std::numeric_limits<Index_t>::max()); graph_.clear(); graph_.init_random_graph(); graph_.sample_graph(true); auto update_and_sample = [&](bool update_graph) { if (update_graph) { update_counter_ = 0; graph_.update_graph(thrust::raw_pointer_cast(graph_host_buffer_.data()), thrust::raw_pointer_cast(dists_host_buffer_.data()), DEGREE_ON_DEVICE, update_counter_); if (update_counter_ < build_config_.termination_threshold * nrow_ * build_config_.dataset_dim / counter_interval) { update_counter_ = -1; } } graph_.sample_graph(false); }; for (size_t it = 0; it < build_config_.max_iterations; it++) { raft::copy(d_list_sizes_new_.data_handle(), thrust::raw_pointer_cast(graph_.h_list_sizes_new.data()), nrow_, raft::resource::get_cuda_stream(res)); raft::copy(thrust::raw_pointer_cast(h_graph_old_.data()), thrust::raw_pointer_cast(graph_.h_graph_old.data()), nrow_ * NUM_SAMPLES, raft::resource::get_cuda_stream(res)); raft::copy(d_list_sizes_old_.data_handle(), thrust::raw_pointer_cast(graph_.h_list_sizes_old.data()), nrow_, raft::resource::get_cuda_stream(res)); raft::resource::sync_stream(res); std::thread update_and_sample_thread(update_and_sample, it); RAFT_LOG_DEBUG("# GNND iteraton: %lu / %lu", it + 1, build_config_.max_iterations); // Reuse dists_buffer_ to save GPU memory. 
graph_buffer_ cannot be reused, because it // contains some information for local_join. static_assert(DEGREE_ON_DEVICE * sizeof(*(dists_buffer_.data_handle())) >= NUM_SAMPLES * sizeof(*(graph_buffer_.data_handle()))); add_reverse_edges(thrust::raw_pointer_cast(graph_.h_graph_new.data()), thrust::raw_pointer_cast(h_rev_graph_new_.data()), (Index_t*)dists_buffer_.data_handle(), d_list_sizes_new_.data_handle(), stream); add_reverse_edges(thrust::raw_pointer_cast(h_graph_old_.data()), thrust::raw_pointer_cast(h_rev_graph_old_.data()), (Index_t*)dists_buffer_.data_handle(), d_list_sizes_old_.data_handle(), stream); // Tensor operations from `mma.h` are guarded with archicteture // __CUDA_ARCH__ >= 700. Since RAFT supports compilation for ARCH 600, // we need to ensure that `local_join_kernel` (which uses tensor) operations // is not only not compiled, but also a runtime error is presented to the user auto kernel = preprocess_data_kernel<input_t>; void* kernel_ptr = reinterpret_cast<void*>(kernel); auto runtime_arch = raft::util::arch::kernel_virtual_arch(kernel_ptr); auto wmma_range = raft::util::arch::SM_range(raft::util::arch::SM_70(), raft::util::arch::SM_future()); if (wmma_range.contains(runtime_arch)) { local_join(stream); } else { THROW("NN_DESCENT cannot be run for __CUDA_ARCH__ < 700"); } update_and_sample_thread.join(); if (update_counter_ == -1) { break; } raft::copy(thrust::raw_pointer_cast(graph_host_buffer_.data()), graph_buffer_.data_handle(), nrow_ * DEGREE_ON_DEVICE, raft::resource::get_cuda_stream(res)); raft::resource::sync_stream(res); raft::copy(thrust::raw_pointer_cast(dists_host_buffer_.data()), dists_buffer_.data_handle(), nrow_ * DEGREE_ON_DEVICE, raft::resource::get_cuda_stream(res)); graph_.sample_graph_new(thrust::raw_pointer_cast(graph_host_buffer_.data()), DEGREE_ON_DEVICE); } graph_.update_graph(thrust::raw_pointer_cast(graph_host_buffer_.data()), thrust::raw_pointer_cast(dists_host_buffer_.data()), DEGREE_ON_DEVICE, update_counter_); 
raft::resource::sync_stream(res); graph_.sort_lists(); // Reuse graph_.h_dists as the buffer for shrink the lists in graph static_assert(sizeof(decltype(*(graph_.h_dists.data_handle()))) >= sizeof(Index_t)); Index_t* graph_shrink_buffer = (Index_t*)graph_.h_dists.data_handle(); #pragma omp parallel for for (size_t i = 0; i < (size_t)nrow_; i++) { for (size_t j = 0; j < build_config_.node_degree; j++) { size_t idx = i * graph_.node_degree + j; int id = graph_.h_graph[idx].id(); if (id < static_cast<int>(nrow_)) { graph_shrink_buffer[i * build_config_.node_degree + j] = id; } else { graph_shrink_buffer[i * build_config_.node_degree + j] = raft::neighbors::cagra::detail::device::xorshift64(idx) % nrow_; } } } graph_.h_graph = nullptr; #pragma omp parallel for for (size_t i = 0; i < (size_t)nrow_; i++) { for (size_t j = 0; j < build_config_.node_degree; j++) { output_graph[i * build_config_.node_degree + j] = graph_shrink_buffer[i * build_config_.node_degree + j]; } } } template <typename T, typename IdxT = uint32_t, typename Accessor = host_device_accessor<std::experimental::default_accessor<T>, memory_type::host>> void build(raft::resources const& res, const index_params& params, mdspan<const T, matrix_extent<int64_t>, row_major, Accessor> dataset, index<IdxT>& idx) { RAFT_EXPECTS(dataset.extent(0) < std::numeric_limits<int>::max() - 1, "The dataset size for GNND should be less than %d", std::numeric_limits<int>::max() - 1); size_t intermediate_degree = params.intermediate_graph_degree; size_t graph_degree = params.graph_degree; if (intermediate_degree >= static_cast<size_t>(dataset.extent(0))) { RAFT_LOG_WARN( "Intermediate graph degree cannot be larger than dataset size, reducing it to %lu", dataset.extent(0)); intermediate_degree = dataset.extent(0) - 1; } if (intermediate_degree < graph_degree) { RAFT_LOG_WARN( "Graph degree (%lu) cannot be larger than intermediate graph degree (%lu), reducing " "graph_degree.", graph_degree, intermediate_degree); graph_degree = 
intermediate_degree; } // The elements in each knn-list are partitioned into different buckets, and we need more buckets // to mitigate bucket collisions. `intermediate_degree` is OK to larger than // extended_graph_degree. size_t extended_graph_degree = align32::roundUp(static_cast<size_t>(graph_degree * (graph_degree <= 32 ? 1.0 : 1.3))); size_t extended_intermediate_degree = align32::roundUp( static_cast<size_t>(intermediate_degree * (intermediate_degree <= 32 ? 1.0 : 1.3))); auto int_graph = raft::make_host_matrix<int, int64_t, row_major>( dataset.extent(0), static_cast<int64_t>(extended_graph_degree)); BuildConfig build_config{.max_dataset_size = static_cast<size_t>(dataset.extent(0)), .dataset_dim = static_cast<size_t>(dataset.extent(1)), .node_degree = extended_graph_degree, .internal_node_degree = extended_intermediate_degree, .max_iterations = params.max_iterations, .termination_threshold = params.termination_threshold}; GNND<const T, int> nnd(res, build_config); nnd.build(dataset.data_handle(), dataset.extent(0), int_graph.data_handle()); #pragma omp parallel for for (size_t i = 0; i < static_cast<size_t>(dataset.extent(0)); i++) { for (size_t j = 0; j < graph_degree; j++) { auto graph = idx.graph().data_handle(); graph[i * graph_degree + j] = int_graph.data_handle()[i * extended_graph_degree + j]; } } } template <typename T, typename IdxT = uint32_t, typename Accessor = host_device_accessor<std::experimental::default_accessor<T>, memory_type::host>> index<IdxT> build(raft::resources const& res, const index_params& params, mdspan<const T, matrix_extent<int64_t>, row_major, Accessor> dataset) { size_t intermediate_degree = params.intermediate_graph_degree; size_t graph_degree = params.graph_degree; if (intermediate_degree < graph_degree) { RAFT_LOG_WARN( "Graph degree (%lu) cannot be larger than intermediate graph degree (%lu), reducing " "graph_degree.", graph_degree, intermediate_degree); graph_degree = intermediate_degree; } index<IdxT> idx{res, 
dataset.extent(0), static_cast<int64_t>(graph_degree)}; build(res, params, dataset, idx); return idx; } } // namespace raft::neighbors::experimental::nn_descent::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/knn_brute_force.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/device_uvector.hpp>

#include <cstdint>
#include <iostream>
#include <raft/core/resources.hpp>
#include <raft/distance/detail/distance_ops/l2_exp.cuh>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/map.cuh>
#include <raft/linalg/transpose.cuh>
#include <raft/matrix/init.cuh>
#include <raft/matrix/select_k.cuh>
#include <raft/neighbors/brute_force_types.hpp>
#include <raft/neighbors/detail/faiss_select/DistanceUtils.h>
#include <raft/neighbors/detail/knn_merge_parts.cuh>
#include <raft/spatial/knn/detail/fused_l2_knn.cuh>
#include <raft/spatial/knn/detail/haversine_distance.cuh>
#include <raft/spatial/knn/detail/processing.cuh>
#include <set>
#include <thrust/iterator/transform_iterator.h>

namespace raft::neighbors::detail {
using namespace raft::spatial::knn::detail;
using namespace raft::spatial::knn;

/**
 * Calculates brute force knn, using a fixed memory budget
 * by tiling over both the rows and columns of pairwise_distances
 *
 * Expanded L2 / cosine metrics are computed as an inner product plus row/col
 * norms (computed once up front, or taken from the precomputed_* arguments),
 * which avoids re-deriving norms per tile.
 *
 * @param[in] handle raft handle
 * @param[in] search query vectors, size (m, d)
 * @param[in] index index vectors, size (n, d)
 * @param[in] m number of query rows
 * @param[in] n number of index rows
 * @param[in] d dimensionality
 * @param[in] k number of neighbors to return
 * @param[out] distances output distances, size (m, k)
 * @param[out] indices output indices, size (m, k)
 * @param[in] metric distance metric
 * @param[in] metric_arg metric argument (p for Lp norms)
 * @param[in] max_row_tile_size optional cap on the row tile (for unit tests)
 * @param[in] max_col_tile_size optional cap on the column tile (for unit tests)
 * @param[in] distance_epilogue functor applied to each distance value
 * @param[in] precomputed_index_norms optional precomputed index row norms
 * @param[in] precomputed_search_norms optional precomputed query row norms
 */
template <typename ElementType      = float,
          typename IndexType        = int64_t,
          typename DistanceEpilogue = raft::identity_op>
void tiled_brute_force_knn(const raft::resources& handle,
                           const ElementType* search,  // size (m ,d)
                           const ElementType* index,   // size (n ,d)
                           size_t m,
                           size_t n,
                           size_t d,
                           size_t k,
                           ElementType* distances,  // size (m, k)
                           IndexType* indices,      // size (m, k)
                           raft::distance::DistanceType metric,
                           float metric_arg                            = 2.0,
                           size_t max_row_tile_size                    = 0,
                           size_t max_col_tile_size                    = 0,
                           DistanceEpilogue distance_epilogue          = raft::identity_op(),
                           const ElementType* precomputed_index_norms  = nullptr,
                           const ElementType* precomputed_search_norms = nullptr)
{
  // Figure out the number of rows/cols to tile for
  size_t tile_rows   = 0;
  size_t tile_cols   = 0;
  auto stream        = resource::get_cuda_stream(handle);
  auto device_memory = resource::get_workspace_resource(handle);
  auto total_mem     = device_memory->get_mem_info(stream).second;
  faiss_select::chooseTileSize(m, n, d, sizeof(ElementType), total_mem, tile_rows, tile_cols);

  // for unittesting, its convenient to be able to put a max size on the tiles
  // so we can test the tiling logic without having to use huge inputs.
  if (max_row_tile_size && (tile_rows > max_row_tile_size)) { tile_rows = max_row_tile_size; }
  if (max_col_tile_size && (tile_cols > max_col_tile_size)) { tile_cols = max_col_tile_size; }

  // tile_cols must be at least k items
  tile_cols = std::max(tile_cols, k);

  // stores pairwise distances for the current tile
  rmm::device_uvector<ElementType> temp_distances(tile_rows * tile_cols, stream);

  // calculate norms for L2 expanded distances - this lets us avoid calculating
  // norms repeatedly per-tile, and just do once for the entire input
  auto pairwise_metric = metric;
  rmm::device_uvector<ElementType> search_norms(0, stream);
  rmm::device_uvector<ElementType> index_norms(0, stream);
  if (metric == raft::distance::DistanceType::L2Expanded ||
      metric == raft::distance::DistanceType::L2SqrtExpanded ||
      metric == raft::distance::DistanceType::CosineExpanded) {
    if (!precomputed_search_norms) { search_norms.resize(m, stream); }
    if (!precomputed_index_norms) { index_norms.resize(n, stream); }
    // cosine needs the l2norm, where as l2 distances needs the squared norm
    if (metric == raft::distance::DistanceType::CosineExpanded) {
      if (!precomputed_search_norms) {
        raft::linalg::rowNorm(search_norms.data(),
                              search,
                              d,
                              m,
                              raft::linalg::NormType::L2Norm,
                              true,
                              stream,
                              raft::sqrt_op{});
      }
      if (!precomputed_index_norms) {
        raft::linalg::rowNorm(index_norms.data(),
                              index,
                              d,
                              n,
                              raft::linalg::NormType::L2Norm,
                              true,
                              stream,
                              raft::sqrt_op{});
      }
    } else {
      if (!precomputed_search_norms) {
        raft::linalg::rowNorm(
          search_norms.data(), search, d, m, raft::linalg::NormType::L2Norm, true, stream);
      }
      if (!precomputed_index_norms) {
        raft::linalg::rowNorm(
          index_norms.data(), index, d, n, raft::linalg::NormType::L2Norm, true, stream);
      }
    }
    // expanded metrics are reconstructed from an inner product per tile
    pairwise_metric = raft::distance::DistanceType::InnerProduct;
  }

  // if we're tiling over columns, we need additional buffers for temporary output
  // distances/indices
  size_t num_col_tiles = raft::ceildiv(n, tile_cols);
  size_t temp_out_cols = k * num_col_tiles;

  // the final column tile could have less than 'k' items in it
  // in which case the number of columns here is too high in the temp output.
  // adjust if necessary
  auto last_col_tile_size = n % tile_cols;
  if (last_col_tile_size && (last_col_tile_size < k)) { temp_out_cols -= k - last_col_tile_size; }

  // if we have less than k items in the index, we should fill out the result
  // to indicate that we are missing items (and match behaviour in faiss)
  if (n < k) {
    raft::matrix::fill(handle,
                       raft::make_device_matrix_view(distances, m, k),
                       std::numeric_limits<ElementType>::lowest());

    if constexpr (std::is_signed_v<IndexType>) {
      raft::matrix::fill(handle, raft::make_device_matrix_view(indices, m, k), IndexType{-1});
    }
  }

  rmm::device_uvector<ElementType> temp_out_distances(tile_rows * temp_out_cols, stream);
  rmm::device_uvector<IndexType> temp_out_indices(tile_rows * temp_out_cols, stream);

  bool select_min = raft::distance::is_min_close(metric);

  for (size_t i = 0; i < m; i += tile_rows) {
    size_t current_query_size = std::min(tile_rows, m - i);

    for (size_t j = 0; j < n; j += tile_cols) {
      size_t current_centroid_size = std::min(tile_cols, n - j);
      size_t current_k             = std::min(current_centroid_size, k);

      // calculate the top-k elements for the current tile, by calculating the
      // full pairwise distance for the tile - and then selecting the top-k from that
      // note: we're using a int32 IndexType here on purpose in order to
      // use the pairwise_distance instantiations. Since the tile size will ensure
      // that the total memory is < 1GB per tile, this will not cause any issues
      distance::pairwise_distance<ElementType, int>(handle,
                                                    search + i * d,
                                                    index + j * d,
                                                    temp_distances.data(),
                                                    current_query_size,
                                                    current_centroid_size,
                                                    d,
                                                    pairwise_metric,
                                                    true,
                                                    metric_arg);
      if (metric == raft::distance::DistanceType::L2Expanded ||
          metric == raft::distance::DistanceType::L2SqrtExpanded) {
        // reconstruct (squared) L2 from the inner product and the row/col norms
        auto row_norms = precomputed_search_norms ? precomputed_search_norms : search_norms.data();
        auto col_norms = precomputed_index_norms ? precomputed_index_norms : index_norms.data();
        auto dist      = temp_distances.data();
        bool sqrt      = metric == raft::distance::DistanceType::L2SqrtExpanded;

        raft::linalg::map_offset(
          handle,
          raft::make_device_vector_view(dist, current_query_size * current_centroid_size),
          [=] __device__(IndexType idx) {
            IndexType row = i + (idx / current_centroid_size);
            IndexType col = j + (idx % current_centroid_size);

            raft::distance::detail::ops::l2_exp_cutlass_op<ElementType, ElementType> l2_op(sqrt);
            auto val = l2_op(row_norms[row], col_norms[col], dist[idx]);
            return distance_epilogue(val, row, col);
          });
      } else if (metric == raft::distance::DistanceType::CosineExpanded) {
        // cosine distance = 1 - <q, x> / (|q| * |x|)
        auto row_norms = precomputed_search_norms ? precomputed_search_norms : search_norms.data();
        auto col_norms = precomputed_index_norms ? precomputed_index_norms : index_norms.data();
        auto dist      = temp_distances.data();

        raft::linalg::map_offset(
          handle,
          raft::make_device_vector_view(dist, current_query_size * current_centroid_size),
          [=] __device__(IndexType idx) {
            IndexType row = i + (idx / current_centroid_size);
            IndexType col = j + (idx % current_centroid_size);
            auto val      = 1.0 - dist[idx] / (row_norms[row] * col_norms[col]);
            val           = distance_epilogue(val, row, col);
            return val;
          });
      } else {
        // if we're not l2 distance, and we have a distance epilogue - run it now
        if constexpr (!std::is_same_v<DistanceEpilogue, raft::identity_op>) {
          auto distances_ptr = temp_distances.data();
          raft::linalg::map_offset(
            handle,
            raft::make_device_vector_view(temp_distances.data(),
                                          current_query_size * current_centroid_size),
            [=] __device__(size_t idx) {
              IndexType row = i + (idx / current_centroid_size);
              IndexType col = j + (idx % current_centroid_size);
              return distance_epilogue(distances_ptr[idx], row, col);
            });
        }
      }

      matrix::select_k<ElementType, IndexType>(
        handle,
        raft::make_device_matrix_view<const ElementType, int64_t, row_major>(
          temp_distances.data(), current_query_size, current_centroid_size),
        std::nullopt,
        raft::make_device_matrix_view<ElementType, int64_t, row_major>(
          distances + i * k, current_query_size, current_k),
        raft::make_device_matrix_view<IndexType, int64_t, row_major>(
          indices + i * k, current_query_size, current_k),
        select_min,
        true);

      // if we're tiling over columns, we need to do a couple things to fix up
      // the output of select_k
      // 1. The column id's in the output are relative to the tile, so we need
      // to adjust the column ids by adding the column the tile starts at (j)
      // 2. select_k writes out output in a row-major format, which means we
      // can't just concat the output of all the tiles and do a select_k on the
      // concatenation.
      // Fix both of these problems in a single pass here
      if (tile_cols != n) {
        const ElementType* in_distances = distances + i * k;
        const IndexType* in_indices     = indices + i * k;
        ElementType* out_distances      = temp_out_distances.data();
        IndexType* out_indices          = temp_out_indices.data();

        auto count = thrust::make_counting_iterator<IndexType>(0);
        thrust::for_each(resource::get_thrust_policy(handle),
                         count,
                         count + current_query_size * current_k,
                         [=] __device__(IndexType i) {
                           IndexType row       = i / current_k, col = i % current_k;
                           IndexType out_index = row * temp_out_cols + j * k / tile_cols + col;

                           out_distances[out_index] = in_distances[i];
                           out_indices[out_index]   = in_indices[i] + j;
                         });
      }
    }

    if (tile_cols != n) {
      // select the actual top-k items here from the temporary output
      matrix::select_k<ElementType, IndexType>(
        handle,
        raft::make_device_matrix_view<const ElementType, int64_t, row_major>(
          temp_out_distances.data(), current_query_size, temp_out_cols),
        raft::make_device_matrix_view<const IndexType, int64_t, row_major>(
          temp_out_indices.data(), current_query_size, temp_out_cols),
        raft::make_device_matrix_view<ElementType, int64_t, row_major>(
          distances + i * k, current_query_size, k),
        raft::make_device_matrix_view<IndexType, int64_t, row_major>(
          indices + i * k, current_query_size, k),
        select_min,
        true);
    }
  }
}

/**
 * Search the kNN for the k-nearest neighbors of a set of query vectors over
 * a set of index partitions, merging per-partition results at the end.
 *
 * @param[in] handle the raft handle (its stream pool, if any, is used to
 *            query the index partitions in parallel)
 * @param[in] input vector of device memory array pointers to search
 * @param[in] sizes vector of row counts for each device array pointer in input
 * @param[in] D number of cols in input and search_items
 * @param[in] search_items set of vectors to query for neighbors
 * @param[in] n number of items in search_items
 * @param[out] res_I pointer to device memory for returning k nearest indices
 * @param[out] res_D pointer to device memory for returning k nearest distances
 * @param[in] k number of neighbors to query
 * @param[in] rowMajorIndex are the index arrays in row-major layout?
 * @param[in] rowMajorQuery are the query arrays in row-major layout?
 * @param[in] translations translation ids for indices when index rows represent
 *            non-contiguous partitions
 * @param[in] metric corresponds to the raft::distance::DistanceType enum
 *            (default is L2Expanded)
 * @param[in] metricArg metric argument to use. Corresponds to the p arg for lp norm
 * @param[in] distance_epilogue functor applied to each computed distance
 * @param[in] input_norms optional precomputed row norms, one array per partition
 * @param[in] search_norms optional precomputed query row norms
 */
template <typename IntType          = int,
          typename IdxType          = std::int64_t,
          typename value_t          = float,
          typename DistanceEpilogue = raft::identity_op>
void brute_force_knn_impl(
  raft::resources const& handle,
  std::vector<value_t*>& input,
  std::vector<IntType>& sizes,
  IntType D,
  value_t* search_items,
  IntType n,
  IdxType* res_I,
  value_t* res_D,
  IntType k,
  bool rowMajorIndex                  = true,
  bool rowMajorQuery                  = true,
  std::vector<IdxType>* translations  = nullptr,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded,
  float metricArg                     = 0,
  DistanceEpilogue distance_epilogue  = raft::identity_op(),
  std::vector<value_t*>* input_norms  = nullptr,
  const value_t* search_norms         = nullptr)
{
  auto userStream = resource::get_cuda_stream(handle);

  ASSERT(input.size() == sizes.size(), "input and sizes vectors should be the same size");

  // NOTE(review): id_ranges is managed with raw new/delete (freed at the end
  // of this function when translations == nullptr).
  std::vector<IdxType>* id_ranges;
  if (translations == nullptr) {
    // If we don't have explicit translations
    // for offsets of the indices, build them
    // from the local partitions
    id_ranges       = new std::vector<IdxType>();
    IdxType total_n = 0;
    for (size_t i = 0; i < input.size(); i++) {
      id_ranges->push_back(total_n);
      total_n += sizes[i];
    }
  } else {
    // otherwise, use the given translations
    id_ranges = translations;
  }

  int device;
  RAFT_CUDA_TRY(cudaGetDevice(&device));

  rmm::device_uvector<IdxType> trans(id_ranges->size(), userStream);
  raft::update_device(trans.data(), id_ranges->data(), id_ranges->size(), userStream);

  rmm::device_uvector<value_t> all_D(0, userStream);
  rmm::device_uvector<IdxType> all_I(0, userStream);

  value_t* out_D = res_D;
  IdxType* out_I = res_I;

  // multiple partitions first write into a scratch buffer, merged at the end
  if (input.size() > 1) {
    all_D.resize(input.size() * k * n, userStream);
    all_I.resize(input.size() * k * n, userStream);

    out_D = all_D.data();
    out_I = all_I.data();
  }

  // currently we don't support col_major inside tiled_brute_force_knn, because
  // of limitations of the pairwise_distance API:
  // 1) pairwise_distance takes a single 'isRowMajor' parameter - and we have
  // multiple options here (like rowMajorQuery/rowMajorIndex)
  // 2) because of tiling, we need to be able to set a custom stride in the PW
  // api, which isn't supported
  // Instead, transpose the input matrices if they are passed as col-major.
  auto search = search_items;
  rmm::device_uvector<value_t> search_row_major(0, userStream);
  if (!rowMajorQuery) {
    search_row_major.resize(n * D, userStream);
    raft::linalg::transpose(handle, search, search_row_major.data(), n, D, userStream);
    search = search_row_major.data();
  }

  // transpose into a temporary buffer if necessary
  rmm::device_uvector<value_t> index_row_major(0, userStream);
  if (!rowMajorIndex) {
    size_t total_size = 0;
    for (auto size : sizes) {
      total_size += size;
    }
    index_row_major.resize(total_size * D, userStream);
  }

  // Make other streams from pool wait on main stream
  resource::wait_stream_pool_on_stream(handle);

  size_t total_rows_processed = 0;
  for (size_t i = 0; i < input.size(); i++) {
    value_t* out_d_ptr = out_D + (i * k * n);
    IdxType* out_i_ptr = out_I + (i * k * n);

    auto stream = resource::get_next_usable_stream(handle, i);

    // the fused L2 kernel handles small k with row-major L2 variants directly
    if (k <= 64 && rowMajorQuery == rowMajorIndex && rowMajorQuery == true &&
        std::is_same_v<DistanceEpilogue, raft::identity_op> &&
        (metric == raft::distance::DistanceType::L2Unexpanded ||
         metric == raft::distance::DistanceType::L2SqrtUnexpanded ||
         metric == raft::distance::DistanceType::L2Expanded ||
         metric == raft::distance::DistanceType::L2SqrtExpanded)) {
      fusedL2Knn(D,
                 out_i_ptr,
                 out_d_ptr,
                 input[i],
                 search_items,
                 sizes[i],
                 n,
                 k,
                 rowMajorIndex,
                 rowMajorQuery,
                 stream,
                 metric,
                 input_norms ? (*input_norms)[i] : nullptr,
                 search_norms);

      // Perform necessary post-processing
      // NOTE(review): the unaryOp below operates on res_D (the final output),
      // not on out_d_ptr (the per-partition buffer) — for input.size() > 1
      // this runs before knn_merge_parts fills res_D; confirm intended.
      if (metric == raft::distance::DistanceType::L2SqrtExpanded ||
          metric == raft::distance::DistanceType::L2SqrtUnexpanded ||
          metric == raft::distance::DistanceType::LpUnexpanded) {
        float p = 0.5;  // standard l2
        if (metric == raft::distance::DistanceType::LpUnexpanded) p = 1.0 / metricArg;
        raft::linalg::unaryOp<float>(
          res_D,
          res_D,
          n * k,
          [p] __device__(float input) { return powf(fabsf(input), p); },
          stream);
      }
    } else {
      switch (metric) {
        case raft::distance::DistanceType::Haversine:
          ASSERT(D == 2,
                 "Haversine distance requires 2 dimensions "
                 "(latitude / longitude).");

          haversine_knn(out_i_ptr, out_d_ptr, input[i], search_items, sizes[i], n, k, stream);
          break;
        default:
          // Create a new handle with the current stream from the stream pool
          raft::resources stream_pool_handle(handle);
          raft::resource::set_cuda_stream(stream_pool_handle, stream);

          auto index = input[i];
          if (!rowMajorIndex) {
            index = index_row_major.data() + total_rows_processed * D;
            total_rows_processed += sizes[i];
            raft::linalg::transpose(handle, input[i], index, sizes[i], D, stream);
          }

          tiled_brute_force_knn<value_t, IdxType>(stream_pool_handle,
                                                  search,
                                                  index,
                                                  n,
                                                  sizes[i],
                                                  D,
                                                  k,
                                                  out_d_ptr,
                                                  out_i_ptr,
                                                  metric,
                                                  metricArg,
                                                  0,
                                                  0,
                                                  distance_epilogue,
                                                  input_norms ? (*input_norms)[i] : nullptr,
                                                  search_norms);
          break;
      }
    }

    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }

  // Sync internal streams if used. We don't need to
  // sync the user stream because we'll already have
  // fully serial execution.
  resource::sync_stream_pool(handle);

  if (input.size() > 1 || translations != nullptr) {
    // This is necessary for proper index translations. If there are
    // no translations or partitions to combine, it can be skipped.
    knn_merge_parts(out_D, out_I, res_D, res_I, n, input.size(), k, userStream, trans.data());
  }

  if (translations == nullptr) delete id_ranges;
};

// Search a single-partition brute-force index: validates shapes, wraps the
// index dataset (and its cached norms, when present) and forwards to
// brute_force_knn_impl.
template <typename T, typename IdxT>
void brute_force_search(
  raft::resources const& res,
  const raft::neighbors::brute_force::index<T>& idx,
  raft::device_matrix_view<const T, int64_t, row_major> queries,
  raft::device_matrix_view<IdxT, int64_t, row_major> neighbors,
  raft::device_matrix_view<T, int64_t, row_major> distances,
  std::optional<raft::device_vector_view<const T, int64_t>> query_norms = std::nullopt)
{
  RAFT_EXPECTS(neighbors.extent(1) == distances.extent(1), "Value of k must match for outputs");
  RAFT_EXPECTS(idx.dataset().extent(1) == queries.extent(1),
               "Number of columns in queries must match brute force index");

  auto k = neighbors.extent(1);
  auto d = idx.dataset().extent(1);

  std::vector<T*> dataset    = {const_cast<T*>(idx.dataset().data_handle())};
  std::vector<int64_t> sizes = {idx.dataset().extent(0)};
  std::vector<T*> norms;
  if (idx.has_norms()) { norms.push_back(const_cast<T*>(idx.norms().data_handle())); }

  brute_force_knn_impl<int64_t, IdxT, T>(res,
                                         dataset,
                                         sizes,
                                         d,
                                         const_cast<T*>(queries.data_handle()),
                                         queries.extent(0),
                                         neighbors.data_handle(),
                                         distances.data_handle(),
                                         k,
                                         true,
                                         true,
                                         nullptr,
                                         idx.metric(),
                                         idx.metric_arg(),
                                         raft::identity_op(),
                                         norms.size() ? &norms : nullptr,
                                         query_norms ? query_norms->data_handle() : nullptr);
}
}  // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/knn_merge_parts.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <cstdint>

#include <raft/neighbors/detail/faiss_select/DistanceUtils.h>
#include <raft/neighbors/detail/faiss_select/Select.cuh>

namespace raft::neighbors::detail {

// One block per output row: scans the k results of each of the n_parts
// partitions for that row, offsets each index by its partition's translation,
// and keeps the overall top-k via a shared-memory BlockSelect heap.
template <typename value_idx = std::int64_t,
          typename value_t   = float,
          int warp_q,
          int thread_q,
          int tpb>
RAFT_KERNEL knn_merge_parts_kernel(const value_t* inK,
                                   const value_idx* inV,
                                   value_t* outK,
                                   value_idx* outV,
                                   size_t n_samples,
                                   int n_parts,
                                   value_t initK,
                                   value_idx initV,
                                   int k,
                                   value_idx* translations)
{
  constexpr int kNumWarps = tpb / WarpSize;

  __shared__ value_t smemK[kNumWarps * warp_q];
  __shared__ value_idx smemV[kNumWarps * warp_q];

  /**
   * Uses shared memory
   */
  faiss_select::
    BlockSelect<value_t, value_idx, false, faiss_select::Comparator<value_t>, warp_q, thread_q, tpb>
      heap(initK, initV, smemK, smemV, k);

  // Grid is exactly sized to rows available
  int row     = blockIdx.x;
  int total_k = k * n_parts;

  int i = threadIdx.x;

  // Get starting pointers for cols in current thread
  int part       = i / k;
  size_t row_idx = (row * k) + (part * n_samples * k);

  int col = i % k;

  const value_t* inKStart   = inK + (row_idx + col);
  const value_idx* inVStart = inV + (row_idx + col);

  int limit             = Pow2<WarpSize>::roundDown(total_k);
  value_idx translation = 0;

  for (; i < limit; i += tpb) {
    translation = translations[part];
    heap.add(*inKStart, (*inVStart) + translation);

    // advance to the element this thread handles on the next pass
    part    = (i + tpb) / k;
    row_idx = (row * k) + (part * n_samples * k);

    col = (i + tpb) % k;

    inKStart = inK + (row_idx + col);
    inVStart = inV + (row_idx + col);
  }

  // Handle last remainder fraction of a warp of elements
  if (i < total_k) {
    translation = translations[part];
    heap.addThreadQ(*inKStart, (*inVStart) + translation);
  }

  heap.reduce();

  // write the merged top-k back out (note: loop index shadows the outer `i`)
  for (int i = threadIdx.x; i < k; i += tpb) {
    outK[row * k + i] = smemK[i];
    outV[row * k + i] = smemV[i];
  }
}

// Launch helper: picks the block size for the given warp_q/thread_q template
// parameters and launches knn_merge_parts_kernel with max-distance / -1
// sentinels as the heap's initial values.
template <typename value_idx = std::int64_t, typename value_t = float, int warp_q, int thread_q>
inline void knn_merge_parts_impl(const value_t* inK,
                                 const value_idx* inV,
                                 value_t* outK,
                                 value_idx* outV,
                                 size_t n_samples,
                                 int n_parts,
                                 int k,
                                 cudaStream_t stream,
                                 value_idx* translations)
{
  auto grid = dim3(n_samples);

  constexpr int n_threads = (warp_q <= 1024) ? 128 : 64;
  auto block              = dim3(n_threads);

  auto kInit = std::numeric_limits<value_t>::max();
  auto vInit = -1;
  knn_merge_parts_kernel<value_idx, value_t, warp_q, thread_q, n_threads>
    <<<grid, block, 0, stream>>>(
      inK, inV, outK, outV, n_samples, n_parts, kInit, vInit, k, translations);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * @brief Merge knn distances and index matrix, which have been partitioned
 * by row, into a single matrix with only the k-nearest neighbors.
 *
 * @param inK partitioned knn distance matrix
 * @param inV partitioned knn index matrix
 * @param outK merged knn distance matrix
 * @param outV merged knn index matrix
 * @param n_samples number of samples per partition
 * @param n_parts number of partitions
 * @param k number of neighbors per partition (also number of merged neighbors)
 * @param stream CUDA stream to use
 * @param translations mapping of index offsets for each partition
 */
template <typename value_idx = std::int64_t, typename value_t = float>
inline void knn_merge_parts(const value_t* inK,
                            const value_idx* inV,
                            value_t* outK,
                            value_idx* outV,
                            size_t n_samples,
                            int n_parts,
                            int k,
                            cudaStream_t stream,
                            value_idx* translations)
{
  // dispatch on k to the smallest BlockSelect configuration that fits
  // NOTE(review): k > 1024 silently launches nothing, leaving outK/outV
  // untouched — confirm callers guarantee k <= 1024.
  if (k == 1)
    knn_merge_parts_impl<value_idx, value_t, 1, 1>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
  else if (k <= 32)
    knn_merge_parts_impl<value_idx, value_t, 32, 2>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
  else if (k <= 64)
    knn_merge_parts_impl<value_idx, value_t, 64, 3>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
  else if (k <= 128)
    knn_merge_parts_impl<value_idx, value_t, 128, 3>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
  else if (k <= 256)
    knn_merge_parts_impl<value_idx, value_t, 256, 4>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
  else if (k <= 512)
    knn_merge_parts_impl<value_idx, value_t, 512, 8>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
  else if (k <= 1024)
    knn_merge_parts_impl<value_idx, value_t, 1024, 8>(
      inK, inV, outK, outV, n_samples, n_parts, k, stream, translations);
}
}  // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_build.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/cluster/kmeans_balanced.cuh> #include <raft/core/logger.hpp> #include <raft/core/mdarray.hpp> #include <raft/core/nvtx.hpp> #include <raft/core/operators.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/linalg/add.cuh> #include <raft/linalg/map.cuh> #include <raft/linalg/norm.cuh> #include <raft/neighbors/ivf_flat_codepacker.hpp> #include <raft/neighbors/ivf_flat_types.hpp> #include <raft/neighbors/ivf_list.hpp> #include <raft/neighbors/ivf_list_types.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/stats/histogram.cuh> #include <raft/util/pow2_utils.cuh> #include <rmm/cuda_stream_view.hpp> #include <cstdint> namespace raft::neighbors::ivf_flat::detail { using namespace raft::spatial::knn::detail; // NOLINT template <typename T, typename IdxT> auto clone(const raft::resources& res, const index<T, IdxT>& source) -> index<T, IdxT> { auto stream = resource::get_cuda_stream(res); // Allocate the new index index<T, IdxT> target(res, source.metric(), source.n_lists(), source.adaptive_centers(), source.conservative_memory_allocation(), source.dim()); // Copy the independent parts copy(target.list_sizes().data_handle(), source.list_sizes().data_handle(), source.list_sizes().size(), stream); copy(target.centers().data_handle(), source.centers().data_handle(), source.centers().size(), stream); 
if (source.center_norms().has_value()) { target.allocate_center_norms(res); copy(target.center_norms()->data_handle(), source.center_norms()->data_handle(), source.center_norms()->size(), stream); } // Copy shared pointers target.lists() = source.lists(); // Make sure the device pointers point to the new lists target.recompute_internal_state(res); return target; } /** * @brief Record the dataset into the index, one source row at a time. * * The index consists of the dataset rows, grouped by their labels (into clusters/lists). * Within each cluster (list), the data is grouped into blocks of `WarpSize` interleaved * vectors. Note, the total index length is slightly larger than the dataset length, because * each cluster is padded by `WarpSize` elements * * CUDA launch grid: * X dimension must cover the dataset (n_rows), YZ are not used; * there are no dependencies between threads, hence no constraints on the block size. * * @tparam T element type. * @tparam IdxT type of the indices in the source source_vecs * @tparam LabelT label type * @tparam gather_src if false, then we build the index from vectors source_vecs[i,:], otherwise * we use source_vecs[source_ixs[i],:]. In both cases i=0..n_rows-1. * * @param[in] labels device pointer to the cluster ids for each row [n_rows] * @param[in] source_vecs device pointer to the input data [n_rows, dim] * @param[in] source_ixs device pointer to the input indices [n_rows] * @param[out] list_data_ptrs device pointer to the index data of size [n_lists][index_size, dim] * @param[out] list_index_ptrs device pointer to the source ids corr. to the output [n_lists] * [index_size] * @param[out] list_sizes_ptr device pointer to the cluster sizes [n_lists]; * it's used as an atomic counter, and must be initialized with zeros. * @param n_rows source length * @param dim the dimensionality of the data * @param veclen size of vectorized loads/stores; must satisfy `dim % veclen == 0`. 
* */ template <typename T, typename IdxT, typename LabelT, bool gather_src = false> RAFT_KERNEL build_index_kernel(const LabelT* labels, const T* source_vecs, const IdxT* source_ixs, T** list_data_ptrs, IdxT** list_index_ptrs, uint32_t* list_sizes_ptr, IdxT n_rows, uint32_t dim, uint32_t veclen) { const IdxT i = IdxT(blockDim.x) * IdxT(blockIdx.x) + threadIdx.x; if (i >= n_rows) { return; } auto list_id = labels[i]; auto inlist_id = atomicAdd(list_sizes_ptr + list_id, 1); auto* list_index = list_index_ptrs[list_id]; auto* list_data = list_data_ptrs[list_id]; // Record the source vector id in the index list_index[inlist_id] = source_ixs == nullptr ? i : source_ixs[i]; // The data is written in interleaved groups of `index::kGroupSize` vectors using interleaved_group = Pow2<kIndexGroupSize>; auto group_offset = interleaved_group::roundDown(inlist_id); auto ingroup_id = interleaved_group::mod(inlist_id) * veclen; // Point to the location of the interleaved group of vectors list_data += group_offset * dim; // Point to the source vector if constexpr (gather_src) { source_vecs += source_ixs[i] * dim; } else { source_vecs += i * dim; } // Interleave dimensions of the source vector while recording it. 
// NB: such `veclen` is selected, that `dim % veclen == 0` for (uint32_t l = 0; l < dim; l += veclen) { for (uint32_t j = 0; j < veclen; j++) { list_data[l * kIndexGroupSize + ingroup_id + j] = source_vecs[l + j]; } } } /** See raft::neighbors::ivf_flat::extend docs */ template <typename T, typename IdxT> void extend(raft::resources const& handle, index<T, IdxT>* index, const T* new_vectors, const IdxT* new_indices, IdxT n_rows) { using LabelT = uint32_t; RAFT_EXPECTS(index != nullptr, "index cannot be empty."); auto stream = resource::get_cuda_stream(handle); auto n_lists = index->n_lists(); auto dim = index->dim(); list_spec<uint32_t, T, IdxT> list_device_spec{index->dim(), index->conservative_memory_allocation()}; common::nvtx::range<common::nvtx::domain::raft> fun_scope( "ivf_flat::extend(%zu, %u)", size_t(n_rows), dim); RAFT_EXPECTS(new_indices != nullptr || index->size() == 0, "You must pass data indices when the index is non-empty."); auto new_labels = raft::make_device_vector<LabelT, IdxT>(handle, n_rows); raft::cluster::kmeans_balanced_params kmeans_params; kmeans_params.metric = index->metric(); auto new_vectors_view = raft::make_device_matrix_view<const T, IdxT>(new_vectors, n_rows, dim); auto orig_centroids_view = raft::make_device_matrix_view<const float, IdxT>(index->centers().data_handle(), n_lists, dim); raft::cluster::kmeans_balanced::predict(handle, kmeans_params, new_vectors_view, orig_centroids_view, new_labels.view(), utils::mapping<float>{}); auto* list_sizes_ptr = index->list_sizes().data_handle(); auto old_list_sizes_dev = raft::make_device_vector<uint32_t, IdxT>(handle, n_lists); copy(old_list_sizes_dev.data_handle(), list_sizes_ptr, n_lists, stream); // Calculate the centers and sizes on the new data, starting from the original values if (index->adaptive_centers()) { auto centroids_view = raft::make_device_matrix_view<float, IdxT>( index->centers().data_handle(), index->centers().extent(0), index->centers().extent(1)); auto list_sizes_view 
= raft::make_device_vector_view<std::remove_pointer_t<decltype(list_sizes_ptr)>, IdxT>( list_sizes_ptr, n_lists); auto const_labels_view = make_const_mdspan(new_labels.view()); raft::cluster::kmeans_balanced::helpers::calc_centers_and_sizes(handle, new_vectors_view, const_labels_view, centroids_view, list_sizes_view, false, utils::mapping<float>{}); } else { raft::stats::histogram<uint32_t, IdxT>(raft::stats::HistTypeAuto, reinterpret_cast<int32_t*>(list_sizes_ptr), IdxT(n_lists), new_labels.data_handle(), n_rows, 1, stream); raft::linalg::add( list_sizes_ptr, list_sizes_ptr, old_list_sizes_dev.data_handle(), n_lists, stream); } // Calculate and allocate new list data std::vector<uint32_t> new_list_sizes(n_lists); std::vector<uint32_t> old_list_sizes(n_lists); { copy(old_list_sizes.data(), old_list_sizes_dev.data_handle(), n_lists, stream); copy(new_list_sizes.data(), list_sizes_ptr, n_lists, stream); resource::sync_stream(handle); auto& lists = index->lists(); for (uint32_t label = 0; label < n_lists; label++) { ivf::resize_list(handle, lists[label], list_device_spec, new_list_sizes[label], Pow2<kIndexGroupSize>::roundUp(old_list_sizes[label])); } } // Update the pointers and the sizes index->recompute_internal_state(handle); // Copy the old sizes, so we can start from the current state of the index; // we'll rebuild the `list_sizes_ptr` in the following kernel, using it as an atomic counter. 
raft::copy(list_sizes_ptr, old_list_sizes_dev.data_handle(), n_lists, stream); // Kernel to insert the new vectors const dim3 block_dim(256); const dim3 grid_dim(raft::ceildiv<IdxT>(n_rows, block_dim.x)); build_index_kernel<<<grid_dim, block_dim, 0, stream>>>(new_labels.data_handle(), new_vectors, new_indices, index->data_ptrs().data_handle(), index->inds_ptrs().data_handle(), list_sizes_ptr, n_rows, dim, index->veclen()); RAFT_CUDA_TRY(cudaPeekAtLastError()); // Precompute the centers vector norms for L2Expanded distance if (!index->center_norms().has_value()) { index->allocate_center_norms(handle); if (index->center_norms().has_value()) { raft::linalg::rowNorm(index->center_norms()->data_handle(), index->centers().data_handle(), dim, n_lists, raft::linalg::L2Norm, true, stream); RAFT_LOG_TRACE_VEC(index->center_norms()->data_handle(), std::min<uint32_t>(dim, 20)); } } else if (index->center_norms().has_value() && index->adaptive_centers()) { raft::linalg::rowNorm(index->center_norms()->data_handle(), index->centers().data_handle(), dim, n_lists, raft::linalg::L2Norm, true, stream); RAFT_LOG_TRACE_VEC(index->center_norms()->data_handle(), std::min<uint32_t>(dim, 20)); } } /** See raft::neighbors::ivf_flat::extend docs */ template <typename T, typename IdxT> auto extend(raft::resources const& handle, const index<T, IdxT>& orig_index, const T* new_vectors, const IdxT* new_indices, IdxT n_rows) -> index<T, IdxT> { auto ext_index = clone(handle, orig_index); detail::extend(handle, &ext_index, new_vectors, new_indices, n_rows); return ext_index; } /** See raft::neighbors::ivf_flat::build docs */ template <typename T, typename IdxT> inline auto build(raft::resources const& handle, const index_params& params, const T* dataset, IdxT n_rows, uint32_t dim) -> index<T, IdxT> { auto stream = resource::get_cuda_stream(handle); common::nvtx::range<common::nvtx::domain::raft> fun_scope( "ivf_flat::build(%zu, %u)", size_t(n_rows), dim); static_assert(std::is_same_v<T, float> || 
std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, "unsupported data type"); RAFT_EXPECTS(n_rows > 0 && dim > 0, "empty dataset"); RAFT_EXPECTS(n_rows >= params.n_lists, "number of rows can't be less than n_lists"); index<T, IdxT> index(handle, params, dim); utils::memzero(index.list_sizes().data_handle(), index.list_sizes().size(), stream); utils::memzero(index.data_ptrs().data_handle(), index.data_ptrs().size(), stream); utils::memzero(index.inds_ptrs().data_handle(), index.inds_ptrs().size(), stream); // Train the kmeans clustering { auto trainset_ratio = std::max<size_t>( 1, n_rows / std::max<size_t>(params.kmeans_trainset_fraction * n_rows, index.n_lists())); auto n_rows_train = n_rows / trainset_ratio; rmm::device_uvector<T> trainset(n_rows_train * index.dim(), stream); // TODO: a proper sampling RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset.data(), sizeof(T) * index.dim(), dataset, sizeof(T) * index.dim() * trainset_ratio, sizeof(T) * index.dim(), n_rows_train, cudaMemcpyDefault, stream)); auto trainset_const_view = raft::make_device_matrix_view<const T, IdxT>(trainset.data(), n_rows_train, index.dim()); auto centers_view = raft::make_device_matrix_view<float, IdxT>( index.centers().data_handle(), index.n_lists(), index.dim()); raft::cluster::kmeans_balanced_params kmeans_params; kmeans_params.n_iters = params.kmeans_n_iters; kmeans_params.metric = index.metric(); raft::cluster::kmeans_balanced::fit( handle, kmeans_params, trainset_const_view, centers_view, utils::mapping<float>{}); } // add the data if necessary if (params.add_data_on_build) { detail::extend<T, IdxT>(handle, &index, dataset, nullptr, n_rows); } return index; } /** * Build an index that can be used in refinement operation. * * See raft::neighbors::refine for details on the refinement operation. * * The returned index cannot be used for a regular ivf_flat::search. The index misses information * about coarse clusters. 
Instead, the neighbor candidates are assumed to form clusters, one for * each query. The candidate vectors are gathered into the index dataset, that can be later used * in ivfflat_interleaved_scan. * * @param[in] handle the raft handle * @param[inout] refinement_index * @param[in] dataset device pointer to dataset vectors, size [n_rows, dim]. Note that n_rows is * not known to this function, but each candidate_idx has to be smaller than n_rows. * @param[in] candidate_idx device pointer to neighbor candidates, size [n_queries, n_candidates] * @param[in] n_candidates of neighbor_candidates */ template <typename T, typename IdxT> inline void fill_refinement_index(raft::resources const& handle, index<T, IdxT>* refinement_index, const T* dataset, const IdxT* candidate_idx, IdxT n_queries, uint32_t n_candidates) { using LabelT = uint32_t; auto stream = resource::get_cuda_stream(handle); uint32_t n_lists = n_queries; common::nvtx::range<common::nvtx::domain::raft> fun_scope( "ivf_flat::fill_refinement_index(%zu, %u)", size_t(n_queries)); rmm::device_uvector<LabelT> new_labels(n_queries * n_candidates, stream); auto new_labels_view = raft::make_device_vector_view<LabelT, IdxT>(new_labels.data(), n_queries * n_candidates); linalg::map_offset( handle, new_labels_view, raft::compose_op(raft::cast_op<LabelT>(), raft::div_const_op<IdxT>(n_candidates))); auto list_sizes_ptr = refinement_index->list_sizes().data_handle(); // We do not fill centers and center norms, since we will not run coarse search. 
// Allocate new memory auto& lists = refinement_index->lists(); list_spec<uint32_t, T, IdxT> list_device_spec{refinement_index->dim(), false}; for (uint32_t label = 0; label < n_lists; label++) { ivf::resize_list(handle, lists[label], list_device_spec, n_candidates, uint32_t(0)); } // Update the pointers and the sizes refinement_index->recompute_internal_state(handle); RAFT_CUDA_TRY(cudaMemsetAsync(list_sizes_ptr, 0, n_lists * sizeof(uint32_t), stream)); const dim3 block_dim(256); const dim3 grid_dim(raft::ceildiv<IdxT>(n_queries * n_candidates, block_dim.x)); build_index_kernel<T, IdxT, LabelT, true> <<<grid_dim, block_dim, 0, stream>>>(new_labels.data(), dataset, candidate_idx, refinement_index->data_ptrs().data_handle(), refinement_index->inds_ptrs().data_handle(), list_sizes_ptr, n_queries * n_candidates, refinement_index->dim(), refinement_index->veclen()); RAFT_CUDA_TRY(cudaPeekAtLastError()); } template <typename T> RAFT_KERNEL pack_interleaved_list_kernel(const T* codes, T* list_data, uint32_t n_rows, uint32_t dim, uint32_t veclen, std::variant<uint32_t, const uint32_t*> offset_or_indices) { uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t dst_ix = std::holds_alternative<uint32_t>(offset_or_indices) ? std::get<uint32_t>(offset_or_indices) + tid : std::get<const uint32_t*>(offset_or_indices)[tid]; if (tid < n_rows) { codepacker::pack_1(codes + tid * dim, list_data, dim, veclen, dst_ix); } } template <typename T> RAFT_KERNEL unpack_interleaved_list_kernel( const T* list_data, T* codes, uint32_t n_rows, uint32_t dim, uint32_t veclen, std::variant<uint32_t, const uint32_t*> offset_or_indices) { uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t src_ix = std::holds_alternative<uint32_t>(offset_or_indices) ? 
std::get<uint32_t>(offset_or_indices) + tid : std::get<const uint32_t*>(offset_or_indices)[tid]; if (tid < n_rows) { codepacker::unpack_1(list_data, codes + tid * dim, dim, veclen, src_ix); } } template <typename T, typename IdxT> void pack_list_data( raft::resources const& res, device_matrix_view<const T, uint32_t, row_major> codes, uint32_t veclen, std::variant<uint32_t, const uint32_t*> offset_or_indices, device_mdspan<T, typename list_spec<uint32_t, T, IdxT>::list_extents, row_major> list_data) { uint32_t n_rows = codes.extent(0); uint32_t dim = codes.extent(1); if (n_rows == 0 || dim == 0) return; static constexpr uint32_t kBlockSize = 256; dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1); dim3 threads(kBlockSize, 1, 1); auto stream = resource::get_cuda_stream(res); pack_interleaved_list_kernel<<<blocks, threads, 0, stream>>>( codes.data_handle(), list_data.data_handle(), n_rows, dim, veclen, offset_or_indices); RAFT_CUDA_TRY(cudaPeekAtLastError()); } template <typename T, typename IdxT> void unpack_list_data( raft::resources const& res, device_mdspan<const T, typename list_spec<uint32_t, T, IdxT>::list_extents, row_major> list_data, uint32_t veclen, std::variant<uint32_t, const uint32_t*> offset_or_indices, device_matrix_view<T, uint32_t, row_major> codes) { uint32_t n_rows = codes.extent(0); uint32_t dim = codes.extent(1); if (n_rows == 0 || dim == 0) return; static constexpr uint32_t kBlockSize = 256; dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1); dim3 threads(kBlockSize, 1, 1); auto stream = resource::get_cuda_stream(res); unpack_interleaved_list_kernel<<<blocks, threads, 0, stream>>>( list_data.data_handle(), codes.data_handle(), n_rows, dim, veclen, offset_or_indices); RAFT_CUDA_TRY(cudaPeekAtLastError()); } } // namespace raft::neighbors::ivf_flat::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/selection_faiss.cuh
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "selection_faiss-inl.cuh" #endif #ifdef RAFT_COMPILED #include "selection_faiss-ext.cuh" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_serialize.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/core/detail/mdspan_numpy_serializer.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/serialize.hpp>
#include <raft/neighbors/ivf_flat_types.hpp>
#include <raft/neighbors/ivf_list.hpp>
#include <raft/neighbors/ivf_list_types.hpp>
#include <raft/util/pow2_utils.cuh>

#include <fstream>

namespace raft::neighbors::ivf_flat::detail {

// Serialization version
// No backward compatibility yet; that is, can't add additional fields without breaking
// backward compatibility.
// TODO(hcho3) Implement next-gen serializer for IVF that allows for expansion in a backward
// compatible fashion.
constexpr int serialization_version = 4;

/**
 * Write the index to an output stream.
 *
 * The layout is: a 4-byte numpy dtype tag for T, the serialization version, the index
 * scalars (size, dim, n_lists, metric, flags), the centers (and optional center norms),
 * the host-copied list sizes, then each list padded up to a multiple of `kIndexGroupSize`.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] os output stream to write the index to
 * @param[in] index_ IVF-Flat index
 */
template <typename T, typename IdxT>
void serialize(raft::resources const& handle, std::ostream& os, const index<T, IdxT>& index_)
{
  RAFT_LOG_DEBUG(
    "Saving IVF-Flat index, size %zu, dim %u", static_cast<size_t>(index_.size()), index_.dim());

  // Fixed-width (4-char) dtype tag so the reader can skip it without parsing.
  std::string dtype_string = raft::detail::numpy_serializer::get_numpy_dtype<T>().to_string();
  dtype_string.resize(4);
  os << dtype_string;

  serialize_scalar(handle, os, serialization_version);
  serialize_scalar(handle, os, index_.size());
  serialize_scalar(handle, os, index_.dim());
  serialize_scalar(handle, os, index_.n_lists());
  serialize_scalar(handle, os, index_.metric());
  serialize_scalar(handle, os, index_.adaptive_centers());
  serialize_scalar(handle, os, index_.conservative_memory_allocation());
  serialize_mdspan(handle, os, index_.centers());
  // Center norms are optional; a boolean flag precedes them in the stream.
  if (index_.center_norms()) {
    bool has_norms = true;
    serialize_scalar(handle, os, has_norms);
    serialize_mdspan(handle, os, *index_.center_norms());
  } else {
    bool has_norms = false;
    serialize_scalar(handle, os, has_norms);
  }
  // List sizes live on device; stage them on the host before writing.
  auto sizes_host = make_host_vector<uint32_t, uint32_t>(index_.list_sizes().extent(0));
  copy(sizes_host.data_handle(),
       index_.list_sizes().data_handle(),
       sizes_host.size(),
       resource::get_cuda_stream(handle));
  resource::sync_stream(handle);
  serialize_mdspan(handle, os, sizes_host.view());

  list_spec<uint32_t, T, IdxT> list_store_spec{index_.dim(), true};
  for (uint32_t label = 0; label < index_.n_lists(); label++) {
    // Lists are stored rounded up to the interleaved group size (matches on-device padding).
    ivf::serialize_list(handle,
                        os,
                        index_.lists()[label],
                        list_store_spec,
                        Pow2<kIndexGroupSize>::roundUp(sizes_host(label)));
  }
  resource::sync_stream(handle);
}

/**
 * Save the index to file (thin wrapper over the stream overload).
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] filename the file name for saving the index
 * @param[in] index_ IVF-Flat index
 */
template <typename T, typename IdxT>
void serialize(raft::resources const& handle,
               const std::string& filename,
               const index<T, IdxT>& index_)
{
  std::ofstream of(filename, std::ios::out | std::ios::binary);
  if (!of) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  detail::serialize(handle, of, index_);

  of.close();
  if (!of) { RAFT_FAIL("Error writing output %s", filename.c_str()); }
}

/**
 * Load an index from an input stream (inverse of the stream `serialize` above).
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] is input stream positioned at the start of a serialized index
 *
 * @return the deserialized IVF-Flat index
 */
template <typename T, typename IdxT>
auto deserialize(raft::resources const& handle, std::istream& is) -> index<T, IdxT>
{
  // NOTE(review): the 4-byte dtype tag is read but not validated against T — a mismatched
  // T silently misinterprets the payload; confirm whether callers guarantee the type.
  char dtype_string[4];
  is.read(dtype_string, 4);

  auto ver = deserialize_scalar<int>(handle, is);
  if (ver != serialization_version) {
    RAFT_FAIL("serialization version mismatch, expected %d, got %d ", serialization_version, ver);
  }
  auto n_rows           = deserialize_scalar<IdxT>(handle, is);
  auto dim              = deserialize_scalar<std::uint32_t>(handle, is);
  auto n_lists          = deserialize_scalar<std::uint32_t>(handle, is);
  auto metric           = deserialize_scalar<raft::distance::DistanceType>(handle, is);
  bool adaptive_centers = deserialize_scalar<bool>(handle, is);
  bool cma              = deserialize_scalar<bool>(handle, is);

  index<T, IdxT> index_ = index<T, IdxT>(handle, metric, n_lists, adaptive_centers, cma, dim);

  deserialize_mdspan(handle, is, index_.centers());
  bool has_norms = deserialize_scalar<bool>(handle, is);
  if (has_norms) {
    index_.allocate_center_norms(handle);
    if (!index_.center_norms()) {
      RAFT_FAIL("Error inconsistent center norms");
    } else {
      auto center_norms = index_.center_norms().value();
      deserialize_mdspan(handle, is, center_norms);
    }
  }
  deserialize_mdspan(handle, is, index_.list_sizes());

  list_spec<uint32_t, T, IdxT> list_device_spec{index_.dim(), cma};
  list_spec<uint32_t, T, IdxT> list_store_spec{index_.dim(), true};
  for (uint32_t label = 0; label < index_.n_lists(); label++) {
    ivf::deserialize_list(handle, is, index_.lists()[label], list_store_spec, list_device_spec);
  }
  resource::sync_stream(handle);

  // Refresh device-side pointers/sizes derived from the freshly-loaded lists.
  index_.recompute_internal_state(handle);

  return index_;
}

/**
 * Load an index from file (thin wrapper over the stream overload).
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] filename the name of the file that stores the index
 *
 * @return the deserialized IVF-Flat index
 */
template <typename T, typename IdxT>
auto deserialize(raft::resources const& handle, const std::string& filename) -> index<T, IdxT>
{
  std::ifstream is(filename, std::ios::in | std::ios::binary);
  if (!is) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  auto index = detail::deserialize<T, IdxT>(handle, is);

  is.close();

  return index;
}

}  // namespace raft::neighbors::ivf_flat::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_serialize.cuh
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/core/resource/cuda_stream.hpp>
#include <raft/neighbors/detail/ivf_pq_build.cuh>
#include <raft/neighbors/ivf_list.hpp>
#include <raft/neighbors/ivf_pq_types.hpp>

#include <raft/core/host_mdarray.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/resources.hpp>
#include <raft/core/serialize.hpp>

#include <fstream>
#include <memory>

namespace raft::neighbors::ivf_pq::detail {

// Serialization version
// No backward compatibility yet; that is, can't add additional fields without breaking
// backward compatibility.
// TODO(hcho3) Implement next-gen serializer for IVF that allows for expansion in a backward
// compatible fashion.
constexpr int kSerializationVersion = 3;

/**
 * Write the index to an output stream.
 *
 * Layout: version, index scalars (size, dim, pq_bits, pq_dim, flags, metric, codebook kind,
 * n_lists), the codebooks and rotation matrices, the host-copied list sizes, then each list.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle_ the raft handle
 * @param[in] os output stream
 * @param[in] index IVF-PQ index
 */
template <typename IdxT>
void serialize(raft::resources const& handle_, std::ostream& os, const index<IdxT>& index)
{
  RAFT_LOG_DEBUG("Size %zu, dim %d, pq_dim %d, pq_bits %d",
                 static_cast<size_t>(index.size()),
                 static_cast<int>(index.dim()),
                 static_cast<int>(index.pq_dim()),
                 static_cast<int>(index.pq_bits()));

  serialize_scalar(handle_, os, kSerializationVersion);
  serialize_scalar(handle_, os, index.size());
  serialize_scalar(handle_, os, index.dim());
  serialize_scalar(handle_, os, index.pq_bits());
  serialize_scalar(handle_, os, index.pq_dim());
  serialize_scalar(handle_, os, index.conservative_memory_allocation());

  serialize_scalar(handle_, os, index.metric());
  serialize_scalar(handle_, os, index.codebook_kind());
  serialize_scalar(handle_, os, index.n_lists());

  serialize_mdspan(handle_, os, index.pq_centers());
  serialize_mdspan(handle_, os, index.centers());
  serialize_mdspan(handle_, os, index.centers_rot());
  serialize_mdspan(handle_, os, index.rotation_matrix());

  // List sizes live on device; stage them on the host before writing.
  auto sizes_host = make_host_mdarray<uint32_t, uint32_t, row_major>(index.list_sizes().extents());
  copy(sizes_host.data_handle(),
       index.list_sizes().data_handle(),
       sizes_host.size(),
       resource::get_cuda_stream(handle_));
  resource::sync_stream(handle_);
  serialize_mdspan(handle_, os, sizes_host.view());
  auto list_store_spec = list_spec<uint32_t, IdxT>{index.pq_bits(), index.pq_dim(), true};
  for (uint32_t label = 0; label < index.n_lists(); label++) {
    ivf::serialize_list(handle_, os, index.lists()[label], list_store_spec, sizes_host(label));
  }
}

/**
 * Save the index to file (thin wrapper over the stream overload).
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle_ the raft handle
 * @param[in] filename the file name for saving the index
 * @param[in] index IVF-PQ index
 */
template <typename IdxT>
void serialize(raft::resources const& handle_,
               const std::string& filename,
               const index<IdxT>& index)
{
  std::ofstream of(filename, std::ios::out | std::ios::binary);
  if (!of) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  detail::serialize(handle_, of, index);

  of.close();
  if (!of) { RAFT_FAIL("Error writing output %s", filename.c_str()); }
  return;
}

/**
 * Load index from input stream (inverse of the stream `serialize` above).
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle_ the raft handle
 * @param[in] is input stream positioned at the start of a serialized index
 *
 * @return the deserialized IVF-PQ index
 */
template <typename IdxT>
auto deserialize(raft::resources const& handle_, std::istream& is) -> index<IdxT>
{
  auto ver = deserialize_scalar<int>(handle_, is);
  if (ver != kSerializationVersion) {
    RAFT_FAIL("serialization version mismatch %d vs. %d", ver, kSerializationVersion);
  }
  auto n_rows  = deserialize_scalar<IdxT>(handle_, is);
  auto dim     = deserialize_scalar<std::uint32_t>(handle_, is);
  auto pq_bits = deserialize_scalar<std::uint32_t>(handle_, is);
  auto pq_dim  = deserialize_scalar<std::uint32_t>(handle_, is);
  auto cma     = deserialize_scalar<bool>(handle_, is);

  auto metric        = deserialize_scalar<raft::distance::DistanceType>(handle_, is);
  auto codebook_kind = deserialize_scalar<raft::neighbors::ivf_pq::codebook_gen>(handle_, is);
  auto n_lists       = deserialize_scalar<std::uint32_t>(handle_, is);

  RAFT_LOG_DEBUG("n_rows %zu, dim %d, pq_dim %d, pq_bits %d, n_lists %d",
                 static_cast<std::size_t>(n_rows),
                 static_cast<int>(dim),
                 static_cast<int>(pq_dim),
                 static_cast<int>(pq_bits),
                 static_cast<int>(n_lists));

  auto index = raft::neighbors::ivf_pq::index<IdxT>(
    handle_, metric, codebook_kind, n_lists, dim, pq_bits, pq_dim, cma);

  deserialize_mdspan(handle_, is, index.pq_centers());
  deserialize_mdspan(handle_, is, index.centers());
  deserialize_mdspan(handle_, is, index.centers_rot());
  deserialize_mdspan(handle_, is, index.rotation_matrix());
  deserialize_mdspan(handle_, is, index.list_sizes());
  auto list_device_spec = list_spec<uint32_t, IdxT>{pq_bits, pq_dim, cma};
  auto list_store_spec  = list_spec<uint32_t, IdxT>{pq_bits, pq_dim, true};
  for (auto& list : index.lists()) {
    ivf::deserialize_list(handle_, is, list, list_store_spec, list_device_spec);
  }

  resource::sync_stream(handle_);

  // Refresh device-side pointers/sizes derived from the freshly-loaded lists.
  recompute_internal_state(handle_, index);

  return index;
}

/**
 * Load index from file (thin wrapper over the stream overload).
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle_ the raft handle
 * @param[in] filename the name of the file that stores the index
 *
 * @return the deserialized IVF-PQ index
 */
template <typename IdxT>
auto deserialize(raft::resources const& handle_, const std::string& filename) -> index<IdxT>
{
  std::ifstream infile(filename, std::ios::in | std::ios::binary);

  if (!infile) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  auto index = detail::deserialize<IdxT>(handle_, infile);

  infile.close();

  return index;
}

}  // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_search.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/device_properties.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/neighbors/detail/ivf_pq_compute_similarity.cuh> #include <raft/neighbors/detail/ivf_pq_dummy_block_sort.cuh> #include <raft/neighbors/detail/ivf_pq_fp_8bit.cuh> #include <raft/neighbors/ivf_pq_types.hpp> #include <raft/neighbors/sample_filter_types.hpp> #include <raft/core/cudart_utils.hpp> #include <raft/core/device_mdarray.hpp> #include <raft/core/logger.hpp> #include <raft/core/nvtx.hpp> #include <raft/core/operators.hpp> #include <raft/core/resource/detail/device_memory_resource.hpp> #include <raft/core/resource/device_memory_resource.hpp> #include <raft/core/resources.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/gemm.cuh> #include <raft/linalg/map.cuh> #include <raft/linalg/unary_op.cuh> #include <raft/matrix/detail/select_k.cuh> #include <raft/matrix/detail/select_warpsort.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/device_atomics.cuh> #include <raft/util/device_loads_stores.cuh> #include <raft/util/pow2_utils.cuh> #include <raft/util/vectorized.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <cub/cub.cuh> #include <cuda_fp16.h> #include <optional> namespace raft::neighbors::ivf_pq::detail { using namespace 
raft::spatial::knn::detail; // NOLINT /** * Select the clusters to probe and, as a side-effect, translate the queries type `T -> float` * * Assuming the number of clusters is not that big (a few thousands), we do a plain GEMM * followed by select_k to select the clusters to probe. There's no need to return the similarity * scores here. */ template <typename T> void select_clusters(raft::resources const& handle, uint32_t* clusters_to_probe, // [n_queries, n_probes] float* float_queries, // [n_queries, dim_ext] uint32_t n_queries, uint32_t n_probes, uint32_t n_lists, uint32_t dim, uint32_t dim_ext, raft::distance::DistanceType metric, const T* queries, // [n_queries, dim] const float* cluster_centers, // [n_lists, dim_ext] rmm::mr::device_memory_resource* mr) { auto stream = resource::get_cuda_stream(handle); /* NOTE[qc_distances] We compute query-center distances to choose the clusters to probe. We accomplish that with just one GEMM operation thanks to some preprocessing: L2 distance: cluster_centers[i, dim()] contains the squared norm of the center vector i; we extend the dimension K of the GEMM to compute it together with all the dot products: `qc_distances[i, j] = |cluster_centers[j]|^2 - 2 * (queries[i], cluster_centers[j])` This is a monotonous mapping of the proper L2 distance. IP distance: `qc_distances[i, j] = - (queries[i], cluster_centers[j])` This is a negative inner-product distance. We minimize it to find the similar clusters. NB: qc_distances is NOT used further in ivfpq_search. 
*/ float norm_factor; switch (metric) { case raft::distance::DistanceType::L2SqrtExpanded: case raft::distance::DistanceType::L2Expanded: norm_factor = 1.0 / -2.0; break; case raft::distance::DistanceType::InnerProduct: norm_factor = 0.0; break; default: RAFT_FAIL("Unsupported distance type %d.", int(metric)); } auto float_queries_view = raft::make_device_vector_view<float, uint32_t>(float_queries, dim_ext * n_queries); linalg::map_offset( handle, float_queries_view, [queries, dim, dim_ext, norm_factor] __device__(uint32_t ix) { uint32_t col = ix % dim_ext; uint32_t row = ix / dim_ext; return col < dim ? utils::mapping<float>{}(queries[col + dim * row]) : norm_factor; }); float alpha; float beta; uint32_t gemm_k = dim; switch (metric) { case raft::distance::DistanceType::L2SqrtExpanded: case raft::distance::DistanceType::L2Expanded: { alpha = -2.0; beta = 0.0; gemm_k = dim + 1; RAFT_EXPECTS(gemm_k <= dim_ext, "unexpected gemm_k or dim_ext"); } break; case raft::distance::DistanceType::InnerProduct: { alpha = -1.0; beta = 0.0; } break; default: RAFT_FAIL("Unsupported distance type %d.", int(metric)); } rmm::device_uvector<float> qc_distances(n_queries * n_lists, stream, mr); linalg::gemm(handle, true, false, n_lists, n_queries, gemm_k, &alpha, cluster_centers, dim_ext, float_queries, dim_ext, &beta, qc_distances.data(), n_lists, stream); // Select neighbor clusters for each query. rmm::device_uvector<float> cluster_dists(n_queries * n_probes, stream, mr); matrix::detail::select_k<float, uint32_t>(handle, qc_distances.data(), nullptr, n_queries, n_lists, n_probes, cluster_dists.data(), clusters_to_probe, true, mr); } /** * For each query, we calculate a cumulative sum of the cluster sizes that we probe, and return that * in chunk_indices. Essentially this is a segmented inclusive scan of the cluster sizes. The total * number of samples per query (sum of the cluster sizes that we probe) is returned in n_samples. 
*/ template <int BlockDim> __launch_bounds__(BlockDim) RAFT_KERNEL calc_chunk_indices_kernel(uint32_t n_probes, const uint32_t* cluster_sizes, // [n_clusters] const uint32_t* clusters_to_probe, // [n_queries, n_probes] uint32_t* chunk_indices, // [n_queries, n_probes] uint32_t* n_samples // [n_queries] ) { using block_scan = cub::BlockScan<uint32_t, BlockDim>; __shared__ typename block_scan::TempStorage shm; // locate the query data clusters_to_probe += n_probes * blockIdx.x; chunk_indices += n_probes * blockIdx.x; // block scan const uint32_t n_probes_aligned = Pow2<BlockDim>::roundUp(n_probes); uint32_t total = 0; for (uint32_t probe_ix = threadIdx.x; probe_ix < n_probes_aligned; probe_ix += BlockDim) { auto label = probe_ix < n_probes ? clusters_to_probe[probe_ix] : 0u; auto chunk = probe_ix < n_probes ? cluster_sizes[label] : 0u; if (threadIdx.x == 0) { chunk += total; } block_scan(shm).InclusiveSum(chunk, chunk, total); __syncthreads(); if (probe_ix < n_probes) { chunk_indices[probe_ix] = chunk; } } // save the total size if (threadIdx.x == 0) { n_samples[blockIdx.x] = total; } } struct calc_chunk_indices { public: struct configured { void* kernel; dim3 block_dim; dim3 grid_dim; uint32_t n_probes; inline void operator()(const uint32_t* cluster_sizes, const uint32_t* clusters_to_probe, uint32_t* chunk_indices, uint32_t* n_samples, rmm::cuda_stream_view stream) { void* args[] = // NOLINT {&n_probes, &cluster_sizes, &clusters_to_probe, &chunk_indices, &n_samples}; RAFT_CUDA_TRY(cudaLaunchKernel(kernel, grid_dim, block_dim, args, 0, stream)); } }; static inline auto configure(uint32_t n_probes, uint32_t n_queries) -> configured { return try_block_dim<1024>(n_probes, n_queries); } private: template <int BlockDim> static auto try_block_dim(uint32_t n_probes, uint32_t n_queries) -> configured { if constexpr (BlockDim >= WarpSize * 2) { if (BlockDim >= n_probes * 2) { return try_block_dim<(BlockDim / 2)>(n_probes, n_queries); } } return 
{reinterpret_cast<void*>(calc_chunk_indices_kernel<BlockDim>), dim3(BlockDim, 1, 1), dim3(n_queries, 1, 1), n_probes}; } }; /** * Look up the chunk id corresponding to the sample index. * * Each query vector was compared to all the vectors from n_probes clusters, and sample_ix is an * ordered number of one of such vectors. This function looks up to which chunk it belongs, * and returns the index within the chunk (which is also an index within a cluster). * * @param[inout] sample_ix * input: the offset of the sample in the batch; * output: the offset inside the chunk (probe) / selected cluster. * @param[in] n_probes number of probes * @param[in] chunk_indices offsets of the chunks within the batch [n_probes] * @return chunk index (== n_probes when the input index is not in the valid range, * which can happen if there is not enough data to output in the selected clusters). */ __device__ inline auto find_chunk_ix(uint32_t& sample_ix, // NOLINT uint32_t n_probes, const uint32_t* chunk_indices) -> uint32_t { uint32_t ix_min = 0; uint32_t ix_max = n_probes; do { uint32_t i = (ix_min + ix_max) / 2; if (chunk_indices[i] <= sample_ix) { ix_min = i + 1; } else { ix_max = i; } } while (ix_min < ix_max); if (ix_min > 0) { sample_ix -= chunk_indices[ix_min - 1]; } return ix_min; } template <int BlockDim, typename IdxT> __launch_bounds__(BlockDim) RAFT_KERNEL postprocess_neighbors_kernel(IdxT* neighbors_out, // [n_queries, topk] const uint32_t* neighbors_in, // [n_queries, topk] const IdxT* const* db_indices, // [n_clusters][..] 
const uint32_t* clusters_to_probe, // [n_queries, n_probes] const uint32_t* chunk_indices, // [n_queries, n_probes] uint32_t n_queries, uint32_t n_probes, uint32_t topk) { const uint64_t i = threadIdx.x + BlockDim * uint64_t(blockIdx.x); const uint32_t query_ix = i / uint64_t(topk); if (query_ix >= n_queries) { return; } const uint32_t k = i % uint64_t(topk); neighbors_in += query_ix * topk; neighbors_out += query_ix * topk; chunk_indices += query_ix * n_probes; clusters_to_probe += query_ix * n_probes; uint32_t data_ix = neighbors_in[k]; const uint32_t chunk_ix = find_chunk_ix(data_ix, n_probes, chunk_indices); const bool valid = chunk_ix < n_probes; neighbors_out[k] = valid ? db_indices[clusters_to_probe[chunk_ix]][data_ix] : ivf_pq::kOutOfBoundsRecord<IdxT>; } /** * Transform found sample indices into the corresponding database indices * (as stored in index.indices()). * The sample indices are the record indices as they appear in the database view formed by the * probed clusters / defined by the `chunk_indices`. * We assume the searched sample sizes (for a single query) fit into `uint32_t`. */ template <typename IdxT> void postprocess_neighbors(IdxT* neighbors_out, // [n_queries, topk] const uint32_t* neighbors_in, // [n_queries, topk] const IdxT* const* db_indices, // [n_clusters][..] const uint32_t* clusters_to_probe, // [n_queries, n_probes] const uint32_t* chunk_indices, // [n_queries, n_probes] uint32_t n_queries, uint32_t n_probes, uint32_t topk, rmm::cuda_stream_view stream) { constexpr int kPNThreads = 256; const int pn_blocks = raft::div_rounding_up_unsafe<size_t>(n_queries * topk, kPNThreads); postprocess_neighbors_kernel<kPNThreads, IdxT> <<<pn_blocks, kPNThreads, 0, stream>>>(neighbors_out, neighbors_in, db_indices, clusters_to_probe, chunk_indices, n_queries, n_probes, topk); } /** * Post-process the scores depending on the metric type; * translate the element type if necessary. 
*/ template <typename ScoreT> void postprocess_distances(float* out, // [n_queries, topk] const ScoreT* in, // [n_queries, topk] distance::DistanceType metric, uint32_t n_queries, uint32_t topk, float scaling_factor, rmm::cuda_stream_view stream) { size_t len = size_t(n_queries) * size_t(topk); switch (metric) { case distance::DistanceType::L2Unexpanded: case distance::DistanceType::L2Expanded: { linalg::unaryOp(out, in, len, raft::compose_op(raft::mul_const_op<float>{scaling_factor * scaling_factor}, raft::cast_op<float>{}), stream); } break; case distance::DistanceType::L2SqrtUnexpanded: case distance::DistanceType::L2SqrtExpanded: { linalg::unaryOp( out, in, len, raft::compose_op{ raft::mul_const_op<float>{scaling_factor}, raft::sqrt_op{}, raft::cast_op<float>{}}, stream); } break; case distance::DistanceType::InnerProduct: { linalg::unaryOp(out, in, len, raft::compose_op(raft::mul_const_op<float>{-scaling_factor * scaling_factor}, raft::cast_op<float>{}), stream); } break; default: RAFT_FAIL("Unexpected metric."); } } /** * An approximation to the number of times each cluster appears in a batched sample. * * If the pairs (probe_ix, query_ix) are sorted by the probe_ix, there is a good chance that * the same probe_ix (cluster) is processed by several blocks on a single SM. This greatly * increases the L1 cache hit rate (i.e. increases the data locality). * * This function gives an estimate of how many times a specific cluster may appear in the * batch. Thus, it gives a practical limit to how many blocks should be active on the same SM * to improve the L1 cache hit rate. */ constexpr inline auto expected_probe_coresidency(uint32_t n_clusters, uint32_t n_probes, uint32_t n_queries) -> uint32_t { /* Let say: n = n_clusters k = n_probes m = n_queries r = # of times a specific block appears in the batched sample. 
Then, r has the Binomial distribution (p = k / n): P(r) = C(m,r) * k^r * (n - k)^(m - r) / n^m E[r] = m * k / n E[r | r > 0] = m * k / n / (1 - (1 - k/n)^m) The latter can be approximated by a much simpler formula, assuming (k / n) -> 0: E[r | r > 0] = 1 + (m - 1) * k / (2 * n) + O( (k/n)^2 ) */ return 1 + (n_queries - 1) * n_probes / (2 * n_clusters); } /** * The "main part" of the search, which assumes that outer-level `search` has already: * * 1. computed the closest clusters to probe (`clusters_to_probe`); * 2. transformed input queries into the rotated space (rot_dim); * 3. split the query batch into smaller chunks, so that the device workspace * is guaranteed to fit into GPU memory. */ template <typename ScoreT, typename LutT, typename IvfSampleFilterT, typename IdxT> void ivfpq_search_worker(raft::resources const& handle, const index<IdxT>& index, uint32_t max_samples, uint32_t n_probes, uint32_t topK, uint32_t n_queries, uint32_t queries_offset, // needed for filtering const uint32_t* clusters_to_probe, // [n_queries, n_probes] const float* query, // [n_queries, rot_dim] IdxT* neighbors, // [n_queries, topK] float* distances, // [n_queries, topK] float scaling_factor, double preferred_shmem_carveout, IvfSampleFilterT sample_filter) { auto stream = resource::get_cuda_stream(handle); auto mr = resource::get_workspace_resource(handle); bool manage_local_topk = is_local_topk_feasible(topK, n_probes, n_queries); auto topk_len = manage_local_topk ? 
n_probes * topK : max_samples; std::size_t n_queries_probes = std::size_t(n_queries) * std::size_t(n_probes); std::size_t n_queries_topk_len = std::size_t(n_queries) * std::size_t(topk_len); if (manage_local_topk) { RAFT_LOG_DEBUG("Fused version of the search kernel is selected (manage_local_topk == true)"); } else { RAFT_LOG_DEBUG( "Non-fused version of the search kernel is selected (manage_local_topk == false)"); } rmm::device_uvector<uint32_t> index_list_sorted_buf(0, stream, mr); uint32_t* index_list_sorted = nullptr; rmm::device_uvector<uint32_t> num_samples(n_queries, stream, mr); rmm::device_uvector<uint32_t> chunk_index(n_queries_probes, stream, mr); // [maxBatchSize, max_samples] or [maxBatchSize, n_probes, topk] rmm::device_uvector<ScoreT> distances_buf(n_queries_topk_len, stream, mr); rmm::device_uvector<uint32_t> neighbors_buf(0, stream, mr); uint32_t* neighbors_ptr = nullptr; if (manage_local_topk) { neighbors_buf.resize(n_queries_topk_len, stream); neighbors_ptr = neighbors_buf.data(); } rmm::device_uvector<uint32_t> neighbors_uint32_buf(0, stream, mr); uint32_t* neighbors_uint32 = nullptr; if constexpr (sizeof(IdxT) == sizeof(uint32_t)) { neighbors_uint32 = reinterpret_cast<uint32_t*>(neighbors); } else { neighbors_uint32_buf.resize(n_queries * topK, stream); neighbors_uint32 = neighbors_uint32_buf.data(); } calc_chunk_indices::configure(n_probes, n_queries)(index.list_sizes().data_handle(), clusters_to_probe, chunk_index.data(), num_samples.data(), stream); auto coresidency = expected_probe_coresidency(index.n_lists(), n_probes, n_queries); if (coresidency > 1) { // Sorting index by cluster number (label). // The goal is to incrase the L2 cache hit rate to read the vectors // of a cluster by processing the cluster at the same time as much as // possible. 
index_list_sorted_buf.resize(n_queries_probes, stream); auto index_list_buf = make_device_mdarray<uint32_t>(handle, mr, make_extents<uint32_t>(n_queries_probes)); rmm::device_uvector<uint32_t> cluster_labels_out(n_queries_probes, stream, mr); auto index_list = index_list_buf.data_handle(); index_list_sorted = index_list_sorted_buf.data(); linalg::map_offset(handle, index_list_buf.view(), identity_op{}); int begin_bit = 0; int end_bit = sizeof(uint32_t) * 8; size_t cub_workspace_size = 0; cub::DeviceRadixSort::SortPairs(nullptr, cub_workspace_size, clusters_to_probe, cluster_labels_out.data(), index_list, index_list_sorted, n_queries_probes, begin_bit, end_bit, stream); rmm::device_buffer cub_workspace(cub_workspace_size, stream, mr); cub::DeviceRadixSort::SortPairs(cub_workspace.data(), cub_workspace_size, clusters_to_probe, cluster_labels_out.data(), index_list, index_list_sorted, n_queries_probes, begin_bit, end_bit, stream); } // select and run the main search kernel uint32_t precomp_data_count = 0; switch (index.metric()) { case distance::DistanceType::L2SqrtExpanded: case distance::DistanceType::L2SqrtUnexpanded: case distance::DistanceType::L2Unexpanded: case distance::DistanceType::L2Expanded: { // stores basediff (query[i] - center[i]) precomp_data_count = index.rot_dim(); } break; case distance::DistanceType::InnerProduct: { // stores two components (query[i] * center[i], query[i] * center[i]) precomp_data_count = index.rot_dim() * 2; } break; default: { RAFT_FAIL("Unsupported metric"); } break; } auto search_instance = compute_similarity_select<ScoreT, LutT, IvfSampleFilterT>( resource::get_device_properties(handle), manage_local_topk, coresidency, preferred_shmem_carveout, index.pq_bits(), index.pq_dim(), precomp_data_count, n_queries, n_probes, topK); rmm::device_uvector<LutT> device_lut(search_instance.device_lut_size, stream, mr); std::optional<device_vector<float>> query_kths_buf{std::nullopt}; float* query_kths = nullptr; if (manage_local_topk) { 
query_kths_buf.emplace( make_device_mdarray<float>(handle, mr, make_extents<uint32_t>(n_queries))); linalg::map(handle, query_kths_buf->view(), raft::const_op<float>{dummy_block_sort_t<ScoreT, IdxT>::queue_t::kDummy}); query_kths = query_kths_buf->data_handle(); } compute_similarity_run(search_instance, stream, index.rot_dim(), n_probes, index.pq_dim(), n_queries, queries_offset, index.metric(), index.codebook_kind(), topK, max_samples, index.centers_rot().data_handle(), index.pq_centers().data_handle(), index.data_ptrs().data_handle(), clusters_to_probe, chunk_index.data(), query, index_list_sorted, query_kths, sample_filter, device_lut.data(), distances_buf.data(), neighbors_ptr); // Select topk vectors for each query rmm::device_uvector<ScoreT> topk_dists(n_queries * topK, stream, mr); matrix::detail::select_k<ScoreT, uint32_t>(handle, distances_buf.data(), neighbors_ptr, n_queries, topk_len, topK, topk_dists.data(), neighbors_uint32, true, mr); // Postprocessing postprocess_distances( distances, topk_dists.data(), index.metric(), n_queries, topK, scaling_factor, stream); postprocess_neighbors(neighbors, neighbors_uint32, index.inds_ptrs().data_handle(), clusters_to_probe, chunk_index.data(), n_queries, n_probes, topK, stream); } /** * This structure helps selecting a proper instance of the worker search function, * which contains a few template parameters. */ template <typename IdxT, typename IvfSampleFilterT> struct ivfpq_search { public: using fun_t = decltype(&ivfpq_search_worker<float, float, IvfSampleFilterT, IdxT>); /** * Select an instance of the ivf-pq search function based on search tuning parameters, * such as the look-up data type or the internal score type. 
*/ static auto fun(const search_params& params, distance::DistanceType metric) -> fun_t { return fun_try_score_t(params, metric); } private: template <typename ScoreT, typename LutT> static auto filter_reasonable_instances(const search_params& params) -> fun_t { if constexpr (sizeof(ScoreT) >= sizeof(LutT)) { return ivfpq_search_worker<ScoreT, LutT, IvfSampleFilterT, IdxT>; } else { RAFT_FAIL( "Unexpected lut_dtype / internal_distance_dtype combination (%d, %d). " "Size of the internal_distance_dtype should be not smaller than the size of the lut_dtype.", int(params.lut_dtype), int(params.internal_distance_dtype)); } } template <typename ScoreT> static auto fun_try_lut_t(const search_params& params, distance::DistanceType metric) -> fun_t { bool signed_metric = false; switch (metric) { case raft::distance::DistanceType::InnerProduct: signed_metric = true; break; default: break; } switch (params.lut_dtype) { case CUDA_R_32F: return filter_reasonable_instances<ScoreT, float>(params); case CUDA_R_16F: return filter_reasonable_instances<ScoreT, half>(params); case CUDA_R_8U: case CUDA_R_8I: if (signed_metric) { return filter_reasonable_instances<ScoreT, fp_8bit<5, true>>(params); } else { return filter_reasonable_instances<ScoreT, fp_8bit<5, false>>(params); } default: RAFT_FAIL("Unexpected lut_dtype (%d)", int(params.lut_dtype)); } } static auto fun_try_score_t(const search_params& params, distance::DistanceType metric) -> fun_t { switch (params.internal_distance_dtype) { case CUDA_R_32F: return fun_try_lut_t<float>(params, metric); case CUDA_R_16F: return fun_try_lut_t<half>(params, metric); default: RAFT_FAIL("Unexpected internal_distance_dtype (%d)", int(params.internal_distance_dtype)); } } }; /** * A heuristic for bounding the number of queries per batch, to improve GPU utilization. * (based on the number of SMs and the work size). 
* * @param res is used to query the workspace size * @param k top-k * @param n_probes number of selected clusters per query * @param n_queries number of queries hoped to be processed at once. * (maximum value for the returned batch size) * @param max_samples maximum possible number of samples to be processed for the given `n_probes` * * @return maximum recommended batch size. */ inline auto get_max_batch_size(raft::resources const& res, uint32_t k, uint32_t n_probes, uint32_t n_queries, uint32_t max_samples) -> uint32_t { uint32_t max_batch_size = n_queries; uint32_t n_ctas_total = getMultiProcessorCount() * 2; uint32_t n_ctas_total_per_batch = n_ctas_total / max_batch_size; float utilization = float(n_ctas_total_per_batch * max_batch_size) / n_ctas_total; if (n_ctas_total_per_batch > 1 || (n_ctas_total_per_batch == 1 && utilization < 0.6)) { uint32_t n_ctas_total_per_batch_1 = n_ctas_total_per_batch + 1; uint32_t max_batch_size_1 = n_ctas_total / n_ctas_total_per_batch_1; float utilization_1 = float(n_ctas_total_per_batch_1 * max_batch_size_1) / n_ctas_total; if (utilization < utilization_1) { max_batch_size = max_batch_size_1; } } // Check in the tmp distance buffer is not too big auto ws_size = [k, n_probes, max_samples](uint32_t bs) -> uint64_t { const uint64_t buffers_fused = 12ull * k * n_probes; const uint64_t buffers_non_fused = 4ull * max_samples; const uint64_t other = 32ull * n_probes; return static_cast<uint64_t>(bs) * (other + (is_local_topk_feasible(k, n_probes, bs) ? buffers_fused : buffers_non_fused)); }; auto max_ws_size = resource::get_workspace_free_bytes(res); if (ws_size(max_batch_size) > max_ws_size) { uint32_t smaller_batch_size = bound_by_power_of_two(max_batch_size); // gradually reduce the batch size until we fit into the max size limit. 
while (smaller_batch_size > 1 && ws_size(smaller_batch_size) > max_ws_size) { smaller_batch_size >>= 1; } return smaller_batch_size; } return max_batch_size; } /** See raft::spatial::knn::ivf_pq::search docs */ template <typename T, typename IdxT, typename IvfSampleFilterT = raft::neighbors::filtering::none_ivf_sample_filter> inline void search(raft::resources const& handle, const search_params& params, const index<IdxT>& index, const T* queries, uint32_t n_queries, uint32_t k, IdxT* neighbors, float* distances, IvfSampleFilterT sample_filter = IvfSampleFilterT()) { static_assert(std::is_same_v<T, float> || std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>, "Unsupported element type."); common::nvtx::range<common::nvtx::domain::raft> fun_scope( "ivf_pq::search(n_queries = %u, n_probes = %u, k = %u, dim = %zu)", n_queries, params.n_probes, k, index.dim()); resource::detail::warn_non_pool_workspace(handle, "raft::ivf_pq::search"); RAFT_EXPECTS( params.internal_distance_dtype == CUDA_R_16F || params.internal_distance_dtype == CUDA_R_32F, "internal_distance_dtype must be either CUDA_R_16F or CUDA_R_32F"); RAFT_EXPECTS(params.lut_dtype == CUDA_R_16F || params.lut_dtype == CUDA_R_32F || params.lut_dtype == CUDA_R_8U, "lut_dtype must be CUDA_R_16F, CUDA_R_32F or CUDA_R_8U"); RAFT_EXPECTS(k > 0, "parameter `k` in top-k must be positive."); RAFT_EXPECTS( k <= index.size(), "parameter `k` (%u) in top-k must not be larger that the total size of the index (%zu)", k, static_cast<uint64_t>(index.size())); RAFT_EXPECTS(params.n_probes > 0, "n_probes (number of clusters to probe in the search) must be positive."); switch (utils::check_pointer_residency(queries, neighbors, distances)) { case utils::pointer_residency::device_only: case utils::pointer_residency::host_and_device: break; default: RAFT_FAIL("all pointers must be accessible from the device."); } auto stream = resource::get_cuda_stream(handle); auto dim = index.dim(); auto dim_ext = index.dim_ext(); auto n_probes = 
std::min<uint32_t>(params.n_probes, index.n_lists()); uint32_t max_samples = 0; { IdxT ms = Pow2<128>::roundUp(index.accum_sorted_sizes()(n_probes)); RAFT_EXPECTS(ms <= IdxT(std::numeric_limits<uint32_t>::max()), "The maximum sample size is too big."); max_samples = ms; } auto mr = resource::get_workspace_resource(handle); // Maximum number of query vectors to search at the same time. const auto max_queries = std::min<uint32_t>(std::max<uint32_t>(n_queries, 1), 4096); auto max_batch_size = get_max_batch_size(handle, k, n_probes, max_queries, max_samples); rmm::device_uvector<float> float_queries(max_queries * dim_ext, stream, mr); rmm::device_uvector<float> rot_queries(max_queries * index.rot_dim(), stream, mr); rmm::device_uvector<uint32_t> clusters_to_probe(max_queries * n_probes, stream, mr); auto filter_adapter = raft::neighbors::filtering::ivf_to_sample_filter( index.inds_ptrs().data_handle(), sample_filter); auto search_instance = ivfpq_search<IdxT, decltype(filter_adapter)>::fun(params, index.metric()); for (uint32_t offset_q = 0; offset_q < n_queries; offset_q += max_queries) { uint32_t queries_batch = min(max_queries, n_queries - offset_q); select_clusters(handle, clusters_to_probe.data(), float_queries.data(), queries_batch, n_probes, index.n_lists(), dim, dim_ext, index.metric(), queries + static_cast<size_t>(dim) * offset_q, index.centers().data_handle(), mr); // Rotate queries float alpha = 1.0; float beta = 0.0; linalg::gemm(handle, true, false, index.rot_dim(), queries_batch, dim, &alpha, index.rotation_matrix().data_handle(), dim, float_queries.data(), dim_ext, &beta, rot_queries.data(), index.rot_dim(), stream); for (uint32_t offset_b = 0; offset_b < queries_batch; offset_b += max_batch_size) { uint32_t batch_size = min(max_batch_size, queries_batch - offset_b); /* The distance calculation is done in the rotated/transformed space; as long as `index.rotation_matrix()` is orthogonal, the distances and thus results are preserved. 
*/ search_instance(handle, index, max_samples, n_probes, k, batch_size, offset_q + offset_b, clusters_to_probe.data() + uint64_t(n_probes) * offset_b, rot_queries.data() + uint64_t(index.rot_dim()) * offset_b, neighbors + uint64_t(k) * (offset_q + offset_b), distances + uint64_t(k) * (offset_q + offset_b), utils::config<T>::kDivisor / utils::config<float>::kDivisor, params.preferred_shmem_carveout, filter_adapter); } } } } // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_build.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/device_memory_resource.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/neighbors/detail/ivf_pq_codepacking.cuh> #include <raft/neighbors/ivf_list.hpp> #include <raft/neighbors/ivf_pq_types.hpp> #include <raft/cluster/kmeans_balanced.cuh> #include <raft/core/device_mdarray.hpp> #include <raft/core/logger.hpp> #include <raft/core/nvtx.hpp> #include <raft/core/operators.hpp> #include <raft/core/resource/detail/device_memory_resource.hpp> #include <raft/core/resources.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/add.cuh> #include <raft/linalg/detail/qr.cuh> #include <raft/linalg/gemm.cuh> #include <raft/linalg/map.cuh> #include <raft/linalg/norm.cuh> #include <raft/linalg/unary_op.cuh> #include <raft/matrix/gather.cuh> #include <raft/matrix/linewise_op.cuh> #include <raft/random/rng.cuh> #include <raft/stats/histogram.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/device_atomics.cuh> #include <raft/util/integer_utils.hpp> #include <raft/util/pow2_utils.cuh> #include <raft/util/vectorized.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/mr/device/managed_memory_resource.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <rmm/mr/device/pool_memory_resource.hpp> #include <thrust/extrema.h> 
#include <thrust/scan.h>

#include <memory>
#include <variant>

namespace raft::neighbors::ivf_pq::detail {

using namespace raft::spatial::knn::detail;  // NOLINT

// One warp handles one row: each lane loads an element, converts it via
// utils::mapping<T>, syncs the warp, then stores. The warp-wide sync between
// load and store is what makes the in-place re-layout in `copy_warped` safe
// (see the doc comment on `copy_warped` below).
template <uint32_t BlockDim, typename T, typename S>
__launch_bounds__(BlockDim) RAFT_KERNEL copy_warped_kernel(
  T* out, uint32_t ld_out, const S* in, uint32_t ld_in, uint32_t n_cols, size_t n_rows)
{
  using warp = Pow2<WarpSize>;
  // Global row handled by this warp, and the lane id within the warp.
  size_t row_ix = warp::div(size_t(threadIdx.x) + size_t(BlockDim) * size_t(blockIdx.x));
  uint32_t i    = warp::mod(threadIdx.x);
  if (row_ix >= n_rows) return;
  out += row_ix * ld_out;
  in += row_ix * ld_in;
  auto f = utils::mapping<T>{};
  for (uint32_t col_ix = i; col_ix < n_cols; col_ix += warp::Value) {
    auto x = f(in[col_ix]);
    __syncwarp();  // all lanes finish reading before any lane overwrites
    out[col_ix] = x;
  }
}

/**
 * Copy the data one warp-per-row:
 *
 *  1. load the data per-warp
 *  2. apply the `utils::mapping<T>{}`
 *  3. sync within warp
 *  4. store the data.
 *
 * Assuming sizeof(T) >= sizeof(S) and the data is properly aligned (see the usage in `build`), this
 * allows to re-structure the data within rows in-place.
 */
template <typename T, typename S>
void copy_warped(T* out,
                 uint32_t ld_out,
                 const S* in,
                 uint32_t ld_in,
                 uint32_t n_cols,
                 size_t n_rows,
                 rmm::cuda_stream_view stream)
{
  constexpr uint32_t kBlockDim = 128;
  dim3 threads(kBlockDim, 1, 1);
  // One warp per row -> (kBlockDim / WarpSize) rows per block.
  dim3 blocks(div_rounding_up_safe<size_t>(n_rows, kBlockDim / WarpSize), 1, 1);
  copy_warped_kernel<kBlockDim, T, S>
    <<<blocks, threads, 0, stream>>>(out, ld_out, in, ld_in, n_cols, n_rows);
}

/**
 * @brief Fill-in a random orthogonal transformation matrix.
 *
 * @param handle
 * @param force_random_rotation
 * @param n_rows
 * @param n_cols
 * @param[out] rotation_matrix device pointer to a row-major matrix of size [n_rows, n_cols].
 * @param rng random number generator state
 */
inline void make_rotation_matrix(raft::resources const& handle,
                                 bool force_random_rotation,
                                 uint32_t n_rows,
                                 uint32_t n_cols,
                                 float* rotation_matrix,
                                 raft::random::RngState rng = raft::random::RngState(7ULL))
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "ivf_pq::make_rotation_matrix(%u * %u)", n_rows, n_cols);
  auto stream  = resource::get_cuda_stream(handle);
  bool inplace = n_rows == n_cols;
  uint32_t n   = std::max(n_rows, n_cols);
  if (force_random_rotation || !inplace) {
    // Generate a random n x n matrix and orthogonalize it via QR. If the target
    // is not square, compute QR in a scratch buffer and copy the top-left
    // [n_rows, n_cols] slice into the output.
    rmm::device_uvector<float> buf(inplace ? 0 : n * n, stream);
    float* mat = inplace ? rotation_matrix : buf.data();
    raft::random::normal(handle, rng, mat, n * n, 0.0f, 1.0f);
    linalg::detail::qrGetQ_inplace(handle, mat, n, n, stream);
    if (!inplace) {
      RAFT_CUDA_TRY(cudaMemcpy2DAsync(rotation_matrix,
                                      sizeof(float) * n_cols,
                                      mat,
                                      sizeof(float) * n,
                                      sizeof(float) * n_cols,
                                      n_rows,
                                      cudaMemcpyDefault,
                                      stream));
    }
  } else {
    // Square and no random rotation requested: write the identity matrix
    // (1.0 on the diagonal, 0.0 elsewhere).
    uint32_t stride = n + 1;
    auto rotation_matrix_view =
      raft::make_device_vector_view<float, uint32_t>(rotation_matrix, n * n);
    linalg::map_offset(handle, rotation_matrix_view, [stride] __device__(uint32_t i) {
      return static_cast<float>(i % stride == 0u);
    });
  }
}

/**
 * @brief Compute residual vectors from the source dataset given by selected indices.
* * The residual has the form `rotation_matrix %* (dataset[row_ids, :] - center)` * */ template <typename T, typename IdxT> void select_residuals(raft::resources const& handle, float* residuals, IdxT n_rows, uint32_t dim, uint32_t rot_dim, const float* rotation_matrix, // [rot_dim, dim] const float* center, // [dim] const T* dataset, // [.., dim] const IdxT* row_ids, // [n_rows] rmm::mr::device_memory_resource* device_memory ) { auto stream = resource::get_cuda_stream(handle); rmm::device_uvector<float> tmp(size_t(n_rows) * size_t(dim), stream, device_memory); // Note: the number of rows of the input dataset isn't actually n_rows, but matrix::gather doesn't // need to know it, any strictly positive number would work. cub::TransformInputIterator<float, utils::mapping<float>, const T*> mapping_itr( dataset, utils::mapping<float>{}); raft::matrix::gather(mapping_itr, (IdxT)dim, n_rows, row_ids, n_rows, tmp.data(), stream); raft::matrix::linewise_op(handle, make_device_matrix_view<const T, IdxT>(tmp.data(), n_rows, dim), make_device_matrix_view<T, IdxT>(tmp.data(), n_rows, dim), true, raft::sub_op{}, make_device_vector_view<const T, IdxT>(center, dim)); float alpha = 1.0; float beta = 0.0; linalg::gemm(handle, true, false, rot_dim, n_rows, dim, &alpha, rotation_matrix, dim, tmp.data(), dim, &beta, residuals, rot_dim, stream); } /** * @brief Compute residual vectors from the source dataset given by selected indices. 
 *
 * The residual has the form
 *   `rotation_matrix %* (dataset[:, :] - centers[labels[:], 0:dim])`
 *
 */
template <typename T, typename IdxT>
void flat_compute_residuals(
  raft::resources const& handle,
  float* residuals,  // [n_rows, rot_dim]
  IdxT n_rows,
  device_matrix_view<const float, uint32_t, row_major> rotation_matrix,  // [rot_dim, dim]
  device_matrix_view<const float, uint32_t, row_major> centers,          // [n_lists, dim_ext]
  const T* dataset,                                                      // [n_rows, dim]
  std::variant<uint32_t, const uint32_t*> labels,                        // [n_rows]
  rmm::mr::device_memory_resource* device_memory)
{
  auto stream  = resource::get_cuda_stream(handle);
  auto dim     = rotation_matrix.extent(1);
  auto rot_dim = rotation_matrix.extent(0);
  rmm::device_uvector<float> tmp(n_rows * dim, stream, device_memory);
  auto tmp_view = raft::make_device_vector_view<float, IdxT>(tmp.data(), tmp.size());
  // tmp[i] = float(dataset[i]) - centers(label_of_row(i), i % dim).
  // `labels` is either one label shared by all rows or a per-row label array.
  linalg::map_offset(handle, tmp_view, [centers, dataset, labels, dim] __device__(size_t i) {
    auto row_ix = i / dim;
    auto el_ix  = i % dim;
    auto label  = std::holds_alternative<uint32_t>(labels)
                    ? std::get<uint32_t>(labels)
                    : std::get<const uint32_t*>(labels)[row_ix];
    return utils::mapping<float>{}(dataset[i]) - centers(label, el_ix);
  });

  // Rotate the residuals into the rot_dim space.
  float alpha = 1.0f;
  float beta  = 0.0f;
  linalg::gemm(handle,
               true,
               false,
               rot_dim,
               n_rows,
               dim,
               &alpha,
               rotation_matrix.data_handle(),
               dim,
               tmp.data(),
               dim,
               &beta,
               residuals,
               rot_dim,
               stream);
}

// Scatter row indices into per-cluster segments: each row i atomically claims
// the next free slot of its cluster (data_offsets is pre-filled with the
// cluster start offsets and is advanced by the atomicAdd).
template <uint32_t BlockDim, typename IdxT>
__launch_bounds__(BlockDim) RAFT_KERNEL
  fill_indices_kernel(IdxT n_rows, IdxT* data_indices, IdxT* data_offsets, const uint32_t* labels)
{
  const auto i = IdxT(BlockDim) * IdxT(blockIdx.x) + IdxT(threadIdx.x);
  if (i >= n_rows) { return; }
  data_indices[atomicAdd<IdxT>(data_offsets + labels[i], 1)] = i;
}

/**
 * @brief Calculate cluster offsets and arrange data indices into clusters.
 *
 * @param n_rows
 * @param n_lists
 * @param[in] labels output of k-means prediction [n_rows]
 * @param[in] cluster_sizes [n_lists]
 * @param[out] cluster_offsets [n_lists+1]
 * @param[out] data_indices [n_rows]
 *
 * @return size of the largest cluster
 */
template <typename IdxT>
auto calculate_offsets_and_indices(IdxT n_rows,
                                   uint32_t n_lists,
                                   const uint32_t* labels,
                                   const uint32_t* cluster_sizes,
                                   IdxT* cluster_offsets,
                                   IdxT* data_indices,
                                   rmm::cuda_stream_view stream) -> uint32_t
{
  auto exec_policy = rmm::exec_policy(stream);
  // Calculate the offsets
  IdxT cumsum = 0;
  update_device(cluster_offsets, &cumsum, 1, stream);
  thrust::inclusive_scan(
    exec_policy, cluster_sizes, cluster_sizes + n_lists, cluster_offsets + 1, add_op{});
  update_host(&cumsum, cluster_offsets + n_lists, 1, stream);
  uint32_t max_cluster_size =
    *thrust::max_element(exec_policy, cluster_sizes, cluster_sizes + n_lists);
  stream.synchronize();  // cumsum and max_cluster_size are read on the host below
  RAFT_EXPECTS(cumsum == n_rows, "cluster sizes do not add up.");
  RAFT_LOG_DEBUG("Max cluster size %d", max_cluster_size);
  rmm::device_uvector<IdxT> data_offsets_buf(n_lists, stream);
  auto data_offsets = data_offsets_buf.data();
  // fill_indices_kernel advances the offsets as it writes; work on a copy so
  // the caller's cluster_offsets stay intact.
  copy(data_offsets, cluster_offsets, n_lists, stream);
  constexpr uint32_t n_threads = 128;  // NOLINT
  const IdxT n_blocks          = raft::div_rounding_up_unsafe(n_rows, n_threads);
  fill_indices_kernel<n_threads>
    <<<n_blocks, n_threads, 0, stream>>>(n_rows, data_indices, data_offsets, labels);
  return max_cluster_size;
}

// Initialize the extended cluster centers (centers + their L2 norms in the
// extra column) and the rotated centers of the index from plain
// [n_lists, dim] cluster centers.
template <typename IdxT>
void set_centers(raft::resources const& handle, index<IdxT>* index, const float* cluster_centers)
{
  auto stream         = resource::get_cuda_stream(handle);
  auto* device_memory = resource::get_workspace_resource(handle);

  // combine cluster_centers and their norms
  RAFT_CUDA_TRY(cudaMemcpy2DAsync(index->centers().data_handle(),
                                  sizeof(float) * index->dim_ext(),
                                  cluster_centers,
                                  sizeof(float) * index->dim(),
                                  sizeof(float) * index->dim(),
                                  index->n_lists(),
                                  cudaMemcpyDefault,
                                  stream));

  rmm::device_uvector<float>
    center_norms(index->n_lists(), stream, device_memory);
  raft::linalg::rowNorm(center_norms.data(),
                        cluster_centers,
                        index->dim(),
                        index->n_lists(),
                        raft::linalg::L2Norm,
                        true,
                        stream);
  // Append the norm of each center into the extra column of the extended centers.
  RAFT_CUDA_TRY(cudaMemcpy2DAsync(index->centers().data_handle() + index->dim(),
                                  sizeof(float) * index->dim_ext(),
                                  center_norms.data(),
                                  sizeof(float),
                                  sizeof(float),
                                  index->n_lists(),
                                  cudaMemcpyDefault,
                                  stream));

  // Rotate cluster_centers
  float alpha = 1.0;
  float beta  = 0.0;
  linalg::gemm(handle,
               true,
               false,
               index->rot_dim(),
               index->n_lists(),
               index->dim(),
               &alpha,
               index->rotation_matrix().data_handle(),
               index->dim(),
               cluster_centers,
               index->dim(),
               &beta,
               index->centers_rot().data_handle(),
               index->rot_dim(),
               resource::get_cuda_stream(handle));
}

// Copy the trained PQ codebooks into the index, swapping the last two
// dimensions: the source is laid out [., pq_book_size, pq_len] while the
// destination (index.pq_centers()) is [., pq_len, pq_book_size].
template <typename IdxT>
void transpose_pq_centers(const resources& handle,
                          index<IdxT>& index,
                          const float* pq_centers_source)
{
  auto stream  = resource::get_cuda_stream(handle);
  auto extents = index.pq_centers().extents();
  static_assert(extents.rank() == 3);
  auto extents_source =
    make_extents<uint32_t>(extents.extent(0), extents.extent(2), extents.extent(1));
  auto span_source =
    make_mdspan<const float, uint32_t, row_major, false, true>(pq_centers_source, extents_source);
  auto pq_centers_view = raft::make_device_vector_view<float, IdxT>(
    index.pq_centers().data_handle(), index.pq_centers().size());
  linalg::map_offset(handle, pq_centers_view, [span_source, extents] __device__(size_t i) {
    // Decompose the flat destination offset into 3-d row-major indices...
    uint32_t ii[3];
    for (int r = 2; r > 0; r--) {
      ii[r] = i % extents.extent(r);
      i /= extents.extent(r);
    }
    ii[0] = i;
    // ...and read from the source with the last two indices swapped.
    return span_source(ii[0], ii[2], ii[1]);
  });
}

// Train one PQ codebook per pq_dim subspace (codebook_gen::PER_SUBSPACE).
template <typename IdxT>
void train_per_subset(raft::resources const& handle,
                      index<IdxT>& index,
                      size_t n_rows,
                      const float* trainset,   // [n_rows, dim]
                      const uint32_t* labels,  // [n_rows]
                      uint32_t kmeans_n_iters,
                      rmm::mr::device_memory_resource* managed_memory)
{
  auto stream        = resource::get_cuda_stream(handle);
  auto device_memory = resource::get_workspace_resource(handle);

  rmm::device_uvector<float>
    pq_centers_tmp(index.pq_centers().size(), stream, device_memory);
  rmm::device_uvector<float> sub_trainset(n_rows * size_t(index.pq_len()), stream, device_memory);
  rmm::device_uvector<uint32_t> sub_labels(n_rows, stream, device_memory);
  rmm::device_uvector<uint32_t> pq_cluster_sizes(index.pq_book_size(), stream, device_memory);

  for (uint32_t j = 0; j < index.pq_dim(); j++) {
    common::nvtx::range<common::nvtx::domain::raft> pq_per_subspace_scope(
      "ivf_pq::build::per_subspace[%u]", j);

    // Get the rotated cluster centers for each training vector.
    // This will be subtracted from the input vectors afterwards.
    utils::copy_selected<float, float, size_t, uint32_t>(
      n_rows,
      index.pq_len(),
      index.centers_rot().data_handle() + index.pq_len() * j,
      labels,
      index.rot_dim(),
      sub_trainset.data(),
      index.pq_len(),
      stream);

    // sub_trainset is the slice of: rotate(trainset) - centers_rot
    // (beta = -1 folds the center subtraction into the GEMM).
    float alpha = 1.0;
    float beta  = -1.0;
    linalg::gemm(handle,
                 true,
                 false,
                 index.pq_len(),
                 n_rows,
                 index.dim(),
                 &alpha,
                 index.rotation_matrix().data_handle() + index.dim() * index.pq_len() * j,
                 index.dim(),
                 trainset,
                 index.dim(),
                 &beta,
                 sub_trainset.data(),
                 index.pq_len(),
                 stream);

    // train PQ codebook for this subspace
    auto sub_trainset_view =
      raft::make_device_matrix_view<const float, IdxT>(sub_trainset.data(), n_rows, index.pq_len());
    auto centers_tmp_view = raft::make_device_matrix_view<float, IdxT>(
      pq_centers_tmp.data() + index.pq_book_size() * index.pq_len() * j,
      index.pq_book_size(),
      index.pq_len());
    auto sub_labels_view = raft::make_device_vector_view<uint32_t, IdxT>(sub_labels.data(), n_rows);
    auto cluster_sizes_view =
      raft::make_device_vector_view<uint32_t, IdxT>(pq_cluster_sizes.data(), index.pq_book_size());
    raft::cluster::kmeans_balanced_params kmeans_params;
    kmeans_params.n_iters = kmeans_n_iters;
    kmeans_params.metric  = raft::distance::DistanceType::L2Expanded;
    raft::cluster::kmeans_balanced::helpers::build_clusters(handle,
                                                            kmeans_params,
                                                            sub_trainset_view,
                                                            centers_tmp_view,
                                                            sub_labels_view,
                                                            cluster_sizes_view,
                                                            utils::mapping<float>{});
  }
  transpose_pq_centers(handle, index, pq_centers_tmp.data());
}

// Train one PQ codebook per IVF cluster (codebook_gen::PER_CLUSTER): for each
// cluster, compute its residuals in the rotated space, reinterpret them as
// pq_len-dimensional points, and run balanced k-means to build the codebook.
template <typename IdxT>
void train_per_cluster(raft::resources const& handle,
                       index<IdxT>& index,
                       size_t n_rows,
                       const float* trainset,   // [n_rows, dim]
                       const uint32_t* labels,  // [n_rows]
                       uint32_t kmeans_n_iters,
                       rmm::mr::device_memory_resource* managed_memory)
{
  auto stream        = resource::get_cuda_stream(handle);
  auto device_memory = resource::get_workspace_resource(handle);

  rmm::device_uvector<float> pq_centers_tmp(index.pq_centers().size(), stream, device_memory);
  // managed memory: cluster sizes and offsets are read on the host after the sync below.
  rmm::device_uvector<uint32_t> cluster_sizes(index.n_lists(), stream, managed_memory);
  rmm::device_uvector<IdxT> indices_buf(n_rows, stream, device_memory);
  rmm::device_uvector<IdxT> offsets_buf(index.n_lists() + 1, stream, managed_memory);

  raft::stats::histogram<uint32_t, size_t>(raft::stats::HistTypeAuto,
                                           reinterpret_cast<int32_t*>(cluster_sizes.data()),
                                           index.n_lists(),
                                           labels,
                                           n_rows,
                                           1,
                                           stream);

  auto cluster_offsets      = offsets_buf.data();
  auto indices              = indices_buf.data();
  uint32_t max_cluster_size = calculate_offsets_and_indices(
    IdxT(n_rows), index.n_lists(), labels, cluster_sizes.data(), cluster_offsets, indices, stream);

  // Scratch buffers sized for the largest cluster; reused across iterations.
  rmm::device_uvector<uint32_t> pq_labels(
    size_t(max_cluster_size) * size_t(index.pq_dim()), stream, device_memory);
  rmm::device_uvector<uint32_t> pq_cluster_sizes(index.pq_book_size(), stream, device_memory);
  rmm::device_uvector<float> rot_vectors(
    size_t(max_cluster_size) * size_t(index.rot_dim()), stream, device_memory);

  resource::sync_stream(handle);  // make sure cluster offsets are up-to-date
  for (uint32_t l = 0; l < index.n_lists(); l++) {
    auto cluster_size = cluster_sizes.data()[l];
    if (cluster_size == 0) continue;
    common::nvtx::range<common::nvtx::domain::raft> pq_per_cluster_scope(
      "ivf_pq::build::per_cluster[%u](size = %u)", l, cluster_size);

    select_residuals(handle,
                     rot_vectors.data(),
                     IdxT(cluster_size),
                     index.dim(),
                     index.rot_dim(),
                     index.rotation_matrix().data_handle(),
                     index.centers().data_handle() + size_t(l) * size_t(index.dim_ext()),
                     trainset,
                     indices + cluster_offsets[l],
                     device_memory);

    // limit the cluster size to bound the training time.
    // [sic] we interpret the data as pq_len-dimensional
    size_t big_enough     = 256ul * std::max<size_t>(index.pq_book_size(), index.pq_dim());
    size_t available_rows = size_t(cluster_size) * size_t(index.pq_dim());
    auto pq_n_rows        = uint32_t(std::min(big_enough, available_rows));
    // train PQ codebook for this cluster
    auto rot_vectors_view = raft::make_device_matrix_view<const float, IdxT>(
      rot_vectors.data(), pq_n_rows, index.pq_len());
    auto centers_tmp_view = raft::make_device_matrix_view<float, IdxT>(
      pq_centers_tmp.data() + static_cast<size_t>(index.pq_book_size()) *
                                static_cast<size_t>(index.pq_len()) * static_cast<size_t>(l),
      index.pq_book_size(),
      index.pq_len());
    auto pq_labels_view =
      raft::make_device_vector_view<uint32_t, IdxT>(pq_labels.data(), pq_n_rows);
    auto pq_cluster_sizes_view =
      raft::make_device_vector_view<uint32_t, IdxT>(pq_cluster_sizes.data(), index.pq_book_size());
    raft::cluster::kmeans_balanced_params kmeans_params;
    kmeans_params.n_iters = kmeans_n_iters;
    kmeans_params.metric  = raft::distance::DistanceType::L2Expanded;
    raft::cluster::kmeans_balanced::helpers::build_clusters(handle,
                                                            kmeans_params,
                                                            rot_vectors_view,
                                                            centers_tmp_view,
                                                            pq_labels_view,
                                                            pq_cluster_sizes_view,
                                                            utils::mapping<float>{});
  }
  transpose_pq_centers(handle, index, pq_centers_tmp.data());
}

/**
 * A helper function: given the dataset in the rotated space
 *    [n_rows, rot_dim] = [n_rows, pq_dim * pq_len],
 * reinterpret the last dimension as two: [n_rows, pq_dim, pq_len]
 *
 * @tparam T
 * @tparam IdxT
 *
 * @param vectors input data [n_rows, rot_dim]
 * @param pq_centers codebook (used to infer the structure - pq_len)
 * @return reinterpreted vectors [n_rows, pq_dim, pq_len]
 */
template <typename T, typename IdxT>
static __device__ auto reinterpret_vectors(
  device_matrix_view<T,
                     IdxT,
                     row_major> vectors,
  device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers)
  -> device_mdspan<T, extent_3d<IdxT>, row_major>
{
  // No data is copied; this is a pure view re-shape.
  const uint32_t pq_len = pq_centers.extent(1);
  const uint32_t pq_dim = vectors.extent(1) / pq_len;
  using layout_t        = typename decltype(vectors)::layout_type;
  using accessor_t      = typename decltype(vectors)::accessor_type;
  return mdspan<T, extent_3d<IdxT>, layout_t, accessor_t>(
    vectors.data_handle(), extent_3d<IdxT>{vectors.extent(0), pq_dim, pq_len});
}

/**
 * A consumer for the `run_on_list` and `run_on_vector` that just flattens PQ codes
 * one-per-byte. That is, independent of the code width (pq_bits), one code uses
 * the whole byte, hence one vectors uses pq_dim bytes.
 */
struct unpack_codes {
  device_matrix_view<uint8_t, uint32_t, row_major> out_codes;

  /**
   * Create a callable to be passed to `run_on_list`.
   *
   * @param[out] out_codes the destination for the read codes.
   */
  __device__ inline unpack_codes(device_matrix_view<uint8_t, uint32_t, row_major> out_codes)
    : out_codes{out_codes}
  {
  }

  /** Write j-th component (code) of the i-th vector into the output array. */
  __device__ inline void operator()(uint8_t code, uint32_t i, uint32_t j)
  {
    out_codes(i, j) = code;
  }
};

// The code width (PqBits) is a compile-time parameter; the host-side wrapper
// dispatches the runtime pq_bits value to the matching instantiation.
template <uint32_t BlockSize, uint32_t PqBits>
__launch_bounds__(BlockSize) RAFT_KERNEL unpack_list_data_kernel(
  device_matrix_view<uint8_t, uint32_t, row_major> out_codes,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major>
    in_list_data,
  std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  const uint32_t pq_dim = out_codes.extent(1);
  auto unpack_action    = unpack_codes{out_codes};
  run_on_list<PqBits>(in_list_data, offset_or_indices, out_codes.extent(0), pq_dim, unpack_action);
}

/**
 * Unpack flat PQ codes from an existing list by the given offset.
 *
 * @param[out] codes flat PQ codes, one code per byte [n_rows, pq_dim]
 * @param[in] list_data the packed ivf::list data.
 * @param[in] offset_or_indices how many records in the list to skip or the exact indices.
 * @param[in] pq_bits codebook size (1 << pq_bits)
 * @param[in] stream
 */
inline void unpack_list_data(
  device_matrix_view<uint8_t, uint32_t, row_major> codes,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  std::variant<uint32_t, const uint32_t*> offset_or_indices,
  uint32_t pq_bits,
  rmm::cuda_stream_view stream)
{
  auto n_rows = codes.extent(0);
  if (n_rows == 0) { return; }

  constexpr uint32_t kBlockSize = 256;
  dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Dispatch the runtime pq_bits value to the matching kernel instantiation.
  auto kernel = [pq_bits]() {
    switch (pq_bits) {
      case 4: return unpack_list_data_kernel<kBlockSize, 4>;
      case 5: return unpack_list_data_kernel<kBlockSize, 5>;
      case 6: return unpack_list_data_kernel<kBlockSize, 6>;
      case 7: return unpack_list_data_kernel<kBlockSize, 7>;
      case 8: return unpack_list_data_kernel<kBlockSize, 8>;
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }();
  kernel<<<blocks, threads, 0, stream>>>(codes, list_data, offset_or_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/** Unpack the list data; see the public interface for the api and usage. */
template <typename IdxT>
void unpack_list_data(raft::resources const& res,
                      const index<IdxT>& index,
                      device_matrix_view<uint8_t, uint32_t, row_major> out_codes,
                      uint32_t label,
                      std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  unpack_list_data(out_codes,
                   index.lists()[label]->data.view(),
                   offset_or_indices,
                   index.pq_bits(),
                   resource::get_cuda_stream(res));
}

/**
 * A consumer for the `run_on_vector` that just flattens PQ codes
 * into a tightly packed matrix. That is, the codes are not expanded to one code-per-byte.
 */
template <uint32_t PqBits>
struct unpack_contiguous {
  uint8_t* codes;
  uint32_t code_size;

  /**
   * Create a callable to be passed to `run_on_vector`.
   *
   * @param[in] codes flat compressed PQ codes
   */
  __host__ __device__ inline unpack_contiguous(uint8_t* codes, uint32_t pq_dim)
    : codes{codes}, code_size{raft::ceildiv<uint32_t>(pq_dim * PqBits, 8)}
  {
  }

  /** Write j-th component (code) of the i-th vector into the output array. */
  __host__ __device__ inline void operator()(uint8_t code, uint32_t i, uint32_t j)
  {
    // Re-pack the code into the tight bitfield at this row's byte offset.
    bitfield_view_t<PqBits> code_view{codes + i * code_size};
    code_view[j] = code;
  }
};

template <uint32_t BlockSize, uint32_t PqBits>
__launch_bounds__(BlockSize) RAFT_KERNEL unpack_contiguous_list_data_kernel(
  uint8_t* out_codes,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major>
    in_list_data,
  uint32_t n_rows,
  uint32_t pq_dim,
  std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  run_on_list<PqBits>(
    in_list_data, offset_or_indices, n_rows, pq_dim, unpack_contiguous<PqBits>(out_codes, pq_dim));
}

/**
 * Unpack flat PQ codes from an existing list by the given offset.
 *
 * @param[out] codes flat compressed PQ codes [n_rows, ceildiv(pq_dim * pq_bits, 8)]
 * @param[in] list_data the packed ivf::list data.
 * @param[in] offset_or_indices how many records in the list to skip or the exact indices.
 * @param[in] pq_bits codebook size (1 << pq_bits)
 * @param[in] stream
 */
inline void unpack_contiguous_list_data(
  uint8_t* codes,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  uint32_t n_rows,
  uint32_t pq_dim,
  std::variant<uint32_t, const uint32_t*> offset_or_indices,
  uint32_t pq_bits,
  rmm::cuda_stream_view stream)
{
  if (n_rows == 0) { return; }

  constexpr uint32_t kBlockSize = 256;
  dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Dispatch the runtime pq_bits value to the matching kernel instantiation.
  auto kernel = [pq_bits]() {
    switch (pq_bits) {
      case 4: return unpack_contiguous_list_data_kernel<kBlockSize, 4>;
      case 5: return unpack_contiguous_list_data_kernel<kBlockSize, 5>;
      case 6: return unpack_contiguous_list_data_kernel<kBlockSize, 6>;
      case 7: return unpack_contiguous_list_data_kernel<kBlockSize, 7>;
      case 8: return unpack_contiguous_list_data_kernel<kBlockSize, 8>;
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }();
  kernel<<<blocks, threads, 0, stream>>>(codes, list_data, n_rows, pq_dim, offset_or_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/** Unpack the list data; see the public interface for the api and usage. */
template <typename IdxT>
void unpack_contiguous_list_data(raft::resources const& res,
                                 const index<IdxT>& index,
                                 uint8_t* out_codes,
                                 uint32_t n_rows,
                                 uint32_t label,
                                 std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  unpack_contiguous_list_data(out_codes,
                              index.lists()[label]->data.view(),
                              n_rows,
                              index.pq_dim(),
                              offset_or_indices,
                              index.pq_bits(),
                              resource::get_cuda_stream(res));
}

/** A consumer for the `run_on_list` and `run_on_vector` that approximates the original input data.
 */
struct reconstruct_vectors {
  codebook_gen codebook_kind;
  uint32_t cluster_ix;
  uint32_t pq_len;
  device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers;
  device_mdspan<const float, extent_3d<uint32_t>, row_major> centers_rot;
  device_mdspan<float, extent_3d<uint32_t>, row_major> out_vectors;

  /**
   * Create a callable to be passed to `run_on_list`.
   *
   * @param[out] out_vectors the destination for the decoded vectors.
   * @param[in] pq_centers the codebook
   * @param[in] centers_rot
   * @param[in] codebook_kind
   * @param[in] cluster_ix label/id of the cluster.
   */
  __device__ inline reconstruct_vectors(
    device_matrix_view<float, uint32_t, row_major> out_vectors,
    device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers,
    device_matrix_view<const float, uint32_t, row_major> centers_rot,
    codebook_gen codebook_kind,
    uint32_t cluster_ix)
    : codebook_kind{codebook_kind},
      cluster_ix{cluster_ix},
      pq_len{pq_centers.extent(1)},
      pq_centers{pq_centers},
      // both matrices are re-viewed as [., pq_dim, pq_len] for per-subspace access
      centers_rot{reinterpret_vectors(centers_rot, pq_centers)},
      out_vectors{reinterpret_vectors(out_vectors, pq_centers)}
  {
  }

  /**
   * Decode j-th component of the i-th vector by its code and write it into a chunk of the output
   * vectors (pq_len elements).
   */
  __device__ inline void operator()(uint8_t code, uint32_t i, uint32_t j)
  {
    // Select the codebook partition: per-subspace codebooks are indexed by the
    // subspace id j, per-cluster codebooks by the cluster id.
    uint32_t partition_ix;
    switch (codebook_kind) {
      case codebook_gen::PER_CLUSTER: {
        partition_ix = cluster_ix;
      } break;
      case codebook_gen::PER_SUBSPACE: {
        partition_ix = j;
      } break;
      default: __builtin_unreachable();
    }
    for (uint32_t k = 0; k < pq_len; k++) {
      // approximation = codebook entry + rotated cluster center
      out_vectors(i, j, k) = pq_centers(partition_ix, k, code) + centers_rot(cluster_ix, j, k);
    }
  }
};

template <uint32_t BlockSize, uint32_t PqBits>
__launch_bounds__(BlockSize) RAFT_KERNEL reconstruct_list_data_kernel(
  device_matrix_view<float, uint32_t, row_major> out_vectors,
  device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major>
    in_list_data,
  device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers,
  device_matrix_view<const float, uint32_t, row_major> centers_rot,
  codebook_gen codebook_kind,
  uint32_t cluster_ix,
  std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  const uint32_t pq_dim = out_vectors.extent(1) / pq_centers.extent(1);
  auto reconstruct_action =
    reconstruct_vectors{out_vectors, pq_centers, centers_rot, codebook_kind, cluster_ix};
  run_on_list<PqBits>(
    in_list_data, offset_or_indices, out_vectors.extent(0), pq_dim, reconstruct_action);
}

/** Decode the list data; see the public interface for the api and usage. */
template <typename T, typename IdxT>
void reconstruct_list_data(raft::resources const& res,
                           const index<IdxT>& index,
                           device_matrix_view<T, uint32_t, row_major> out_vectors,
                           uint32_t label,
                           std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  auto n_rows = out_vectors.extent(0);
  if (n_rows == 0) { return; }

  auto& list = index.lists()[label];
  if (std::holds_alternative<uint32_t>(offset_or_indices)) {
    auto n_skip = std::get<uint32_t>(offset_or_indices);
    // sic! I'm using the upper bound `list.size` instead of exact `list_sizes(label)`
    // to avoid an extra device-host data copy and the stream sync.
    RAFT_EXPECTS(n_skip + n_rows <= list->size.load(),
                 "offset + output size must be not bigger than the cluster size.");
  }

  // Decode into the rotated space first.
  auto tmp = make_device_mdarray<float>(
    res, resource::get_workspace_resource(res), make_extents<uint32_t>(n_rows, index.rot_dim()));

  constexpr uint32_t kBlockSize = 256;
  dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Dispatch the runtime pq_bits value to the matching kernel instantiation.
  auto kernel = [](uint32_t pq_bits) {
    switch (pq_bits) {
      case 4: return reconstruct_list_data_kernel<kBlockSize, 4>;
      case 5: return reconstruct_list_data_kernel<kBlockSize, 5>;
      case 6: return reconstruct_list_data_kernel<kBlockSize, 6>;
      case 7: return reconstruct_list_data_kernel<kBlockSize, 7>;
      case 8: return reconstruct_list_data_kernel<kBlockSize, 8>;
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }(index.pq_bits());
  kernel<<<blocks, threads, 0, resource::get_cuda_stream(res)>>>(tmp.view(),
                                                                 list->data.view(),
                                                                 index.pq_centers(),
                                                                 index.centers_rot(),
                                                                 index.codebook_kind(),
                                                                 label,
                                                                 offset_or_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // If the output type is float, rotate straight into the output; otherwise
  // go through a scratch float buffer and convert at the end.
  float* out_float_ptr = nullptr;
  rmm::device_uvector<float> out_float_buf(
    0, resource::get_cuda_stream(res), resource::get_workspace_resource(res));
  if constexpr (std::is_same_v<T, float>) {
    out_float_ptr = out_vectors.data_handle();
  } else {
    out_float_buf.resize(size_t{n_rows} * size_t{index.dim()}, resource::get_cuda_stream(res));
    out_float_ptr = out_float_buf.data();
  }
  // Rotate the results back to the original space
  float alpha = 1.0;
  float beta  = 0.0;
  linalg::gemm(res,
               false,
               false,
               index.dim(),
               n_rows,
               index.rot_dim(),
               &alpha,
               index.rotation_matrix().data_handle(),
               index.dim(),
               tmp.data_handle(),
               index.rot_dim(),
               &beta,
               out_float_ptr,
               index.dim(),
               resource::get_cuda_stream(res));
  // Transform the data to the original type, if necessary
  if constexpr (!std::is_same_v<T, float>) {
    linalg::map(res,
                out_vectors,
                utils::mapping<T>{},
                make_device_matrix_view<const float>(out_float_ptr,
                                                     n_rows,
                                                     index.dim()));
  }
}

/**
 * A producer for the `write_list` and `write_vector` reads the codes byte-by-byte. That is,
 * independent of the code width (pq_bits), one code uses the whole byte, hence one vectors uses
 * pq_dim bytes.
 */
struct pass_codes {
  device_matrix_view<const uint8_t, uint32_t, row_major> codes;

  /**
   * Create a callable to be passed to `run_on_list`.
   *
   * @param[in] codes the source codes.
   */
  __device__ inline pass_codes(device_matrix_view<const uint8_t, uint32_t, row_major> codes)
    : codes{codes}
  {
  }

  /** Read j-th component (code) of the i-th vector from the source. */
  __device__ inline auto operator()(uint32_t i, uint32_t j) const -> uint8_t { return codes(i, j); }
};

template <uint32_t BlockSize, uint32_t PqBits>
__launch_bounds__(BlockSize) RAFT_KERNEL pack_list_data_kernel(
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  device_matrix_view<const uint8_t, uint32_t, row_major> codes,
  std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  write_list<PqBits, 1>(
    list_data, offset_or_indices, codes.extent(0), codes.extent(1), pass_codes{codes});
}

/**
 * Write flat PQ codes into an existing list by the given offset.
 *
 * NB: no memory allocation happens here; the list must fit the data (offset + n_rows).
 *
 * @param[out] list_data the packed ivf::list data.
 * @param[in] codes flat PQ codes, one code per byte [n_rows, pq_dim]
 * @param[in] offset_or_indices how many records in the list to skip or the exact indices.
 * @param[in] pq_bits codebook size (1 << pq_bits)
 * @param[in] stream
 */
inline void pack_list_data(
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  device_matrix_view<const uint8_t, uint32_t, row_major> codes,
  std::variant<uint32_t, const uint32_t*> offset_or_indices,
  uint32_t pq_bits,
  rmm::cuda_stream_view stream)
{
  auto n_rows = codes.extent(0);
  if (n_rows == 0) { return; }

  constexpr uint32_t kBlockSize = 256;
  dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Dispatch the runtime pq_bits value to the matching kernel instantiation.
  auto kernel = [pq_bits]() {
    switch (pq_bits) {
      case 4: return pack_list_data_kernel<kBlockSize, 4>;
      case 5: return pack_list_data_kernel<kBlockSize, 5>;
      case 6: return pack_list_data_kernel<kBlockSize, 6>;
      case 7: return pack_list_data_kernel<kBlockSize, 7>;
      case 8: return pack_list_data_kernel<kBlockSize, 8>;
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }();
  kernel<<<blocks, threads, 0, stream>>>(list_data, codes, offset_or_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/** Pack the codes into the given list of the index; convenience wrapper over the above. */
template <typename IdxT>
void pack_list_data(raft::resources const& res,
                    index<IdxT>* index,
                    device_matrix_view<const uint8_t, uint32_t, row_major> new_codes,
                    uint32_t label,
                    std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  pack_list_data(index->lists()[label]->data.view(),
                 new_codes,
                 offset_or_indices,
                 index->pq_bits(),
                 resource::get_cuda_stream(res));
}

/**
 * A producer for the `write_vector` reads tightly packed flat codes. That is,
 * the codes are not expanded to one code-per-byte.
 */
template <uint32_t PqBits>
struct pack_contiguous {
  const uint8_t* codes;
  uint32_t code_size;

  /**
   * Create a callable to be passed to `write_vector`.
   *
   * @param[in] codes flat compressed PQ codes
   */
  __host__ __device__ inline pack_contiguous(const uint8_t* codes, uint32_t pq_dim)
    : codes{codes}, code_size{raft::ceildiv<uint32_t>(pq_dim * PqBits, 8)}
  {
  }

  /** Read j-th component (code) of the i-th vector from the source. */
  __host__ __device__ inline auto operator()(uint32_t i, uint32_t j) -> uint8_t
  {
    // NOTE: const_cast to construct the bitfield view; this operator only reads.
    bitfield_view_t<PqBits> code_view{const_cast<uint8_t*>(codes + i * code_size)};
    return uint8_t(code_view[j]);
  }
};

template <uint32_t BlockSize, uint32_t PqBits>
__launch_bounds__(BlockSize) RAFT_KERNEL pack_contiguous_list_data_kernel(
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  const uint8_t* codes,
  uint32_t n_rows,
  uint32_t pq_dim,
  std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  write_list<PqBits, 1>(
    list_data, offset_or_indices, n_rows, pq_dim, pack_contiguous<PqBits>(codes, pq_dim));
}

/**
 * Write flat PQ codes into an existing list by the given offset.
 *
 * NB: no memory allocation happens here; the list must fit the data (offset + n_rows).
 *
 * @param[out] list_data the packed ivf::list data.
 * @param[in] codes flat compressed PQ codes [n_rows, ceildiv(pq_dim * pq_bits, 8)]
 * @param[in] offset_or_indices how many records in the list to skip or the exact indices.
 * @param[in] pq_bits bit length of a single PQ code (the codebook size is 1 << pq_bits)
 * @param[in] stream CUDA stream to order the packing kernel on
 */
inline void pack_contiguous_list_data(
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  const uint8_t* codes,
  uint32_t n_rows,
  uint32_t pq_dim,
  std::variant<uint32_t, const uint32_t*> offset_or_indices,
  uint32_t pq_bits,
  rmm::cuda_stream_view stream)
{
  // Nothing to pack: avoid launching an empty grid.
  if (n_rows == 0) { return; }

  constexpr uint32_t kBlockSize = 256;
  dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Select the kernel instantiation matching the runtime pq_bits value.
  auto kernel = [pq_bits]() {
    switch (pq_bits) {
      case 4: return pack_contiguous_list_data_kernel<kBlockSize, 4>;
      case 5: return pack_contiguous_list_data_kernel<kBlockSize, 5>;
      case 6: return pack_contiguous_list_data_kernel<kBlockSize, 6>;
      case 7: return pack_contiguous_list_data_kernel<kBlockSize, 7>;
      case 8: return pack_contiguous_list_data_kernel<kBlockSize, 8>;
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }();
  kernel<<<blocks, threads, 0, stream>>>(list_data, codes, n_rows, pq_dim, offset_or_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Pack tightly compressed (contiguous) PQ codes into one list (cluster) of the index.
 *
 * @tparam IdxT type of the index entries
 *
 * @param[in] res raft resources (provides the CUDA stream)
 * @param[inout] index the IVF-PQ index whose list is modified
 * @param[in] new_codes flat compressed PQ codes [n_rows, ceildiv(pq_dim * pq_bits, 8)]
 * @param[in] n_rows the number of records to write
 * @param[in] label the id (cluster) of the list to write into
 * @param[in] offset_or_indices where to write within the list: either a starting offset
 *   or a device array of explicit in-list positions
 */
template <typename IdxT>
void pack_contiguous_list_data(raft::resources const& res,
                               index<IdxT>* index,
                               const uint8_t* new_codes,
                               uint32_t n_rows,
                               uint32_t label,
                               std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  pack_contiguous_list_data(index->lists()[label]->data.view(),
                            new_codes,
                            n_rows,
                            index->pq_dim(),
                            offset_or_indices,
                            index->pq_bits(),
                            resource::get_cuda_stream(res));
}

/**
 *
 * A producer for the `write_list` and `write_vector` that encodes level-1 input vector residuals
 * into lvl-2 PQ codes.
 * Computing a PQ code means finding the closest cluster in a pq_dim-subspace.
 *
 * @tparam SubWarpSize
 *   how many threads work on a single vector;
 *   bounded by either WarpSize or pq_book_size.
 *
 * @param pq_centers
 *   - codebook_gen::PER_SUBSPACE: [pq_dim , pq_len, pq_book_size]
 *   - codebook_gen::PER_CLUSTER: [n_lists, pq_len, pq_book_size]
 * @param new_vector a single input of length rot_dim, reinterpreted as [pq_dim, pq_len].
 *   the input must be already transformed to floats, rotated, and the level 1 cluster
 *   center must be already subtracted (i.e. this is the residual of a single input vector).
 * @param codebook_kind which codebook layout is in use (per-subspace or per-cluster)
 * @param j index along pq_dim "dimension"
 * @param cluster_ix is used for PER_CLUSTER codebooks.
 */
template <uint32_t SubWarpSize, typename IdxT>
struct encode_vectors {
  codebook_gen codebook_kind;  // selects how the codebook partition is chosen in operator()
  uint32_t cluster_ix;         // only used for PER_CLUSTER codebooks
  device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers;
  device_mdspan<const float, extent_3d<IdxT>, row_major> in_vectors;

  __device__ inline encode_vectors(
    device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers,
    device_matrix_view<const float, IdxT, row_major> in_vectors,
    codebook_gen codebook_kind,
    uint32_t cluster_ix)
    : codebook_kind{codebook_kind},
      cluster_ix{cluster_ix},
      pq_centers{pq_centers},
      // View each [rot_dim] input row as [pq_dim, pq_len] to match the codebook layout
      // (operator() below indexes it as (i, j, k) with k < pq_len).
      in_vectors{reinterpret_vectors(in_vectors, pq_centers)}
  {
  }

  /**
   * Encode the j-th component (a pq_len-long sub-vector) of the i-th input vector:
   * find the nearest codebook entry (L2) and return its id as the PQ code.
   */
  __device__ inline auto operator()(IdxT i, uint32_t j) -> uint8_t
  {
    uint32_t lane_id = Pow2<SubWarpSize>::mod(laneId());
    // Pick the codebook partition: per-cluster books are indexed by the cluster id,
    // per-subspace books by the sub-space id (j).
    uint32_t partition_ix;
    switch (codebook_kind) {
      case codebook_gen::PER_CLUSTER: {
        partition_ix = cluster_ix;
      } break;
      case codebook_gen::PER_SUBSPACE: {
        partition_ix = j;
      } break;
      default: __builtin_unreachable();
    }

    const uint32_t pq_book_size = pq_centers.extent(2);
    const uint32_t pq_len       = pq_centers.extent(1);
    float min_dist              = std::numeric_limits<float>::infinity();
    uint8_t code                = 0;
    // calculate the distance for each PQ cluster, find the minimum for each thread
    for (uint32_t l = lane_id; l < pq_book_size; l += SubWarpSize) {
      // NB: the L2 quantifiers on residuals are always trained on L2 metric.
      float d = 0.0f;
      for (uint32_t k = 0; k < pq_len; k++) {
        auto t = in_vectors(i, j, k) - pq_centers(partition_ix, k, l);
        d += t * t;
      }
      if (d < min_dist) {
        min_dist = d;
        code     = uint8_t(l);
      }
    }
    // reduce among threads: butterfly shuffle to get the global argmin in every lane
    #pragma unroll
    for (uint32_t stride = SubWarpSize >> 1; stride > 0; stride >>= 1) {
      const auto other_dist = shfl_xor(min_dist, stride, SubWarpSize);
      const auto other_code = shfl_xor(code, stride, SubWarpSize);
      if (other_dist < min_dist) {
        min_dist = other_dist;
        code     = other_code;
      }
    }
    return code;
  }
};

template <uint32_t BlockSize, uint32_t PqBits, typename IdxT>
__launch_bounds__(BlockSize) RAFT_KERNEL process_and_fill_codes_kernel(
  device_matrix_view<const float, IdxT, row_major> new_vectors,
  std::variant<IdxT, const IdxT*> src_offset_or_indices,
  const uint32_t* new_labels,
  device_vector_view<uint32_t, uint32_t, row_major> list_sizes,
  device_vector_view<IdxT*, uint32_t, row_major> inds_ptrs,
  device_vector_view<uint8_t*, uint32_t, row_major> data_ptrs,
  device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers,
  codebook_gen codebook_kind)
{
  // A sub-warp of threads handles one input vector; its size is bounded by the codebook size.
  constexpr uint32_t kSubWarpSize = std::min<uint32_t>(WarpSize, 1u << PqBits);
  using subwarp_align             = Pow2<kSubWarpSize>;
  const uint32_t lane_id          = subwarp_align::mod(threadIdx.x);
  const IdxT row_ix = subwarp_align::div(IdxT{threadIdx.x} + IdxT{BlockSize} * IdxT{blockIdx.x});
  if (row_ix >= new_vectors.extent(0)) { return; }

  const uint32_t cluster_ix = new_labels[row_ix];
  uint32_t out_ix;
  // Lane 0 claims the next free slot in the destination list (list_sizes doubles as a
  // per-list counter here); the slot index is then broadcast to the rest of the sub-warp.
  if (lane_id == 0) { out_ix = atomicAdd(&list_sizes(cluster_ix), 1); }
  out_ix = shfl(out_ix, 0, kSubWarpSize);

  // write the label (one record per subwarp)
  auto pq_indices = inds_ptrs(cluster_ix);
  if (lane_id == 0) {
    if (std::holds_alternative<IdxT>(src_offset_or_indices)) {
      // auto-indexing: source index = batch offset + row number
      pq_indices[out_ix] = std::get<IdxT>(src_offset_or_indices) + row_ix;
    } else {
      pq_indices[out_ix] = std::get<const IdxT*>(src_offset_or_indices)[row_ix];
    }
  }

  // write the codes (one record per subwarp):
  const uint32_t pq_dim = new_vectors.extent(1) / pq_centers.extent(1);
  auto pq_extents = list_spec<uint32_t, IdxT>{PqBits, pq_dim, true}.make_list_extents(out_ix + 1);
  auto pq_dataset =
    make_mdspan<uint8_t, uint32_t, row_major, false, true>(data_ptrs[cluster_ix], pq_extents);
  write_vector<PqBits, kSubWarpSize>(
    pq_dataset,
    out_ix,
    row_ix,
    pq_dim,
    encode_vectors<kSubWarpSize, IdxT>{pq_centers, new_vectors, codebook_kind, cluster_ix});
}

template <uint32_t BlockSize, uint32_t PqBits>
__launch_bounds__(BlockSize) RAFT_KERNEL encode_list_data_kernel(
  device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> list_data,
  device_matrix_view<const float, uint32_t, row_major> new_vectors,
  device_mdspan<const float, extent_3d<uint32_t>, row_major> pq_centers,
  codebook_gen codebook_kind,
  uint32_t cluster_ix,
  std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  constexpr uint32_t kSubWarpSize = std::min<uint32_t>(WarpSize, 1u << PqBits);
  // Recover pq_dim from the input width and the sub-vector length of the codebook.
  const uint32_t pq_dim = new_vectors.extent(1) / pq_centers.extent(1);
  auto encode_action =
    encode_vectors<kSubWarpSize, uint32_t>{pq_centers, new_vectors, codebook_kind, cluster_ix};
  write_list<PqBits, kSubWarpSize>(
    list_data, offset_or_indices, new_vectors.extent(0), pq_dim, encode_action);
}

/**
 * Encode a batch of input vectors and write the codes into one list (cluster) of the index.
 *
 * @tparam T source data element type
 * @tparam IdxT type of the index entries
 *
 * @param[in] res raft resources
 * @param[inout] index the IVF-PQ index whose list is modified
 * @param[in] new_vectors the data to encode
 * @param[in] label the id (cluster) of the list to write into
 * @param[in] offset_or_indices where to write within the list
 */
template <typename T, typename IdxT>
void encode_list_data(raft::resources const& res,
                      index<IdxT>* index,
                      device_matrix_view<const T, uint32_t, row_major> new_vectors,
                      uint32_t label,
                      std::variant<uint32_t, const uint32_t*> offset_or_indices)
{
  auto n_rows = new_vectors.extent(0);
  if (n_rows == 0) { return; }

  auto mr = resource::get_workspace_resource(res);

  // Convert to float, rotate, and subtract the level-1 cluster center:
  // the encoding kernel works on residuals, not raw input.
  auto new_vectors_residual =
    make_device_mdarray<float>(res, mr, make_extents<uint32_t>(n_rows, index->rot_dim()));

  flat_compute_residuals<T, uint32_t>(res,
                                      new_vectors_residual.data_handle(),
                                      n_rows,
                                      index->rotation_matrix(),
                                      index->centers(),
                                      new_vectors.data_handle(),
                                      label,
                                      mr);

  constexpr uint32_t kBlockSize  = 256;
  const uint32_t threads_per_vec = std::min<uint32_t>(WarpSize, index->pq_book_size());
  dim3 blocks(div_rounding_up_safe<uint32_t>(n_rows, kBlockSize / threads_per_vec), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Select the kernel instantiation matching the runtime pq_bits value.
  auto kernel = [](uint32_t pq_bits) {
    switch (pq_bits) {
      case 4: return encode_list_data_kernel<kBlockSize, 4>;
      case 5: return encode_list_data_kernel<kBlockSize, 5>;
      case 6: return encode_list_data_kernel<kBlockSize, 6>;
      case 7: return encode_list_data_kernel<kBlockSize, 7>;
      case 8: return encode_list_data_kernel<kBlockSize, 8>;
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }(index->pq_bits());
  kernel<<<blocks, threads, 0, resource::get_cuda_stream(res)>>>(index->lists()[label]->data.view(),
                                                                 new_vectors_residual.view(),
                                                                 index->pq_centers(),
                                                                 index->codebook_kind(),
                                                                 label,
                                                                 offset_or_indices);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Assuming the index already has some data and allocated the space for more, write more data in it.
 * There must be enough free space in `pq_dataset()` and `indices()`, as computed using
 * `list_offsets()` and `list_sizes()`.
 *
 * NB: Since the pq_dataset is stored in the interleaved blocked format (see ivf_pq_types.hpp), one
 * cannot just concatenate the old and the new codes; the positions for the codes are determined the
 * same way as in the ivfpq_compute_similarity_kernel (see ivf_pq_search.cuh).
 *
 * @tparam T source data element type
 * @tparam IdxT type of the index entries
 *
 * @param handle raft resources
 * @param index the IVF-PQ index to fill
 * @param[in] new_vectors
 *    a pointer to a row-major device array [index.dim(), n_rows];
 * @param[in] src_offset_or_indices
 *    references for the new data:
 *    either a starting index for the auto-indexing
 *    or a pointer to a device array of explicit indices [n_rows];
 * @param[in] new_labels
 *    cluster ids (first-level quantization) - a device array [n_rows];
 * @param n_rows
 *    the number of records to write in.
 * @param mr
 *    a memory resource to use for device allocations
 */
template <typename T, typename IdxT>
void process_and_fill_codes(raft::resources const& handle,
                            index<IdxT>& index,
                            const T* new_vectors,
                            std::variant<IdxT, const IdxT*> src_offset_or_indices,
                            const uint32_t* new_labels,
                            IdxT n_rows,
                            rmm::mr::device_memory_resource* mr)
{
  // Convert to float, rotate, and subtract the level-1 cluster centers:
  // the fill-codes kernel encodes residuals.
  auto new_vectors_residual =
    make_device_mdarray<float>(handle, mr, make_extents<IdxT>(n_rows, index.rot_dim()));

  flat_compute_residuals<T, IdxT>(handle,
                                  new_vectors_residual.data_handle(),
                                  n_rows,
                                  index.rotation_matrix(),
                                  index.centers(),
                                  new_vectors,
                                  new_labels,
                                  mr);

  constexpr uint32_t kBlockSize  = 256;
  // One sub-warp per vector; the sub-warp size is bounded by the codebook size.
  const uint32_t threads_per_vec = std::min<uint32_t>(WarpSize, index.pq_book_size());
  dim3 blocks(div_rounding_up_safe<IdxT>(n_rows, kBlockSize / threads_per_vec), 1, 1);
  dim3 threads(kBlockSize, 1, 1);
  // Select the kernel instantiation matching the runtime pq_bits value.
  auto kernel = [](uint32_t pq_bits) {
    switch (pq_bits) {
      case 4: return process_and_fill_codes_kernel<kBlockSize, 4, IdxT>;
      case 5: return process_and_fill_codes_kernel<kBlockSize, 5, IdxT>;
      case 6: return process_and_fill_codes_kernel<kBlockSize, 6, IdxT>;
      case 7: return process_and_fill_codes_kernel<kBlockSize, 7, IdxT>;
      case 8: return process_and_fill_codes_kernel<kBlockSize, 8, IdxT>;
      default:
        RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }(index.pq_bits());
  kernel<<<blocks, threads, 0, resource::get_cuda_stream(handle)>>>(new_vectors_residual.view(),
                                                                    src_offset_or_indices,
                                                                    new_labels,
                                                                    index.list_sizes(),
                                                                    index.inds_ptrs(),
                                                                    index.data_ptrs(),
                                                                    index.pq_centers(),
                                                                    index.codebook_kind());
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

/**
 * Update the state of the dependent index members: refresh the device-side list
 * data/indices pointers and recompute the accumulated sorted list sizes.
 */
template <typename IdxT>
void recompute_internal_state(const raft::resources& res, index<IdxT>& index)
{
  auto stream  = resource::get_cuda_stream(res);
  auto tmp_res = resource::get_workspace_resource(res);
  rmm::device_uvector<uint32_t> sorted_sizes(index.n_lists(), stream, tmp_res);

  // Actualize the list pointers (a list may be null after erase)
  auto data_ptrs = index.data_ptrs();
  auto inds_ptrs = index.inds_ptrs();
  for (uint32_t label = 0; label < index.n_lists(); label++) {
    auto& list          = index.lists()[label];
    const auto data_ptr = list ? list->data.data_handle() : nullptr;
    const auto inds_ptr = list ? list->indices.data_handle() : nullptr;
    copy(&data_ptrs(label), &data_ptr, 1, stream);
    copy(&inds_ptrs(label), &inds_ptr, 1, stream);
  }

  // Sort the cluster sizes in the descending order.
  int begin_bit             = 0;
  int end_bit               = sizeof(uint32_t) * 8;
  size_t cub_workspace_size = 0;
  // First call with a null workspace pointer only queries the required temp-storage size.
  cub::DeviceRadixSort::SortKeysDescending(nullptr,
                                           cub_workspace_size,
                                           index.list_sizes().data_handle(),
                                           sorted_sizes.data(),
                                           index.n_lists(),
                                           begin_bit,
                                           end_bit,
                                           stream);
  rmm::device_buffer cub_workspace(cub_workspace_size, stream, tmp_res);
  cub::DeviceRadixSort::SortKeysDescending(cub_workspace.data(),
                                           cub_workspace_size,
                                           index.list_sizes().data_handle(),
                                           sorted_sizes.data(),
                                           index.n_lists(),
                                           begin_bit,
                                           end_bit,
                                           stream);
  // copy the results to CPU
  std::vector<uint32_t> sorted_sizes_host(index.n_lists());
  copy(sorted_sizes_host.data(), sorted_sizes.data(), index.n_lists(), stream);
  resource::sync_stream(res);

  // accumulate the sorted cluster sizes (prefix sum on the host)
  auto accum_sorted_sizes = index.accum_sorted_sizes();
  accum_sorted_sizes(0)   = 0;
  for (uint32_t label = 0; label < sorted_sizes_host.size(); label++) {
    accum_sorted_sizes(label + 1) = accum_sorted_sizes(label) + sorted_sizes_host[label];
  }
}

/**
 * Helper function: allocate enough space in the list, compute the offset, at which to start
 * writing, and fill-in indices.
 *
 * @return offset for writing the data
 */
template <typename IdxT>
auto extend_list_prepare(raft::resources const& res,
                         index<IdxT>* index,
                         device_vector_view<const IdxT, uint32_t, row_major> new_indices,
                         uint32_t label) -> uint32_t
{
  uint32_t n_rows = new_indices.extent(0);
  uint32_t offset;
  // Allocate the lists to fit the new data.
  // Fetch the current list size to the host: the new records are appended at this offset.
  copy(&offset, index->list_sizes().data_handle() + label, 1, resource::get_cuda_stream(res));
  resource::sync_stream(res);
  uint32_t new_size = offset + n_rows;
  copy(index->list_sizes().data_handle() + label, &new_size, 1, resource::get_cuda_stream(res));
  auto spec = list_spec<uint32_t, IdxT>{
    index->pq_bits(), index->pq_dim(), index->conservative_memory_allocation()};
  auto& list = index->lists()[label];
  // Grow the list to `new_size`, preserving the first `offset` records.
  ivf::resize_list(res, list, spec, new_size, offset);
  copy(list->indices.data_handle() + offset,
       new_indices.data_handle(),
       n_rows,
       resource::get_cuda_stream(res));
  return offset;
}

/**
 * Extend one list of the index in-place, by the list label, skipping the classification and
 * encoding steps.
 * See the public interface for the api and usage.
 */
template <typename IdxT>
void extend_list_with_codes(raft::resources const& res,
                            index<IdxT>* index,
                            device_matrix_view<const uint8_t, uint32_t, row_major> new_codes,
                            device_vector_view<const IdxT, uint32_t, row_major> new_indices,
                            uint32_t label)
{
  // Allocate memory and write indices
  auto offset = extend_list_prepare(res, index, new_indices, label);
  // Pack the data
  pack_list_data<IdxT>(res, index, new_codes, label, offset);
  // Update the pointers and the sizes
  recompute_internal_state(res, *index);
}

/**
 * Extend one list of the index in-place, by the list label, skipping the classification step.
 * See the public interface for the api and usage.
 */
template <typename T, typename IdxT>
void extend_list(raft::resources const& res,
                 index<IdxT>* index,
                 device_matrix_view<const T, uint32_t, row_major> new_vectors,
                 device_vector_view<const IdxT, uint32_t, row_major> new_indices,
                 uint32_t label)
{
  // Allocate memory and write indices
  auto offset = extend_list_prepare(res, index, new_indices, label);
  // Encode the data
  encode_list_data<T, IdxT>(res, index, new_vectors, label, offset);
  // Update the pointers and the sizes
  recompute_internal_state(res, *index);
}

/**
 * Remove all data from a single list.
 * See the public interface for the api and usage.
 */
template <typename IdxT>
void erase_list(raft::resources const& res, index<IdxT>* index, uint32_t label)
{
  // Zero the list size and release the list storage; then refresh the dependent state.
  uint32_t zero = 0;
  copy(index->list_sizes().data_handle() + label, &zero, 1, resource::get_cuda_stream(res));
  index->lists()[label].reset();
  recompute_internal_state(res, *index);
}

/** Copy the state of an index into a new index, but share the list data among the two. */
template <typename IdxT>
auto clone(const raft::resources& res, const index<IdxT>& source) -> index<IdxT>
{
  auto stream = resource::get_cuda_stream(res);

  // Allocate the new index
  index<IdxT> target(res,
                     source.metric(),
                     source.codebook_kind(),
                     source.n_lists(),
                     source.dim(),
                     source.pq_bits(),
                     source.pq_dim());

  // Copy the independent parts
  copy(target.list_sizes().data_handle(),
       source.list_sizes().data_handle(),
       source.list_sizes().size(),
       stream);
  copy(target.rotation_matrix().data_handle(),
       source.rotation_matrix().data_handle(),
       source.rotation_matrix().size(),
       stream);
  copy(target.pq_centers().data_handle(),
       source.pq_centers().data_handle(),
       source.pq_centers().size(),
       stream);
  copy(target.centers().data_handle(),
       source.centers().data_handle(),
       source.centers().size(),
       stream);
  copy(target.centers_rot().data_handle(),
       source.centers_rot().data_handle(),
       source.centers_rot().size(),
       stream);

  // Copy shared pointers (the list data itself is shared, not duplicated)
  target.lists() = source.lists();

  // Make sure the device pointers point to the new lists
  recompute_internal_state(res, target);

  return target;
}

/**
 * Extend the index in-place.
 * See raft::spatial::knn::ivf_pq::extend docs.
 */
template <typename T, typename IdxT>
void extend(raft::resources const& handle,
            index<IdxT>* index,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "ivf_pq::extend(%zu, %u)", size_t(n_rows), index->dim());
  resource::detail::warn_non_pool_workspace(handle, "raft::ivf_pq::extend");
  auto stream           = resource::get_cuda_stream(handle);
  const auto n_clusters = index->n_lists();

  RAFT_EXPECTS(new_indices != nullptr || index->size() == 0,
               "You must pass data indices when the index is non-empty.");

  static_assert(std::is_same_v<T, float> || std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>,
                "Unsupported data type");

  rmm::mr::device_memory_resource* device_memory = nullptr;
  auto pool_guard = raft::get_pool_memory_resource(device_memory, 1024 * 1024);
  if (pool_guard) { RAFT_LOG_DEBUG("ivf_pq::extend: using pool memory resource"); }

  // Managed (UVM) fallback pool for buffers that may not fit into device memory.
  rmm::mr::managed_memory_resource managed_memory_upstream;
  rmm::mr::pool_memory_resource<rmm::mr::managed_memory_resource> managed_memory(
    &managed_memory_upstream, 1024 * 1024);

  // The spec defines how the clusters look like
  auto spec = list_spec<uint32_t, IdxT>{
    index->pq_bits(), index->pq_dim(), index->conservative_memory_allocation()};
  // Try to allocate an index with the same parameters and the projected new size
  // (which can be slightly larger than index->size() + n_rows, due to padding).
  // If this fails, the index would be too big to fit in the device anyway.
  std::optional<list_data<IdxT, size_t>> placeholder_list(
    std::in_place_t{},
    handle,
    list_spec<size_t, IdxT>{spec},
    n_rows + (kIndexGroupSize - 1) * std::min<IdxT>(n_clusters, n_rows));

  // Available device memory
  size_t free_mem, total_mem;
  RAFT_CUDA_TRY(cudaMemGetInfo(&free_mem, &total_mem));

  // Decide on an approximate threshold when we'd better start saving device memory by using
  // managed allocations for large device buffers
  rmm::mr::device_memory_resource* labels_mr  = device_memory;
  rmm::mr::device_memory_resource* batches_mr = device_memory;
  if (n_rows * (index->dim() * sizeof(T) + index->pq_dim() + sizeof(IdxT) + sizeof(uint32_t)) >
      free_mem) {
    labels_mr = &managed_memory;
  }
  // Allocate a buffer for the new labels (classifying the new data)
  rmm::device_uvector<uint32_t> new_data_labels(n_rows, stream, labels_mr);
  if (labels_mr == device_memory) { free_mem -= sizeof(uint32_t) * n_rows; }

  // Calculate the batch size for the input data if it's not accessible directly from the device
  constexpr size_t kReasonableMaxBatchSize = 65536;
  size_t max_batch_size                    = std::min<size_t>(n_rows, kReasonableMaxBatchSize);
  {
    // Estimate the per-record memory footprint of the batched processing below.
    size_t size_factor = 0;
    // we'll use two temporary buffers for converted inputs when computing the codes.
    size_factor += (index->dim() + index->rot_dim()) * sizeof(float);
    // ...and another buffer for indices
    size_factor += sizeof(IdxT);
    // if the input data is not accessible on device, we'd need a buffer for it.
    switch (utils::check_pointer_residency(new_vectors)) {
      case utils::pointer_residency::device_only:
      case utils::pointer_residency::host_and_device: break;
      default: size_factor += index->dim() * sizeof(T);
    }
    // the same with indices
    if (new_indices != nullptr) {
      switch (utils::check_pointer_residency(new_indices)) {
        case utils::pointer_residency::device_only:
        case utils::pointer_residency::host_and_device: break;
        default: size_factor += sizeof(IdxT);
      }
    }
    // make the batch size fit into the remaining memory
    while (size_factor * max_batch_size > free_mem && max_batch_size > 128) {
      max_batch_size >>= 1;
    }
    if (size_factor * max_batch_size > free_mem) {
      // if that still doesn't fit, resort to the UVM
      batches_mr     = &managed_memory;
      max_batch_size = kReasonableMaxBatchSize;
    } else {
      // If we're keeping the batches in device memory, update the available mem tracker.
      free_mem -= size_factor * max_batch_size;
    }
  }

  // Predict the cluster labels for the new data, in batches if necessary
  utils::batch_load_iterator<T> vec_batches(
    new_vectors, n_rows, index->dim(), max_batch_size, stream, batches_mr);
  // Release the placeholder memory, because we don't intend to allocate any more long-living
  // temporary buffers before we allocate the index data.
  // This memory could potentially speed up UVM accesses, if any.
  placeholder_list.reset();
  {
    // The cluster centers in the index are stored padded, which is not acceptable by
    // the kmeans_balanced::predict. Thus, we need the restructuring copy.
    rmm::device_uvector<float> cluster_centers(
      size_t(n_clusters) * size_t(index->dim()), stream, device_memory);
    RAFT_CUDA_TRY(cudaMemcpy2DAsync(cluster_centers.data(),
                                    sizeof(float) * index->dim(),
                                    index->centers().data_handle(),
                                    sizeof(float) * index->dim_ext(),
                                    sizeof(float) * index->dim(),
                                    n_clusters,
                                    cudaMemcpyDefault,
                                    stream));
    for (const auto& batch : vec_batches) {
      auto batch_data_view =
        raft::make_device_matrix_view<const T, IdxT>(batch.data(), batch.size(), index->dim());
      auto batch_labels_view = raft::make_device_vector_view<uint32_t, IdxT>(
        new_data_labels.data() + batch.offset(), batch.size());
      auto centers_view = raft::make_device_matrix_view<const float, IdxT>(
        cluster_centers.data(), n_clusters, index->dim());
      raft::cluster::kmeans_balanced_params kmeans_params;
      kmeans_params.metric = index->metric();
      raft::cluster::kmeans_balanced::predict(handle,
                                              kmeans_params,
                                              batch_data_view,
                                              centers_view,
                                              batch_labels_view,
                                              utils::mapping<float>{});
    }
  }

  auto list_sizes = index->list_sizes().data_handle();
  // store the current cluster sizes, because we'll need them later
  rmm::device_uvector<uint32_t> orig_list_sizes(n_clusters, stream, device_memory);
  copy(orig_list_sizes.data(), list_sizes, n_clusters, stream);

  // Get the combined cluster sizes (histogram of the new labels + the old sizes)
  raft::stats::histogram<uint32_t, IdxT>(raft::stats::HistTypeAuto,
                                         reinterpret_cast<int32_t*>(list_sizes),
                                         IdxT(n_clusters),
                                         new_data_labels.data(),
                                         n_rows,
                                         1,
                                         stream);
  linalg::add(list_sizes, list_sizes, orig_list_sizes.data(), n_clusters, stream);

  // Allocate the lists to fit the new data
  {
    std::vector<uint32_t> new_cluster_sizes(n_clusters);
    std::vector<uint32_t> old_cluster_sizes(n_clusters);
    copy(new_cluster_sizes.data(), list_sizes, n_clusters, stream);
    copy(old_cluster_sizes.data(), orig_list_sizes.data(), n_clusters, stream);
    resource::sync_stream(handle);
    for (uint32_t label = 0; label < n_clusters; label++) {
      ivf::resize_list(
        handle, index->lists()[label], spec, new_cluster_sizes[label], old_cluster_sizes[label]);
    }
  }

  // Update the pointers and the sizes
  recompute_internal_state(handle, *index);

  // Recover old cluster sizes: they are used as counters in the fill-codes kernel
  copy(list_sizes, orig_list_sizes.data(), n_clusters, stream);

  // By this point, the index state is updated and valid except it doesn't contain the new data
  // Fill the extended index with the new data (possibly, in batches)
  utils::batch_load_iterator<IdxT> idx_batches(
    new_indices, n_rows, 1, max_batch_size, stream, batches_mr);
  for (const auto& vec_batch : vec_batches) {
    const auto& idx_batch = *idx_batches++;
    process_and_fill_codes(handle,
                           *index,
                           vec_batch.data(),
                           new_indices != nullptr
                             ? std::variant<IdxT, const IdxT*>(idx_batch.data())
                             : std::variant<IdxT, const IdxT*>(IdxT(idx_batch.offset())),
                           new_data_labels.data() + vec_batch.offset(),
                           IdxT(vec_batch.size()),
                           batches_mr);
  }
}

/**
 * Create a new index that contains more data.
 * See raft::spatial::knn::ivf_pq::extend docs.
 */
template <typename T, typename IdxT>
auto extend(raft::resources const& handle,
            const index<IdxT>& orig_index,
            const T* new_vectors,
            const IdxT* new_indices,
            IdxT n_rows) -> index<IdxT>
{
  // Clone shares the list data with the original; the in-place extend below reallocates
  // the lists it grows, so the original index stays intact.
  auto ext_index = clone(handle, orig_index);
  detail::extend(handle, &ext_index, new_vectors, new_indices, n_rows);
  return ext_index;
}

/** See raft::spatial::knn::ivf_pq::build docs */
template <typename T, typename IdxT>
auto build(raft::resources const& handle,
           const index_params& params,
           const T* dataset,
           IdxT n_rows,
           uint32_t dim) -> index<IdxT>
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "ivf_pq::build(%zu, %u)", size_t(n_rows), dim);
  resource::detail::warn_non_pool_workspace(handle, "raft::ivf_pq::build");
  static_assert(std::is_same_v<T, float> || std::is_same_v<T, uint8_t> || std::is_same_v<T, int8_t>,
                "Unsupported data type");

  RAFT_EXPECTS(n_rows > 0 && dim > 0, "empty dataset");
  RAFT_EXPECTS(n_rows >= params.n_lists, "number of rows can't be less than n_lists");

  auto stream = resource::get_cuda_stream(handle);

  index<IdxT> index(handle, params, dim);
  utils::memzero(
    index.accum_sorted_sizes().data_handle(), index.accum_sorted_sizes().size(), stream);
  utils::memzero(index.list_sizes().data_handle(), index.list_sizes().size(), stream);
  utils::memzero(index.data_ptrs().data_handle(), index.data_ptrs().size(), stream);
  utils::memzero(index.inds_ptrs().data_handle(), index.inds_ptrs().size(), stream);

  {
    // Sub-sample the dataset for training: take every trainset_ratio-th row.
    auto trainset_ratio = std::max<size_t>(
      1,
      size_t(n_rows) / std::max<size_t>(params.kmeans_trainset_fraction * n_rows, index.n_lists()));
    size_t n_rows_train = n_rows / trainset_ratio;

    auto* device_memory = resource::get_workspace_resource(handle);
    rmm::mr::managed_memory_resource managed_memory_upstream;
    rmm::mr::pool_memory_resource<rmm::mr::managed_memory_resource> managed_memory(
      &managed_memory_upstream, 1024 * 1024);

    // If the trainset is small enough to comfortably fit into device memory, put it there.
    // Otherwise, use the managed memory.
    constexpr size_t kTolerableRatio = 4;
    rmm::mr::device_memory_resource* big_memory_resource = &managed_memory;
    if (sizeof(float) * n_rows_train * index.dim() * kTolerableRatio <
        resource::get_workspace_free_bytes(handle)) {
      big_memory_resource = device_memory;
    }

    // Besides just sampling, we transform the input dataset into floats to make it easier
    // to use gemm operations from cublas.
    rmm::device_uvector<float> trainset(n_rows_train * index.dim(), stream, big_memory_resource);
    // TODO: a proper sampling
    if constexpr (std::is_same_v<T, float>) {
      // Strided 2D copy implements the every-trainset_ratio-th-row sampling.
      RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset.data(),
                                      sizeof(T) * index.dim(),
                                      dataset,
                                      sizeof(T) * index.dim() * trainset_ratio,
                                      sizeof(T) * index.dim(),
                                      n_rows_train,
                                      cudaMemcpyDefault,
                                      stream));
    } else {
      size_t dim = index.dim();
      cudaPointerAttributes dataset_attr;
      RAFT_CUDA_TRY(cudaPointerGetAttributes(&dataset_attr, dataset));
      if (dataset_attr.devicePointer != nullptr) {
        // data is available on device: just run the kernel to copy and map the data
        auto p = reinterpret_cast<T*>(dataset_attr.devicePointer);
        auto trainset_view =
          raft::make_device_vector_view<float, IdxT>(trainset.data(), dim * n_rows_train);
        linalg::map_offset(handle, trainset_view, [p, trainset_ratio, dim] __device__(size_t i) {
          auto col = i % dim;
          return utils::mapping<float>{}(p[(i - col) * size_t(trainset_ratio) + col]);
        });
      } else {
        // data is not available: first copy, then map inplace
        auto trainset_tmp = reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(trainset.data()) +
                                                 (sizeof(float) - sizeof(T)) * index.dim());
        // We copy the data in strides, one row at a time, and place the smaller rows of type T
        // at the end of float rows.
        RAFT_CUDA_TRY(cudaMemcpy2DAsync(trainset_tmp,
                                        sizeof(float) * index.dim(),
                                        dataset,
                                        sizeof(T) * index.dim() * trainset_ratio,
                                        sizeof(T) * index.dim(),
                                        n_rows_train,
                                        cudaMemcpyDefault,
                                        stream));
        // Transform the input `{T -> float}`, one row per warp.
        // The threads in each warp copy the data synchronously; this and the layout of the data
        // (content is aligned to the end of the rows) together allow doing the transform in-place.
        copy_warped(trainset.data(),
                    index.dim(),
                    trainset_tmp,
                    index.dim() * sizeof(float) / sizeof(T),
                    index.dim(),
                    n_rows_train,
                    stream);
      }
    }

    // NB: here cluster_centers is used as if it is [n_clusters, data_dim] not [n_clusters,
    // dim_ext]!
    rmm::device_uvector<float> cluster_centers_buf(
      index.n_lists() * index.dim(), stream, device_memory);
    auto cluster_centers = cluster_centers_buf.data();

    // Train balanced hierarchical kmeans clustering
    auto trainset_const_view =
      raft::make_device_matrix_view<const float, IdxT>(trainset.data(), n_rows_train, index.dim());
    auto centers_view =
      raft::make_device_matrix_view<float, IdxT>(cluster_centers, index.n_lists(), index.dim());
    raft::cluster::kmeans_balanced_params kmeans_params;
    kmeans_params.n_iters = params.kmeans_n_iters;
    kmeans_params.metric  = index.metric();
    raft::cluster::kmeans_balanced::fit(
      handle, kmeans_params, trainset_const_view, centers_view, utils::mapping<float>{});

    // Trainset labels are needed for training PQ codebooks
    rmm::device_uvector<uint32_t> labels(n_rows_train, stream, big_memory_resource);
    auto centers_const_view = raft::make_device_matrix_view<const float, IdxT>(
      cluster_centers, index.n_lists(), index.dim());
    auto labels_view = raft::make_device_vector_view<uint32_t, IdxT>(labels.data(), n_rows_train);
    raft::cluster::kmeans_balanced::predict(handle,
                                            kmeans_params,
                                            trainset_const_view,
                                            centers_const_view,
                                            labels_view,
                                            utils::mapping<float>());

    // Make rotation matrix
    make_rotation_matrix(handle,
                         params.force_random_rotation,
                         index.rot_dim(),
                         index.dim(),
                         index.rotation_matrix().data_handle());

    set_centers(handle, &index, cluster_centers);

    // Train PQ codebooks
    switch (index.codebook_kind()) {
      case codebook_gen::PER_SUBSPACE:
        train_per_subset(handle,
                         index,
                         n_rows_train,
                         trainset.data(),
                         labels.data(),
                         params.kmeans_n_iters,
                         &managed_memory);
        break;
      case codebook_gen::PER_CLUSTER:
        train_per_cluster(handle,
                          index,
                          n_rows_train,
                          trainset.data(),
                          labels.data(),
                          params.kmeans_n_iters,
                          &managed_memory);
        break;
      default: RAFT_FAIL("Unreachable code");
    }
  }

  // add the data if necessary
  if (params.add_data_on_build) {
    detail::extend<T, IdxT>(handle, &index, dataset, nullptr, n_rows);
  }
  return index;
}

}  // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_dummy_block_sort.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/matrix/detail/select_warpsort.cuh> // matrix::detail::select::warpsort::warp_sort_distributed /* * This header file is a bit of an ugly duckling. The type dummy_block_sort is * needed by both ivf_pq_search.cuh and ivf_pq_compute_similarity.cuh. * * I have decided to move it to it's own header file, which is overkill. Perhaps * there is a nicer solution. * */ namespace raft::neighbors::ivf_pq::detail { template <typename T, typename IdxT> struct dummy_block_sort_t { using queue_t = matrix::detail::select::warpsort::warp_sort_distributed<WarpSize, true, T, IdxT>; template <typename... Args> __device__ dummy_block_sort_t(int k, Args...){}; }; } // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_interleaved_scan-inl.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/logger.hpp> // RAFT_LOG_TRACE #include <raft/core/operators.hpp> #include <raft/distance/distance_types.hpp> #include <raft/matrix/detail/select_warpsort.cuh> #include <raft/neighbors/ivf_flat_types.hpp> #include <raft/neighbors/sample_filter_types.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/util/cuda_rt_essentials.hpp> // RAFT_CUDA_TRY #include <raft/util/device_loads_stores.cuh> #include <raft/util/integer_utils.hpp> #include <raft/util/pow2_utils.cuh> #include <raft/util/vectorized.cuh> #include <rmm/cuda_stream_view.hpp> namespace raft::neighbors::ivf_flat::detail { using namespace raft::spatial::knn::detail; // NOLINT constexpr int kThreadsPerBlock = 128; /** * @brief Copy `n` elements per block from one place to another. 
* * @param[out] out target pointer (unique per block) * @param[in] in source pointer * @param n number of elements to copy */ template <int VecBytes = 16, typename T> __device__ inline void copy_vectorized(T* out, const T* in, uint32_t n) { constexpr int VecElems = VecBytes / sizeof(T); // NOLINT using align_bytes = Pow2<(size_t)VecBytes>; if constexpr (VecElems > 1) { using align_elems = Pow2<VecElems>; if (!align_bytes::areSameAlignOffsets(out, in)) { return copy_vectorized<(VecBytes >> 1), T>(out, in, n); } { // process unaligned head uint32_t head = align_bytes::roundUp(in) - in; if (head > 0) { copy_vectorized<sizeof(T), T>(out, in, head); n -= head; in += head; out += head; } } { // process main part vectorized using vec_t = typename IOType<T, VecElems>::Type; copy_vectorized<sizeof(vec_t), vec_t>( reinterpret_cast<vec_t*>(out), reinterpret_cast<const vec_t*>(in), align_elems::div(n)); } { // process unaligned tail uint32_t tail = align_elems::mod(n); if (tail > 0) { n -= tail; copy_vectorized<sizeof(T), T>(out + n, in + n, tail); } } } if constexpr (VecElems <= 1) { for (int i = threadIdx.x; i < n; i += blockDim.x) { out[i] = in[i]; } } } /** * @brief Load a part of a vector from the index and from query, compute the (part of the) distance * between them, and aggregate it using the provided Lambda; one structure per thread, per query, * and per index item. 
* * @tparam kUnroll elements per loop (normally, kUnroll = WarpSize / Veclen) * @tparam Lambda computing the part of the distance for one dimension and aggregating it: * void (AccT& acc, AccT x, AccT y) * @tparam Veclen size of the vectorized load * @tparam T type of the data in the query and the index * @tparam AccT type of the accumulated value (an optimization for 8bit values to be loaded as 32bit * values) */ template <int kUnroll, typename Lambda, int Veclen, typename T, typename AccT> struct loadAndComputeDist { Lambda compute_dist; AccT& dist; __device__ __forceinline__ loadAndComputeDist(AccT& dist, Lambda op) : dist(dist), compute_dist(op) { } /** * Load parts of vectors from the index and query and accumulates the partial distance. * This version assumes the query is stored in shared memory. * Every thread here processes exactly kUnroll * Veclen elements independently of others. */ template <typename IdxT> __device__ __forceinline__ void runLoadShmemCompute(const T* const& data, const T* query_shared, IdxT loadIndex, IdxT shmemIndex) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { T encV[Veclen]; ldg(encV, data + (loadIndex + j * kIndexGroupSize) * Veclen); T queryRegs[Veclen]; lds(queryRegs, &query_shared[shmemIndex + j * Veclen]); #pragma unroll for (int k = 0; k < Veclen; ++k) { compute_dist(dist, queryRegs[k], encV[k]); } } } /** * Load parts of vectors from the index and query and accumulates the partial distance. * This version assumes the query is stored in the global memory and is different for every * thread. One warp loads exactly WarpSize query elements at once and then reshuffles them into * corresponding threads (`WarpSize / (kUnroll * Veclen)` elements per thread at once). 
*/ template <typename IdxT> __device__ __forceinline__ void runLoadShflAndCompute(const T*& data, const T* query, IdxT baseLoadIndex, const int lane_id) { T queryReg = query[baseLoadIndex + lane_id]; constexpr int stride = kUnroll * Veclen; constexpr int totalIter = WarpSize / stride; constexpr int gmemStride = stride * kIndexGroupSize; #pragma unroll for (int i = 0; i < totalIter; ++i, data += gmemStride) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { T encV[Veclen]; ldg(encV, data + (lane_id + j * kIndexGroupSize) * Veclen); const int d = (i * kUnroll + j) * Veclen; #pragma unroll for (int k = 0; k < Veclen; ++k) { compute_dist(dist, shfl(queryReg, d + k, WarpSize), encV[k]); } } } } /** * Load parts of vectors from the index and query and accumulates the partial distance. * This version augments `runLoadShflAndCompute` when `dim` is not a multiple of `WarpSize`. */ __device__ __forceinline__ void runLoadShflAndComputeRemainder( const T*& data, const T* query, const int lane_id, const int dim, const int dimBlocks) { const int loadDim = dimBlocks + lane_id; T queryReg = loadDim < dim ? 
query[loadDim] : 0; const int loadDataIdx = lane_id * Veclen; for (int d = 0; d < dim - dimBlocks; d += Veclen, data += kIndexGroupSize * Veclen) { T enc[Veclen]; ldg(enc, data + loadDataIdx); #pragma unroll for (int k = 0; k < Veclen; k++) { compute_dist(dist, shfl(queryReg, d + k, WarpSize), enc[k]); } } } }; // This handles uint8_t 8, 16 Veclens template <int kUnroll, typename Lambda, int uint8_veclen> struct loadAndComputeDist<kUnroll, Lambda, uint8_veclen, uint8_t, uint32_t> { Lambda compute_dist; uint32_t& dist; __device__ __forceinline__ loadAndComputeDist(uint32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const uint8_t* const& data, const uint8_t* query_shared, int loadIndex, int shmemIndex) { constexpr int veclen_int = uint8_veclen / 4; // converting uint8_t veclens to int loadIndex = loadIndex * veclen_int; #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV[veclen_int]; ldg(encV, reinterpret_cast<unsigned const*>(data) + loadIndex + j * kIndexGroupSize * veclen_int); uint32_t queryRegs[veclen_int]; lds(queryRegs, reinterpret_cast<unsigned const*>(query_shared + shmemIndex) + j * veclen_int); #pragma unroll for (int k = 0; k < veclen_int; k++) { compute_dist(dist, queryRegs[k], encV[k]); } } } __device__ __forceinline__ void runLoadShflAndCompute(const uint8_t*& data, const uint8_t* query, int baseLoadIndex, const int lane_id) { constexpr int veclen_int = uint8_veclen / 4; // converting uint8_t veclens to int uint32_t queryReg = (lane_id < 8) ? 
reinterpret_cast<unsigned const*>(query + baseLoadIndex)[lane_id] : 0; constexpr int stride = kUnroll * uint8_veclen; #pragma unroll for (int i = 0; i < WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV[veclen_int]; ldg(encV, reinterpret_cast<unsigned const*>(data) + (lane_id + j * kIndexGroupSize) * veclen_int); const int d = (i * kUnroll + j) * veclen_int; #pragma unroll for (int k = 0; k < veclen_int; ++k) { compute_dist(dist, shfl(queryReg, d + k, WarpSize), encV[k]); } } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder(const uint8_t*& data, const uint8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen_int = uint8_veclen / 4; const int loadDim = dimBlocks + lane_id * 4; // Here 4 is for 1 - int uint32_t queryReg = loadDim < dim ? reinterpret_cast<uint32_t const*>(query + loadDim)[0] : 0; for (int d = 0; d < dim - dimBlocks; d += uint8_veclen, data += kIndexGroupSize * uint8_veclen) { uint32_t enc[veclen_int]; ldg(enc, reinterpret_cast<uint32_t const*>(data) + lane_id * veclen_int); #pragma unroll for (int k = 0; k < veclen_int; k++) { uint32_t q = shfl(queryReg, (d / 4) + k, WarpSize); compute_dist(dist, q, enc[k]); } } } }; // Keep this specialized uint8 Veclen = 4, because compiler is generating suboptimal code while // using above common template of int2/int4 template <int kUnroll, typename Lambda> struct loadAndComputeDist<kUnroll, Lambda, 4, uint8_t, uint32_t> { Lambda compute_dist; uint32_t& dist; __device__ __forceinline__ loadAndComputeDist(uint32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const uint8_t* const& data, const uint8_t* query_shared, int loadIndex, int shmemIndex) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV = reinterpret_cast<unsigned const*>(data)[loadIndex + j * kIndexGroupSize]; uint32_t queryRegs = reinterpret_cast<unsigned 
const*>(query_shared + shmemIndex)[j]; compute_dist(dist, queryRegs, encV); } } __device__ __forceinline__ void runLoadShflAndCompute(const uint8_t*& data, const uint8_t* query, int baseLoadIndex, const int lane_id) { uint32_t queryReg = (lane_id < 8) ? reinterpret_cast<unsigned const*>(query + baseLoadIndex)[lane_id] : 0; constexpr int veclen = 4; constexpr int stride = kUnroll * veclen; #pragma unroll for (int i = 0; i < WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV = reinterpret_cast<unsigned const*>(data)[lane_id + j * kIndexGroupSize]; uint32_t q = shfl(queryReg, i * kUnroll + j, WarpSize); compute_dist(dist, q, encV); } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder(const uint8_t*& data, const uint8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen = 4; const int loadDim = dimBlocks + lane_id; uint32_t queryReg = loadDim < dim ? reinterpret_cast<unsigned const*>(query)[loadDim] : 0; for (int d = 0; d < dim - dimBlocks; d += veclen, data += kIndexGroupSize * veclen) { uint32_t enc = reinterpret_cast<unsigned const*>(data)[lane_id]; uint32_t q = shfl(queryReg, d / veclen, WarpSize); compute_dist(dist, q, enc); } } }; template <int kUnroll, typename Lambda> struct loadAndComputeDist<kUnroll, Lambda, 2, uint8_t, uint32_t> { Lambda compute_dist; uint32_t& dist; __device__ __forceinline__ loadAndComputeDist(uint32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const uint8_t* const& data, const uint8_t* query_shared, int loadIndex, int shmemIndex) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV = reinterpret_cast<uint16_t const*>(data)[loadIndex + j * kIndexGroupSize]; uint32_t queryRegs = reinterpret_cast<uint16_t const*>(query_shared + shmemIndex)[j]; compute_dist(dist, queryRegs, encV); } } __device__ __forceinline__ void runLoadShflAndCompute(const 
uint8_t*& data, const uint8_t* query, int baseLoadIndex, const int lane_id) { uint32_t queryReg = (lane_id < 16) ? reinterpret_cast<uint16_t const*>(query + baseLoadIndex)[lane_id] : 0; constexpr int veclen = 2; constexpr int stride = kUnroll * veclen; #pragma unroll for (int i = 0; i < WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV = reinterpret_cast<uint16_t const*>(data)[lane_id + j * kIndexGroupSize]; uint32_t q = shfl(queryReg, i * kUnroll + j, WarpSize); compute_dist(dist, q, encV); } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder(const uint8_t*& data, const uint8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen = 2; int loadDim = dimBlocks + lane_id * veclen; uint32_t queryReg = loadDim < dim ? reinterpret_cast<uint16_t const*>(query + loadDim)[0] : 0; for (int d = 0; d < dim - dimBlocks; d += veclen, data += kIndexGroupSize * veclen) { uint32_t enc = reinterpret_cast<uint16_t const*>(data)[lane_id]; uint32_t q = shfl(queryReg, d / veclen, WarpSize); compute_dist(dist, q, enc); } } }; template <int kUnroll, typename Lambda> struct loadAndComputeDist<kUnroll, Lambda, 1, uint8_t, uint32_t> { Lambda compute_dist; uint32_t& dist; __device__ __forceinline__ loadAndComputeDist(uint32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const uint8_t* const& data, const uint8_t* query_shared, int loadIndex, int shmemIndex) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV = data[loadIndex + j * kIndexGroupSize]; uint32_t queryRegs = query_shared[shmemIndex + j]; compute_dist(dist, queryRegs, encV); } } __device__ __forceinline__ void runLoadShflAndCompute(const uint8_t*& data, const uint8_t* query, int baseLoadIndex, const int lane_id) { uint32_t queryReg = query[baseLoadIndex + lane_id]; constexpr int veclen = 1; constexpr int stride = kUnroll * veclen; 
#pragma unroll for (int i = 0; i < WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { uint32_t encV = data[lane_id + j * kIndexGroupSize]; uint32_t q = shfl(queryReg, i * kUnroll + j, WarpSize); compute_dist(dist, q, encV); } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder(const uint8_t*& data, const uint8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen = 1; int loadDim = dimBlocks + lane_id; uint32_t queryReg = loadDim < dim ? query[loadDim] : 0; for (int d = 0; d < dim - dimBlocks; d += veclen, data += kIndexGroupSize * veclen) { uint32_t enc = data[lane_id]; uint32_t q = shfl(queryReg, d, WarpSize); compute_dist(dist, q, enc); } } }; // This device function is for int8 veclens 4, 8 and 16 template <int kUnroll, typename Lambda, int int8_veclen> struct loadAndComputeDist<kUnroll, Lambda, int8_veclen, int8_t, int32_t> { Lambda compute_dist; int32_t& dist; __device__ __forceinline__ loadAndComputeDist(int32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const int8_t* const& data, const int8_t* query_shared, int loadIndex, int shmemIndex) { constexpr int veclen_int = int8_veclen / 4; // converting int8_t veclens to int #pragma unroll for (int j = 0; j < kUnroll; ++j) { int32_t encV[veclen_int]; ldg(encV, reinterpret_cast<int32_t const*>(data) + (loadIndex + j * kIndexGroupSize) * veclen_int); int32_t queryRegs[veclen_int]; lds(queryRegs, reinterpret_cast<int32_t const*>(query_shared + shmemIndex) + j * veclen_int); #pragma unroll for (int k = 0; k < veclen_int; k++) { compute_dist(dist, queryRegs[k], encV[k]); } } } __device__ __forceinline__ void runLoadShflAndCompute(const int8_t*& data, const int8_t* query, int baseLoadIndex, const int lane_id) { constexpr int veclen_int = int8_veclen / 4; // converting int8_t veclens to int int32_t queryReg = (lane_id < 8) ? 
reinterpret_cast<int32_t const*>(query + baseLoadIndex)[lane_id] : 0; constexpr int stride = kUnroll * int8_veclen; #pragma unroll for (int i = 0; i < WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { int32_t encV[veclen_int]; ldg(encV, reinterpret_cast<int32_t const*>(data) + (lane_id + j * kIndexGroupSize) * veclen_int); const int d = (i * kUnroll + j) * veclen_int; #pragma unroll for (int k = 0; k < veclen_int; ++k) { int32_t q = shfl(queryReg, d + k, WarpSize); compute_dist(dist, q, encV[k]); } } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder( const int8_t*& data, const int8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen_int = int8_veclen / 4; const int loadDim = dimBlocks + lane_id * 4; // Here 4 is for 1 - int; int32_t queryReg = loadDim < dim ? reinterpret_cast<int32_t const*>(query + loadDim)[0] : 0; for (int d = 0; d < dim - dimBlocks; d += int8_veclen, data += kIndexGroupSize * int8_veclen) { int32_t enc[veclen_int]; ldg(enc, reinterpret_cast<int32_t const*>(data) + lane_id * veclen_int); #pragma unroll for (int k = 0; k < veclen_int; k++) { int32_t q = shfl(queryReg, (d / 4) + k, WarpSize); // Here 4 is for 1 - int; compute_dist(dist, q, enc[k]); } } } }; template <int kUnroll, typename Lambda> struct loadAndComputeDist<kUnroll, Lambda, 2, int8_t, int32_t> { Lambda compute_dist; int32_t& dist; __device__ __forceinline__ loadAndComputeDist(int32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const int8_t* const& data, const int8_t* query_shared, int loadIndex, int shmemIndex) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { int32_t encV = reinterpret_cast<uint16_t const*>(data)[loadIndex + j * kIndexGroupSize]; int32_t queryRegs = reinterpret_cast<uint16_t const*>(query_shared + shmemIndex)[j]; compute_dist(dist, queryRegs, encV); } } __device__ __forceinline__ void 
runLoadShflAndCompute(const int8_t*& data, const int8_t* query, int baseLoadIndex, const int lane_id) { int32_t queryReg = (lane_id < 16) ? reinterpret_cast<uint16_t const*>(query + baseLoadIndex)[lane_id] : 0; constexpr int veclen = 2; constexpr int stride = kUnroll * veclen; #pragma unroll for (int i = 0; i < WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { int32_t encV = reinterpret_cast<uint16_t const*>(data)[lane_id + j * kIndexGroupSize]; int32_t q = shfl(queryReg, i * kUnroll + j, WarpSize); compute_dist(dist, q, encV); } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder( const int8_t*& data, const int8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen = 2; int loadDim = dimBlocks + lane_id * veclen; int32_t queryReg = loadDim < dim ? reinterpret_cast<uint16_t const*>(query + loadDim)[0] : 0; for (int d = 0; d < dim - dimBlocks; d += veclen, data += kIndexGroupSize * veclen) { int32_t enc = reinterpret_cast<uint16_t const*>(data + lane_id * veclen)[0]; int32_t q = shfl(queryReg, d / veclen, WarpSize); compute_dist(dist, q, enc); } } }; template <int kUnroll, typename Lambda> struct loadAndComputeDist<kUnroll, Lambda, 1, int8_t, int32_t> { Lambda compute_dist; int32_t& dist; __device__ __forceinline__ loadAndComputeDist(int32_t& dist, Lambda op) : dist(dist), compute_dist(op) { } __device__ __forceinline__ void runLoadShmemCompute(const int8_t* const& data, const int8_t* query_shared, int loadIndex, int shmemIndex) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { compute_dist(dist, query_shared[shmemIndex + j], data[loadIndex + j * kIndexGroupSize]); } } __device__ __forceinline__ void runLoadShflAndCompute(const int8_t*& data, const int8_t* query, int baseLoadIndex, const int lane_id) { constexpr int veclen = 1; constexpr int stride = kUnroll * veclen; int32_t queryReg = query[baseLoadIndex + lane_id]; #pragma unroll for (int i = 0; i < 
WarpSize / stride; ++i, data += stride * kIndexGroupSize) { #pragma unroll for (int j = 0; j < kUnroll; ++j) { compute_dist( dist, shfl(queryReg, i * kUnroll + j, WarpSize), data[lane_id + j * kIndexGroupSize]); } } } __device__ __forceinline__ void runLoadShflAndComputeRemainder( const int8_t*& data, const int8_t* query, const int lane_id, const int dim, const int dimBlocks) { constexpr int veclen = 1; const int loadDim = dimBlocks + lane_id; int32_t queryReg = loadDim < dim ? query[loadDim] : 0; for (int d = 0; d < dim - dimBlocks; d += veclen, data += kIndexGroupSize * veclen) { compute_dist(dist, shfl(queryReg, d, WarpSize), data[lane_id]); } } }; /** * Scan clusters for nearest neighbors of the query vectors. * See `ivfflat_interleaved_scan` for more information. * * The clusters are stored in the interleaved index format described in ivf_flat_types.hpp. * For each query vector, a set of clusters is probed: the distance to each vector in the cluster is * calculated, and the top-k nearest neighbors are selected. * * @param compute_dist distance function * @param query_smem_elems number of dimensions of the query vector to fit in a shared memory of a * block; this number must be a multiple of `WarpSize * Veclen`. 
* @param[in] query a pointer to all queries in a row-major contiguous format [gridDim.y, dim] * @param[in] coarse_index a pointer to the cluster indices to search through [n_probes] * @param[in] list_indices index<T, IdxT>.indices * @param[in] list_data index<T, IdxT>.data * @param[in] list_sizes index<T, IdxT>.list_sizes * @param[in] list_offsets index<T, IdxT>.list_offsets * @param n_probes * @param k * @param dim * @param sample_filter * @param[out] neighbors * @param[out] distances */ template <int Capacity, int Veclen, bool Ascending, typename T, typename AccT, typename IdxT, typename IvfSampleFilterT, typename Lambda, typename PostLambda> RAFT_KERNEL __launch_bounds__(kThreadsPerBlock) interleaved_scan_kernel(Lambda compute_dist, PostLambda post_process, const uint32_t query_smem_elems, const T* query, const uint32_t* coarse_index, const IdxT* const* list_indices_ptrs, const T* const* list_data_ptrs, const uint32_t* list_sizes, const uint32_t queries_offset, const uint32_t n_probes, const uint32_t k, const uint32_t dim, IvfSampleFilterT sample_filter, IdxT* neighbors, float* distances) { extern __shared__ __align__(256) uint8_t interleaved_scan_kernel_smem[]; // Using shared memory for the (part of the) query; // This allows to save on global memory bandwidth when reading index and query // data at the same time. // Its size is `query_smem_elems`. 
T* query_shared = reinterpret_cast<T*>(interleaved_scan_kernel_smem); // Make the query input and output point to this block's shared query { const int query_id = blockIdx.y; query += query_id * dim; neighbors += query_id * k * gridDim.x + blockIdx.x * k; distances += query_id * k * gridDim.x + blockIdx.x * k; coarse_index += query_id * n_probes; } // Copy a part of the query into shared memory for faster processing copy_vectorized(query_shared, query, std::min(dim, query_smem_elems)); __syncthreads(); using block_sort_t = matrix::detail::select::warpsort::block_sort< matrix::detail::select::warpsort::warp_sort_filtered, Capacity, Ascending, float, IdxT>; block_sort_t queue(k); { using align_warp = Pow2<WarpSize>; const int lane_id = align_warp::mod(threadIdx.x); // How many full warps needed to compute the distance (without remainder) const uint32_t full_warps_along_dim = align_warp::roundDown(dim); const uint32_t shm_assisted_dim = (dim > query_smem_elems) ? query_smem_elems : full_warps_along_dim; // Every CUDA block scans one cluster at a time. for (int probe_id = blockIdx.x; probe_id < n_probes; probe_id += gridDim.x) { const uint32_t list_id = coarse_index[probe_id]; // The id of cluster(list) // The number of vectors in each cluster(list); [nlist] const uint32_t list_length = list_sizes[list_id]; // The number of interleaved groups to be processed const uint32_t num_groups = align_warp::div(list_length + align_warp::Mask); // ceildiv by power of 2 constexpr int kUnroll = WarpSize / Veclen; constexpr uint32_t kNumWarps = kThreadsPerBlock / WarpSize; // Every warp reads WarpSize vectors and computes the distances to them. // Then, the distances and corresponding ids are distributed among the threads, // and each thread adds one (id, dist) pair to the filtering queue. 
for (uint32_t group_id = align_warp::div(threadIdx.x); group_id < num_groups; group_id += kNumWarps) { AccT dist = 0; // This is where this warp begins reading data (start position of an interleaved group) const T* data = list_data_ptrs[list_id] + (group_id * kIndexGroupSize) * dim; // This is the vector a given lane/thread handles const uint32_t vec_id = group_id * WarpSize + lane_id; const bool valid = vec_id < list_length && sample_filter(queries_offset + blockIdx.y, list_id, vec_id); // Process first shm_assisted_dim dimensions (always using shared memory) if (valid) { loadAndComputeDist<kUnroll, decltype(compute_dist), Veclen, T, AccT> lc(dist, compute_dist); for (int pos = 0; pos < shm_assisted_dim; pos += WarpSize, data += kIndexGroupSize * WarpSize) { lc.runLoadShmemCompute(data, query_shared, lane_id, pos); } } if (dim > query_smem_elems) { // The default path - using shfl ops - for dimensions beyond query_smem_elems loadAndComputeDist<kUnroll, decltype(compute_dist), Veclen, T, AccT> lc(dist, compute_dist); for (int pos = shm_assisted_dim; pos < full_warps_along_dim; pos += WarpSize) { lc.runLoadShflAndCompute(data, query, pos, lane_id); } lc.runLoadShflAndComputeRemainder(data, query, lane_id, dim, full_warps_along_dim); } else { // when shm_assisted_dim == full_warps_along_dim < dim if (valid) { loadAndComputeDist<1, decltype(compute_dist), Veclen, T, AccT> lc(dist, compute_dist); for (int pos = full_warps_along_dim; pos < dim; pos += Veclen, data += kIndexGroupSize * Veclen) { lc.runLoadShmemCompute(data, query_shared, lane_id, pos); } } } // Enqueue one element per thread const float val = valid ? static_cast<float>(dist) : block_sort_t::queue_t::kDummy; const size_t idx = valid ? 
static_cast<size_t>(list_indices_ptrs[list_id][vec_id]) : 0; queue.add(val, idx); } } } // finalize and store selected neighbours __syncthreads(); queue.done(interleaved_scan_kernel_smem); queue.store(distances, neighbors, post_process); } /** * Configure the gridDim.x to maximize GPU occupancy, but reduce the output size */ template <typename T> uint32_t configure_launch_x(uint32_t numQueries, uint32_t n_probes, int32_t sMemSize, T func) { int dev_id; RAFT_CUDA_TRY(cudaGetDevice(&dev_id)); int num_sms; RAFT_CUDA_TRY(cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, dev_id)); int num_blocks_per_sm = 0; RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &num_blocks_per_sm, func, kThreadsPerBlock, sMemSize)); size_t min_grid_size = num_sms * num_blocks_per_sm; size_t min_grid_x = ceildiv<size_t>(min_grid_size, numQueries); return min_grid_x > n_probes ? n_probes : static_cast<uint32_t>(min_grid_x); } template <int Capacity, int Veclen, bool Ascending, typename T, typename AccT, typename IdxT, typename IvfSampleFilterT, typename Lambda, typename PostLambda> void launch_kernel(Lambda lambda, PostLambda post_process, const index<T, IdxT>& index, const T* queries, const uint32_t* coarse_index, const uint32_t num_queries, const uint32_t queries_offset, const uint32_t n_probes, const uint32_t k, IvfSampleFilterT sample_filter, IdxT* neighbors, float* distances, uint32_t& grid_dim_x, rmm::cuda_stream_view stream) { RAFT_EXPECTS(Veclen == index.veclen(), "Configured Veclen does not match the index interleaving pattern."); constexpr auto kKernel = interleaved_scan_kernel<Capacity, Veclen, Ascending, T, AccT, IdxT, IvfSampleFilterT, Lambda, PostLambda>; const int max_query_smem = 16384; int query_smem_elems = std::min<int>(max_query_smem / sizeof(T), Pow2<Veclen * WarpSize>::roundUp(index.dim())); int smem_size = query_smem_elems * sizeof(T); constexpr int kSubwarpSize = std::min<int>(Capacity, WarpSize); auto block_merge_mem = 
raft::matrix::detail::select::warpsort::calc_smem_size_for_block_wide<AccT, IdxT>( kThreadsPerBlock / kSubwarpSize, k); smem_size += std::max<int>(smem_size, block_merge_mem); // power-of-two less than cuda limit (for better addr alignment) constexpr uint32_t kMaxGridY = 32768; if (grid_dim_x == 0) { grid_dim_x = configure_launch_x(std::min(kMaxGridY, num_queries), n_probes, smem_size, kKernel); return; } for (uint32_t query_offset = 0; query_offset < num_queries; query_offset += kMaxGridY) { uint32_t grid_dim_y = std::min<uint32_t>(kMaxGridY, num_queries - query_offset); dim3 grid_dim(grid_dim_x, grid_dim_y, 1); dim3 block_dim(kThreadsPerBlock); RAFT_LOG_TRACE( "Launching the ivf-flat interleaved_scan_kernel (%d, %d, 1) x (%d, 1, 1), n_probes = %d, " "smem_size = %d", grid_dim.x, grid_dim.y, block_dim.x, n_probes, smem_size); kKernel<<<grid_dim, block_dim, smem_size, stream>>>(lambda, post_process, query_smem_elems, queries, coarse_index, index.inds_ptrs().data_handle(), index.data_ptrs().data_handle(), index.list_sizes().data_handle(), queries_offset + query_offset, n_probes, k, index.dim(), sample_filter, neighbors, distances); queries += grid_dim_y * index.dim(); neighbors += grid_dim_y * grid_dim_x * k; distances += grid_dim_y * grid_dim_x * k; coarse_index += grid_dim_y * n_probes; } } template <int Veclen, typename T, typename AccT> struct euclidean_dist { __device__ __forceinline__ void operator()(AccT& acc, AccT x, AccT y) { const auto diff = x - y; acc += diff * diff; } }; template <int Veclen> struct euclidean_dist<Veclen, uint8_t, uint32_t> { __device__ __forceinline__ void operator()(uint32_t& acc, uint32_t x, uint32_t y) { if constexpr (Veclen > 1) { const auto diff = __vabsdiffu4(x, y); acc = dp4a(diff, diff, acc); } else { const auto diff = __usad(x, y, 0u); acc += diff * diff; } } }; template <int Veclen> struct euclidean_dist<Veclen, int8_t, int32_t> { __device__ __forceinline__ void operator()(int32_t& acc, int32_t x, int32_t y) { if constexpr 
(Veclen > 1) { // Note that we enforce here that the unsigned version of dp4a is used, because the difference // between two int8 numbers can be greater than 127 and therefore represented as a negative // number in int8. Casting from int8 to int32 would yield incorrect results, while casting // from uint8 to uint32 is correct. const auto diff = __vabsdiffs4(x, y); acc = dp4a(diff, diff, static_cast<uint32_t>(acc)); } else { const auto diff = x - y; acc += diff * diff; } } }; template <int Veclen, typename T, typename AccT> struct inner_prod_dist { __device__ __forceinline__ void operator()(AccT& acc, AccT x, AccT y) { if constexpr (Veclen > 1 && (std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>)) { acc = dp4a(x, y, acc); } else { acc += x * y; } } }; /** Select the distance computation function and forward the rest of the arguments. */ template <int Capacity, int Veclen, bool Ascending, typename T, typename AccT, typename IdxT, typename IvfSampleFilterT, typename... Args> void launch_with_fixed_consts(raft::distance::DistanceType metric, Args&&... args) { switch (metric) { case raft::distance::DistanceType::L2Expanded: case raft::distance::DistanceType::L2Unexpanded: return launch_kernel<Capacity, Veclen, Ascending, T, AccT, IdxT, IvfSampleFilterT, euclidean_dist<Veclen, T, AccT>, raft::identity_op>({}, {}, std::forward<Args>(args)...); case raft::distance::DistanceType::L2SqrtExpanded: case raft::distance::DistanceType::L2SqrtUnexpanded: return launch_kernel<Capacity, Veclen, Ascending, T, AccT, IdxT, IvfSampleFilterT, euclidean_dist<Veclen, T, AccT>, raft::sqrt_op>({}, {}, std::forward<Args>(args)...); case raft::distance::DistanceType::InnerProduct: return launch_kernel<Capacity, Veclen, Ascending, T, AccT, IdxT, IvfSampleFilterT, inner_prod_dist<Veclen, T, AccT>, raft::identity_op>({}, {}, std::forward<Args>(args)...); // NB: update the description of `knn::ivf_flat::build` when adding here a new metric. 
default: RAFT_FAIL("The chosen distance metric is not supported (%d)", int(metric)); } } /** * Lift the `capacity` and `veclen` parameters to the template level, * forward the rest of the arguments unmodified to `launch_interleaved_scan_kernel`. */ template <typename T, typename AccT, typename IdxT, typename IvfSampleFilterT, int Capacity = matrix::detail::select::warpsort::kMaxCapacity, int Veclen = std::max<int>(1, 16 / sizeof(T))> struct select_interleaved_scan_kernel { /** * Recursively reduce the `Capacity` and `Veclen` parameters until they match the * corresponding runtime arguments. * By default, this recursive process starts with maximum possible values of the * two parameters and ends with both values equal to 1. */ template <typename... Args> static inline void run(int capacity, int veclen, bool select_min, Args&&... args) { if constexpr (Capacity > 1) { if (capacity * 2 <= Capacity) { return select_interleaved_scan_kernel<T, AccT, IdxT, IvfSampleFilterT, Capacity / 2, Veclen>::run(capacity, veclen, select_min, std::forward<Args>(args)...); } } if constexpr (Veclen > 1) { if (veclen % Veclen != 0) { return select_interleaved_scan_kernel<T, AccT, IdxT, IvfSampleFilterT, Capacity, 1>::run( capacity, 1, select_min, std::forward<Args>(args)...); } } // NB: this is the limitation of the warpsort structures that use a huge number of // registers (used in the main kernel here). 
RAFT_EXPECTS(capacity == Capacity, "Capacity must be power-of-two not bigger than the maximum allowed size " "matrix::detail::select::warpsort::kMaxCapacity (%d).", matrix::detail::select::warpsort::kMaxCapacity); RAFT_EXPECTS( veclen == Veclen, "Veclen must be power-of-two not bigger than the maximum allowed size for this data type."); if (select_min) { launch_with_fixed_consts<Capacity, Veclen, true, T, AccT, IdxT, IvfSampleFilterT>( std::forward<Args>(args)...); } else { launch_with_fixed_consts<Capacity, Veclen, false, T, AccT, IdxT, IvfSampleFilterT>( std::forward<Args>(args)...); } } }; /** * @brief Configure and launch an appropriate template instance of the interleaved scan kernel. * * @tparam T value type * @tparam AccT accumulated type * @tparam IdxT type of the indices * * @param index previously built ivf-flat index * @param[in] queries device pointer to the query vectors [batch_size, dim] * @param[in] coarse_query_results device pointer to the cluster (list) ids [batch_size, n_probes] * @param n_queries batch size * @param[in] queries_offset * An offset of the current query batch. It is used for feeding sample_filter with the * correct query index. * @param metric type of the measured distance * @param n_probes number of nearest clusters to query * @param k number of nearest neighbors. * NB: the maximum value of `k` is limited statically by `kMaxCapacity`. * @param select_min whether to select nearest (true) or furthest (false) points w.r.t. the given * metric. * @param[out] neighbors device pointer to the result indices for each query and cluster * [batch_size, grid_dim_x, k] * @param[out] distances device pointer to the result distances for each query and cluster * [batch_size, grid_dim_x, k] * @param[inout] grid_dim_x number of blocks launched across all n_probes clusters; * (one block processes one or more probes, hence: 1 <= grid_dim_x <= n_probes) * @param stream * @param sample_filter * A filter that selects samples for a given query. 
Use an instance of none_ivf_sample_filter to
 * provide a green light for every sample.
 */
template <typename T, typename AccT, typename IdxT, typename IvfSampleFilterT>
void ivfflat_interleaved_scan(const index<T, IdxT>& index,
                              const T* queries,
                              const uint32_t* coarse_query_results,
                              const uint32_t n_queries,
                              const uint32_t queries_offset,
                              const raft::distance::DistanceType metric,
                              const uint32_t n_probes,
                              const uint32_t k,
                              const bool select_min,
                              IvfSampleFilterT sample_filter,
                              IdxT* neighbors,
                              float* distances,
                              uint32_t& grid_dim_x,
                              rmm::cuda_stream_view stream)
{
  // Round k up to a power of two: `select_interleaved_scan_kernel::run` requires the
  // runtime capacity to exactly match a power-of-two template parameter (see the
  // RAFT_EXPECTS checks in its recursive dispatch).
  const int capacity = bound_by_power_of_two(k);

  // Bundle the user-provided filter with the index's per-list indices pointers, so the
  // filter can be applied during the in-cluster scan.
  auto filter_adapter = raft::neighbors::filtering::ivf_to_sample_filter(
    index.inds_ptrs().data_handle(), sample_filter);
  // Dispatch to the template instantiation matching the runtime capacity and the
  // index's vectorization width (veclen); forwards all runtime arguments unchanged.
  select_interleaved_scan_kernel<T, AccT, IdxT, decltype(filter_adapter)>::run(capacity,
                                                                               index.veclen(),
                                                                               select_min,
                                                                               metric,
                                                                               index,
                                                                               queries,
                                                                               coarse_query_results,
                                                                               n_queries,
                                                                               queries_offset,
                                                                               n_probes,
                                                                               k,
                                                                               filter_adapter,
                                                                               neighbors,
                                                                               distances,
                                                                               grid_dim_x,
                                                                               stream);
}

}  // namespace raft::neighbors::ivf_flat::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_compute_similarity.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #if !defined(RAFT_EXPLICIT_INSTANTIATE_ONLY) #include "ivf_pq_compute_similarity-inl.cuh" #endif #ifdef RAFT_COMPILED #include "ivf_pq_compute_similarity-ext.cuh" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/div_utils.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef _RAFT_HAS_CUDA #include <raft/util/pow2_utils.cuh> #else #include <raft/util/integer_utils.hpp> #endif /** * @brief A simple wrapper for raft::Pow2 which uses Pow2 utils only when available and regular * integer division otherwise. This is done to allow a common interface for division arithmetic for * non CUDA headers. * * @tparam Value_ a compile-time value representable as a power-of-two. */ namespace raft::neighbors::detail { template <auto Value_> struct div_utils { typedef decltype(Value_) Type; static constexpr Type Value = Value_; template <typename T> static constexpr _RAFT_HOST_DEVICE inline auto roundDown(T x) { #if defined(_RAFT_HAS_CUDA) return Pow2<Value_>::roundDown(x); #else return raft::round_down_safe(x, Value_); #endif } template <typename T> static constexpr _RAFT_HOST_DEVICE inline auto mod(T x) { #if defined(_RAFT_HAS_CUDA) return Pow2<Value_>::mod(x); #else return x % Value_; #endif } template <typename T> static constexpr _RAFT_HOST_DEVICE inline auto div(T x) { #if defined(_RAFT_HAS_CUDA) return Pow2<Value_>::div(x); #else return x / Value_; #endif } }; } // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_interleaved_scan-ext.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> // uintX_t #include <raft/neighbors/ivf_flat_types.hpp> // raft::neighbors::ivf_flat::index #include <raft/neighbors/sample_filter_types.hpp> // none_ivf_sample_filter #include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT #include <rmm/cuda_stream_view.hpp> // rmm:cuda_stream_view #ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY namespace raft::neighbors::ivf_flat::detail { template <typename T, typename AccT, typename IdxT, typename IvfSampleFilterT> void ivfflat_interleaved_scan(const raft::neighbors::ivf_flat::index<T, IdxT>& index, const T* queries, const uint32_t* coarse_query_results, const uint32_t n_queries, const uint32_t queries_offset, const raft::distance::DistanceType metric, const uint32_t n_probes, const uint32_t k, const bool select_min, IvfSampleFilterT sample_filter, IdxT* neighbors, float* distances, uint32_t& grid_dim_x, rmm::cuda_stream_view stream) RAFT_EXPLICIT; } // namespace raft::neighbors::ivf_flat::detail #endif // RAFT_EXPLICIT_INSTANTIATE_ONLY #define instantiate_raft_neighbors_ivf_flat_detail_ivfflat_interleaved_scan( \ T, AccT, IdxT, IvfSampleFilterT) \ extern template void \ raft::neighbors::ivf_flat::detail::ivfflat_interleaved_scan<T, AccT, IdxT, IvfSampleFilterT>( \ const raft::neighbors::ivf_flat::index<T, IdxT>& index, \ const T* queries, \ const uint32_t* coarse_query_results, \ const uint32_t n_queries, \ const uint32_t 
queries_offset, \ const raft::distance::DistanceType metric, \ const uint32_t n_probes, \ const uint32_t k, \ const bool select_min, \ IvfSampleFilterT sample_filter, \ IdxT* neighbors, \ float* distances, \ uint32_t& grid_dim_x, \ rmm::cuda_stream_view stream) instantiate_raft_neighbors_ivf_flat_detail_ivfflat_interleaved_scan( float, float, int64_t, raft::neighbors::filtering::none_ivf_sample_filter); instantiate_raft_neighbors_ivf_flat_detail_ivfflat_interleaved_scan( int8_t, int32_t, int64_t, raft::neighbors::filtering::none_ivf_sample_filter); instantiate_raft_neighbors_ivf_flat_detail_ivfflat_interleaved_scan( uint8_t, uint32_t, int64_t, raft::neighbors::filtering::none_ivf_sample_filter); #undef instantiate_raft_neighbors_ivf_flat_detail_ivfflat_interleaved_scan
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_codepacking.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/neighbors/ivf_list.hpp> #include <raft/neighbors/ivf_pq_types.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/util/cuda_utils.cuh> #include <raft/util/device_atomics.cuh> #include <raft/util/integer_utils.hpp> #include <raft/util/pow2_utils.cuh> #include <raft/util/vectorized.cuh> #include <variant> namespace raft::neighbors::ivf_pq::detail { /** A chunk of PQ-encoded vector managed by one CUDA thread. */ using pq_vec_t = TxN_t<uint8_t, kIndexGroupVecLen>::io_t; /** * This type mimics the `uint8_t&` for the indexing operator of `bitfield_view_t`. * * @tparam Bits number of bits comprising the value. 
 */
template <uint32_t Bits>
struct bitfield_ref_t {
  static_assert(Bits <= 8 && Bits > 0, "Bit code must fit one byte");
  // Mask selecting the low `Bits` bits of a byte.
  constexpr static uint8_t kMask = static_cast<uint8_t>((1u << Bits) - 1u);
  // Pointer to the byte containing the lowest bit of the referenced value.
  uint8_t* ptr;
  // Bit offset of the value within `*ptr` (0..7).
  uint32_t offset;

  // Read the `Bits`-wide value. When `offset + Bits > 8` the value straddles a byte
  // boundary, so two adjacent bytes are combined into a 16-bit window before shifting.
  constexpr operator uint8_t()  // NOLINT
  {
    auto pair = static_cast<uint16_t>(ptr[0]);
    if (offset + Bits > 8) { pair |= static_cast<uint16_t>(ptr[1]) << 8; }
    return static_cast<uint8_t>((pair >> offset) & kMask);
  }

  // Write the `Bits`-wide value, preserving all surrounding bits.
  constexpr auto operator=(uint8_t code) -> bitfield_ref_t&
  {
    if (offset + Bits > 8) {
      // Straddles a byte boundary: read-modify-write both bytes through a 16-bit window.
      auto pair = static_cast<uint16_t>(ptr[0]);
      pair |= static_cast<uint16_t>(ptr[1]) << 8;
      pair &= ~(static_cast<uint16_t>(kMask) << offset);
      pair |= static_cast<uint16_t>(code) << offset;
      // Pow2<256>::mod/div extract the low/high byte of the 16-bit window.
      ptr[0] = static_cast<uint8_t>(Pow2<256>::mod(pair));
      ptr[1] = static_cast<uint8_t>(Pow2<256>::div(pair));
    } else {
      // Fits in a single byte: clear the target bit-field, then insert the code.
      ptr[0] = (ptr[0] & ~(kMask << offset)) | (code << offset);
    }
    return *this;
  }
};

/**
 * View a byte array as an array of unsigned integers of custom small bit size.
 *
 * @tparam Bits number of bits comprising a single element of the array.
 */
template <uint32_t Bits>
struct bitfield_view_t {
  static_assert(Bits <= 8 && Bits > 0, "Bit code must fit one byte");
  // Underlying (packed) byte array; not owned by the view.
  uint8_t* raw;
  // Proxy reference to the i-th `Bits`-wide element.
  constexpr auto operator[](uint32_t i) -> bitfield_ref_t<Bits>
  {
    uint32_t bit_offset = i * Bits;
    // Locate the byte holding the element's first bit and the in-byte bit offset.
    return bitfield_ref_t<Bits>{raw + Pow2<8>::div(bit_offset), Pow2<8>::mod(bit_offset)};
  }
};

/**
 * Process a single vector in a list.
 *
 * @tparam PqBits
 * @tparam Action tells how to process a single vector (e.g. reconstruct or just unpack)
 *
 * @param[in] in_list_data the encoded cluster data.
 * @param[in] in_ix in-cluster index of the vector to be decoded (one-per-thread).
 * @param[in] out_ix the output index passed to the action
 * @param[in] pq_dim
 * @param action a callable action to be invoked on each PQ code (component of the encoding)
 *   type: void (uint8_t code, uint32_t out_ix, uint32_t j), where j = [0..pq_dim).
*/ template <uint32_t PqBits, typename Action> __device__ void run_on_vector( device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> in_list_data, uint32_t in_ix, uint32_t out_ix, uint32_t pq_dim, Action action) { using group_align = Pow2<kIndexGroupSize>; const uint32_t group_ix = group_align::div(in_ix); const uint32_t ingroup_ix = group_align::mod(in_ix); pq_vec_t code_chunk; bitfield_view_t<PqBits> code_view{reinterpret_cast<uint8_t*>(&code_chunk)}; constexpr uint32_t kChunkSize = (sizeof(pq_vec_t) * 8u) / PqBits; for (uint32_t j = 0, i = 0; j < pq_dim; i++) { // read the chunk code_chunk = *reinterpret_cast<const pq_vec_t*>(&in_list_data(group_ix, i, ingroup_ix, 0)); // read the codes, one/pq_dim at a time #pragma unroll for (uint32_t k = 0; k < kChunkSize && j < pq_dim; k++, j++) { // read a piece of the reconstructed vector action(code_view[k], out_ix, j); } } } /** * Process a single vector in a list. * * @tparam PqBits * @tparam SubWarpSize how many threads work on the same ix (only the first thread writes data). * @tparam IdxT type of the index passed to the action * @tparam Action tells how to process a single vector (e.g. encode or just pack) * * @param[in] out_list_data the encoded cluster data. * @param[in] out_ix in-cluster index of the vector to be processed (one-per-SubWarpSize threads). * @param[in] in_ix the input index passed to the action (one-per-SubWarpSize threads). * @param[in] pq_dim * @param action a callable action to be invoked on each PQ code (component of the encoding) * type: (uint32_t in_ix, uint32_t j) -> uint8_t, where j = [0..pq_dim). 
*/ template <uint32_t PqBits, uint32_t SubWarpSize, typename IdxT, typename Action> __device__ void write_vector( device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> out_list_data, uint32_t out_ix, IdxT in_ix, uint32_t pq_dim, Action action) { const uint32_t lane_id = Pow2<SubWarpSize>::mod(threadIdx.x); using group_align = Pow2<kIndexGroupSize>; const uint32_t group_ix = group_align::div(out_ix); const uint32_t ingroup_ix = group_align::mod(out_ix); pq_vec_t code_chunk; bitfield_view_t<PqBits> code_view{reinterpret_cast<uint8_t*>(&code_chunk)}; constexpr uint32_t kChunkSize = (sizeof(pq_vec_t) * 8u) / PqBits; for (uint32_t j = 0, i = 0; j < pq_dim; i++) { // clear the chunk if (lane_id == 0) { code_chunk = pq_vec_t{}; } // write the codes, one/pq_dim at a time #pragma unroll for (uint32_t k = 0; k < kChunkSize && j < pq_dim; k++, j++) { // write a single code uint8_t code = action(in_ix, j); if (lane_id == 0) { code_view[k] = code; } } // write the chunk to the list if (lane_id == 0) { *reinterpret_cast<pq_vec_t*>(&out_list_data(group_ix, i, ingroup_ix, 0)) = code_chunk; } } } /** Process the given indices or a block of a single list (cluster). */ template <uint32_t PqBits, typename Action> __device__ void run_on_list( device_mdspan<const uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> in_list_data, std::variant<uint32_t, const uint32_t*> offset_or_indices, uint32_t len, uint32_t pq_dim, Action action) { for (uint32_t ix = threadIdx.x + blockDim.x * blockIdx.x; ix < len; ix += blockDim.x) { const uint32_t src_ix = std::holds_alternative<uint32_t>(offset_or_indices) ? std::get<uint32_t>(offset_or_indices) + ix : std::get<const uint32_t*>(offset_or_indices)[ix]; run_on_vector<PqBits>(in_list_data, src_ix, ix, pq_dim, action); } } /** Process the given indices or a block of a single list (cluster). 
*/ template <uint32_t PqBits, uint32_t SubWarpSize, typename Action> __device__ void write_list( device_mdspan<uint8_t, list_spec<uint32_t, uint32_t>::list_extents, row_major> out_list_data, std::variant<uint32_t, const uint32_t*> offset_or_indices, uint32_t len, uint32_t pq_dim, Action action) { using subwarp_align = Pow2<SubWarpSize>; uint32_t stride = subwarp_align::div(blockDim.x); uint32_t ix = subwarp_align::div(threadIdx.x + blockDim.x * blockIdx.x); for (; ix < len; ix += stride) { const uint32_t dst_ix = std::holds_alternative<uint32_t>(offset_or_indices) ? std::get<uint32_t>(offset_or_indices) + ix : std::get<const uint32_t*>(offset_or_indices)[ix]; write_vector<PqBits, SubWarpSize>(out_list_data, dst_ix, ix, pq_dim, action); } } } // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/refine.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "refine_device.cuh" #include "refine_host.hpp"
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/selection_faiss-ext.cuh
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> // size_t #include <cstdint> // uint32_t #include <cuda_fp16.h> // __half #include <raft/neighbors/detail/selection_faiss_helpers.cuh> // kFaissMaxK #include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT #if defined(RAFT_EXPLICIT_INSTANTIATE_ONLY) namespace raft::neighbors::detail { template <typename payload_t = int, typename key_t = float> void select_k(const key_t* inK, const payload_t* inV, size_t n_rows, size_t n_cols, key_t* outK, payload_t* outV, bool select_min, int k, cudaStream_t stream) RAFT_EXPLICIT; }; // namespace raft::neighbors::detail #endif // RAFT_EXPLICIT_INSTANTIATE_ONLY #define instantiate_raft_neighbors_detail_select_k(payload_t, key_t) \ extern template void raft::neighbors::detail::select_k(const key_t* inK, \ const payload_t* inV, \ size_t n_rows, \ size_t n_cols, \ key_t* outK, \ payload_t* outV, \ bool select_min, \ int k, \ cudaStream_t stream) instantiate_raft_neighbors_detail_select_k(uint32_t, float); instantiate_raft_neighbors_detail_select_k(int32_t, float); instantiate_raft_neighbors_detail_select_k(long, float); instantiate_raft_neighbors_detail_select_k(size_t, double); // test/neighbors/selection.cu instantiate_raft_neighbors_detail_select_k(int, double); instantiate_raft_neighbors_detail_select_k(size_t, float); instantiate_raft_neighbors_detail_select_k(uint32_t, double); 
instantiate_raft_neighbors_detail_select_k(int64_t, double); instantiate_raft_neighbors_detail_select_k(uint32_t, __half); instantiate_raft_neighbors_detail_select_k(int64_t, __half); #undef instantiate_raft_neighbors_detail_select_k
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_interleaved_scan.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #if !defined(RAFT_EXPLICIT_INSTANTIATE_ONLY) #include "ivf_flat_interleaved_scan-inl.cuh" #endif #ifdef RAFT_COMPILED #include "ivf_flat_interleaved_scan-ext.cuh" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/refine_host.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "refine_host-inl.hpp" #endif #ifdef RAFT_COMPILED #include "refine_host-ext.hpp" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/selection_faiss_helpers.cuh
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace raft::neighbors::detail {

// This function is used in cpp/test/neighbors/select.cu. We want to make it
// available through both the selection_faiss-inl.cuh and
// selection_faiss-ext.cuh headers.
/**
 * @brief Largest `k` supported by the faiss-style select for a given
 * key/payload type pair. The limit depends only on the byte widths of the two
 * types: 8-byte keys allow 512 (with 8-byte payloads) or 1024 (with smaller
 * payloads); narrower keys allow 2048.
 */
template <typename payload_t, typename key_t>
constexpr int kFaissMaxK()
{
  constexpr bool kWideKey     = sizeof(key_t) >= 8;
  constexpr bool kWidePayload = sizeof(payload_t) >= 8;
  if constexpr (kWideKey) { return kWidePayload ? 512 : 1024; }
  return 2048;
}
}  // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/knn_brute_force_batch_k_query.cuh
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <raft/linalg/norm.cuh>
#include <raft/matrix/slice.cuh>
#include <raft/neighbors/brute_force_types.hpp>
#include <raft/neighbors/detail/knn_brute_force.cuh>

namespace raft::neighbors::brute_force::detail {

/**
 * GPU implementation of the batched k-NN query: lazily materializes successive
 * `batch_size`-wide slices of the brute-force search result over `query`.
 * Query norms are precomputed once in the constructor (for the expanded metrics
 * that need them) and reused across every batch.
 */
template <typename T, typename IdxT = int64_t>
class gpu_batch_k_query : public batch_k_query<T, IdxT> {
 public:
  gpu_batch_k_query(const raft::resources& res,
                    const raft::neighbors::brute_force::index<T>& index,
                    raft::device_matrix_view<const T, int64_t, row_major> query,
                    int64_t batch_size)
    : batch_k_query<T, IdxT>(res, index.size(), query.extent(0), batch_size),
      index(index),
      query(query)
  {
    auto metric = index.metric();

    // precompute query norms, and re-use across batches
    if (metric == raft::distance::DistanceType::L2Expanded ||
        metric == raft::distance::DistanceType::L2SqrtExpanded ||
        metric == raft::distance::DistanceType::CosineExpanded) {
      query_norms = make_device_vector<T, int64_t>(res, query.extent(0));

      if (metric == raft::distance::DistanceType::CosineExpanded) {
        // Cosine uses the L2 norm itself (sqrt applied); L2-expanded metrics use the
        // squared norm (no sqrt_op).
        raft::linalg::norm(res,
                           query,
                           query_norms->view(),
                           raft::linalg::NormType::L2Norm,
                           raft::linalg::Apply::ALONG_ROWS,
                           raft::sqrt_op{});
      } else {
        raft::linalg::norm(res,
                           query,
                           query_norms->view(),
                           raft::linalg::NormType::L2Norm,
                           raft::linalg::Apply::ALONG_ROWS);
      }
    }
  }

 protected:
  // Run the brute-force search over a prefix of the index wide enough to cover
  // `offset + next_batch_size` results; `output` receives indices and distances.
  void load_batch(int64_t offset, int64_t next_batch_size, batch<T, IdxT>* output) const override
  {
    if (offset >= index.size()) { return; }

    // we're aiming to load multiple batches here - since we don't know the max iteration
    // grow the size we're loading exponentially
    int64_t batch_size = std::min(std::max(offset * 2, next_batch_size * 2), this->index_size);
    output->resize(this->res, this->query_size, batch_size);

    // NOTE(review): this view is hard-coded to `const float` while the class is
    // templated on T — presumably the brute-force index is only instantiated with
    // T == float; confirm before adding other element types.
    std::optional<raft::device_vector_view<const float, int64_t>> query_norms_view;
    if (query_norms) { query_norms_view = query_norms->view(); }

    raft::neighbors::detail::brute_force_search<T, IdxT>(
      this->res, index, query, output->indices(), output->distances(), query_norms_view);
  };

  // Copy columns [offset, offset + batch_size) of a previously loaded batch into
  // `output`, clamping `batch_size` so the slice never runs past the index size.
  void slice_batch(const batch<T, IdxT>& input,
                   int64_t offset,
                   int64_t batch_size,
                   batch<T, IdxT>* output) const override
  {
    auto num_queries = input.indices().extent(0);
    batch_size       = std::min(batch_size, index.size() - offset);

    output->resize(this->res, num_queries, batch_size);

    // Nothing to copy for an empty slice (resize above still shapes the output).
    if (!num_queries || !batch_size) { return; }

    matrix::slice_coordinates<int64_t> coords{0, offset, num_queries, offset + batch_size};
    matrix::slice(this->res, input.indices(), output->indices(), coords);
    matrix::slice(this->res, input.distances(), output->distances(), coords);
  }

  // Non-owning references to the index and query matrix; must outlive this object.
  const raft::neighbors::brute_force::index<T>& index;
  raft::device_matrix_view<const T, int64_t, row_major> query;
  // Precomputed per-query norms; only engaged for the expanded metrics above.
  std::optional<device_vector<T, int64_t>> query_norms;
};
}  // namespace raft::neighbors::brute_force::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/refine_device.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/device_mdarray.hpp> #include <raft/core/host_mdspan.hpp> #include <raft/core/nvtx.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resource/thrust_policy.hpp> #include <raft/core/resources.hpp> #include <raft/matrix/detail/select_warpsort.cuh> #include <raft/neighbors/detail/ivf_flat_build.cuh> #include <raft/neighbors/detail/ivf_flat_interleaved_scan.cuh> #include <raft/neighbors/detail/refine_common.hpp> #include <raft/neighbors/sample_filter_types.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <thrust/sequence.h> namespace raft::neighbors::detail { /** * See raft::neighbors::refine for docs. 
*/ template <typename idx_t, typename data_t, typename distance_t, typename matrix_idx> void refine_device(raft::resources const& handle, raft::device_matrix_view<const data_t, matrix_idx, row_major> dataset, raft::device_matrix_view<const data_t, matrix_idx, row_major> queries, raft::device_matrix_view<const idx_t, matrix_idx, row_major> neighbor_candidates, raft::device_matrix_view<idx_t, matrix_idx, row_major> indices, raft::device_matrix_view<distance_t, matrix_idx, row_major> distances, distance::DistanceType metric = distance::DistanceType::L2Unexpanded) { matrix_idx n_candidates = neighbor_candidates.extent(1); matrix_idx n_queries = queries.extent(0); matrix_idx dim = dataset.extent(1); uint32_t k = static_cast<uint32_t>(indices.extent(1)); RAFT_EXPECTS(k <= raft::matrix::detail::select::warpsort::kMaxCapacity, "k must be lest than topk::kMaxCapacity (%d).", raft::matrix::detail::select::warpsort::kMaxCapacity); common::nvtx::range<common::nvtx::domain::raft> fun_scope( "neighbors::refine(%zu, %u)", size_t(n_queries), uint32_t(n_candidates)); refine_check_input(dataset.extents(), queries.extents(), neighbor_candidates.extents(), indices.extents(), distances.extents(), metric); // The refinement search can be mapped to an IVF flat search: // - We consider that the candidate vectors form a cluster, separately for each query. // - In other words, the n_queries * n_candidates vectors form n_queries clusters, each with // n_candidates elements. // - We consider that the coarse level search is already performed and assigned a single cluster // to search for each query (the cluster formed from the corresponding candidates). // - We run IVF flat search with n_probes=1 to select the best k elements of the candidates. 
rmm::device_uvector<uint32_t> fake_coarse_idx(n_queries, resource::get_cuda_stream(handle)); thrust::sequence(resource::get_thrust_policy(handle), fake_coarse_idx.data(), fake_coarse_idx.data() + n_queries); raft::neighbors::ivf_flat::index<data_t, idx_t> refinement_index( handle, metric, n_queries, false, true, dim); raft::neighbors::ivf_flat::detail::fill_refinement_index(handle, &refinement_index, dataset.data_handle(), neighbor_candidates.data_handle(), n_queries, n_candidates); uint32_t grid_dim_x = 1; raft::neighbors::ivf_flat::detail::ivfflat_interleaved_scan< data_t, typename raft::spatial::knn::detail::utils::config<data_t>::value_t, idx_t>(refinement_index, queries.data_handle(), fake_coarse_idx.data(), static_cast<uint32_t>(n_queries), 0, refinement_index.metric(), 1, k, raft::distance::is_min_close(metric), raft::neighbors::filtering::none_ivf_sample_filter(), indices.data_handle(), distances.data_handle(), grid_dim_x, resource::get_cuda_stream(handle)); } } // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/selection_faiss-inl.cuh
/*
 * Copyright (c) 2020-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/util/cudart_utils.hpp>
#include <raft/util/pow2_utils.cuh>

#include <raft/neighbors/detail/faiss_select/Select.cuh>
#include <raft/neighbors/detail/selection_faiss_helpers.cuh>  // kFaissMaxK

namespace raft::neighbors::detail {

/**
 * Per-row top-k selection kernel (FAISS-style BlockSelect).
 *
 * One block processes exactly one row of the input matrix (the grid is sized to
 * `n_rows`): all threads of the block feed the row's elements into a shared
 * BlockSelect heap and then write out the k best keys/values for that row.
 *
 * If `inV == nullptr`, the payload written out is the column index of the key
 * (i.e. `payload_t(i)`), so the kernel can be used without an explicit index matrix.
 *
 * NOTE(review): the `select_min` template parameter is forwarded directly as
 * BlockSelect's direction flag; callers below pass the *negated* runtime flag here
 * (FAISS's `Dir == true` appears to mean "select largest") — confirm against
 * faiss_select/Select.cuh before changing the dispatch in select_k_impl.
 */
template <typename payload_t, typename key_t, bool select_min, int warp_q, int thread_q, int tpb>
RAFT_KERNEL select_k_kernel(const key_t* inK,
                            const payload_t* inV,
                            size_t n_rows,
                            size_t n_cols,
                            key_t* outK,
                            payload_t* outV,
                            key_t initK,
                            payload_t initV,
                            int k)
{
  using align_warp        = Pow2<WarpSize>;
  constexpr int kNumWarps = align_warp::div(tpb);

  // Per-warp shared-memory queues used by BlockSelect for the merge step.
  __shared__ key_t smemK[kNumWarps * warp_q];
  __shared__ payload_t smemV[kNumWarps * warp_q];

  faiss_select::
    BlockSelect<key_t, payload_t, select_min, faiss_select::Comparator<key_t>, warp_q, thread_q, tpb>
      heap(initK, initV, smemK, smemV, k);

  // Grid is exactly sized to rows available
  int row = blockIdx.x;
  {
    size_t i = size_t(threadIdx.x);

    inK += row * n_cols;
    if (inV != nullptr) { inV += row * n_cols; }

    // Whole warps must participate in the selection
    size_t limit = align_warp::roundDown(n_cols);

    for (; i < limit; i += tpb) {
      heap.add(inK[i], (inV != nullptr) ? inV[i] : payload_t(i));
    }

    // Handle last remainder fraction of a warp of elements
    if (i < n_cols) { heap.addThreadQ(inK[i], (inV != nullptr) ? inV[i] : payload_t(i)); }
  }

  heap.reduce();

  // After reduce(), the block's k best entries are in the first k slots of shared memory.
  for (int i = threadIdx.x; i < k; i += tpb) {
    outK[row * k + i] = smemK[i];
    outV[row * k + i] = smemV[i];
  }
}

/**
 * Launch select_k_kernel with a compile-time (warp_q, thread_q) configuration.
 *
 * Chooses the block size from warp_q, computes the heap sentinel (`kInit`) from
 * the selection direction, and dispatches to the kernel. Note the direction flag
 * passed to the kernel template is the negation of `select_min` (see the kernel's
 * doc comment for why).
 */
template <typename payload_t = int, typename key_t = float, int warp_q, int thread_q>
inline void select_k_impl(const key_t* inK,
                          const payload_t* inV,
                          size_t n_rows,
                          size_t n_cols,
                          key_t* outK,
                          payload_t* outV,
                          bool select_min,
                          int k,
                          cudaStream_t stream)
{
  auto grid = dim3(n_rows);

  // Larger warp queues need more registers/shmem per thread, so use fewer threads.
  constexpr int n_threads = (warp_q <= 1024) ? 128 : 64;
  auto block              = dim3(n_threads);

  // Sentinel key: worst possible value for the chosen direction.
  auto kInit = select_min ? upper_bound<key_t>() : lower_bound<key_t>();
  auto vInit = -1;
  if (select_min) {
    select_k_kernel<payload_t, key_t, false, warp_q, thread_q, n_threads>
      <<<grid, block, 0, stream>>>(inK, inV, n_rows, n_cols, outK, outV, kInit, vInit, k);
  } else {
    select_k_kernel<payload_t, key_t, true, warp_q, thread_q, n_threads>
      <<<grid, block, 0, stream>>>(inK, inV, n_rows, n_cols, outK, outV, kInit, vInit, k);
  }
  RAFT_CUDA_TRY(cudaGetLastError());
}

/**
 * @brief Select the k-nearest neighbors from dense
 * distance and index matrices.
 *
 * @param[in] inK partitioned knn distance matrix
 * @param[in] inV partitioned knn index matrix
 * @param[in] n_rows number of rows in distance and index matrices
 * @param[in] n_cols number of columns in distance and index matrices
 * @param[out] outK merged knn distance matrix
 * @param[out] outV merged knn index matrix
 * @param[in] select_min whether to select the min or the max distances
 * @param[in] k number of neighbors per partition (also number of merged neighbors)
 * @param[in] stream CUDA stream to use
 */
template <typename payload_t = int, typename key_t = float>
inline void select_k(const key_t* inK,
                     const payload_t* inV,
                     size_t n_rows,
                     size_t n_cols,
                     key_t* outK,
                     payload_t* outV,
                     bool select_min,
                     int k,
                     cudaStream_t stream)
{
  // Upper bound on k supported by the faiss-style selection for these types.
  constexpr int max_k = kFaissMaxK<payload_t, key_t>();
  // Dispatch to the smallest (warp_q, thread_q) configuration that can hold k.
  if (k == 1)
    select_k_impl<payload_t, key_t, 1, 1>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 32)
    select_k_impl<payload_t, key_t, 32, 2>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 64)
    select_k_impl<payload_t, key_t, 64, 3>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 128)
    select_k_impl<payload_t, key_t, 128, 3>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 256)
    select_k_impl<payload_t, key_t, 256, 4>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 512)
    select_k_impl<payload_t, key_t, 512, 8>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 1024 && k <= max_k)
    // note: have to use constexpr std::min here to avoid instantiating templates
    // for parameters we don't support
    select_k_impl<payload_t, key_t, std::min(1024, max_k), 8>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else if (k <= 2048 && k <= max_k)
    select_k_impl<payload_t, key_t, std::min(2048, max_k), 8>(
      inK, inV, n_rows, n_cols, outK, outV, select_min, k, stream);
  else
    ASSERT(k <= max_k, "Current max k is %d (requested %d)", max_k, k);
}
};  // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_compute_similarity-inl.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/distance/distance_types.hpp> // raft::distance::DistanceType #include <raft/matrix/detail/select_warpsort.cuh> // matrix::detail::select::warpsort::warp_sort_distributed #include <raft/neighbors/detail/ivf_pq_dummy_block_sort.cuh> // dummy_block_sort_t #include <raft/neighbors/ivf_pq_types.hpp> // codebook_gen #include <raft/neighbors/sample_filter_types.hpp> // none_ivf_sample_filter #include <raft/util/cuda_rt_essentials.hpp> // RAFT_CUDA_TRY #include <raft/util/device_atomics.cuh> // raft::atomicMin #include <raft/util/pow2_utils.cuh> // raft::Pow2 #include <raft/util/vectorized.cuh> // raft::TxN_t #include <rmm/cuda_stream_view.hpp> // rmm::cuda_stream_view namespace raft::neighbors::ivf_pq::detail { /** * Maximum value of k for the fused calculate & select in ivfpq. * * If runtime value of k is larger than this, the main search operation * is split into two kernels (per batch, first calculate distance, then select top-k). */ static constexpr int kMaxCapacity = 128; static_assert((kMaxCapacity >= 32) && !(kMaxCapacity & (kMaxCapacity - 1)), "kMaxCapacity must be a power of two, not smaller than the WarpSize."); // using weak attribute here, because it may be compiled multiple times. 
/**
 * Whether the fused (in-kernel) top-k selection can/should be used for the given
 * runtime parameters; otherwise the two-kernel (distance, then select) path is taken.
 */
auto RAFT_WEAK_FUNCTION is_local_topk_feasible(uint32_t k, uint32_t n_probes, uint32_t n_queries)
  -> bool
{
  if (k > kMaxCapacity) { return false; }            // warp_sort not possible
  if (n_queries * n_probes <= 16) { return false; }  // overall amount of work is too small
  return true;
}

/**
 * Maps a compile-time top-k Capacity to the warp-sort block_sort type used by the kernel,
 * and exposes the shared-memory requirement of the *smallest* capacity that still fits
 * the runtime `k_max` (recursing down through halved capacities).
 */
template <int Capacity, typename T, typename IdxT>
struct pq_block_sort {
  using type = matrix::detail::select::warpsort::block_sort<
    matrix::detail::select::warpsort::warp_sort_distributed_ext,
    Capacity,
    true,
    T,
    IdxT>;

  // Returns a function pointer `size_t(*)(uint32_t n_threads)` computing the shared
  // memory needed by the queue for the capacity actually selected for `k_max`.
  static auto get_mem_required(uint32_t k_max)
  {
    if (k_max == 0 || k_max > Capacity) {
      // Fused top-k disabled (or k too large): the dummy sorter needs no memory.
      return pq_block_sort<0, T, IdxT>::get_mem_required(k_max);
    }
    if constexpr (Capacity > 1) {
      if (k_max * 2 <= Capacity) {
        // A smaller capacity still fits k_max; prefer it (less shared memory).
        return pq_block_sort<(Capacity / 2), T, IdxT>::get_mem_required(k_max);
      }
    }
    return type::queue_t::mem_required;
  }
};

// Capacity == 0: fused top-k disabled; a no-op sorter with zero memory requirement.
template <typename T, typename IdxT>
struct pq_block_sort<0, T, IdxT> : dummy_block_sort_t<T, IdxT> {
  using type = dummy_block_sort_t<T, IdxT>;
  static auto mem_required(uint32_t) -> size_t { return 0; }
  static auto get_mem_required(uint32_t) { return mem_required; }
};

template <int Capacity, typename T, typename IdxT>
using block_sort_t = typename pq_block_sort<Capacity, T, IdxT>::type;

/**
 * Estimate a carveout value as expected by `cudaFuncAttributePreferredSharedMemoryCarveout`
 * (which does not take into account `reservedSharedMemPerBlock`),
 * given by a desired shmem-L1 split and a per-block memory requirement in bytes.
 *
 * NB: As per the programming guide, the memory carveout setting is just a hint for the driver; it's
 * free to choose any shmem-L1 configuration it deems appropriate. For example, if you set the
 * carveout to zero, it will choose a non-zero config that will allow to run at least one active
 * block per SM.
 *
 * @param shmem_fraction
 *   a fraction representing a desired split (shmem / (shmem + L1)) [0, 1].
 * @param shmem_per_block
 *   a shared memory usage per block (dynamic + static shared memory sizes), in bytes.
 * @param dev_props
 *   device properties.
 * @return
 *   a carveout value in percents [0, 100].
 */
constexpr inline auto estimate_carveout(double shmem_fraction,
                                        size_t shmem_per_block,
                                        const cudaDeviceProp& dev_props) -> int
{
  using shmem_unit = Pow2<128>;
  size_t m         = shmem_unit::roundUp(shmem_per_block);
  size_t r         = dev_props.reservedSharedMemPerBlock;
  size_t s         = dev_props.sharedMemPerMultiprocessor;
  return (size_t(100 * s * m * shmem_fraction) - (m - 1) * r) / (s * (m + r));
}

/* Manually unrolled loop over a chunk of pq_dataset that fits into one VecT.
 *
 * Decodes PqBits-wide PQ codes out of the packed `pq_codes` vector (codes may straddle
 * the boundary between adjacent vector lanes; the `else if` branch stitches the low bits
 * of the next lane onto the leftover high bits of the current one) and accumulates the
 * corresponding lookup-table entries into `score`. The recursion is over compile-time
 * state (BitsLeft / lane index Ix), so it fully unrolls at compile time.
 */
template <typename OutT,
          typename LutT,
          typename VecT,
          bool CheckBounds,
          uint32_t PqBits,
          uint32_t BitsLeft = 0,
          uint32_t Ix       = 0>
__device__ __forceinline__ void ivfpq_compute_chunk(OutT& score /* NOLINT */,
                                                    typename VecT::math_t& pq_code,
                                                    const VecT& pq_codes,
                                                    const LutT*& lut_head,
                                                    const LutT*& lut_end)
{
  if constexpr (CheckBounds) {
    // Tail chunk: stop as soon as we've consumed all pq_dim lookup-table segments.
    if (lut_head >= lut_end) { return; }
  }
  constexpr uint32_t kTotalBits = 8 * sizeof(typename VecT::math_t);
  constexpr uint32_t kPqShift   = 1u << PqBits;
  constexpr uint32_t kPqMask    = kPqShift - 1u;
  if constexpr (BitsLeft >= PqBits) {
    // A whole code fits in the remaining bits of the current lane.
    uint8_t code = pq_code & kPqMask;
    pq_code >>= PqBits;
    score += OutT(lut_head[code]);
    lut_head += kPqShift;
    return ivfpq_compute_chunk<OutT, LutT, VecT, CheckBounds, PqBits, BitsLeft - PqBits, Ix>(
      score, pq_code, pq_codes, lut_head, lut_end);
  } else if constexpr (Ix < VecT::Ratio) {
    // Code straddles lanes: combine the leftover bits with the next lane's low bits.
    uint8_t code                = pq_code;
    pq_code                     = pq_codes.val.data[Ix];
    constexpr uint32_t kRemBits = PqBits - BitsLeft;
    constexpr uint32_t kRemMask = (1u << kRemBits) - 1u;
    code |= (pq_code & kRemMask) << BitsLeft;
    pq_code >>= kRemBits;
    score += OutT(lut_head[code]);
    lut_head += kPqShift;
    return ivfpq_compute_chunk<OutT, LutT, VecT, CheckBounds, PqBits, kTotalBits - kRemBits, Ix + 1>(
      score, pq_code, pq_codes, lut_head, lut_end);
  }
}

/* Compute the similarity for one vector in the pq_dataset */
template <typename OutT, typename LutT, typename VecT, uint32_t PqBits>
__device__ auto ivfpq_compute_score(uint32_t pq_dim,
                                    const typename VecT::io_t* pq_head,
                                    const LutT* lut_scores,
                                    OutT early_stop_limit) -> OutT
{
  constexpr uint32_t kChunkSize = sizeof(VecT) * 8u / PqBits;
  auto lut_head                 = lut_scores;
  auto lut_end                  = lut_scores + (pq_dim << PqBits);
  VecT pq_codes;
  OutT score{0};
  // Full chunks: no bounds checking needed inside ivfpq_compute_chunk.
  for (; pq_dim >= kChunkSize; pq_dim -= kChunkSize) {
    *pq_codes.vectorized_data() = *pq_head;
    pq_head += kIndexGroupSize;
    typename VecT::math_t pq_code = 0;
    ivfpq_compute_chunk<OutT, LutT, VecT, false, PqBits>(
      score, pq_code, pq_codes, lut_head, lut_end);
    // Early stop when it makes sense (otherwise early_stop_limit is kDummy/infinity).
    if (score >= early_stop_limit) { return score; }
  }
  // Remainder chunk: CheckBounds = true guards against reading past lut_end.
  if (pq_dim > 0) {
    *pq_codes.vectorized_data()   = *pq_head;
    typename VecT::math_t pq_code = 0;
    ivfpq_compute_chunk<OutT, LutT, VecT, true, PqBits>(
      score, pq_code, pq_codes, lut_head, lut_end);
  }
  return score;
}

/**
 * The main kernel that computes similarity scores across multiple queries and probes.
 * When `Capacity > 0`, it also selects top K candidates for each query and probe
 * (which need to be merged across probes afterwards).
 *
 * Each block processes a (query, probe) pair: it calculates the distance between the single query
 * vector and all the dataset vector in the cluster that we are probing.
 *
 * @tparam OutT
 *   The output type - distances.
 * @tparam LutT
 *   The lookup table element type (lut_scores).
 * @tparam PqBits
 *   The bit length of an encoded vector element after compression by PQ
 *   (NB: pq_book_size = 1 << PqBits).
 * @tparam Capacity
 *   Power-of-two; the maximum possible `k` in top-k. Value zero disables fused top-k search.
 * @tparam PrecompBaseDiff
 *   Defines whether we should precompute part of the distance and keep it in shared memory
 *   before the main part (score calculation) to increase memory usage efficiency in the latter.
 *   For L2, this is the distance between the query and the cluster center.
 * @tparam EnableSMemLut
 *   Defines whether to use the shared memory for the lookup table (`lut_scores`).
 *   Setting this to `false` allows to reduce the shared memory usage (and maximum data dim)
 *   at the cost of reducing global memory reading throughput.
 *
 * @param dim the dimensionality of the data (NB: after rotation transform, i.e. `index.rot_dim()`).
 * @param n_probes the number of clusters to search for each query
 * @param pq_dim
 *   The dimensionality of an encoded vector after compression by PQ.
 * @param n_queries the number of queries.
 * @param queries_offset
 *   An offset of the current query batch. It is used for feeding sample_filter with the
 *   correct query index.
 * @param metric the distance type.
 * @param codebook_kind Defines the way PQ codebooks have been trained.
 * @param topk the `k` in the select top-k.
 * @param max_samples the size of the output for a single query.
 * @param cluster_centers
 *   The device pointer to the cluster centers in the original space (NB: after rotation)
 *   [n_clusters, dim].
 * @param pq_centers
 *   The device pointer to the cluster centers in the PQ space
 *   [pq_dim, pq_book_size, pq_len] or [n_clusters, pq_book_size, pq_len].
 * @param pq_dataset
 *   The device pointer to the PQ index (data) [n_rows, ...].
 * @param cluster_labels
 *   The device pointer to the labels (clusters) for each query and probe [n_queries, n_probes].
 * @param _chunk_indices
 *   The device pointer to the data offsets for each query and probe [n_queries, n_probes].
 * @param queries
 *   The device pointer to the queries (NB: after rotation) [n_queries, dim].
 * @param index_list
 *   An optional device pointer to the enforced order of search [n_queries, n_probes].
 *   One can pass reordered indices here to try to improve data reading locality.
 * @param query_kths
 *   query_kths keep the current state of the filtering - atomically updated distances to the
 *   k-th closest neighbors for each query [n_queries].
 * @param sample_filter
 *   A filter that selects samples for a given query. Use an instance of none_ivf_sample_filter to
 *   provide a green light for every sample.
 * @param lut_scores
 *   The device pointer for storing the lookup table globally [gridDim.x, pq_dim << PqBits].
 *   Ignored when `EnableSMemLut == true`.
 * @param _out_scores
 *   The device pointer to the output scores
 *   [n_queries, max_samples] or [n_queries, n_probes, topk].
 * @param _out_indices
 *   The device pointer to the output indices [n_queries, n_probes, topk].
 *   These are the indices of the records as they appear in the database view formed by the probed
 *   clusters / defined by the `_chunk_indices`.
 *   The indices can have values within the range [0, max_samples).
 *   Ignored when `Capacity == 0`.
 */
template <typename OutT,
          typename LutT,
          typename IvfSampleFilterT,
          uint32_t PqBits,
          int Capacity,
          bool PrecompBaseDiff,
          bool EnableSMemLut>
RAFT_KERNEL compute_similarity_kernel(uint32_t dim,
                                      uint32_t n_probes,
                                      uint32_t pq_dim,
                                      uint32_t n_queries,
                                      uint32_t queries_offset,
                                      distance::DistanceType metric,
                                      codebook_gen codebook_kind,
                                      uint32_t topk,
                                      uint32_t max_samples,
                                      const float* cluster_centers,
                                      const float* pq_centers,
                                      const uint8_t* const* pq_dataset,
                                      const uint32_t* cluster_labels,
                                      const uint32_t* _chunk_indices,
                                      const float* queries,
                                      const uint32_t* index_list,
                                      float* query_kths,
                                      IvfSampleFilterT sample_filter,
                                      LutT* lut_scores,
                                      OutT* _out_scores,
                                      uint32_t* _out_indices)
{
  /* Shared memory:
   * lut_scores: lookup table (LUT) of size = `pq_dim << PqBits` (when EnableSMemLut)
   * lut_end+:
   *   base_diff: size = dim (which is equal to `pq_dim * pq_len`) or dim*2
   *   topk::warp_sort::mem_required - local topk temporary buffer (if necessary)
   * topk::block_sort: some amount of shared memory, but overlaps with the rest:
   *   block_sort only needs shared memory for `.done()` operation, which can come very last.
   */
  extern __shared__ __align__(256) uint8_t smem_buf[];  // NOLINT
  constexpr bool kManageLocalTopK = Capacity > 0;

  constexpr uint32_t PqShift = 1u << PqBits;  // NOLINT
  constexpr uint32_t PqMask  = PqShift - 1u;  // NOLINT

  const uint32_t pq_len   = dim / pq_dim;
  const uint32_t lut_size = pq_dim * PqShift;

  if constexpr (EnableSMemLut) {
    lut_scores = reinterpret_cast<LutT*>(smem_buf);
  } else {
    // Global-memory LUT: each block uses its own slice of the preallocated buffer.
    lut_scores += lut_size * blockIdx.x;
  }

  // `lut_end` marks the start of the shared-memory scratch area that follows the LUT
  // (used below for the precomputed base_diff values and the top-k temporary buffer).
  uint8_t* lut_end = nullptr;
  if constexpr (EnableSMemLut) {
    lut_end = reinterpret_cast<uint8_t*>(lut_scores + lut_size);
  } else {
    lut_end = smem_buf;
  }

  // Grid-stride loop over (query, probe) pairs; one pair per block per iteration.
  for (int ib = blockIdx.x; ib < n_queries * n_probes; ib += gridDim.x) {
    if (ib >= gridDim.x) {
      // sync shared memory accesses on the second and further iterations
      __syncthreads();
    }
    uint32_t query_ix;
    uint32_t probe_ix;
    if (index_list == nullptr) {
      query_ix = ib % n_queries;
      probe_ix = ib / n_queries;
    } else {
      auto ordered_ix = index_list[ib];
      query_ix        = ordered_ix / n_probes;
      probe_ix        = ordered_ix % n_probes;
    }

    const uint32_t* chunk_indices = _chunk_indices + (n_probes * query_ix);
    const float* query            = queries + (dim * query_ix);
    OutT* out_scores;
    uint32_t* out_indices = nullptr;
    if constexpr (kManageLocalTopK) {
      // Store topk calculated distances to out_scores (and its indices to out_indices)
      const uint64_t out_offset = probe_ix + n_probes * query_ix;
      out_scores                = _out_scores + out_offset * topk;
      out_indices               = _out_indices + out_offset * topk;
    } else {
      // Store all calculated distances to out_scores
      out_scores = _out_scores + uint64_t(max_samples) * query_ix;
    }
    uint32_t label              = cluster_labels[n_probes * query_ix + probe_ix];
    const float* cluster_center = cluster_centers + dim * label;
    const float* pq_center;
    if (codebook_kind == codebook_gen::PER_SUBSPACE) {
      pq_center = pq_centers;
    } else {
      pq_center = pq_centers + (pq_len << PqBits) * label;
    }

    if constexpr (PrecompBaseDiff) {
      // Reduce number of memory reads later by pre-computing parts of the score
      switch (metric) {
        case distance::DistanceType::L2SqrtExpanded:
        case distance::DistanceType::L2Expanded: {
          for (uint32_t i = threadIdx.x; i < dim; i += blockDim.x) {
            reinterpret_cast<float*>(lut_end)[i] = query[i] - cluster_center[i];
          }
        } break;
        case distance::DistanceType::InnerProduct: {
          float2 pvals;
          for (uint32_t i = threadIdx.x; i < dim; i += blockDim.x) {
            pvals.x                               = query[i];
            pvals.y                               = cluster_center[i] * pvals.x;
            reinterpret_cast<float2*>(lut_end)[i] = pvals;
          }
        } break;
        default: __builtin_unreachable();
      }
      __syncthreads();
    }

    {
      // Create a lookup table
      // For each subspace, the lookup table stores the distance between the actual query vector
      // (projected into the subspace) and all possible pq vectors in that subspace.
      for (uint32_t i = threadIdx.x; i < lut_size; i += blockDim.x) {
        const uint32_t i_pq  = i >> PqBits;
        uint32_t j           = i_pq * pq_len;
        const uint32_t j_end = pq_len + j;
        auto cur_pq_center   = pq_center + (i & PqMask) +
                             (codebook_kind == codebook_gen::PER_SUBSPACE ? j * PqShift : 0u);
        float score = 0.0;
        do {
          float pq_c = *cur_pq_center;
          cur_pq_center += PqShift;
          switch (metric) {
            case distance::DistanceType::L2SqrtExpanded:
            case distance::DistanceType::L2Expanded: {
              float diff;
              if constexpr (PrecompBaseDiff) {
                diff = reinterpret_cast<float*>(lut_end)[j];
              } else {
                diff = query[j] - cluster_center[j];
              }
              diff -= pq_c;
              score += diff * diff;
            } break;
            case distance::DistanceType::InnerProduct: {
              // NB: we negate the scores as we hardcoded select-topk to always compute the minimum
              float q;
              if constexpr (PrecompBaseDiff) {
                float2 pvals = reinterpret_cast<float2*>(lut_end)[j];
                q            = pvals.x;
                score -= pvals.y;
              } else {
                q = query[j];
                score -= q * cluster_center[j];
              }
              score -= q * pq_c;
            } break;
            default: __builtin_unreachable();
          }
        } while (++j < j_end);
        lut_scores[i] = LutT(score);
      }
    }

    // Define helper types for efficient access to the pq_dataset, which is stored in an interleaved
    // format. The chunks of PQ data are stored in kIndexGroupVecLen-bytes-long chunks, interleaved
    // in groups of kIndexGroupSize elems (which is normally equal to the warp size) for the fastest
    // possible access by thread warps.
    //
    // Consider one record in the pq_dataset is `pq_dim * pq_bits`-bit-long.
    // Assuming `kIndexGroupVecLen = 16`, one chunk of data read by a thread at once is 128-bits.
    // Then, such a chunk contains `chunk_size = 128 / pq_bits` record elements, and the record
    // consists of `ceildiv(pq_dim, chunk_size)` chunks. The chunks are interleaved in groups of 32,
    // so that the warp can achieve the best coalesced read throughput.
    using group_align  = Pow2<kIndexGroupSize>;
    using vec_align    = Pow2<kIndexGroupVecLen>;
    using local_topk_t = block_sort_t<Capacity, OutT, uint32_t>;
    using op_t         = uint32_t;
    using vec_t        = TxN_t<op_t, kIndexGroupVecLen / sizeof(op_t)>;

    uint32_t sample_offset = 0;
    if (probe_ix > 0) { sample_offset = chunk_indices[probe_ix - 1]; }
    uint32_t n_samples            = chunk_indices[probe_ix] - sample_offset;
    uint32_t n_samples_aligned    = group_align::roundUp(n_samples);
    constexpr uint32_t kChunkSize = (kIndexGroupVecLen * 8u) / PqBits;
    uint32_t pq_line_width        = div_rounding_up_unsafe(pq_dim, kChunkSize) * kIndexGroupVecLen;
    auto pq_thread_data = pq_dataset[label] + group_align::roundDown(threadIdx.x) * pq_line_width +
                          group_align::mod(threadIdx.x) * vec_align::Value;
    pq_line_width *= blockDim.x;

    constexpr OutT kDummy = upper_bound<OutT>();
    OutT query_kth        = kDummy;
    if constexpr (kManageLocalTopK) { query_kth = OutT(query_kths[query_ix]); }
    OutT early_stop_limit = kDummy;
    switch (metric) {
      // If the metric is non-negative, we can use the query_kth approximation as an early stop
      // threshold to skip some iterations when computing the score. Add such metrics here.
      case distance::DistanceType::L2SqrtExpanded:
      case distance::DistanceType::L2Expanded: {
        early_stop_limit = query_kth;
      } break;
      default: break;
    }

    // Ensure lut_scores is written by all threads before using it in ivfpq-compute-score
    __threadfence_block();
    __syncthreads();
    local_topk_t block_topk(topk, lut_end, query_kth);

    // Compute a distance for each sample
    for (uint32_t i = threadIdx.x; i < n_samples_aligned;
         i += blockDim.x, pq_thread_data += pq_line_width) {
      OutT score = kDummy;
      bool valid = i < n_samples;
      // Check bounds and that the sample is acceptable for the query
      if (valid && sample_filter(queries_offset + query_ix, label, i)) {
        score = ivfpq_compute_score<OutT, LutT, vec_t, PqBits>(
          pq_dim,
          reinterpret_cast<const vec_t::io_t*>(pq_thread_data),
          lut_scores,
          early_stop_limit);
      }
      if constexpr (kManageLocalTopK) {
        block_topk.add(score, sample_offset + i);
      } else {
        if (valid) { out_scores[sample_offset + i] = score; }
      }
    }
    if constexpr (kManageLocalTopK) {
      // sync threads before the topk merging operation, because we reuse smem_buf
      __syncthreads();
      block_topk.done(smem_buf);
      block_topk.store(out_scores, out_indices);
      // Tighten the per-query k-th distance bound for subsequent blocks/probes.
      if (threadIdx.x == 0) { atomicMin(query_kths + query_ix, float(out_scores[topk - 1])); }
    } else {
      // fill in the rest of the out_scores with dummy values
      if (probe_ix + 1 == n_probes) {
        for (uint32_t i = threadIdx.x + sample_offset + n_samples; i < max_samples;
             i += blockDim.x) {
          out_scores[i] = kDummy;
        }
      }
    }
  }
}

// The signature of the kernel defined by a minimal set of template parameters
template <typename OutT,
          typename LutT,
          typename IvfSampleFilterT = raft::neighbors::filtering::none_ivf_sample_filter>
using compute_similarity_kernel_t =
  decltype(&compute_similarity_kernel<OutT, LutT, IvfSampleFilterT, 8, 0, true, true>);

// The config struct lifts the runtime parameters to the template parameters
template <typename OutT,
          typename LutT,
          bool PrecompBaseDiff,
          bool EnableSMemLut,
          typename IvfSampleFilterT =
            raft::neighbors::filtering::none_ivf_sample_filter>
struct compute_similarity_kernel_config {
 public:
  // Pick the kernel instance for the given runtime PQ bit-width and top-k bound.
  static auto get(uint32_t pq_bits, uint32_t k_max)
    -> compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT>
  {
    return kernel_choose_bits(pq_bits, k_max);
  }

 private:
  // Lift the runtime pq_bits to a template parameter (only 4..8 are instantiated).
  static auto kernel_choose_bits(uint32_t pq_bits, uint32_t k_max)
    -> compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT>
  {
    switch (pq_bits) {
      case 4: return kernel_try_capacity<4, kMaxCapacity>(k_max);
      case 5: return kernel_try_capacity<5, kMaxCapacity>(k_max);
      case 6: return kernel_try_capacity<6, kMaxCapacity>(k_max);
      case 7: return kernel_try_capacity<7, kMaxCapacity>(k_max);
      case 8: return kernel_try_capacity<8, kMaxCapacity>(k_max);
      default: RAFT_FAIL("Invalid pq_bits (%u), the value must be within [4, 8]", pq_bits);
    }
  }

  // Recursively halve the compile-time top-k Capacity to the smallest value that still
  // fits `k_max`; Capacity 0 means the fused top-k is disabled.
  template <uint32_t PqBits, int Capacity>
  static auto kernel_try_capacity(uint32_t k_max)
    -> compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT>
  {
    if constexpr (Capacity > 0) {
      if (k_max == 0 || k_max > Capacity) { return kernel_try_capacity<PqBits, 0>(k_max); }
    }
    if constexpr (Capacity > 1) {
      if (k_max * 2 <= Capacity) { return kernel_try_capacity<PqBits, (Capacity / 2)>(k_max); }
    }
    return compute_similarity_kernel<OutT,
                                     LutT,
                                     IvfSampleFilterT,
                                     PqBits,
                                     Capacity,
                                     PrecompBaseDiff,
                                     EnableSMemLut>;
  }
};

// A standalone accessor function was necessary to make sure template
// instantiation work correctly. This accessor function is not used anymore and
// may be removed.
template <typename OutT,
          typename LutT,
          bool PrecompBaseDiff,
          bool EnableSMemLut,
          typename IvfSampleFilterT = raft::neighbors::filtering::none_ivf_sample_filter>
auto get_compute_similarity_kernel(uint32_t pq_bits, uint32_t k_max)
  -> compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT>
{
  return compute_similarity_kernel_config<OutT,
                                          LutT,
                                          PrecompBaseDiff,
                                          EnableSMemLut,
                                          IvfSampleFilterT>::get(pq_bits, k_max);
}

/** Estimate the occupancy for the given kernel on the given device. */
template <typename OutT, typename LutT, typename IvfSampleFilterT>
struct occupancy_t {
  using shmem_unit = Pow2<128>;

  // Active blocks per SM as reported by the CUDA occupancy calculator.
  int blocks_per_sm = 0;
  // Fraction of the SM's thread capacity used by this configuration.
  double occupancy = 0.0;
  // Fraction of the SM's shared memory used by this configuration.
  double shmem_use = 1.0;

  inline occupancy_t() = default;
  inline occupancy_t(size_t smem,
                     uint32_t n_threads,
                     compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT> kernel,
                     const cudaDeviceProp& dev_props)
  {
    RAFT_CUDA_TRY(
      cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_sm, kernel, n_threads, smem));
    occupancy = double(blocks_per_sm * n_threads) / double(dev_props.maxThreadsPerMultiProcessor);
    shmem_use = double(shmem_unit::roundUp(smem) * blocks_per_sm) /
                double(dev_props.sharedMemPerMultiprocessor);
  }
};

/** A chosen launch configuration: kernel instance, launch geometry, and buffer sizes. */
template <typename OutT, typename LutT, typename IvfSampleFilterT>
struct selected {
  compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT> kernel;
  dim3 grid_dim;
  dim3 block_dim;
  size_t smem_size;
  // Number of LutT elements to allocate in global memory for the lookup tables
  // (zero when the LUT lives in shared memory).
  size_t device_lut_size;
};

/** Launch the previously selected kernel configuration on the given stream. */
template <typename OutT,
          typename LutT,
          typename IvfSampleFilterT = raft::neighbors::filtering::none_ivf_sample_filter>
void compute_similarity_run(selected<OutT, LutT, IvfSampleFilterT> s,
                            rmm::cuda_stream_view stream,
                            uint32_t dim,
                            uint32_t n_probes,
                            uint32_t pq_dim,
                            uint32_t n_queries,
                            uint32_t queries_offset,
                            distance::DistanceType metric,
                            codebook_gen codebook_kind,
                            uint32_t topk,
                            uint32_t max_samples,
                            const float* cluster_centers,
                            const float* pq_centers,
                            const uint8_t* const* pq_dataset,
                            const uint32_t* cluster_labels,
                            const uint32_t* _chunk_indices,
                            const float* queries,
                            const uint32_t* index_list,
                            float* query_kths,
                            IvfSampleFilterT sample_filter,
                            LutT* lut_scores,
                            OutT* _out_scores,
                            uint32_t* _out_indices)
{
  s.kernel<<<s.grid_dim, s.block_dim, s.smem_size, stream>>>(dim,
                                                             n_probes,
                                                             pq_dim,
                                                             n_queries,
                                                             queries_offset,
                                                             metric,
                                                             codebook_kind,
                                                             topk,
                                                             max_samples,
                                                             cluster_centers,
                                                             pq_centers,
                                                             pq_dataset,
                                                             cluster_labels,
                                                             _chunk_indices,
                                                             queries,
                                                             index_list,
                                                             query_kths,
                                                             sample_filter,
                                                             lut_scores,
                                                             _out_scores,
                                                             _out_indices);
  RAFT_CHECK_CUDA(stream);
}

/**
 * Use
 * heuristics to choose an optimal instance of the search kernel.
 * It selects among a few kernel variants (with/out using shared mem for
 * lookup tables / precomputed distances) and tries to choose the block size
 * to maximize kernel occupancy.
 *
 * @param manage_local_topk
 *    whether use the fused calculate+select or just calculate the distances for each
 *    query and probed cluster.
 *
 * @param locality_hint
 *    beyond this limit do not consider increasing the number of active blocks per SM
 *    would improve locality anymore.
 */
template <typename OutT,
          typename LutT,
          typename IvfSampleFilterT = raft::neighbors::filtering::none_ivf_sample_filter>
auto compute_similarity_select(const cudaDeviceProp& dev_props,
                               bool manage_local_topk,
                               int locality_hint,
                               double preferred_shmem_carveout,
                               uint32_t pq_bits,
                               uint32_t pq_dim,
                               uint32_t precomp_data_count,
                               uint32_t n_queries,
                               uint32_t n_probes,
                               uint32_t topk) -> selected<OutT, LutT, IvfSampleFilterT>
{
  // Shared memory for storing the lookup table
  size_t lut_mem = sizeof(LutT) * (pq_dim << pq_bits);
  // Shared memory for storing pre-computed pieces to speedup the lookup table construction
  // (e.g. the distance between a cluster center and the query for L2).
  size_t bdf_mem = sizeof(float) * precomp_data_count;

  // Shared memory used by the fused top-k during cluster scanning;
  // may overlap with the precomputed distance array
  struct ltk_add_mem_t {
    // Function pointer resolved once, for the capacity that fits `topk` (or the no-op
    // when the fused top-k is disabled).
    size_t (*mem_required)(uint32_t);

    ltk_add_mem_t(bool manage_local_topk, uint32_t topk)
      : mem_required(pq_block_sort<kMaxCapacity, OutT, uint32_t>::get_mem_required(
          manage_local_topk ? topk : 0))
    {
    }

    [[nodiscard]] auto operator()(uint32_t n_threads) const -> size_t
    {
      return mem_required(n_threads);
    }
  } ltk_add_mem{manage_local_topk, topk};

  // Shared memory for the fused top-k component;
  // may overlap with all other uses of shared memory
  struct ltk_reduce_mem_t {
    uint32_t subwarp_size;
    uint32_t topk;
    bool manage_local_topk;
    ltk_reduce_mem_t(bool manage_local_topk, uint32_t topk)
      : manage_local_topk(manage_local_topk), topk(topk)
    {
      // Smallest power-of-two subwarp that still holds `topk` elements.
      subwarp_size = WarpSize;
      while (topk * 2 <= subwarp_size) {
        subwarp_size /= 2;
      }
    }

    [[nodiscard]] auto operator()(uint32_t n_threads) const -> size_t
    {
      return manage_local_topk
               ? matrix::detail::select::warpsort::template calc_smem_size_for_block_wide<OutT,
                                                                                          uint32_t>(
                   n_threads / subwarp_size, topk)
               : 0;
    }
  } ltk_reduce_mem{manage_local_topk, topk};

  // Total dynamic shared memory as a function of the block size: the top-k reduce
  // buffer overlaps with (LUT + max(base_diff, top-k add buffer)).
  struct total_shared_mem_t {
    ltk_add_mem_t& ltk_add_mem;
    ltk_reduce_mem_t& ltk_reduce_mem;
    size_t lut_mem;
    size_t bdf_mem;
    [[nodiscard]] auto operator()(uint32_t n_threads) const -> size_t
    {
      return std::max(ltk_reduce_mem(n_threads),
                      lut_mem + std::max(bdf_mem, ltk_add_mem(n_threads)));
    }
  };

  // Total amount of work; should be enough to occupy the GPU.
  uint32_t n_blocks = n_queries * n_probes;

  // The minimum block size we may want:
  //   1. It's a power-of-two for efficient L1 caching of pq_centers values
  //      (multiples of `1 << pq_bits`).
  //   2. It should be large enough to fully utilize an SM.
  uint32_t n_threads_min = WarpSize;
  while (dev_props.maxBlocksPerMultiProcessor * int(n_threads_min) <
         dev_props.maxThreadsPerMultiProcessor) {
    n_threads_min *= 2;
  }
  // Further increase the minimum block size to make sure full device occupancy
  // (NB: this may lead to `n_threads_min` being larger than the kernel's maximum)
  while (int(n_blocks * n_threads_min) <
           dev_props.multiProcessorCount * dev_props.maxThreadsPerMultiProcessor &&
         int(n_threads_min) < dev_props.maxThreadsPerBlock) {
    n_threads_min *= 2;
  }
  // Even further, increase it to allow less blocks per SM if there are not enough queries.
  // With this, we reduce the chance of different clusters being processed by two blocks
  // on the same SM and thus improve the data locality for L1 caching.
  while (int(n_queries * n_threads_min) < dev_props.maxThreadsPerMultiProcessor &&
         int(n_threads_min) < dev_props.maxThreadsPerBlock) {
    n_threads_min *= 2;
  }

  // Granularity of changing the number of threads when computing the maximum block size.
  // It's good to have it multiple of the PQ book width.
  uint32_t n_threads_gty = round_up_safe<uint32_t>(1u << pq_bits, WarpSize);

  /*
   Shared memory / L1 cache balance is the main limiter of this kernel.
   The more blocks per SM we launch, the more shared memory we need. Besides that,
   we have three versions of the kernel varying in performance and shmem usage.

   We try the most demanding and the fastest kernel first, trying to maximize occupancy with
   the minimum number of blocks (just one, really). Then, we tweak the `n_threads` to further
   optimize occupancy and data locality for the L1 cache.
   */
  auto conf_fast        = get_compute_similarity_kernel<OutT, LutT, true, true, IvfSampleFilterT>;
  auto conf_no_basediff = get_compute_similarity_kernel<OutT, LutT, false, true, IvfSampleFilterT>;
  auto conf_no_smem_lut = get_compute_similarity_kernel<OutT, LutT, true, false, IvfSampleFilterT>;
  auto topk_or_zero     = manage_local_topk ? topk : 0u;
  // Candidates in decreasing order of expected speed (and shmem demand); the third
  // tuple element tells whether the LUT lives in shared memory.
  std::array candidates{
    std::make_tuple(conf_fast(pq_bits, topk_or_zero),
                    total_shared_mem_t{ltk_add_mem, ltk_reduce_mem, lut_mem, bdf_mem},
                    true),
    std::make_tuple(conf_no_basediff(pq_bits, topk_or_zero),
                    total_shared_mem_t{ltk_add_mem, ltk_reduce_mem, lut_mem, 0},
                    true),
    std::make_tuple(conf_no_smem_lut(pq_bits, topk_or_zero),
                    total_shared_mem_t{ltk_add_mem, ltk_reduce_mem, 0, bdf_mem},
                    false)};

  // we may allow slightly lower than 100% occupancy;
  constexpr double kTargetOccupancy = 0.75;
  // This struct is used to select the better candidate
  occupancy_t<OutT, LutT, IvfSampleFilterT> selected_perf{};
  selected<OutT, LutT, IvfSampleFilterT> selected_config;
  for (auto [kernel, smem_size_f, lut_is_in_shmem] : candidates) {
    if (smem_size_f(WarpSize) > dev_props.sharedMemPerBlockOptin) {
      // Even a single block cannot fit into an SM due to shmem requirements. Skip the candidate.
      continue;
    }

    // First, we set the carveout hint to the preferred value. The driver will increase this if
    // needed to run at least one block per SM. At the same time, if more blocks fit into one SM,
    // this carveout value will limit the calculated occupancy. When we're done selecting the best
    // launch configuration, we will tighten the carveout once more, based on the final memory
    // usage and occupancy.
    const int max_carveout =
      estimate_carveout(preferred_shmem_carveout, smem_size_f(WarpSize), dev_props);
    RAFT_CUDA_TRY(
      cudaFuncSetAttribute(kernel, cudaFuncAttributePreferredSharedMemoryCarveout, max_carveout));

    // Get the theoretical maximum possible number of threads per block
    cudaFuncAttributes kernel_attrs;
    RAFT_CUDA_TRY(cudaFuncGetAttributes(&kernel_attrs, kernel));
    uint32_t n_threads = round_down_safe<uint32_t>(kernel_attrs.maxThreadsPerBlock, n_threads_gty);

    // Actual required shmem depends on the number of threads
    size_t smem_size = smem_size_f(n_threads);

    // Make sure the kernel can get enough shmem.
    cudaError_t cuda_status =
      cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
    if (cuda_status != cudaSuccess) {
      RAFT_EXPECTS(
        cuda_status == cudaGetLastError(),
        "Tried to reset the expected cuda error code, but it didn't match the expectation");
      // Failed to request enough shmem for the kernel. Skip the candidate.
      continue;
    }

    occupancy_t<OutT, LutT, IvfSampleFilterT> cur(smem_size, n_threads, kernel, dev_props);
    if (cur.blocks_per_sm <= 0) {
      // For some reason, we still cannot make this kernel run. Skip the candidate.
      continue;
    }

    {
      // Try to reduce the number of threads to increase occupancy and data locality
      auto n_threads_tmp = n_threads_min;
      while (n_threads_tmp * 2 < n_threads) {
        n_threads_tmp *= 2;
      }
      if (n_threads_tmp < n_threads) {
        while (n_threads_tmp >= n_threads_min) {
          auto smem_size_tmp = smem_size_f(n_threads_tmp);
          occupancy_t<OutT, LutT, IvfSampleFilterT> tmp(
            smem_size_tmp, n_threads_tmp, kernel, dev_props);
          bool select_it = false;
          if (lut_is_in_shmem && locality_hint >= tmp.blocks_per_sm) {
            // Normally, the smaller the block the better for L1 cache hit rate.
            // Hence, the occupancy should be "just good enough"
            select_it = tmp.occupancy >= min(kTargetOccupancy, cur.occupancy);
          } else if (lut_is_in_shmem) {
            // If we don't have enough repeating probes (locality_hint < tmp.blocks_per_sm),
            // the locality is not going to improve with increasing the number of blocks per SM.
            // Hence, the only metric here is the occupancy.
            bool improves_occupancy = tmp.occupancy > cur.occupancy;
            // Otherwise, the performance still improves with a smaller block size,
            // given there is enough work to do
            bool improves_parallelism =
              tmp.occupancy == cur.occupancy &&
              7u * tmp.blocks_per_sm * dev_props.multiProcessorCount <= n_blocks;
            select_it = improves_occupancy || improves_parallelism;
          } else {
            // If we don't use shared memory for the lookup table, increasing the number of blocks
            // is very taxing on the global memory usage.
            // In this case, the occupancy must increase a lot to make it worth the cost.
            select_it = tmp.occupancy >= min(1.0, cur.occupancy / kTargetOccupancy);
          }
          if (select_it) {
            n_threads = n_threads_tmp;
            smem_size = smem_size_tmp;
            cur       = tmp;
          }
          n_threads_tmp /= 2;
        }
      }
    }

    {
      if (selected_perf.occupancy <= 0.0  // no candidate yet
          || (selected_perf.occupancy < cur.occupancy * kTargetOccupancy &&
              selected_perf.shmem_use >= cur.shmem_use)  // much improved occupancy
      ) {
        selected_perf = cur;
        if (lut_is_in_shmem) {
          selected_config = {
            kernel, dim3(n_blocks, 1, 1), dim3(n_threads, 1, 1), smem_size, size_t(0)};
        } else {
          // When the global memory is used for the lookup table, we need to minimize the grid
          // size; otherwise, the kernel may quickly run out of memory.
          auto n_blocks_min =
            std::min<uint32_t>(n_blocks, cur.blocks_per_sm * dev_props.multiProcessorCount);
          selected_config = {kernel,
                             dim3(n_blocks_min, 1, 1),
                             dim3(n_threads, 1, 1),
                             smem_size,
                             size_t(n_blocks_min) * size_t(pq_dim << pq_bits)};
        }
        // Actual shmem/L1 split wildly rounds up the specified preferred carveout, so we set here
        // a rather conservative bar; most likely, the kernel gets more shared memory than this,
        // and the occupancy doesn't get hurt.
        auto carveout = std::min<int>(max_carveout, std::ceil(100.0 * cur.shmem_use));
        RAFT_CUDA_TRY(
          cudaFuncSetAttribute(kernel, cudaFuncAttributePreferredSharedMemoryCarveout, carveout));
        if (cur.occupancy >= kTargetOccupancy) { break; }
      } else if (selected_perf.occupancy > 0.0) {
        // If we found a reasonable candidate on a previous iteration, and this one is not better,
        // then don't try any more candidates because they are much slower anyway.
        break;
      }
    }
  }

  RAFT_EXPECTS(selected_perf.occupancy > 0.0,
               "Couldn't determine a working kernel launch configuration.");

  return selected_config;
}

}  // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_fp_8bit.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/neighbors/ivf_pq_types.hpp> #include <raft/core/cudart_utils.hpp> #include <raft/core/device_mdarray.hpp> #include <raft/core/logger.hpp> #include <raft/core/nvtx.hpp> #include <raft/core/operators.hpp> #include <raft/core/resources.hpp> #include <raft/distance/distance_types.hpp> #include <raft/linalg/gemm.cuh> #include <raft/linalg/map.cuh> #include <raft/linalg/unary_op.cuh> #include <raft/matrix/detail/select_k.cuh> #include <raft/matrix/detail/select_warpsort.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/device_atomics.cuh> #include <raft/util/device_loads_stores.cuh> #include <raft/util/pow2_utils.cuh> #include <raft/util/vectorized.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <cub/cub.cuh> #include <cuda_fp16.h> #include <optional> namespace raft::neighbors::ivf_pq::detail { /** 8-bit floating-point storage type. * * This is a custom type for the current IVF-PQ implementation. No arithmetic operations defined * only conversion to and from fp32. This type is unrelated to the proposed FP8 specification. 
 */
template <uint32_t ExpBits, bool Signed>
struct fp_8bit {
  static_assert(ExpBits + uint8_t{Signed} <= 8, "The type does not fit in 8 bits.");
  // Exponent bias: stored exponents are offset by this value.
  constexpr static uint32_t ExpMask = (1u << (ExpBits - 1u)) - 1u;  // NOLINT
  // Number of bits left for the value (mantissa); when Signed, its lowest bit holds the sign.
  constexpr static uint32_t ValBits = 8u - ExpBits;  // NOLINT

 public:
  // Raw encoding: [exponent: ExpBits bits | mantissa: ValBits bits]; for Signed types the
  // least-significant bit is the sign and replaces the lowest mantissa bit (see float2fp_8bit).
  uint8_t bitstring;

  // Wrap a raw bit pattern as-is (no conversion).
  HDI explicit fp_8bit(uint8_t bs) : bitstring(bs) {}
  // Compress a float into the 8-bit representation (may truncate to zero or saturate, see below).
  HDI explicit fp_8bit(float fp) : fp_8bit(float2fp_8bit(fp).bitstring) {}
  HDI auto operator=(float fp) -> fp_8bit<ExpBits, Signed>&
  {
    bitstring = float2fp_8bit(fp).bitstring;
    return *this;
  }
  // Decompress to fp32 / fp16. These are the only operations provided besides construction.
  HDI explicit operator float() const { return fp_8bit2float(*this); }
  HDI explicit operator half() const { return fp_8bit2half(*this); }

 private:
  // Smallest positive value that is not truncated to zero: 2^(-ExpMask).
  static constexpr float kMin = 1.0f / float(1u << ExpMask);
  // Largest representable value: 2^(ExpMask + 1) * (2 - 2^(-ValBits)).
  static constexpr float kMax = float(1u << (ExpMask + 1)) * (2.0f - 1.0f / float(1u << ValBits));

  static HDI auto float2fp_8bit(float v) -> fp_8bit<ExpBits, Signed>
  {
    if constexpr (Signed) {
      // Encode the magnitude as unsigned, then store the sign in the lowest bit.
      auto u = fp_8bit<ExpBits, false>(std::abs(v)).bitstring;
      u      = (u & 0xfeu) | uint8_t{v < 0};  // set the sign bit
      return fp_8bit<ExpBits, true>(u);
    } else {
      // sic! all small and negative numbers are truncated to zero.
      if (v < kMin) { return fp_8bit<ExpBits, false>{static_cast<uint8_t>(0)}; }
      // protect from overflow
      if (v >= kMax) { return fp_8bit<ExpBits, false>{static_cast<uint8_t>(0xffu)}; }
      // the rest of possible float values should be within the normalized range
      // Re-bias the fp32 exponent and keep the top ValBits bits of the fp32 mantissa.
      return fp_8bit<ExpBits, false>{static_cast<uint8_t>(
        (*reinterpret_cast<uint32_t*>(&v) + (ExpMask << 23u) - 0x3f800000u) >> (15u + ExpBits))};
    }
  }

  static HDI auto fp_8bit2float(const fp_8bit<ExpBits, Signed>& v) -> float
  {
    uint32_t u = v.bitstring;
    if constexpr (Signed) {
      u &= ~1;  // zero the sign bit
    }
    float r;
    // kBase32 re-biases the exponent back to fp32; the OR-ed `0x00400000u >> ValBits` term sets
    // the bit just below the restored mantissa — presumably half-ULP rounding compensation for
    // the truncation done in float2fp_8bit (TODO confirm).
    constexpr uint32_t kBase32 = (0x3f800000u | (0x00400000u >> ValBits)) - (ExpMask << 23);
    *reinterpret_cast<uint32_t*>(&r) = kBase32 + (u << (15u + ExpBits));
    if constexpr (Signed) {  // recover the sign bit
      if (v.bitstring & 1) { r = -r; }
    }
    return r;
  }

  static HDI auto fp_8bit2half(const fp_8bit<ExpBits, Signed>& v) -> half
  {
    uint16_t u = v.bitstring;
    if constexpr (Signed) {
      u &= ~1;  // zero the sign bit
    }
    half r;
    // Same scheme as fp_8bit2float, but targeting the fp16 bit layout
    // (10-bit mantissa, 5-bit exponent starting at bit 10).
    constexpr uint16_t kBase16 = (0x3c00u | (0x0200u >> ValBits)) - (ExpMask << 10);
    *reinterpret_cast<uint16_t*>(&r) = kBase16 + (u << (2u + ExpBits));
    if constexpr (Signed) {  // recover the sign bit
      if (v.bitstring & 1) { r = -r; }
    }
    return r;
  }
};

}  // namespace raft::neighbors::ivf_pq::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_search.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "ivf_flat_search-inl.cuh" #endif #ifdef RAFT_COMPILED #include "ivf_flat_search-ext.cuh" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/refine_host-ext.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> // int64_t #include <raft/core/host_mdspan.hpp> // raft::host_matrix_view #include <raft/distance/distance_types.hpp> // raft::distance::DistanceType #include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT #ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY namespace raft::neighbors::detail { template <typename IdxT, typename DataT, typename DistanceT, typename ExtentsT> [[gnu::optimize(3), gnu::optimize("tree-vectorize")]] void refine_host( raft::host_matrix_view<const DataT, ExtentsT, row_major> dataset, raft::host_matrix_view<const DataT, ExtentsT, row_major> queries, raft::host_matrix_view<const IdxT, ExtentsT, row_major> neighbor_candidates, raft::host_matrix_view<IdxT, ExtentsT, row_major> indices, raft::host_matrix_view<DistanceT, ExtentsT, row_major> distances, distance::DistanceType metric = distance::DistanceType::L2Unexpanded) RAFT_EXPLICIT; } #endif // RAFT_EXPLICIT_INSTANTIATE_ONLY #define instantiate_raft_neighbors_refine(IdxT, DataT, DistanceT, ExtentsT) \ extern template void raft::neighbors::detail::refine_host<IdxT, DataT, DistanceT, ExtentsT>( \ raft::host_matrix_view<const DataT, ExtentsT, row_major> dataset, \ raft::host_matrix_view<const DataT, ExtentsT, row_major> queries, \ raft::host_matrix_view<const IdxT, ExtentsT, row_major> neighbor_candidates, \ raft::host_matrix_view<IdxT, ExtentsT, row_major> indices, \ raft::host_matrix_view<DistanceT, 
ExtentsT, row_major> distances, \ distance::DistanceType metric); instantiate_raft_neighbors_refine(int64_t, float, float, int64_t); instantiate_raft_neighbors_refine(int64_t, int8_t, float, int64_t); instantiate_raft_neighbors_refine(int64_t, uint8_t, float, int64_t); #undef instantiate_raft_neighbors_refine
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_pq_compute_similarity-ext.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_fp16.h> // __half #include <raft/core/detail/macros.hpp> // RAFT_WEAK_FUNCTION #include <raft/distance/distance_types.hpp> // raft::distance::DistanceType #include <raft/neighbors/detail/ivf_pq_fp_8bit.cuh> // raft::neighbors::ivf_pq::detail::fp_8bit #include <raft/neighbors/ivf_pq_types.hpp> // raft::neighbors::ivf_pq::codebook_gen #include <raft/neighbors/sample_filter_types.hpp> // none_ivf_sample_filter #include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT #include <rmm/cuda_stream_view.hpp> // rmm::cuda_stream_view #ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY namespace raft::neighbors::ivf_pq::detail { // is_local_topk_feasible is not inline here, because we would have to define it // here as well. That would run the risk of the definitions here and in the // -inl.cuh header diverging. 
auto RAFT_WEAK_FUNCTION is_local_topk_feasible(uint32_t k, uint32_t n_probes, uint32_t n_queries) -> bool; template <typename OutT, typename LutT, typename IvfSampleFilterT, uint32_t PqBits, int Capacity, bool PrecompBaseDiff, bool EnableSMemLut> RAFT_KERNEL compute_similarity_kernel(uint32_t dim, uint32_t n_probes, uint32_t pq_dim, uint32_t n_queries, uint32_t queries_offset, distance::DistanceType metric, codebook_gen codebook_kind, uint32_t topk, uint32_t max_samples, const float* cluster_centers, const float* pq_centers, const uint8_t* const* pq_dataset, const uint32_t* cluster_labels, const uint32_t* _chunk_indices, const float* queries, const uint32_t* index_list, float* query_kths, IvfSampleFilterT sample_filter, LutT* lut_scores, OutT* _out_scores, uint32_t* _out_indices) RAFT_EXPLICIT; // The signature of the kernel defined by a minimal set of template parameters template <typename OutT, typename LutT, typename IvfSampleFilterT> using compute_similarity_kernel_t = decltype(&compute_similarity_kernel<OutT, LutT, IvfSampleFilterT, 8, 0, true, true>); template <typename OutT, typename LutT, typename IvfSampleFilterT> struct selected { compute_similarity_kernel_t<OutT, LutT, IvfSampleFilterT> kernel; dim3 grid_dim; dim3 block_dim; size_t smem_size; size_t device_lut_size; }; template <typename OutT, typename LutT, typename IvfSampleFilterT> void compute_similarity_run(selected<OutT, LutT, IvfSampleFilterT> s, rmm::cuda_stream_view stream, uint32_t dim, uint32_t n_probes, uint32_t pq_dim, uint32_t n_queries, uint32_t queries_offset, distance::DistanceType metric, codebook_gen codebook_kind, uint32_t topk, uint32_t max_samples, const float* cluster_centers, const float* pq_centers, const uint8_t* const* pq_dataset, const uint32_t* cluster_labels, const uint32_t* _chunk_indices, const float* queries, const uint32_t* index_list, float* query_kths, IvfSampleFilterT sample_filter, LutT* lut_scores, OutT* _out_scores, uint32_t* _out_indices) RAFT_EXPLICIT; /** * Use 
heuristics to choose an optimal instance of the search kernel. * It selects among a few kernel variants (with/out using shared mem for * lookup tables / precomputed distances) and tries to choose the block size * to maximize kernel occupancy. * * @param manage_local_topk * whether use the fused calculate+select or just calculate the distances for each * query and probed cluster. * * @param locality_hint * beyond this limit do not consider increasing the number of active blocks per SM * would improve locality anymore. */ template <typename OutT, typename LutT, typename IvfSampleFilterT> auto compute_similarity_select(const cudaDeviceProp& dev_props, bool manage_local_topk, int locality_hint, double preferred_shmem_carveout, uint32_t pq_bits, uint32_t pq_dim, uint32_t precomp_data_count, uint32_t n_queries, uint32_t n_probes, uint32_t topk) -> selected<OutT, LutT, IvfSampleFilterT> RAFT_EXPLICIT; } // namespace raft::neighbors::ivf_pq::detail #endif // RAFT_EXPLICIT_INSTANTIATE_ONLY #define instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( \ OutT, LutT, IvfSampleFilterT) \ extern template auto \ raft::neighbors::ivf_pq::detail::compute_similarity_select<OutT, LutT, IvfSampleFilterT>( \ const cudaDeviceProp& dev_props, \ bool manage_local_topk, \ int locality_hint, \ double preferred_shmem_carveout, \ uint32_t pq_bits, \ uint32_t pq_dim, \ uint32_t precomp_data_count, \ uint32_t n_queries, \ uint32_t n_probes, \ uint32_t topk) \ ->raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT>; \ \ extern template void \ raft::neighbors::ivf_pq::detail::compute_similarity_run<OutT, LutT, IvfSampleFilterT>( \ raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT> s, \ rmm::cuda_stream_view stream, \ uint32_t dim, \ uint32_t n_probes, \ uint32_t pq_dim, \ uint32_t n_queries, \ uint32_t queries_offset, \ raft::distance::DistanceType metric, \ raft::neighbors::ivf_pq::codebook_gen codebook_kind, \ uint32_t topk, \ uint32_t 
max_samples, \ const float* cluster_centers, \ const float* pq_centers, \ const uint8_t* const* pq_dataset, \ const uint32_t* cluster_labels, \ const uint32_t* _chunk_indices, \ const float* queries, \ const uint32_t* index_list, \ float* query_kths, \ IvfSampleFilterT sample_filter, \ LutT* lut_scores, \ OutT* _out_scores, \ uint32_t* _out_indices); #define COMMA , instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( half, raft::neighbors::ivf_pq::detail::fp_8bit<5u COMMA false>, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( half, raft::neighbors::ivf_pq::detail::fp_8bit<5u COMMA true>, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( half, half, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( float, half, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( float, float, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( float, raft::neighbors::ivf_pq::detail::fp_8bit<5u COMMA false>, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( float, raft::neighbors::ivf_pq::detail::fp_8bit<5u COMMA true>, raft::neighbors::filtering::ivf_to_sample_filter< int64_t COMMA raft::neighbors::filtering::none_ivf_sample_filter>); #undef COMMA #undef 
instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/refine_common.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/mdspan.hpp> #include <raft/distance/distance_types.hpp> namespace raft::neighbors::detail { /** Checks whether the input data extents are compatible. */ template <typename ExtentsT> void refine_check_input(ExtentsT dataset, ExtentsT queries, ExtentsT candidates, ExtentsT indices, ExtentsT distances, distance::DistanceType metric) { auto n_queries = queries.extent(0); auto k = distances.extent(1); RAFT_EXPECTS(indices.extent(0) == n_queries && distances.extent(0) == n_queries && candidates.extent(0) == n_queries, "Number of rows in output indices, distances and candidates matrices must be equal" " with the number of rows in search matrix. Expected %d, got %d, %d, and %d", static_cast<int>(n_queries), static_cast<int>(indices.extent(0)), static_cast<int>(distances.extent(0)), static_cast<int>(candidates.extent(0))); RAFT_EXPECTS(indices.extent(1) == k, "Number of columns in output indices and distances matrices must be equal to k"); RAFT_EXPECTS(queries.extent(1) == dataset.extent(1), "Number of columns must be equal for dataset and queries"); RAFT_EXPECTS(candidates.extent(1) >= k, "Number of neighbor candidates must not be smaller than k (%d vs %d)", static_cast<int>(candidates.extent(1)), static_cast<int>(k)); } } // namespace raft::neighbors::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_search-inl.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/logger.hpp> // RAFT_LOG_TRACE #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> // raft::resources #include <raft/distance/distance_types.hpp> // is_min_close, DistanceType #include <raft/linalg/gemm.cuh> // raft::linalg::gemm #include <raft/linalg/norm.cuh> // raft::linalg::norm #include <raft/linalg/unary_op.cuh> // raft::linalg::unary_op #include <raft/matrix/detail/select_k.cuh> // matrix::detail::select_k #include <raft/neighbors/detail/ivf_flat_interleaved_scan.cuh> // interleaved_scan #include <raft/neighbors/ivf_flat_types.hpp> // raft::neighbors::ivf_flat::index #include <raft/neighbors/sample_filter_types.hpp> // none_ivf_sample_filter #include <raft/spatial/knn/detail/ann_utils.cuh> // utils::mapping #include <rmm/mr/device/per_device_resource.hpp> // rmm::device_memory_resource namespace raft::neighbors::ivf_flat::detail { using namespace raft::spatial::knn::detail; // NOLINT template <typename T, typename AccT, typename IdxT, typename IvfSampleFilterT> void search_impl(raft::resources const& handle, const raft::neighbors::ivf_flat::index<T, IdxT>& index, const T* queries, uint32_t n_queries, uint32_t queries_offset, uint32_t k, uint32_t n_probes, bool select_min, IdxT* neighbors, AccT* distances, rmm::mr::device_memory_resource* search_mr, IvfSampleFilterT sample_filter) { auto stream = 
resource::get_cuda_stream(handle); // The norm of query rmm::device_uvector<float> query_norm_dev(n_queries, stream, search_mr); // The distance value of cluster(list) and queries rmm::device_uvector<float> distance_buffer_dev(n_queries * index.n_lists(), stream, search_mr); // The topk distance value of cluster(list) and queries rmm::device_uvector<float> coarse_distances_dev(n_queries * n_probes, stream, search_mr); // The topk index of cluster(list) and queries rmm::device_uvector<uint32_t> coarse_indices_dev(n_queries * n_probes, stream, search_mr); // The topk distance value of candidate vectors from each cluster(list) rmm::device_uvector<AccT> refined_distances_dev(n_queries * n_probes * k, stream, search_mr); // The topk index of candidate vectors from each cluster(list) rmm::device_uvector<IdxT> refined_indices_dev(n_queries * n_probes * k, stream, search_mr); size_t float_query_size; if constexpr (std::is_integral_v<T>) { float_query_size = n_queries * index.dim(); } else { float_query_size = 0; } rmm::device_uvector<float> converted_queries_dev(float_query_size, stream, search_mr); float* converted_queries_ptr = converted_queries_dev.data(); if constexpr (std::is_same_v<T, float>) { converted_queries_ptr = const_cast<float*>(queries); } else { linalg::unaryOp( converted_queries_ptr, queries, n_queries * index.dim(), utils::mapping<float>{}, stream); } float alpha = 1.0f; float beta = 0.0f; // todo(lsugy): raft distance? 
(if performance is similar/better than gemm) switch (index.metric()) { case raft::distance::DistanceType::L2Expanded: case raft::distance::DistanceType::L2SqrtExpanded: { alpha = -2.0f; beta = 1.0f; raft::linalg::rowNorm(query_norm_dev.data(), converted_queries_ptr, static_cast<IdxT>(index.dim()), static_cast<IdxT>(n_queries), raft::linalg::L2Norm, true, stream); utils::outer_add(query_norm_dev.data(), (IdxT)n_queries, index.center_norms()->data_handle(), (IdxT)index.n_lists(), distance_buffer_dev.data(), stream); RAFT_LOG_TRACE_VEC(index.center_norms()->data_handle(), std::min<uint32_t>(20, index.dim())); RAFT_LOG_TRACE_VEC(distance_buffer_dev.data(), std::min<uint32_t>(20, index.n_lists())); break; } default: { alpha = 1.0f; beta = 0.0f; } } linalg::gemm(handle, true, false, index.n_lists(), n_queries, index.dim(), &alpha, index.centers().data_handle(), index.dim(), converted_queries_ptr, index.dim(), &beta, distance_buffer_dev.data(), index.n_lists(), stream); RAFT_LOG_TRACE_VEC(distance_buffer_dev.data(), std::min<uint32_t>(20, index.n_lists())); matrix::detail::select_k<AccT, uint32_t>(handle, distance_buffer_dev.data(), nullptr, n_queries, index.n_lists(), n_probes, coarse_distances_dev.data(), coarse_indices_dev.data(), select_min, search_mr); RAFT_LOG_TRACE_VEC(coarse_indices_dev.data(), n_probes); RAFT_LOG_TRACE_VEC(coarse_distances_dev.data(), n_probes); auto distances_dev_ptr = refined_distances_dev.data(); auto indices_dev_ptr = refined_indices_dev.data(); uint32_t grid_dim_x = 0; if (n_probes > 1) { // query the gridDimX size to store probes topK output ivfflat_interleaved_scan<T, typename utils::config<T>::value_t, IdxT, IvfSampleFilterT>( index, nullptr, nullptr, n_queries, queries_offset, index.metric(), n_probes, k, select_min, sample_filter, nullptr, nullptr, grid_dim_x, stream); } else { grid_dim_x = 1; } if (grid_dim_x == 1) { distances_dev_ptr = distances; indices_dev_ptr = neighbors; } ivfflat_interleaved_scan<T, typename 
utils::config<T>::value_t, IdxT, IvfSampleFilterT>( index, queries, coarse_indices_dev.data(), n_queries, queries_offset, index.metric(), n_probes, k, select_min, sample_filter, indices_dev_ptr, distances_dev_ptr, grid_dim_x, stream); RAFT_LOG_TRACE_VEC(distances_dev_ptr, 2 * k); RAFT_LOG_TRACE_VEC(indices_dev_ptr, 2 * k); // Merge topk values from different blocks if (grid_dim_x > 1) { matrix::detail::select_k<AccT, IdxT>(handle, refined_distances_dev.data(), refined_indices_dev.data(), n_queries, k * grid_dim_x, k, distances, neighbors, select_min, search_mr); } } /** See raft::neighbors::ivf_flat::search docs */ template <typename T, typename IdxT, typename IvfSampleFilterT = raft::neighbors::filtering::none_ivf_sample_filter> inline void search(raft::resources const& handle, const search_params& params, const index<T, IdxT>& index, const T* queries, uint32_t n_queries, uint32_t k, IdxT* neighbors, float* distances, rmm::mr::device_memory_resource* mr = nullptr, IvfSampleFilterT sample_filter = IvfSampleFilterT()) { common::nvtx::range<common::nvtx::domain::raft> fun_scope( "ivf_flat::search(k = %u, n_queries = %u, dim = %zu)", k, n_queries, index.dim()); RAFT_EXPECTS(params.n_probes > 0, "n_probes (number of clusters to probe in the search) must be positive."); auto n_probes = std::min<uint32_t>(params.n_probes, index.n_lists()); // a batch size heuristic: try to keep the workspace within the specified size constexpr uint32_t kExpectedWsSize = 1024 * 1024 * 1024; const uint32_t max_queries = std::min<uint32_t>(n_queries, raft::div_rounding_up_safe<uint64_t>( kExpectedWsSize, 16ull * uint64_t{n_probes} * k + 4ull * index.dim())); auto pool_guard = raft::get_pool_memory_resource(mr, max_queries * n_probes * k * 16); if (pool_guard) { RAFT_LOG_DEBUG("ivf_flat::search: using pool memory resource with initial size %zu bytes", n_queries * n_probes * k * 16ull); } for (uint32_t offset_q = 0; offset_q < n_queries; offset_q += max_queries) { uint32_t queries_batch = 
min(max_queries, n_queries - offset_q); search_impl<T, float, IdxT, IvfSampleFilterT>(handle, index, queries + offset_q * index.dim(), queries_batch, offset_q, k, n_probes, raft::distance::is_min_close(index.metric()), neighbors + offset_q * k, distances + offset_q * k, mr, sample_filter); } } } // namespace raft::neighbors::ivf_flat::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/ivf_flat_search-ext.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> // uintX_t #include <raft/neighbors/ivf_flat_types.hpp> // raft::neighbors::ivf_flat::index #include <raft/neighbors/sample_filter_types.hpp> // none_ivf_sample_filter #include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT #ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY namespace raft::neighbors::ivf_flat::detail { template <typename T, typename IdxT, typename IvfSampleFilterT> void search(raft::resources const& handle, const search_params& params, const raft::neighbors::ivf_flat::index<T, IdxT>& index, const T* queries, uint32_t n_queries, uint32_t k, IdxT* neighbors, float* distances, rmm::mr::device_memory_resource* mr = nullptr, IvfSampleFilterT sample_filter = IvfSampleFilterT()) RAFT_EXPLICIT; } // namespace raft::neighbors::ivf_flat::detail #endif // RAFT_EXPLICIT_INSTANTIATE_ONLY #define instantiate_raft_neighbors_ivf_flat_detail_search(T, IdxT, IvfSampleFilterT) \ extern template void raft::neighbors::ivf_flat::detail::search<T, IdxT>( \ raft::resources const& handle, \ const search_params& params, \ const raft::neighbors::ivf_flat::index<T, IdxT>& index, \ const T* queries, \ uint32_t n_queries, \ uint32_t k, \ IdxT* neighbors, \ float* distances, \ rmm::mr::device_memory_resource* mr, \ IvfSampleFilterT sample_filter) instantiate_raft_neighbors_ivf_flat_detail_search( float, int64_t, raft::neighbors::filtering::none_ivf_sample_filter); 
instantiate_raft_neighbors_ivf_flat_detail_search( int8_t, int64_t, raft::neighbors::filtering::none_ivf_sample_filter); instantiate_raft_neighbors_ivf_flat_detail_search( uint8_t, int64_t, raft::neighbors::filtering::none_ivf_sample_filter); #undef instantiate_raft_neighbors_ivf_flat_detail_search
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/device_common.hpp
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "utils.hpp" #include <cfloat> #include <cstdint> #include <cuda_fp16.h> #include <raft/core/detail/macros.hpp> namespace raft::neighbors::cagra::detail { namespace device { // warpSize for compile time calculation constexpr unsigned warp_size = 32; /** Xorshift rondem number generator. * * See https://en.wikipedia.org/wiki/Xorshift#xorshift for reference. */ _RAFT_HOST_DEVICE inline uint64_t xorshift64(uint64_t u) { u ^= u >> 12; u ^= u << 25; u ^= u >> 27; return u * 0x2545F4914F6CDD1DULL; } template <class T> _RAFT_DEVICE inline T swizzling(T x) { // Address swizzling reduces bank conflicts in shared memory, but increases // the amount of operation instead. // return x; return x ^ (x >> 5); // "x" must be less than 1024 } } // namespace device } // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_single_cta_kernel.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "search_single_cta_kernel-inl.cuh" #endif #ifdef RAFT_COMPILED #include "search_single_cta_kernel-ext.cuh" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/bitonic.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <cstdint>
#include <raft/core/detail/macros.hpp>

namespace raft::neighbors::cagra::detail {
namespace bitonic {
namespace detail {

// Conditionally exchange two key/value pairs held by the SAME thread so that
// their order matches `asc` (keys equal -> no swap).
template <class K, class V>
_RAFT_DEVICE inline void swap_if_needed(K& k0, V& v0, K& k1, V& v1, const bool asc)
{
  if ((k0 != k1) && ((k0 < k1) != asc)) {
    const auto tmp_k = k0;
    k0               = k1;
    k1               = tmp_k;
    const auto tmp_v = v0;
    v0               = v1;
    v1               = tmp_v;
  }
}

// Cross-lane variant: exchanges the pair with the lane at `lane_id ^
// lane_offset` via warp shuffles; each lane keeps either its own or the
// partner's pair depending on `asc`. Must be executed by all lanes of the warp
// (full-mask __shfl_xor_sync).
template <class K, class V>
_RAFT_DEVICE inline void swap_if_needed(K& k0, V& v0, const unsigned lane_offset, const bool asc)
{
  auto k1 = __shfl_xor_sync(~0u, k0, lane_offset);
  auto v1 = __shfl_xor_sync(~0u, v0, lane_offset);
  if ((k0 != k1) && ((k0 < k1) != asc)) {
    k0 = k1;
    v0 = v1;
  }
}

// One bitonic merge step over N key/value registers per lane.
// range == 1 sorts each lane's local N elements (direction derived from the
// element's global position); range > 1 merges across lanes with shuffles and
// then merges each lane's local run.
// NOTE(review): the `asc` parameter is never read in this (or any) operator();
// the direction comes from lane/bit parity instead — confirm that callers rely
// only on the default ascending behavior.
template <class K, class V, unsigned N, unsigned warp_size = 32>
struct warp_merge_core {
  _RAFT_DEVICE inline void operator()(K k[N], V v[N], const std::uint32_t range, const bool asc)
  {
    const auto lane_id = threadIdx.x % warp_size;

    if (range == 1) {
      // Local (in-register) bitonic sorting network over the N elements.
      for (std::uint32_t b = 2; b <= N; b <<= 1) {
        for (std::uint32_t c = b / 2; c >= 1; c >>= 1) {
#pragma unroll
          for (std::uint32_t i = 0; i < N; i++) {
            std::uint32_t j = i ^ c;
            if (i >= j) continue;
            // Direction depends on the element's position in the full
            // warp-wide sequence, not just within this lane.
            const auto line_id = i + (N * lane_id);
            const auto p = static_cast<bool>(line_id & b) == static_cast<bool>(line_id & c);
            swap_if_needed(k[i], v[i], k[j], v[j], p);
          }
        }
      }
      return;
    }

    // Cross-lane merge: compare-exchange with progressively closer lanes.
    const std::uint32_t b = range;
    for (std::uint32_t c = b / 2; c >= 1; c >>= 1) {
      const auto p = static_cast<bool>(lane_id & b) == static_cast<bool>(lane_id & c);
#pragma unroll
      for (std::uint32_t i = 0; i < N; i++) {
        swap_if_needed(k[i], v[i], c, p);
      }
    }
    // Finish by merging the lane-local registers.
    const auto p = ((lane_id & b) == 0);
    for (std::uint32_t c = N / 2; c >= 1; c >>= 1) {
#pragma unroll
      for (std::uint32_t i = 0; i < N; i++) {
        std::uint32_t j = i ^ c;
        if (i >= j) continue;
        swap_if_needed(k[i], v[i], k[j], v[j], p);
      }
    }
  }
};

// N == 6 specialization: 6 is not a power of two, so the local phase uses two
// 3-element sorting networks (three swaps each) plus a pairwise merge of the
// two halves instead of the generic power-of-two network.
template <class K, class V, unsigned warp_size>
struct warp_merge_core<K, V, 6, warp_size> {
  _RAFT_DEVICE inline void operator()(K k[6], V v[6], const std::uint32_t range, const bool asc)
  {
    constexpr unsigned N = 6;
    const auto lane_id   = threadIdx.x % warp_size;

    if (range == 1) {
      // Sort each half of 3 in opposite directions to form a bitonic sequence.
      for (std::uint32_t i = 0; i < N; i += 3) {
        const auto p = (i == 0);
        swap_if_needed(k[0 + i], v[0 + i], k[1 + i], v[1 + i], p);
        swap_if_needed(k[1 + i], v[1 + i], k[2 + i], v[2 + i], p);
        swap_if_needed(k[0 + i], v[0 + i], k[1 + i], v[1 + i], p);
      }
      // Merge the two halves, direction alternating by lane parity.
      const auto p = ((lane_id & 1) == 0);
      for (std::uint32_t i = 0; i < 3; i++) {
        std::uint32_t j = i + 3;
        swap_if_needed(k[i], v[i], k[j], v[j], p);
      }
      for (std::uint32_t i = 0; i < N; i += 3) {
        swap_if_needed(k[0 + i], v[0 + i], k[1 + i], v[1 + i], p);
        swap_if_needed(k[1 + i], v[1 + i], k[2 + i], v[2 + i], p);
        swap_if_needed(k[0 + i], v[0 + i], k[1 + i], v[1 + i], p);
      }
      return;
    }

    // Cross-lane merge (same scheme as the generic core).
    const std::uint32_t b = range;
    for (std::uint32_t c = b / 2; c >= 1; c >>= 1) {
      const auto p = static_cast<bool>(lane_id & b) == static_cast<bool>(lane_id & c);
#pragma unroll
      for (std::uint32_t i = 0; i < N; i++) {
        swap_if_needed(k[i], v[i], c, p);
      }
    }
    // Lane-local cleanup via the 3+3 network.
    const auto p = ((lane_id & b) == 0);
    for (std::uint32_t i = 0; i < 3; i++) {
      std::uint32_t j = i + 3;
      swap_if_needed(k[i], v[i], k[j], v[j], p);
    }
    for (std::uint32_t i = 0; i < N; i += N / 2) {
      swap_if_needed(k[0 + i], v[0 + i], k[1 + i], v[1 + i], p);
      swap_if_needed(k[1 + i], v[1 + i], k[2 + i], v[2 + i], p);
      swap_if_needed(k[0 + i], v[0 + i], k[1 + i], v[1 + i], p);
    }
  }
};

// N == 3 specialization: odd count handled with an explicit 3-element network.
template <class K, class V, unsigned warp_size>
struct warp_merge_core<K, V, 3, warp_size> {
  _RAFT_DEVICE inline void operator()(K k[3], V v[3], const std::uint32_t range, const bool asc)
  {
    constexpr unsigned N = 3;
    const auto lane_id   = threadIdx.x % warp_size;

    if (range == 1) {
      const auto p = ((lane_id & 1) == 0);
      swap_if_needed(k[0], v[0], k[1], v[1], p);
      swap_if_needed(k[1], v[1], k[2], v[2], p);
      swap_if_needed(k[0], v[0], k[1], v[1], p);
      return;
    }

    const std::uint32_t b = range;
    for (std::uint32_t c = b / 2; c >= 1; c >>= 1) {
      const auto p = static_cast<bool>(lane_id & b) == static_cast<bool>(lane_id & c);
#pragma unroll
      for (std::uint32_t i = 0; i < N; i++) {
        swap_if_needed(k[i], v[i], c, p);
      }
    }
    const auto p = ((lane_id & b) == 0);
    swap_if_needed(k[0], v[0], k[1], v[1], p);
    swap_if_needed(k[1], v[1], k[2], v[2], p);
    swap_if_needed(k[0], v[0], k[1], v[1], p);
  }
};

// N == 2 specialization: a single compare-exchange covers the local phase.
template <class K, class V, unsigned warp_size>
struct warp_merge_core<K, V, 2, warp_size> {
  _RAFT_DEVICE inline void operator()(K k[2], V v[2], const std::uint32_t range, const bool asc)
  {
    constexpr unsigned N = 2;
    const auto lane_id   = threadIdx.x % warp_size;

    if (range == 1) {
      const auto p = ((lane_id & 1) == 0);
      swap_if_needed(k[0], v[0], k[1], v[1], p);
      return;
    }

    const std::uint32_t b = range;
    for (std::uint32_t c = b / 2; c >= 1; c >>= 1) {
      const auto p = static_cast<bool>(lane_id & b) == static_cast<bool>(lane_id & c);
#pragma unroll
      for (std::uint32_t i = 0; i < N; i++) {
        swap_if_needed(k[i], v[i], c, p);
      }
    }
    const auto p = ((lane_id & b) == 0);
    swap_if_needed(k[0], v[0], k[1], v[1], p);
  }
};

// N == 1 specialization: nothing to sort locally, only cross-lane exchanges.
template <class K, class V, unsigned warp_size>
struct warp_merge_core<K, V, 1, warp_size> {
  _RAFT_DEVICE inline void operator()(K k[1], V v[1], const std::uint32_t range, const bool asc)
  {
    const auto lane_id    = threadIdx.x % warp_size;
    const std::uint32_t b = range;
    for (std::uint32_t c = b / 2; c >= 1; c >>= 1) {
      const auto p = static_cast<bool>(lane_id & b) == static_cast<bool>(lane_id & c);
      swap_if_needed(k[0], v[0], c, p);
    }
  }
};

}  // namespace detail
// Perform one bitonic merge step of size `range` over the N key/value pairs
// held in registers by each lane. Thin dispatcher to the size-specialized
// detail::warp_merge_core. Must be called by all lanes of a warp together
// (the cores use full-mask warp shuffles).
template <class K, class V, unsigned N, unsigned warp_size = 32>
__device__ void warp_merge(K k[N], V v[N], unsigned range, const bool asc = true)
{
  detail::warp_merge_core<K, V, N, warp_size>{}(k, v, range, asc);
}

// Full warp-wide bitonic sort of the N*warp_size key/value pairs distributed
// across the warp's registers: applies merge steps of doubling range
// (1, 2, ..., warp_size), the classic bitonic sort schedule.
template <class K, class V, unsigned N, unsigned warp_size = 32>
__device__ void warp_sort(K k[N], V v[N], const bool asc = true)
{
  for (std::uint32_t range = 1; range <= warp_size; range <<= 1) {
    warp_merge<K, V, N, warp_size>(k, v, range, asc);
  }
}

}  // namespace bitonic
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/factory.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "search_multi_cta.cuh" #include "search_multi_kernel.cuh" #include "search_plan.cuh" #include "search_single_cta.cuh" #include <raft/neighbors/sample_filter_types.hpp> namespace raft::neighbors::cagra::detail { template <typename T, typename IdxT = uint32_t, typename DistanceT = float, typename CagraSampleFilterT = raft::neighbors::filtering::none_cagra_sample_filter> class factory { public: /** * Create a search structure for dataset with dim features. 
*/ static std::unique_ptr<search_plan_impl<T, IdxT, DistanceT, CagraSampleFilterT>> create( raft::resources const& res, search_params const& params, int64_t dim, int64_t graph_degree, uint32_t topk) { search_plan_impl_base plan(params, dim, graph_degree, topk); switch (plan.max_dim) { case 128: switch (plan.team_size) { case 8: return dispatch_kernel<128, 8>(res, plan); break; default: THROW("Incorrect team size %lu", plan.team_size); } break; case 256: switch (plan.team_size) { case 16: return dispatch_kernel<256, 16>(res, plan); break; default: THROW("Incorrect team size %lu", plan.team_size); } break; case 512: switch (plan.team_size) { case 32: return dispatch_kernel<512, 32>(res, plan); break; default: THROW("Incorrect team size %lu", plan.team_size); } break; case 1024: switch (plan.team_size) { case 32: return dispatch_kernel<1024, 32>(res, plan); break; default: THROW("Incorrect team size %lu", plan.team_size); } break; default: RAFT_LOG_DEBUG("Incorrect max_dim (%lu)\n", plan.max_dim); } return std::unique_ptr<search_plan_impl<T, IdxT, DistanceT, CagraSampleFilterT>>(); } private: template <unsigned MAX_DATASET_DIM, unsigned TEAM_SIZE> static std::unique_ptr<search_plan_impl<T, IdxT, DistanceT, CagraSampleFilterT>> dispatch_kernel( raft::resources const& res, search_plan_impl_base& plan) { if (plan.algo == search_algo::SINGLE_CTA) { return std::unique_ptr<search_plan_impl<T, IdxT, DistanceT, CagraSampleFilterT>>( new single_cta_search:: search<TEAM_SIZE, MAX_DATASET_DIM, T, IdxT, DistanceT, CagraSampleFilterT>( res, plan, plan.dim, plan.graph_degree, plan.topk)); } else if (plan.algo == search_algo::MULTI_CTA) { return std::unique_ptr<search_plan_impl<T, IdxT, DistanceT, CagraSampleFilterT>>( new multi_cta_search:: search<TEAM_SIZE, MAX_DATASET_DIM, T, IdxT, DistanceT, CagraSampleFilterT>( res, plan, plan.dim, plan.graph_degree, plan.topk)); } else { return std::unique_ptr<search_plan_impl<T, IdxT, DistanceT, CagraSampleFilterT>>( new multi_kernel_search:: 
search<TEAM_SIZE, MAX_DATASET_DIM, T, IdxT, DistanceT, CagraSampleFilterT>( res, plan, plan.dim, plan.graph_degree, plan.topk)); } } }; }; // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_single_cta.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/spatial/knn/detail/ann_utils.cuh>

#include <algorithm>
#include <cassert>
#include <iostream>
#include <memory>
#include <numeric>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_properties.hpp>
#include <raft/core/resources.hpp>
#include <rmm/device_uvector.hpp>
#include <vector>

#include "bitonic.hpp"
#include "compute_distance.hpp"
#include "device_common.hpp"
#include "hashmap.hpp"
#include "search_plan.cuh"
#include "search_single_cta_kernel.cuh"
#include "topk_by_radix.cuh"
#include "topk_for_cagra/topk_core.cuh"  // TODO replace with raft topk
#include "utils.hpp"
#include <raft/core/logger.hpp>
#include <raft/util/cuda_rt_essentials.hpp>
#include <raft/util/cudart_utils.hpp>  // RAFT_CUDA_TRY_NOT_THROW is used TODO(tfeher): consider moving this to cuda_rt_essentials.hpp

namespace raft::neighbors::cagra::detail {
namespace single_cta_search {

// Search plan for the "single-CTA" mode: each query is processed by exactly
// one thread block. Derives launch configuration (block size, shared-memory
// footprint, hashmap sizing) in set_params() and launches the kernel via
// select_and_run() in operator().
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          typename DATA_T,
          typename INDEX_T,
          typename DISTANCE_T,
          typename SAMPLE_FILTER_T>
struct search : search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T> {
  // Pull the base-class plan parameters into this dependent scope.
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_queries;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::itopk_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::algo;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::team_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::search_width;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::min_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::thread_block_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_mode;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_min_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_max_fill_rate;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_random_samplings;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::rand_xor_mask;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_dim;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dim;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::graph_degree;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::topk;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hash_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::small_hash_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::small_hash_reset_interval;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dataset_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::result_buffer_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::smem_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_executed_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dev_seed;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_seeds;

  // Candidates produced per iteration: search_width parents * graph_degree
  // children each.
  uint32_t num_itopk_candidates;

  search(raft::resources const& res,
         search_params params,
         int64_t dim,
         int64_t graph_degree,
         uint32_t topk)
    : search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>(
        res, params, dim, graph_degree, topk)
  {
    set_params(res);
  }

  ~search() {}

  // Derive the kernel launch configuration from the plan parameters:
  // candidate/result buffer sizes, shared-memory budget, thread block size
  // (auto-tuned when thread_block_size == 0), and the visited-node hashmap.
  inline void set_params(raft::resources const& res)
  {
    num_itopk_candidates = search_width * graph_degree;
    result_buffer_size   = itopk_size + num_itopk_candidates;

    typedef raft::Pow2<32> AlignBytes;
    unsigned result_buffer_size_32 = AlignBytes::roundUp(result_buffer_size);

    constexpr unsigned max_itopk = 512;
    RAFT_EXPECTS(itopk_size <= max_itopk, "itopk_size cannot be larger than %u", max_itopk);

    RAFT_LOG_DEBUG("# num_itopk_candidates: %u", num_itopk_candidates);
    RAFT_LOG_DEBUG("# num_itopk: %lu", itopk_size);
    //
    // Determine the thread block size
    //
    constexpr unsigned min_block_size       = 64;  // 32 or 64
    constexpr unsigned min_block_size_radix = 256;
    constexpr unsigned max_block_size       = 1024;
    //
    const std::uint32_t topk_ws_size = 3;
    // Shared memory: query buffer + (index,distance) result buffer + small
    // hashmap + parent list + top-k workspace + one counter word.
    const std::uint32_t base_smem_size =
      sizeof(float) * max_dim + (sizeof(INDEX_T) + sizeof(DISTANCE_T)) * result_buffer_size_32 +
      sizeof(INDEX_T) * hashmap::get_size(small_hash_bitlen) + sizeof(INDEX_T) * search_width +
      sizeof(std::uint32_t) * topk_ws_size + sizeof(std::uint32_t);
    smem_size = base_smem_size;
    if (num_itopk_candidates > 256) {
      // Tentatively calculate the required shared memory size when radix
      // sort based topk is used, assuming the block size is the maximum.
      if (itopk_size <= 256) {
        smem_size += topk_by_radix_sort<256, INDEX_T>::smem_size * sizeof(std::uint32_t);
      } else {
        smem_size += topk_by_radix_sort<512, INDEX_T>::smem_size * sizeof(std::uint32_t);
      }
    }
    uint32_t block_size = thread_block_size;
    if (block_size == 0) {
      block_size = min_block_size;
      if (num_itopk_candidates > 256) {
        // radix-based topk is used.
        block_size = min_block_size_radix;
        // Internal topk values per thread must be equal to or less than 4
        // when radix-sort block_topk is used.
        while ((block_size < max_block_size) && (max_itopk / block_size > 4)) {
          block_size *= 2;
        }
      }
      // Increase block size according to shared memory requirements.
      // If block size is 32, upper limit of shared memory size per
      // thread block is set to 4096. This is GPU generation dependent.
      constexpr unsigned ulimit_smem_size_cta32 = 4096;
      while (smem_size > ulimit_smem_size_cta32 / 32 * block_size) {
        block_size *= 2;
      }
      // Increase block size to improve GPU occupancy when batch size
      // is small, that is, number of queries is low.
      cudaDeviceProp deviceProp = resource::get_device_properties(res);
      RAFT_LOG_DEBUG("# multiProcessorCount: %d", deviceProp.multiProcessorCount);
      while ((block_size < max_block_size) &&
             (graph_degree * search_width * team_size >= block_size * 2) &&
             (max_queries <= (1024 / (block_size * 2)) * deviceProp.multiProcessorCount)) {
        block_size *= 2;
      }
    }
    RAFT_LOG_DEBUG("# thread_block_size: %u", block_size);
    RAFT_EXPECTS(block_size >= min_block_size,
                 "block_size cannot be smaller than min_block size, %u",
                 min_block_size);
    RAFT_EXPECTS(block_size <= max_block_size,
                 "block_size cannot be larger than max_block size %u",
                 max_block_size);
    thread_block_size = block_size;

    // Recompute the final shared-memory size now that the block size is fixed:
    // small candidate counts keep the cheaper bitonic top-k; larger counts
    // pay for the radix-sort workspace.
    if (num_itopk_candidates <= 256) {
      RAFT_LOG_DEBUG("# bitonic-sort based topk routine is used");
    } else {
      RAFT_LOG_DEBUG("# radix-sort based topk routine is used");
      smem_size = base_smem_size;
      if (itopk_size <= 256) {
        constexpr unsigned MAX_ITOPK = 256;
        smem_size += topk_by_radix_sort<MAX_ITOPK, INDEX_T>::smem_size * sizeof(std::uint32_t);
      } else {
        constexpr unsigned MAX_ITOPK = 512;
        smem_size += topk_by_radix_sort<MAX_ITOPK, INDEX_T>::smem_size * sizeof(std::uint32_t);
      }
    }
    RAFT_LOG_DEBUG("# smem_size: %u", smem_size);
    // A device-memory hashmap is only needed when the shared-memory "small
    // hash" is disabled (small_hash_bitlen == 0).
    hashmap_size = 0;
    if (small_hash_bitlen == 0) {
      hashmap_size = sizeof(INDEX_T) * max_queries * hashmap::get_size(hash_bitlen);
      hashmap.resize(hashmap_size, resource::get_cuda_stream(res));
    }
    RAFT_LOG_DEBUG("# hashmap_size: %lu", hashmap_size);
  }

  // Launch the single-CTA search kernel for a batch of queries. Results are
  // written directly to the caller-provided top-k buffers.
  void operator()(raft::resources const& res,
                  raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
                  raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
                  INDEX_T* const result_indices_ptr,       // [num_queries, topk]
                  DISTANCE_T* const result_distances_ptr,  // [num_queries, topk]
                  const DATA_T* const queries_ptr,         // [num_queries, dataset_dim]
                  const std::uint32_t num_queries,
                  const INDEX_T* dev_seed_ptr,                   // [num_queries, num_seeds]
                  std::uint32_t* const num_executed_iterations,  // [num_queries]
                  uint32_t topk,
                  SAMPLE_FILTER_T sample_filter)
  {
    cudaStream_t stream = resource::get_cuda_stream(res);
    select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>(
      dataset,
      graph,
      result_indices_ptr,
      result_distances_ptr,
      queries_ptr,
      num_queries,
      dev_seed_ptr,
      num_executed_iterations,
      topk,
      num_itopk_candidates,
      static_cast<uint32_t>(thread_block_size),
      smem_size,
      hash_bitlen,
      hashmap.data(),
      small_hash_bitlen,
      small_hash_reset_interval,
      num_random_samplings,
      rand_xor_mask,
      num_seeds,
      itopk_size,
      search_width,
      min_iterations,
      max_iterations,
      sample_filter,
      stream);
  }
};

}  // namespace single_cta_search
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_multi_cta.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/spatial/knn/detail/ann_utils.cuh>

#include <algorithm>
#include <cassert>
#include <iostream>
#include <memory>
#include <numeric>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_properties.hpp>
#include <raft/core/resources.hpp>
#include <vector>

#include "bitonic.hpp"
#include "compute_distance.hpp"
#include "device_common.hpp"
#include "hashmap.hpp"
#include "search_multi_cta_kernel.cuh"
#include "search_plan.cuh"
#include "topk_for_cagra/topk_core.cuh"  // TODO replace with raft topk if possible
#include "utils.hpp"
#include <raft/core/logger.hpp>
#include <raft/util/cuda_rt_essentials.hpp>
#include <raft/util/cudart_utils.hpp>  // RAFT_CUDA_TRY_NOT_THROW is used TODO(tfeher): consider moving this to cuda_rt_essentials.hpp

namespace raft::neighbors::cagra::detail {
namespace multi_cta_search {

// Search plan for the "multi-CTA" mode: several thread blocks cooperate on
// each query, each producing a 32-entry partial result; the partials are then
// reduced to the final top-k on the host-side launch path via _cuann_find_topk.
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          typename DATA_T,
          typename INDEX_T,
          typename DISTANCE_T,
          typename SAMPLE_FILTER_T>
struct search : public search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T> {
  // Pull the base-class plan parameters into this dependent scope.
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_queries;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::itopk_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::algo;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::team_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::search_width;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::min_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::thread_block_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_mode;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_min_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_max_fill_rate;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_random_samplings;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::rand_xor_mask;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_dim;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dim;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::graph_degree;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::topk;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hash_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::small_hash_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::small_hash_reset_interval;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dataset_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::result_buffer_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::smem_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_executed_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dev_seed;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_seeds;

  uint32_t num_cta_per_query;  // thread blocks cooperating on one query
  // Per-CTA partial results, later reduced to the final top-k.
  rmm::device_uvector<INDEX_T> intermediate_indices;
  rmm::device_uvector<float> intermediate_distances;
  size_t topk_workspace_size;
  rmm::device_uvector<uint32_t> topk_workspace;

  search(raft::resources const& res,
         search_params params,
         int64_t dim,
         int64_t graph_degree,
         uint32_t topk)
    : search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>(
        res, params, dim, graph_degree, topk),
      intermediate_indices(0, resource::get_cuda_stream(res)),
      intermediate_distances(0, resource::get_cuda_stream(res)),
      topk_workspace(0, resource::get_cuda_stream(res))
  {
    set_params(res, params);
  }

  // Derive launch configuration and allocate the intermediate/top-k buffers.
  // In this mode the internal top-k per CTA is fixed to 32 and the requested
  // itopk_size/search_width instead determine how many CTAs serve each query.
  void set_params(raft::resources const& res, const search_params& params)
  {
    constexpr unsigned muti_cta_itopk_size = 32;
    this->itopk_size                       = muti_cta_itopk_size;
    search_width                           = 1;
    num_cta_per_query = max(params.search_width, params.itopk_size / muti_cta_itopk_size);
    result_buffer_size = itopk_size + search_width * graph_degree;
    typedef raft::Pow2<32> AlignBytes;
    unsigned result_buffer_size_32 = AlignBytes::roundUp(result_buffer_size);
    // constexpr unsigned max_result_buffer_size = 256;
    RAFT_EXPECTS(result_buffer_size_32 <= 256, "Result buffer size cannot exceed 256");

    // Shared memory: query buffer + (index,distance) result buffer + parent
    // list + one counter word.
    smem_size = sizeof(float) * max_dim +
                (sizeof(INDEX_T) + sizeof(DISTANCE_T)) * result_buffer_size_32 +
                sizeof(uint32_t) * search_width + sizeof(uint32_t);
    RAFT_LOG_DEBUG("# smem_size: %u", smem_size);

    //
    // Determine the thread block size
    //
    constexpr unsigned min_block_size = 64;
    constexpr unsigned max_block_size = 1024;
    uint32_t block_size               = thread_block_size;
    if (block_size == 0) {
      block_size = min_block_size;
      // Increase block size according to shared memory requirements.
      // If block size is 32, upper limit of shared memory size per
      // thread block is set to 4096. This is GPU generation dependent.
      constexpr unsigned ulimit_smem_size_cta32 = 4096;
      while (smem_size > ulimit_smem_size_cta32 / 32 * block_size) {
        block_size *= 2;
      }
      // Increase block size to improve GPU occupancy when total number of
      // CTAs (= num_cta_per_query * max_queries) is small.
      cudaDeviceProp deviceProp = resource::get_device_properties(res);
      RAFT_LOG_DEBUG("# multiProcessorCount: %d", deviceProp.multiProcessorCount);
      while ((block_size < max_block_size) &&
             (graph_degree * search_width * team_size >= block_size * 2) &&
             (num_cta_per_query * max_queries <=
              (1024 / (block_size * 2)) * deviceProp.multiProcessorCount)) {
        block_size *= 2;
      }
    }
    RAFT_LOG_DEBUG("# thread_block_size: %u", block_size);
    RAFT_EXPECTS(block_size >= min_block_size,
                 "block_size cannot be smaller than min_block size, %u",
                 min_block_size);
    RAFT_EXPECTS(block_size <= max_block_size,
                 "block_size cannot be larger than max_block size %u",
                 max_block_size);
    thread_block_size = block_size;

    //
    // Allocate memory for intermediate buffer and workspace.
    //
    uint32_t num_intermediate_results = num_cta_per_query * itopk_size;
    intermediate_indices.resize(num_intermediate_results * max_queries,
                                resource::get_cuda_stream(res));
    intermediate_distances.resize(num_intermediate_results * max_queries,
                                  resource::get_cuda_stream(res));

    hashmap.resize(hashmap_size, resource::get_cuda_stream(res));

    topk_workspace_size = _cuann_find_topk_bufferSize(
      topk, max_queries, num_intermediate_results, utils::get_cuda_data_type<DATA_T>());
    RAFT_LOG_DEBUG("# topk_workspace_size: %lu", topk_workspace_size);
    topk_workspace.resize(topk_workspace_size, resource::get_cuda_stream(res));
  }

  // Validate that the combined CTA results can cover the requested top-k.
  void check(const uint32_t topk) override
  {
    RAFT_EXPECTS(num_cta_per_query * 32 >= topk,
                 "`num_cta_per_query` (%u) * 32 must be equal to or greater than "
                 "`topk` (%u) when 'search_mode' is \"multi-cta\". "
                 "(`num_cta_per_query`=max(`search_width`, `itopk_size`/32))",
                 num_cta_per_query,
                 topk);
  }

  ~search() {}

  // Launch the multi-CTA search kernel, then reduce the per-CTA intermediate
  // results to the caller's final top-k buffers with _cuann_find_topk.
  void operator()(raft::resources const& res,
                  raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
                  raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
                  INDEX_T* const topk_indices_ptr,       // [num_queries, topk]
                  DISTANCE_T* const topk_distances_ptr,  // [num_queries, topk]
                  const DATA_T* const queries_ptr,       // [num_queries, dataset_dim]
                  const uint32_t num_queries,
                  const INDEX_T* dev_seed_ptr,              // [num_queries, num_seeds]
                  uint32_t* const num_executed_iterations,  // [num_queries,]
                  uint32_t topk,
                  SAMPLE_FILTER_T sample_filter)
  {
    cudaStream_t stream = resource::get_cuda_stream(res);
    select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>(
      dataset,
      graph,
      intermediate_indices.data(),
      intermediate_distances.data(),
      queries_ptr,
      num_queries,
      dev_seed_ptr,
      num_executed_iterations,
      topk,
      thread_block_size,
      result_buffer_size,
      smem_size,
      hash_bitlen,
      hashmap.data(),
      num_cta_per_query,
      num_random_samplings,
      rand_xor_mask,
      num_seeds,
      itopk_size,
      search_width,
      min_iterations,
      max_iterations,
      sample_filter,
      stream);
    RAFT_CUDA_TRY(cudaPeekAtLastError());

    // Select the top-k results from the intermediate results
    const uint32_t num_intermediate_results = num_cta_per_query * itopk_size;
    _cuann_find_topk(topk,
                     num_queries,
                     num_intermediate_results,
                     intermediate_distances.data(),
                     num_intermediate_results,
                     intermediate_indices.data(),
                     num_intermediate_results,
                     topk_distances_ptr,
                     topk,
                     topk_indices_ptr,
                     topk,
                     topk_workspace.data(),
                     true,
                     NULL,
                     stream);
  }
};

}  // namespace multi_cta_search
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/compute_distance.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/spatial/knn/detail/ann_utils.cuh>

#include "device_common.hpp"
#include "hashmap.hpp"
#include "utils.hpp"
#include <type_traits>

namespace raft::neighbors::cagra::detail {
namespace device {

// Wide-load types used to read dataset vectors in vectorized chunks.
// using LOAD_256BIT_T = ulonglong4;
using LOAD_128BIT_T = uint4;
using LOAD_64BIT_T  = uint64_t;

// Number of DATA_T elements packed into one LOAD_T wide load.
template <class LOAD_T, class DATA_T>
_RAFT_DEVICE constexpr unsigned get_vlen()
{
  return utils::size_of<LOAD_T>() / utils::size_of<DATA_T>();
}

// Union view over one wide load: written as LOAD_T, read element-wise as
// DATA_T[VLEN].
template <class LOAD_T, class DATA_T, unsigned VLEN>
struct data_load_t {
  union {
    LOAD_T load;
    DATA_T data[VLEN];
  };
};

/**
 * Seed the candidate buffer: each TEAM_SIZE-thread team picks nodes (from the
 * provided seed list when available, otherwise pseudo-randomly via xorshift64),
 * computes the squared L2 distance to the query and keeps the best of
 * `num_distilation` draws per slot. Results go to the visited hashmap-filtered
 * output arrays; nodes already visited are recorded as max-value sentinels.
 *
 * NOTE(review): the query is read through device::swizzling, so query_buffer
 * is assumed to have been written with the same swizzled layout — confirm
 * against the kernel that fills it.
 */
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          class LOAD_T,
          class DATA_T,
          class DISTANCE_T,
          class INDEX_T>
_RAFT_DEVICE void compute_distance_to_random_nodes(
  INDEX_T* const result_indices_ptr,       // [num_pickup]
  DISTANCE_T* const result_distances_ptr,  // [num_pickup]
  const float* const query_buffer,
  const DATA_T* const dataset_ptr,  // [dataset_size, dataset_dim]
  const std::size_t dataset_dim,
  const std::size_t dataset_size,
  const std::size_t dataset_ld,
  const std::size_t num_pickup,
  const unsigned num_distilation,
  const uint64_t rand_xor_mask,
  const INDEX_T* const seed_ptr,  // [num_seeds]
  const uint32_t num_seeds,
  INDEX_T* const visited_hash_ptr,
  const uint32_t hash_bitlen,
  const uint32_t block_id   = 0,
  const uint32_t num_blocks = 1)
{
  const unsigned lane_id = threadIdx.x % TEAM_SIZE;
  constexpr unsigned vlen  = get_vlen<LOAD_T, DATA_T>();
  // Wide loads needed per lane to cover MAX_DATASET_DIM elements.
  constexpr unsigned nelem = (MAX_DATASET_DIM + (TEAM_SIZE * vlen) - 1) / (TEAM_SIZE * vlen);
  struct data_load_t<LOAD_T, DATA_T, vlen> dl_buff[nelem];
  // Round the iteration count up so every team in the warp loops equally
  // (keeps the warp-wide shuffles below convergent).
  uint32_t max_i = num_pickup;
  if (max_i % (32 / TEAM_SIZE)) { max_i += (32 / TEAM_SIZE) - (max_i % (32 / TEAM_SIZE)); }
  for (uint32_t i = threadIdx.x / TEAM_SIZE; i < max_i; i += blockDim.x / TEAM_SIZE) {
    const bool valid_i = (i < num_pickup);
    INDEX_T best_index_team_local;
    DISTANCE_T best_norm2_team_local = utils::get_max_value<DISTANCE_T>();
    for (uint32_t j = 0; j < num_distilation; j++) {
      // Select a node randomly and compute the distance to it
      INDEX_T seed_index;
      DISTANCE_T norm2 = 0.0;
      if (valid_i) {
        // uint32_t gid = i + (num_pickup * (j + (num_distilation * block_id)));
        uint32_t gid = block_id + (num_blocks * (i + (num_pickup * j)));
        if (seed_ptr && (gid < num_seeds)) {
          seed_index = seed_ptr[gid];
        } else {
          seed_index = device::xorshift64(gid ^ rand_xor_mask) % dataset_size;
        }
#pragma unroll
        for (uint32_t e = 0; e < nelem; e++) {
          const uint32_t k = (lane_id + (TEAM_SIZE * e)) * vlen;
          if (k >= dataset_dim) break;
          dl_buff[e].load = ((LOAD_T*)(dataset_ptr + k + (dataset_ld * seed_index)))[0];
        }
#pragma unroll
        for (uint32_t e = 0; e < nelem; e++) {
          const uint32_t k = (lane_id + (TEAM_SIZE * e)) * vlen;
          if (k >= dataset_dim) break;
#pragma unroll
          for (uint32_t v = 0; v < vlen; v++) {
            const uint32_t kv = k + v;
            // if (kv >= dataset_dim) break;
            DISTANCE_T diff = query_buffer[device::swizzling(kv)];
            diff -= spatial::knn::detail::utils::mapping<float>{}(dl_buff[e].data[v]);
            norm2 += diff * diff;
          }
        }
      }
      // Reduce the partial distance across the team's lanes.
      for (uint32_t offset = TEAM_SIZE / 2; offset > 0; offset >>= 1) {
        norm2 += __shfl_xor_sync(0xffffffff, norm2, offset);
      }
      if (valid_i && (norm2 < best_norm2_team_local)) {
        best_norm2_team_local = norm2;
        best_index_team_local = seed_index;
      }
    }
    // Team leader records the winner (or a max-value sentinel if the node was
    // already in the visited hashmap).
    if (valid_i && (threadIdx.x % TEAM_SIZE == 0)) {
      if (hashmap::insert(visited_hash_ptr, hash_bitlen, best_index_team_local)) {
        result_distances_ptr[i] = best_norm2_team_local;
        result_indices_ptr[i]   = best_index_team_local;
      } else {
        result_distances_ptr[i] = utils::get_max_value<DISTANCE_T>();
        result_indices_ptr[i]   = utils::get_max_value<INDEX_T>();
      }
    }
  }
}

/**
 * Expand the current parents: gather their graph neighbors, deduplicate them
 * against the visited hashmap, then compute the squared L2 distance from the
 * query to every surviving child (one TEAM_SIZE-thread team per child).
 * Duplicated/invalid children get max-value sentinels for index and distance.
 */
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          unsigned MAX_N_FRAGS,
          class LOAD_T,
          class DATA_T,
          class DISTANCE_T,
          class INDEX_T>
_RAFT_DEVICE void compute_distance_to_child_nodes(INDEX_T* const result_child_indices_ptr,
                                                  DISTANCE_T* const result_child_distances_ptr,
                                                  // query
                                                  const float* const query_buffer,
                                                  // [dataset_dim, dataset_size]
                                                  const DATA_T* const dataset_ptr,
                                                  const std::size_t dataset_dim,
                                                  const std::size_t dataset_ld,
                                                  // [knn_k, dataset_size]
                                                  const INDEX_T* const knn_graph,
                                                  const std::uint32_t knn_k,
                                                  // hashmap
                                                  INDEX_T* const visited_hashmap_ptr,
                                                  const std::uint32_t hash_bitlen,
                                                  const INDEX_T* const parent_indices,
                                                  const INDEX_T* const internal_topk_list,
                                                  const std::uint32_t search_width)
{
  constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
  const INDEX_T invalid_index        = utils::get_max_value<INDEX_T>();

  // Read child indices of parents from knn graph and check if the distance
  // computation is necessary.
  for (uint32_t i = threadIdx.x; i < knn_k * search_width; i += blockDim.x) {
    const INDEX_T smem_parent_id = parent_indices[i / knn_k];
    INDEX_T child_id             = invalid_index;
    if (smem_parent_id != invalid_index) {
      // Strip the MSB flag bit from the stored parent entry to recover the
      // dataset index.
      const auto parent_id = internal_topk_list[smem_parent_id] & ~index_msb_1_mask;
      child_id             = knn_graph[(i % knn_k) + ((uint64_t)knn_k * parent_id)];
    }
    if (child_id != invalid_index) {
      if (hashmap::insert(visited_hashmap_ptr, hash_bitlen, child_id) == 0) {
        child_id = invalid_index;  // already visited
      }
    }
    result_child_indices_ptr[i] = child_id;
  }

  constexpr unsigned vlen  = get_vlen<LOAD_T, DATA_T>();
  constexpr unsigned nelem = (MAX_DATASET_DIM + (TEAM_SIZE * vlen) - 1) / (TEAM_SIZE * vlen);
  const unsigned lane_id   = threadIdx.x % TEAM_SIZE;
  // [Notice]
  // Loading the query vector here from shared memory into registers reduces
  // shared memory traffic. However, register usage increases. The
  // MAX_N_FRAGS below is used as the threshold to enable or disable this,
  // but the appropriate value should be discussed.
  constexpr unsigned N_FRAGS = (MAX_DATASET_DIM + TEAM_SIZE - 1) / TEAM_SIZE;
  float query_frags[N_FRAGS];
  if (N_FRAGS <= MAX_N_FRAGS) {
    // Pre-load query vectors into registers when register usage is not too large.
#pragma unroll
    for (unsigned e = 0; e < nelem; e++) {
      const unsigned k = (lane_id + (TEAM_SIZE * e)) * vlen;
      // if (k >= dataset_dim) break;
#pragma unroll
      for (unsigned v = 0; v < vlen; v++) {
        const unsigned kv = k + v;
        const unsigned ev = (vlen * e) + v;
        query_frags[ev]   = query_buffer[device::swizzling(kv)];
      }
    }
  }
  __syncthreads();

  // Compute the distance to child nodes
  // Iteration count rounded up so all teams in a warp loop together (keeps
  // the warp shuffles below convergent).
  std::uint32_t max_i = knn_k * search_width;
  if (max_i % (32 / TEAM_SIZE)) { max_i += (32 / TEAM_SIZE) - (max_i % (32 / TEAM_SIZE)); }
  for (std::uint32_t tid = threadIdx.x; tid < max_i * TEAM_SIZE; tid += blockDim.x) {
    const auto i       = tid / TEAM_SIZE;
    const bool valid_i = (i < (knn_k * search_width));
    INDEX_T child_id   = invalid_index;
    if (valid_i) { child_id = result_child_indices_ptr[i]; }

    DISTANCE_T norm2 = 0.0;
    struct data_load_t<LOAD_T, DATA_T, vlen> dl_buff[nelem];
    if (child_id != invalid_index) {
#pragma unroll
      for (unsigned e = 0; e < nelem; e++) {
        const unsigned k = (lane_id + (TEAM_SIZE * e)) * vlen;
        if (k >= dataset_dim) break;
        dl_buff[e].load = ((LOAD_T*)(dataset_ptr + k + (dataset_ld * child_id)))[0];
      }
#pragma unroll
      for (unsigned e = 0; e < nelem; e++) {
        const unsigned k = (lane_id + (TEAM_SIZE * e)) * vlen;
        if (k >= dataset_dim) break;
#pragma unroll
        for (unsigned v = 0; v < vlen; v++) {
          DISTANCE_T diff;
          if (N_FRAGS <= MAX_N_FRAGS) {
            const unsigned ev = (vlen * e) + v;
            diff              = query_frags[ev];
          } else {
            const unsigned kv = k + v;
            diff              = query_buffer[device::swizzling(kv)];
          }
          diff -= spatial::knn::detail::utils::mapping<float>{}(dl_buff[e].data[v]);
          norm2 += diff * diff;
        }
      }
    }
    // Reduce the partial distance across the team's lanes.
    for (unsigned offset = TEAM_SIZE / 2; offset > 0; offset >>= 1) {
      norm2 += __shfl_xor_sync(0xffffffff, norm2, offset);
    }

    // Store the distance
    if (valid_i && (threadIdx.x % TEAM_SIZE == 0)) {
      if (child_id != invalid_index) {
        result_child_distances_ptr[i] = norm2;
      } else {
        result_child_distances_ptr[i] = utils::get_max_value<DISTANCE_T>();
      }
    }
  }
}

}  // namespace device
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_single_cta_kernel-inl.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <memory>
#include <numeric>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_properties.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/sample_filter_types.hpp>
#include <raft/spatial/knn/detail/ann_utils.cuh>
#include <rmm/device_uvector.hpp>
#include <vector>

#include "bitonic.hpp"
#include "compute_distance.hpp"
#include "device_common.hpp"
#include "hashmap.hpp"
#include "search_plan.cuh"
#include "topk_by_radix.cuh"
#include "topk_for_cagra/topk_core.cuh"  // TODO replace with raft topk
#include "utils.hpp"
#include <raft/core/logger.hpp>
#include <raft/util/cuda_rt_essentials.hpp>
#include <raft/util/cudart_utils.hpp>  // RAFT_CUDA_TRY_NOT_THROW is used TODO(tfeher): consider moving this to cuda_rt_essentials.hpp

namespace raft::neighbors::cagra::detail {
namespace single_cta_search {

// #define _CLK_BREAKDOWN

// Picks up to `search_width` unvisited entries from the internal top-k list to
// serve as the next set of parents to expand. Runs on the first warp only.
// Selected entries get their MSB set in `internal_topk_indices` to mark them as
// used; if no unvisited entry remains, `*terminate_flag` is raised.
template <unsigned TOPK_BY_BITONIC_SORT, class INDEX_T>
__device__ void pickup_next_parents(std::uint32_t* const terminate_flag,
                                    INDEX_T* const next_parent_indices,
                                    INDEX_T* const internal_topk_indices,
                                    const std::size_t internal_topk_size,
                                    const std::size_t dataset_size,
                                    const std::uint32_t search_width)
{
  constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
  //
  if (threadIdx.x >= 32) return;

  for (std::uint32_t i = threadIdx.x; i < search_width; i += 32) {
    next_parent_indices[i] = utils::get_max_value<INDEX_T>();
  }
  // Round up to a multiple of 32 so all warp lanes take the same loop trips
  // (required for the __ballot_sync below).
  std::uint32_t itopk_max = internal_topk_size;
  if (itopk_max % 32) { itopk_max += 32 - (itopk_max % 32); }
  std::uint32_t num_new_parents = 0;
  for (std::uint32_t j = threadIdx.x; j < itopk_max; j += 32) {
    std::uint32_t jj = j;
    // When the bitonic-sort top-k path is used, the buffer is stored swizzled.
    if (TOPK_BY_BITONIC_SORT) { jj = device::swizzling(j); }
    INDEX_T index;
    int new_parent = 0;
    if (j < internal_topk_size) {
      index = internal_topk_indices[jj];
      if ((index & index_msb_1_mask) == 0) {  // check if most significant bit is set
        new_parent = 1;
      }
    }
    const std::uint32_t ballot_mask = __ballot_sync(0xffffffff, new_parent);
    if (new_parent) {
      // Rank of this lane among the lanes that found a new parent this trip.
      const auto i = __popc(ballot_mask & ((1 << threadIdx.x) - 1)) + num_new_parents;
      if (i < search_width) {
        next_parent_indices[i] = jj;
        // set most significant bit as used node
        internal_topk_indices[jj] |= index_msb_1_mask;
      }
    }
    num_new_parents += __popc(ballot_mask);
    if (num_new_parents >= search_width) { break; }
  }
  if (threadIdx.x == 0 && (num_new_parents == 0)) { *terminate_flag = 1; }
}

// Sorts the candidate buffer in-place with a warp-level bitonic sort so that
// the best `num_itopk` candidates end up (swizzled) at the front. Single-warp
// path when MULTI_WARPS == 0; two-warp split-sort-then-merge path otherwise.
template <unsigned MAX_CANDIDATES, class IdxT = void>
__device__ inline void topk_by_bitonic_sort_1st(float* candidate_distances,  // [num_candidates]
                                                IdxT* candidate_indices,     // [num_candidates]
                                                const std::uint32_t num_candidates,
                                                const std::uint32_t num_itopk,
                                                unsigned MULTI_WARPS = 0)
{
  const unsigned lane_id = threadIdx.x % 32;
  const unsigned warp_id = threadIdx.x / 32;
  if (MULTI_WARPS == 0) {
    if (warp_id > 0) { return; }
    constexpr unsigned N = (MAX_CANDIDATES + 31) / 32;
    float key[N];
    IdxT val[N];
    /* Candidates -> Reg */
    for (unsigned i = 0; i < N; i++) {
      unsigned j = lane_id + (32 * i);
      if (j < num_candidates) {
        key[i] = candidate_distances[j];
        val[i] = candidate_indices[j];
      } else {
        // Pad with +inf-like sentinels so padding sorts to the back.
        key[i] = utils::get_max_value<float>();
        val[i] = utils::get_max_value<IdxT>();
      }
    }
    /* Sort */
    bitonic::warp_sort<float, IdxT, N>(key, val);
    /* Reg -> Temp_itopk */
    for (unsigned i = 0; i < N; i++) {
      unsigned j = (N * lane_id) + i;
      if (j < num_candidates && j < num_itopk) {
        candidate_distances[device::swizzling(j)] = key[i];
        candidate_indices[device::swizzling(j)]   = val[i];
      }
    }
  } else {
    // Use two warps (64 threads)
    constexpr unsigned max_candidates_per_warp = (MAX_CANDIDATES + 1) / 2;
    constexpr unsigned N                       = (max_candidates_per_warp + 31) / 32;
    float key[N];
    IdxT val[N];
    if (warp_id < 2) {
      /* Candidates -> Reg */
      for (unsigned i = 0; i < N; i++) {
        unsigned jl = lane_id + (32 * i);
        unsigned j  = jl + (max_candidates_per_warp * warp_id);
        if (j < num_candidates) {
          key[i] = candidate_distances[j];
          val[i] = candidate_indices[j];
        } else {
          key[i] = utils::get_max_value<float>();
          val[i] = utils::get_max_value<IdxT>();
        }
      }
      /* Sort */
      bitonic::warp_sort<float, IdxT, N>(key, val);
      /* Reg -> Temp_candidates */
      for (unsigned i = 0; i < N; i++) {
        unsigned jl = (N * lane_id) + i;
        unsigned j  = jl + (max_candidates_per_warp * warp_id);
        if (j < num_candidates && jl < num_itopk) {
          candidate_distances[device::swizzling(j)] = key[i];
          candidate_indices[device::swizzling(j)]   = val[i];
        }
      }
    }
    __syncthreads();

    unsigned num_warps_used = (num_itopk + max_candidates_per_warp - 1) / max_candidates_per_warp;
    if (warp_id < num_warps_used) {
      /* Temp_candidates -> Reg */
      // Each lane compares its element against the mirrored element of the
      // other half and keeps the smaller (warp 0) / larger (warp 1) one,
      // forming a bitonic sequence for the merge below.
      for (unsigned i = 0; i < N; i++) {
        unsigned jl = (N * lane_id) + i;
        unsigned kl = max_candidates_per_warp - 1 - jl;
        unsigned j  = jl + (max_candidates_per_warp * warp_id);
        unsigned k  = MAX_CANDIDATES - 1 - j;
        if (j >= num_candidates || k >= num_candidates || kl >= num_itopk) continue;
        float temp_key = candidate_distances[device::swizzling(k)];
        if (key[i] == temp_key) continue;
        if ((warp_id == 0) == (key[i] > temp_key)) {
          key[i] = temp_key;
          val[i] = candidate_indices[device::swizzling(k)];
        }
      }
    }
    if (num_warps_used > 1) { __syncthreads(); }
    if (warp_id < num_warps_used) {
      /* Merge */
      bitonic::warp_merge<float, IdxT, N>(key, val, 32);
      /* Reg -> Temp_itopk */
      for (unsigned i = 0; i < N; i++) {
        unsigned jl = (N * lane_id) + i;
        unsigned j  = jl + (max_candidates_per_warp * warp_id);
        if (j < num_candidates && j < num_itopk) {
          candidate_distances[device::swizzling(j)] = key[i];
          candidate_indices[device::swizzling(j)]   = val[i];
        }
      }
    }
    if (num_warps_used > 1) { __syncthreads(); }
  }
}

// Merges the (already sorted) candidate buffer into the running internal
// top-k buffer using bitonic merge. `first` indicates the itopk buffer is not
// yet sorted (first iteration). Single-warp path or a multi-warp path that
// tracks merge "turning points" in `work_buf`.
template <unsigned MAX_ITOPK, class IdxT = void>
__device__ inline void topk_by_bitonic_sort_2nd(float* itopk_distances,  // [num_itopk]
                                                IdxT* itopk_indices,     // [num_itopk]
                                                const std::uint32_t num_itopk,
                                                float* candidate_distances,  // [num_candidates]
                                                IdxT* candidate_indices,     // [num_candidates]
                                                const std::uint32_t num_candidates,
                                                std::uint32_t* work_buf,
                                                const bool first,
                                                unsigned MULTI_WARPS = 0)
{
  const unsigned lane_id = threadIdx.x % 32;
  const unsigned warp_id = threadIdx.x / 32;
  if (MULTI_WARPS == 0) {
    if (warp_id > 0) { return; }
    constexpr unsigned N = (MAX_ITOPK + 31) / 32;
    float key[N];
    IdxT val[N];
    if (first) {
      /* Load itopk results */
      for (unsigned i = 0; i < N; i++) {
        unsigned j = lane_id + (32 * i);
        if (j < num_itopk) {
          key[i] = itopk_distances[j];
          val[i] = itopk_indices[j];
        } else {
          key[i] = utils::get_max_value<float>();
          val[i] = utils::get_max_value<IdxT>();
        }
      }
      /* Warp Sort */
      bitonic::warp_sort<float, IdxT, N>(key, val);
    } else {
      /* Load itopk results */
      // Already sorted from a previous call; stored swizzled.
      for (unsigned i = 0; i < N; i++) {
        unsigned j = (N * lane_id) + i;
        if (j < num_itopk) {
          key[i] = itopk_distances[device::swizzling(j)];
          val[i] = itopk_indices[device::swizzling(j)];
        } else {
          key[i] = utils::get_max_value<float>();
          val[i] = utils::get_max_value<IdxT>();
        }
      }
    }
    /* Merge candidates */
    // Compare element j of itopk with mirrored candidate (MAX_ITOPK-1-j) and
    // keep the smaller, yielding a bitonic sequence for warp_merge.
    for (unsigned i = 0; i < N; i++) {
      unsigned j = (N * lane_id) + i;  // [0:MAX_ITOPK-1]
      unsigned k = MAX_ITOPK - 1 - j;
      if (k >= num_itopk || k >= num_candidates) continue;
      float candidate_key = candidate_distances[device::swizzling(k)];
      if (key[i] > candidate_key) {
        key[i] = candidate_key;
        val[i] = candidate_indices[device::swizzling(k)];
      }
    }
    /* Warp Merge */
    bitonic::warp_merge<float, IdxT, N>(key, val, 32);
    /* Store new itopk results */
    for (unsigned i = 0; i < N; i++) {
      unsigned j = (N * lane_id) + i;
      if (j < num_itopk) {
        itopk_distances[device::swizzling(j)] = key[i];
        itopk_indices[device::swizzling(j)]   = val[i];
      }
    }
  } else {
    // Use two warps (64 threads) or more
    constexpr unsigned max_itopk_per_warp = (MAX_ITOPK + 1) / 2;
    constexpr unsigned N                  = (max_itopk_per_warp + 31) / 32;
    float key[N];
    IdxT val[N];
    if (first) {
      /* Load itopk results (not sorted) */
      if (warp_id < 2) {
        for (unsigned i = 0; i < N; i++) {
          unsigned j = lane_id + (32 * i) + (max_itopk_per_warp * warp_id);
          if (j < num_itopk) {
            key[i] = itopk_distances[j];
            val[i] = itopk_indices[j];
          } else {
            key[i] = utils::get_max_value<float>();
            val[i] = utils::get_max_value<IdxT>();
          }
        }
        /* Warp Sort */
        bitonic::warp_sort<float, IdxT, N>(key, val);
        /* Store intermediate results */
        for (unsigned i = 0; i < N; i++) {
          unsigned j = (N * threadIdx.x) + i;
          if (j >= num_itopk) continue;
          itopk_distances[device::swizzling(j)] = key[i];
          itopk_indices[device::swizzling(j)]   = val[i];
        }
      }
      __syncthreads();
      if (warp_id < 2) {
        /* Load intermediate results */
        for (unsigned i = 0; i < N; i++) {
          unsigned j = (N * threadIdx.x) + i;
          unsigned k = MAX_ITOPK - 1 - j;
          if (k >= num_itopk) continue;
          float temp_key = itopk_distances[device::swizzling(k)];
          if (key[i] == temp_key) continue;
          if ((warp_id == 0) == (key[i] > temp_key)) {
            key[i] = temp_key;
            val[i] = itopk_indices[device::swizzling(k)];
          }
        }
        /* Warp Merge */
        bitonic::warp_merge<float, IdxT, N>(key, val, 32);
      }
      __syncthreads();
      /* Store itopk results (sorted) */
      if (warp_id < 2) {
        for (unsigned i = 0; i < N; i++) {
          unsigned j = (N * threadIdx.x) + i;
          if (j >= num_itopk) continue;
          itopk_distances[device::swizzling(j)] = key[i];
          itopk_indices[device::swizzling(j)]   = val[i];
        }
      }
    }
    const uint32_t num_itopk_div2 = num_itopk / 2;
    if (threadIdx.x < 3) {
      // work_buf is used to obtain turning points in 1st and 2nd half of itopk after merge.
      work_buf[threadIdx.x] = num_itopk_div2;
    }
    __syncthreads();

    // Merge candidates (using whole threads)
    for (unsigned k = threadIdx.x; k < min(num_candidates, num_itopk); k += blockDim.x) {
      const unsigned j          = num_itopk - 1 - k;
      const float itopk_key     = itopk_distances[device::swizzling(j)];
      const float candidate_key = candidate_distances[device::swizzling(k)];
      if (itopk_key > candidate_key) {
        itopk_distances[device::swizzling(j)] = candidate_key;
        itopk_indices[device::swizzling(j)]   = candidate_indices[device::swizzling(k)];
        // Record the smallest modified position in each half (the turning
        // point below which the half is still sorted).
        if (j < num_itopk_div2) {
          atomicMin(work_buf + 2, j);
        } else {
          atomicMin(work_buf + 1, j - num_itopk_div2);
        }
      }
    }
    __syncthreads();

    // Merge 1st and 2nd half of itopk (using whole threads)
    for (unsigned j = threadIdx.x; j < num_itopk_div2; j += blockDim.x) {
      const unsigned k = j + num_itopk_div2;
      float key_0      = itopk_distances[device::swizzling(j)];
      float key_1      = itopk_distances[device::swizzling(k)];
      if (key_0 > key_1) {
        itopk_distances[device::swizzling(j)] = key_1;
        itopk_distances[device::swizzling(k)] = key_0;
        IdxT val_0                            = itopk_indices[device::swizzling(j)];
        IdxT val_1                            = itopk_indices[device::swizzling(k)];
        itopk_indices[device::swizzling(j)]   = val_1;
        itopk_indices[device::swizzling(k)]   = val_0;
        atomicMin(work_buf + 0, j);
      }
    }
    if (threadIdx.x == blockDim.x - 1) {
      if (work_buf[2] < num_itopk_div2) { work_buf[1] = work_buf[2]; }
    }
    __syncthreads();
    // if ((blockIdx.x == 0) && (threadIdx.x == 0)) {
    //   RAFT_LOG_DEBUG( "work_buf: %u, %u, %u\n", work_buf[0], work_buf[1], work_buf[2] );
    // }

    // Warp-0 merges 1st half of itopk, warp-1 does 2nd half.
    if (warp_id < 2) {
      // Load intermediate itopk results
      const uint32_t turning_point = work_buf[warp_id];  // turning_point <= num_itopk_div2
      for (unsigned i = 0; i < N; i++) {
        unsigned k = num_itopk;
        unsigned j = (N * lane_id) + i;
        if (j < turning_point) {
          k = j + (num_itopk_div2 * warp_id);
        } else if (j >= (MAX_ITOPK / 2 - num_itopk_div2)) {
          j -= (MAX_ITOPK / 2 - num_itopk_div2);
          if ((turning_point <= j) && (j < num_itopk_div2)) { k = j + (num_itopk_div2 * warp_id); }
        }
        if (k < num_itopk) {
          key[i] = itopk_distances[device::swizzling(k)];
          val[i] = itopk_indices[device::swizzling(k)];
        } else {
          key[i] = utils::get_max_value<float>();
          val[i] = utils::get_max_value<IdxT>();
        }
      }
      /* Warp Merge */
      bitonic::warp_merge<float, IdxT, N>(key, val, 32);
      /* Store new itopk results */
      for (unsigned i = 0; i < N; i++) {
        const unsigned j = (N * lane_id) + i;
        if (j < num_itopk_div2) {
          unsigned k                            = j + (num_itopk_div2 * warp_id);
          itopk_distances[device::swizzling(k)] = key[i];
          itopk_indices[device::swizzling(k)]   = val[i];
        }
      }
    }
  }
}

// Convenience wrapper: sort the candidate buffer, then merge it into the
// internal top-k buffer.
template <unsigned MAX_ITOPK, unsigned MAX_CANDIDATES, class IdxT>
__device__ void topk_by_bitonic_sort(float* itopk_distances,  // [num_itopk]
                                     IdxT* itopk_indices,     // [num_itopk]
                                     const std::uint32_t num_itopk,
                                     float* candidate_distances,  // [num_candidates]
                                     IdxT* candidate_indices,     // [num_candidates]
                                     const std::uint32_t num_candidates,
                                     std::uint32_t* work_buf,
                                     const bool first,
                                     const unsigned MULTI_WARPS_1,
                                     const unsigned MULTI_WARPS_2)
{
  // The results in candidate_distances/indices are sorted by bitonic sort.
  topk_by_bitonic_sort_1st<MAX_CANDIDATES, IdxT>(
    candidate_distances, candidate_indices, num_candidates, num_itopk, MULTI_WARPS_1);

  // The results sorted above are merged with the internal intermediate top-k
  // results so far using bitonic merge.
topk_by_bitonic_sort_2nd<MAX_ITOPK, IdxT>(itopk_distances,
                                            itopk_indices,
                                            num_itopk,
                                            candidate_distances,
                                            candidate_indices,
                                            num_candidates,
                                            work_buf,
                                            first,
                                            MULTI_WARPS_2);
}

// Re-inserts the (unflagged) internal top-k indices into the hashmap after it
// has been reset. Threads below `first_tid` skip this so they can do other
// work concurrently.
template <class INDEX_T>
__device__ inline void hashmap_restore(INDEX_T* const hashmap_ptr,
                                       const size_t hashmap_bitlen,
                                       const INDEX_T* itopk_indices,
                                       const uint32_t itopk_size,
                                       const uint32_t first_tid = 0)
{
  constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
  if (threadIdx.x < first_tid) return;
  for (unsigned i = threadIdx.x - first_tid; i < itopk_size; i += blockDim.x - first_tid) {
    auto key = itopk_indices[i] & ~index_msb_1_mask;  // clear most significant bit
    hashmap::insert(hashmap_ptr, hashmap_bitlen, key);
  }
}

// Block-strided fill of `count` elements with `fill`.
template <class T, unsigned BLOCK_SIZE>
__device__ inline void set_value_device(T* const ptr, const T fill, const std::uint32_t count)
{
  for (std::uint32_t i = threadIdx.x; i < count; i += BLOCK_SIZE) {
    ptr[i] = fill;
  }
}

// One query one thread block
//
// Single-CTA CAGRA search kernel: each thread block (indexed by blockIdx.y)
// processes one query. The loop alternates between (1) top-k selection over
// the result buffer (bitonic sort or radix sort depending on
// TOPK_BY_BITONIC_SORT), (2) picking unvisited parents, and (3) computing
// distances to the parents' graph neighbors, until `terminate_flag` is set or
// `max_iteration` is reached. Results for this query are written to
// result_indices_ptr/result_distances_ptr.
template <unsigned TEAM_SIZE,
          unsigned MAX_ITOPK,
          unsigned MAX_CANDIDATES,
          unsigned TOPK_BY_BITONIC_SORT,
          unsigned MAX_DATASET_DIM,
          class DATA_T,
          class DISTANCE_T,
          class INDEX_T,
          class SAMPLE_FILTER_T>
__launch_bounds__(1024, 1) RAFT_KERNEL
  search_kernel(INDEX_T* const result_indices_ptr,       // [num_queries, top_k]
                DISTANCE_T* const result_distances_ptr,  // [num_queries, top_k]
                const std::uint32_t top_k,
                const DATA_T* const dataset_ptr,  // [dataset_size, dataset_dim]
                const std::size_t dataset_dim,
                const std::size_t dataset_size,
                const std::size_t dataset_ld,     // stride of dataset
                const DATA_T* const queries_ptr,  // [num_queries, dataset_dim]
                const INDEX_T* const knn_graph,   // [dataset_size, graph_degree]
                const std::uint32_t graph_degree,
                const unsigned num_distilation,
                const uint64_t rand_xor_mask,
                const INDEX_T* seed_ptr,  // [num_queries, num_seeds]
                const uint32_t num_seeds,
                INDEX_T* const visited_hashmap_ptr,  // [num_queries, 1 << hash_bitlen]
                const std::uint32_t internal_topk,
                const std::uint32_t search_width,
                const std::uint32_t min_iteration,
                const std::uint32_t max_iteration,
                std::uint32_t* const num_executed_iterations,  // [num_queries]
                const std::uint32_t hash_bitlen,
                const std::uint32_t small_hash_bitlen,
                const std::uint32_t small_hash_reset_interval,
                SAMPLE_FILTER_T sample_filter)
{
  using LOAD_T        = device::LOAD_128BIT_T;
  const auto query_id = blockIdx.y;

#ifdef _CLK_BREAKDOWN
  // Per-phase clock64() accumulators, only compiled in for profiling builds.
  std::uint64_t clk_init                 = 0;
  std::uint64_t clk_compute_1st_distance = 0;
  std::uint64_t clk_topk                 = 0;
  std::uint64_t clk_reset_hash           = 0;
  std::uint64_t clk_pickup_parents       = 0;
  std::uint64_t clk_restore_hash         = 0;
  std::uint64_t clk_compute_distance     = 0;
  std::uint64_t clk_start;
#define _CLK_START() clk_start = clock64()
#define _CLK_REC(V) V += clock64() - clk_start;
#else
#define _CLK_START()
#define _CLK_REC(V)
#endif
  _CLK_START();

  extern __shared__ std::uint32_t smem[];

  // Layout of result_buffer
  // +----------------------+------------------------------+---------+
  // | internal_top_k       | neighbors of internal_top_k  | padding |
  // | <internal_topk_size> | <search_width * graph_degree> | upto 32 |
  // +----------------------+------------------------------+---------+
  // |<---  result_buffer_size  --->|
  std::uint32_t result_buffer_size    = internal_topk + (search_width * graph_degree);
  std::uint32_t result_buffer_size_32 = result_buffer_size;
  if (result_buffer_size % 32) { result_buffer_size_32 += 32 - (result_buffer_size % 32); }
  const auto small_hash_size = hashmap::get_size(small_hash_bitlen);

  // Carve the dynamic shared memory into the per-query working buffers.
  auto query_buffer          = reinterpret_cast<float*>(smem);
  auto result_indices_buffer = reinterpret_cast<INDEX_T*>(query_buffer + MAX_DATASET_DIM);
  auto result_distances_buffer =
    reinterpret_cast<DISTANCE_T*>(result_indices_buffer + result_buffer_size_32);
  auto visited_hash_buffer =
    reinterpret_cast<INDEX_T*>(result_distances_buffer + result_buffer_size_32);
  auto parent_list_buffer = reinterpret_cast<INDEX_T*>(visited_hash_buffer + small_hash_size);
  auto topk_ws            = reinterpret_cast<std::uint32_t*>(parent_list_buffer + search_width);
  auto terminate_flag     = reinterpret_cast<std::uint32_t*>(topk_ws + 3);
  auto smem_working_ptr   = reinterpret_cast<std::uint32_t*>(terminate_flag + 1);

  // A flag for filtering.
  auto filter_flag = terminate_flag;

  // Load the query into shared memory (swizzled layout), zero-padding up to
  // MAX_DATASET_DIM.
  const DATA_T* const query_ptr = queries_ptr + query_id * dataset_dim;
  for (unsigned i = threadIdx.x; i < MAX_DATASET_DIM; i += blockDim.x) {
    unsigned j = device::swizzling(i);
    if (i < dataset_dim) {
      query_buffer[j] = spatial::knn::detail::utils::mapping<float>{}(query_ptr[i]);
    } else {
      query_buffer[j] = 0.0;
    }
  }
  if (threadIdx.x == 0) {
    terminate_flag[0] = 0;
    topk_ws[0]        = ~0u;
  }

  // Init hashmap
  // When small_hash_bitlen is non-zero the visited hashmap lives in shared
  // memory; otherwise one per-query hashmap in global memory is used.
  INDEX_T* local_visited_hashmap_ptr;
  if (small_hash_bitlen) {
    local_visited_hashmap_ptr = visited_hash_buffer;
  } else {
    local_visited_hashmap_ptr = visited_hashmap_ptr + (hashmap::get_size(hash_bitlen) * query_id);
  }
  hashmap::init(local_visited_hashmap_ptr, hash_bitlen, 0);
  __syncthreads();
  _CLK_REC(clk_init);

  // compute distance to randomly selecting nodes
  _CLK_START();
  const INDEX_T* const local_seed_ptr = seed_ptr ? seed_ptr + (num_seeds * query_id) : nullptr;
  device::compute_distance_to_random_nodes<TEAM_SIZE, MAX_DATASET_DIM, LOAD_T>(
    result_indices_buffer,
    result_distances_buffer,
    query_buffer,
    dataset_ptr,
    dataset_dim,
    dataset_size,
    dataset_ld,
    result_buffer_size,
    num_distilation,
    rand_xor_mask,
    local_seed_ptr,
    num_seeds,
    local_visited_hashmap_ptr,
    hash_bitlen);
  __syncthreads();
  _CLK_REC(clk_compute_1st_distance);

  std::uint32_t iter = 0;
  while (1) {
    // sort
    if constexpr (TOPK_BY_BITONIC_SORT) {
      // [Notice]
      // It is good to use multiple warps in topk_by_bitonic_sort() when
      // batch size is small (short-latency), but it might not be always good
      // when batch size is large (high-throughput).
      // topk_by_bitonic_sort() consists of two operations:
      // if MAX_CANDIDATES is greater than 128, the first operation uses two warps;
      // if MAX_ITOPK is greater than 256, the second operation used two warps.
      const unsigned multi_warps_1 = ((blockDim.x >= 64) && (MAX_CANDIDATES > 128)) ? 1 : 0;
      const unsigned multi_warps_2 = ((blockDim.x >= 64) && (MAX_ITOPK > 256)) ? 1 : 0;

      // reset small-hash table.
      if ((iter + 1) % small_hash_reset_interval == 0) {
        // Depending on the block size and the number of warps used in
        // topk_by_bitonic_sort(), determine which warps are used to reset
        // the small hash and whether they are performed in overlap with
        // topk_by_bitonic_sort().
        _CLK_START();
        unsigned hash_start_tid;
        if (blockDim.x == 32) {
          hash_start_tid = 0;
        } else if (blockDim.x == 64) {
          if (multi_warps_1 || multi_warps_2) {
            hash_start_tid = 0;
          } else {
            hash_start_tid = 32;
          }
        } else {
          if (multi_warps_1 || multi_warps_2) {
            hash_start_tid = 64;
          } else {
            hash_start_tid = 32;
          }
        }
        hashmap::init(local_visited_hashmap_ptr, hash_bitlen, hash_start_tid);
        _CLK_REC(clk_reset_hash);
      }

      // topk with bitonic sort
      _CLK_START();
      if (std::is_same<SAMPLE_FILTER_T,
                       raft::neighbors::filtering::none_cagra_sample_filter>::value ||
          *filter_flag == 0) {
        topk_by_bitonic_sort<MAX_ITOPK, MAX_CANDIDATES>(result_distances_buffer,
                                                        result_indices_buffer,
                                                        internal_topk,
                                                        result_distances_buffer + internal_topk,
                                                        result_indices_buffer + internal_topk,
                                                        search_width * graph_degree,
                                                        topk_ws,
                                                        (iter == 0),
                                                        multi_warps_1,
                                                        multi_warps_2);
        __syncthreads();
      } else {
        // Filtering invalidated some entries; re-sort the whole buffer.
        topk_by_bitonic_sort_1st<MAX_ITOPK + MAX_CANDIDATES>(
          result_distances_buffer,
          result_indices_buffer,
          internal_topk + search_width * graph_degree,
          internal_topk,
          false);
        if (threadIdx.x == 0) { *terminate_flag = 0; }
      }
      _CLK_REC(clk_topk);
    } else {
      _CLK_START();
      // topk with radix block sort
      topk_by_radix_sort<MAX_ITOPK, INDEX_T>{}(
        internal_topk,
        gridDim.x,
        result_buffer_size,
        reinterpret_cast<std::uint32_t*>(result_distances_buffer),
        result_indices_buffer,
        reinterpret_cast<std::uint32_t*>(result_distances_buffer),
        result_indices_buffer,
        nullptr,
        topk_ws,
        true,
        reinterpret_cast<std::uint32_t*>(smem_working_ptr));
      _CLK_REC(clk_topk);

      // reset small-hash table
      if ((iter + 1) % small_hash_reset_interval == 0) {
        _CLK_START();
        hashmap::init(local_visited_hashmap_ptr, hash_bitlen);
        _CLK_REC(clk_reset_hash);
      }
    }
    __syncthreads();

    if (iter + 1 == max_iteration) { break; }

    // pick up next parents
    if (threadIdx.x < 32) {
      _CLK_START();
      pickup_next_parents<TOPK_BY_BITONIC_SORT, INDEX_T>(terminate_flag,
                                                         parent_list_buffer,
                                                         result_indices_buffer,
                                                         internal_topk,
                                                         dataset_size,
                                                         search_width);
      _CLK_REC(clk_pickup_parents);
    }

    // restore small-hash table by putting internal-topk indices in it
    if ((iter + 1) % small_hash_reset_interval == 0) {
      // Warp 0 is busy in pickup_next_parents() above; start from tid 32 when
      // more than one warp is available.
      const unsigned first_tid = ((blockDim.x <= 32) ? 0 : 32);
      _CLK_START();
      hashmap_restore(
        local_visited_hashmap_ptr, hash_bitlen, result_indices_buffer, internal_topk, first_tid);
      _CLK_REC(clk_restore_hash);
    }
    __syncthreads();

    if (*terminate_flag && iter >= min_iteration) { break; }

    // compute the norms between child nodes and query node
    _CLK_START();
    constexpr unsigned max_n_frags = 16;
    device::compute_distance_to_child_nodes<TEAM_SIZE, MAX_DATASET_DIM, max_n_frags, LOAD_T>(
      result_indices_buffer + internal_topk,
      result_distances_buffer + internal_topk,
      query_buffer,
      dataset_ptr,
      dataset_dim,
      dataset_ld,
      knn_graph,
      graph_degree,
      local_visited_hashmap_ptr,
      hash_bitlen,
      parent_list_buffer,
      result_indices_buffer,
      search_width);
    __syncthreads();
    _CLK_REC(clk_compute_distance);

    // Filtering
    if constexpr (!std::is_same<SAMPLE_FILTER_T,
                                raft::neighbors::filtering::none_cagra_sample_filter>::value) {
      if (threadIdx.x == 0) { *filter_flag = 0; }
      __syncthreads();

      constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
      const INDEX_T invalid_index        = utils::get_max_value<INDEX_T>();

      for (unsigned p = threadIdx.x; p < search_width; p += blockDim.x) {
        if (parent_list_buffer[p] != invalid_index) {
          const auto parent_id = result_indices_buffer[parent_list_buffer[p]] & ~index_msb_1_mask;
          if (!sample_filter(query_id, parent_id)) {
            // If the parent must not be in the resulting top-k list, remove
            // from the parent list
            result_distances_buffer[parent_list_buffer[p]] = utils::get_max_value<DISTANCE_T>();
            result_indices_buffer[parent_list_buffer[p]]   = invalid_index;
            *filter_flag                                   = 1;
          }
        }
      }
      __syncthreads();
    }

    iter++;
  }

  // Post process for filtering
  // Invalidate all filtered-out nodes, then re-sort once so the surviving
  // best top_k entries are at the front.
  if constexpr (!std::is_same<SAMPLE_FILTER_T,
                              raft::neighbors::filtering::none_cagra_sample_filter>::value) {
    constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
    const INDEX_T invalid_index        = utils::get_max_value<INDEX_T>();

    for (unsigned i = threadIdx.x; i < internal_topk + search_width * graph_degree;
         i += blockDim.x) {
      const auto node_id = result_indices_buffer[i] & ~index_msb_1_mask;
      if (node_id != (invalid_index & ~index_msb_1_mask) && !sample_filter(query_id, node_id)) {
        result_distances_buffer[i] = utils::get_max_value<DISTANCE_T>();
        result_indices_buffer[i]   = invalid_index;
      }
    }

    __syncthreads();
    topk_by_bitonic_sort_1st<MAX_ITOPK + MAX_CANDIDATES>(
      result_distances_buffer,
      result_indices_buffer,
      internal_topk + search_width * graph_degree,
      top_k,
      false);
    __syncthreads();
  }

  // Write the final top_k results for this query to global memory.
  for (std::uint32_t i = threadIdx.x; i < top_k; i += blockDim.x) {
    unsigned j  = i + (top_k * query_id);
    unsigned ii = i;
    if (TOPK_BY_BITONIC_SORT) { ii = device::swizzling(i); }
    if (result_distances_ptr != nullptr) { result_distances_ptr[j] = result_distances_buffer[ii]; }
    constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;

    result_indices_ptr[j] =
      result_indices_buffer[ii] & ~index_msb_1_mask;  // clear most significant bit
  }
  if (threadIdx.x == 0 && num_executed_iterations != nullptr) {
    num_executed_iterations[query_id] = iter + 1;
  }
#ifdef _CLK_BREAKDOWN
  // NOTE(review): BLOCK_SIZE is not a template parameter of this kernel, so
  // this profiling-only branch likely fails to compile when _CLK_BREAKDOWN is
  // defined — confirm; blockDim.x - 1 appears to be the intended value.
  if ((threadIdx.x == 0 || threadIdx.x == BLOCK_SIZE - 1) && ((query_id * 3) % gridDim.y < 3)) {
    RAFT_LOG_DEBUG(
      "query, %d, thread, %d"
      ", init, %d"
      ", 1st_distance, %lu"
      ", topk, %lu"
      ", reset_hash, %lu"
      ", pickup_parents, %lu"
      ", restore_hash, %lu"
      ", distance, %lu"
      "\n",
      query_id,
      threadIdx.x,
      clk_init,
      clk_compute_1st_distance,
clk_topk, clk_reset_hash, clk_pickup_parents, clk_restore_hash, clk_compute_distance); } #endif } template <unsigned TEAM_SIZE, unsigned MX_DIM, typename T, typename IdxT, typename DistT, typename SAMPLE_FILTER_T> struct search_kernel_config { using kernel_t = decltype(&search_kernel<TEAM_SIZE, 64, 64, 0, MX_DIM, T, DistT, IdxT, SAMPLE_FILTER_T>); template <unsigned MAX_CANDIDATES, unsigned USE_BITONIC_SORT> static auto choose_search_kernel(unsigned itopk_size) -> kernel_t { if (itopk_size <= 64) { return search_kernel<TEAM_SIZE, 64, MAX_CANDIDATES, USE_BITONIC_SORT, MX_DIM, T, DistT, IdxT>; } else if (itopk_size <= 128) { return search_kernel<TEAM_SIZE, 128, MAX_CANDIDATES, USE_BITONIC_SORT, MX_DIM, T, DistT, IdxT, SAMPLE_FILTER_T>; } else if (itopk_size <= 256) { return search_kernel<TEAM_SIZE, 256, MAX_CANDIDATES, USE_BITONIC_SORT, MX_DIM, T, DistT, IdxT, SAMPLE_FILTER_T>; } else if (itopk_size <= 512) { return search_kernel<TEAM_SIZE, 512, MAX_CANDIDATES, USE_BITONIC_SORT, MX_DIM, T, DistT, IdxT, SAMPLE_FILTER_T>; } THROW("No kernel for parametels itopk_size %u, max_candidates %u", itopk_size, MAX_CANDIDATES); } static auto choose_itopk_and_mx_candidates(unsigned itopk_size, unsigned num_itopk_candidates, unsigned block_size) -> kernel_t { if (num_itopk_candidates <= 64) { // use bitonic sort based topk return choose_search_kernel<64, 1>(itopk_size); } else if (num_itopk_candidates <= 128) { return choose_search_kernel<128, 1>(itopk_size); } else if (num_itopk_candidates <= 256) { return choose_search_kernel<256, 1>(itopk_size); } else { // Radix-based topk is used constexpr unsigned max_candidates = 32; // to avoid build failure if (itopk_size <= 256) { return search_kernel<TEAM_SIZE, 256, max_candidates, 0, MX_DIM, T, DistT, IdxT>; } else if (itopk_size <= 512) { return search_kernel<TEAM_SIZE, 512, max_candidates, 0, MX_DIM, T, DistT, IdxT>; } } THROW("No kernel for parametels itopk_size %u, num_itopk_candidates %u", itopk_size, num_itopk_candidates); } }; 
// Host-side launcher for the single-CTA search: picks the kernel
// instantiation matching the runtime parameters, raises the kernel's dynamic
// shared-memory limit, and launches one CTA per query (gridDim.y ==
// num_queries). The argument order of the launch below must match the device
// kernel's parameter list exactly.
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          typename DATA_T,
          typename INDEX_T,
          typename DISTANCE_T,
          typename SAMPLE_FILTER_T>
void select_and_run(  // raft::resources const& res,
  raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
  raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
  INDEX_T* const topk_indices_ptr,       // [num_queries, topk]
  DISTANCE_T* const topk_distances_ptr,  // [num_queries, topk]
  const DATA_T* const queries_ptr,       // [num_queries, dataset_dim]
  const uint32_t num_queries,
  const INDEX_T* dev_seed_ptr,              // [num_queries, num_seeds]
  uint32_t* const num_executed_iterations,  // [num_queries,]
  uint32_t topk,
  uint32_t num_itopk_candidates,
  uint32_t block_size,  //
  uint32_t smem_size,
  int64_t hash_bitlen,
  INDEX_T* hashmap_ptr,
  size_t small_hash_bitlen,
  size_t small_hash_reset_interval,
  uint32_t num_random_samplings,
  uint64_t rand_xor_mask,
  uint32_t num_seeds,
  size_t itopk_size,
  size_t search_width,
  size_t min_iterations,
  size_t max_iterations,
  SAMPLE_FILTER_T sample_filter,
  cudaStream_t stream)
{
  auto kernel =
    search_kernel_config<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::
      choose_itopk_and_mx_candidates(itopk_size, num_itopk_candidates, block_size);
  // Opt in to more dynamic shared memory than the default limit if required.
  RAFT_CUDA_TRY(
    cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
  dim3 thread_dims(block_size, 1, 1);
  dim3 block_dims(1, num_queries, 1);  // one CTA per query on the y-axis
  RAFT_LOG_DEBUG(
    "Launching kernel with %u threads, %u block %u smem", block_size, num_queries, smem_size);
  kernel<<<block_dims, thread_dims, smem_size, stream>>>(topk_indices_ptr,
                                                         topk_distances_ptr,
                                                         topk,
                                                         dataset.data_handle(),
                                                         dataset.extent(1),
                                                         dataset.extent(0),
                                                         dataset.stride(0),
                                                         queries_ptr,
                                                         graph.data_handle(),
                                                         graph.extent(1),
                                                         num_random_samplings,
                                                         rand_xor_mask,
                                                         dev_seed_ptr,
                                                         num_seeds,
                                                         hashmap_ptr,
                                                         itopk_size,
                                                         search_width,
                                                         min_iterations,
                                                         max_iterations,
                                                         num_executed_iterations,
                                                         hash_bitlen,
                                                         small_hash_bitlen,
                                                         small_hash_reset_interval,
                                                         sample_filter);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

}  // namespace single_cta_search
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/cagra_search.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/core/resource/cuda_stream.hpp> #include <raft/neighbors/detail/ivf_pq_search.cuh> #include <raft/neighbors/sample_filter_types.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/core/device_mdspan.hpp> #include <raft/core/host_mdspan.hpp> #include <raft/core/nvtx.hpp> #include <raft/core/resource/detail/device_memory_resource.hpp> #include <raft/core/resources.hpp> #include <raft/neighbors/cagra_types.hpp> #include <rmm/cuda_stream_view.hpp> #include "factory.cuh" #include "search_plan.cuh" #include "search_single_cta.cuh" namespace raft::neighbors::cagra::detail { template <class CagraSampleFilterT> struct CagraSampleFilterWithQueryIdOffset { const uint32_t offset; CagraSampleFilterT filter; CagraSampleFilterWithQueryIdOffset(const uint32_t offset, const CagraSampleFilterT filter) : offset(offset), filter(filter) { } _RAFT_DEVICE auto operator()(const uint32_t query_id, const uint32_t sample_id) { return filter(query_id + offset, sample_id); } }; template <class CagraSampleFilterT> struct CagraSampleFilterT_Selector { using type = CagraSampleFilterWithQueryIdOffset<CagraSampleFilterT>; }; template <> struct CagraSampleFilterT_Selector<raft::neighbors::filtering::none_cagra_sample_filter> { using type = raft::neighbors::filtering::none_cagra_sample_filter; }; // A helper function to set a query id offset template <class CagraSampleFilterT> 
inline typename CagraSampleFilterT_Selector<CagraSampleFilterT>::type set_offset(
  CagraSampleFilterT filter, const uint32_t offset)
{
  typename CagraSampleFilterT_Selector<CagraSampleFilterT>::type new_filter(offset, filter);
  return new_filter;
}
// No-op filter specialization: it keeps no per-query state, so the offset is
// simply ignored.
template <>
inline
  typename CagraSampleFilterT_Selector<raft::neighbors::filtering::none_cagra_sample_filter>::type
  set_offset<raft::neighbors::filtering::none_cagra_sample_filter>(
    raft::neighbors::filtering::none_cagra_sample_filter filter, const uint32_t)
{
  return filter;
}

/**
 * @brief Search ANN using the constructed index.
 *
 * See the [build](#build) documentation for a usage example.
 *
 * @tparam T data element type
 * @tparam IdxT type of database vector indices
 * @tparam internal_IdxT during search we map IdxT to internal_IdxT, this way we do not need
 * separate kernels for int/uint.
 *
 * @param[in] res raft resources
 * @param[in] params configure the search
 * @param[in] index CAGRA constructed index
 * @param[in] queries a device matrix view to a row-major matrix [n_queries, index->dim()]
 * @param[out] neighbors a device matrix view to the indices of the neighbors in the source dataset
 * [n_queries, k]
 * @param[out] distances a device matrix view to the distances to the selected neighbors [n_queries,
 * k]
 * @param[in] sample_filter predicate deciding whether a (query, sample) pair may be returned
 */
template <typename T,
          typename internal_IdxT,
          typename CagraSampleFilterT,
          typename IdxT      = uint32_t,
          typename DistanceT = float>
void search_main(raft::resources const& res,
                 search_params params,
                 const index<T, IdxT>& index,
                 raft::device_matrix_view<const T, int64_t, row_major> queries,
                 raft::device_matrix_view<internal_IdxT, int64_t, row_major> neighbors,
                 raft::device_matrix_view<DistanceT, int64_t, row_major> distances,
                 CagraSampleFilterT sample_filter = CagraSampleFilterT())
{
  resource::detail::warn_non_pool_workspace(res, "raft::neighbors::cagra::search");
  RAFT_LOG_DEBUG("# dataset size = %lu, dim = %lu\n",
                 static_cast<size_t>(index.dataset().extent(0)),
                 static_cast<size_t>(index.dataset().extent(1)));
  RAFT_LOG_DEBUG("# query size = %lu, dim = %lu\n",
                 static_cast<size_t>(queries.extent(0)),
                 static_cast<size_t>(queries.extent(1)));
  RAFT_EXPECTS(queries.extent(1) == index.dim(), "Queries and index dim must match");
  const uint32_t topk = neighbors.extent(1);

  // max_queries == 0 means "process everything in one batch".
  if (params.max_queries == 0) { params.max_queries = queries.extent(0); }

  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "cagra::search(max_queries = %u, k = %u, dim = %zu)", params.max_queries, topk, index.dim());

  using CagraSampleFilterT_s = typename CagraSampleFilterT_Selector<CagraSampleFilterT>::type;
  std::unique_ptr<search_plan_impl<T, internal_IdxT, DistanceT, CagraSampleFilterT_s>> plan =
    factory<T, internal_IdxT, DistanceT, CagraSampleFilterT_s>::create(
      res, params, index.dim(), index.graph_degree(), topk);

  plan->check(neighbors.extent(1));

  RAFT_LOG_DEBUG("Cagra search");
  const uint32_t max_queries = plan->max_queries;
  const uint32_t query_dim   = queries.extent(1);

  // Run the plan batch by batch; qid is the global id of the batch's first
  // query, and output/seed pointers are advanced accordingly.
  for (unsigned qid = 0; qid < queries.extent(0); qid += max_queries) {
    const uint32_t n_queries = std::min<std::size_t>(max_queries, queries.extent(0) - qid);
    internal_IdxT* _topk_indices_ptr =
      reinterpret_cast<internal_IdxT*>(neighbors.data_handle()) + (topk * qid);
    DistanceT* _topk_distances_ptr = distances.data_handle() + (topk * qid);
    // todo(tfeher): one could keep distances optional and pass nullptr
    const T* _query_ptr = queries.data_handle() + (query_dim * qid);
    const internal_IdxT* _seed_ptr =
      plan->num_seeds > 0
        ? reinterpret_cast<const internal_IdxT*>(plan->dev_seed.data()) + (plan->num_seeds * qid)
        : nullptr;
    uint32_t* _num_executed_iterations = nullptr;

    auto dataset_internal =
      make_device_strided_matrix_view<const T, int64_t, row_major>(index.dataset().data_handle(),
                                                                   index.dataset().extent(0),
                                                                   index.dataset().extent(1),
                                                                   index.dataset().stride(0));
    auto graph_internal = raft::make_device_matrix_view<const internal_IdxT, int64_t, row_major>(
      reinterpret_cast<const internal_IdxT*>(index.graph().data_handle()),
      index.graph().extent(0),
      index.graph().extent(1));

    (*plan)(res,
            dataset_internal,
            graph_internal,
            _topk_indices_ptr,
            _topk_distances_ptr,
            _query_ptr,
            n_queries,
            _seed_ptr,
            _num_executed_iterations,
            topk,
            set_offset(sample_filter, qid));  // rebase batch-local query ids to global ids
  }

  static_assert(std::is_same_v<DistanceT, float>,
                "only float distances are supported at the moment");
  float* dist_out          = distances.data_handle();
  const DistanceT* dist_in = distances.data_handle();
  // We're converting the data from T to DistanceT during distance computation
  // and divide the values by kDivisor. Here we restore the original scale.
  constexpr float kScale = spatial::knn::detail::utils::config<T>::kDivisor /
                           spatial::knn::detail::utils::config<DistanceT>::kDivisor;
  ivf_pq::detail::postprocess_distances(dist_out,
                                        dist_in,
                                        index.metric(),
                                        distances.extent(0),
                                        distances.extent(1),
                                        kScale,
                                        resource::get_cuda_stream(res));
}

/** @} */  // end group cagra

}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_plan.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "hashmap.hpp"
#include <raft/core/resource/cuda_stream.hpp>
// #include "search_single_cta.cuh"
// #include "topk_for_cagra/topk_core.cuh"

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/cagra_types.hpp>
#include <raft/util/pow2_utils.cuh>

namespace raft::neighbors::cagra::detail {

// Base of the search plan: resolves the AUTO algorithm choice and derives
// max_dim / team_size from the dataset dimensionality.
struct search_plan_impl_base : public search_params {
  int64_t max_dim;       // smallest supported power-of-two >= dim (>= 128)
  int64_t dim;           // dataset dimensionality
  int64_t graph_degree;  // out-degree of the search graph
  uint32_t topk;         // number of neighbors requested per query
  search_plan_impl_base(search_params params, int64_t dim, int64_t graph_degree, uint32_t topk)
    : search_params(params), dim(dim), graph_degree(graph_degree), topk(topk)
  {
    set_max_dim_team(dim);
    if (algo == search_algo::AUTO) {
      // Heuristic: single-CTA needs enough queries to saturate the GPU;
      // otherwise spread each query over several CTAs.
      const size_t num_sm = raft::getMultiProcessorCount();
      if (itopk_size <= 512 && search_params::max_queries >= num_sm * 2lu) {
        algo = search_algo::SINGLE_CTA;
        RAFT_LOG_DEBUG("Auto strategy: selecting single-cta");
      } else {
        algo = search_algo::MULTI_CTA;
        RAFT_LOG_DEBUG("Auto strategy: selecting multi-cta");
      }
    }
  }

  // Round dim up to the next supported power of two and pick the matching
  // warp-subgroup ("team") size.
  void set_max_dim_team(int64_t dim)
  {
    max_dim = 128;
    while (max_dim < dim && max_dim <= 1024)
      max_dim *= 2;
    // To keep binary size in check we limit only one team size specialization for each max_dim.
    // TODO(tfeher): revise this decision.
    switch (max_dim) {
      case 128: team_size = 8; break;
      case 256: team_size = 16; break;
      case 512: team_size = 32; break;
      case 1024: team_size = 32; break;
      default: RAFT_LOG_DEBUG("Dataset dimension is too large (%lu)\n", dim);
    }
  }
};

// Holds the validated, adjusted search parameters plus the device buffers
// (visited-node hashmap, seeds, iteration counters) shared by the concrete
// single-/multi-CTA plan implementations.
template <class DATA_T, class INDEX_T, class DISTANCE_T, class SAMPLE_FILTER_T>
struct search_plan_impl : public search_plan_impl_base {
  int64_t hash_bitlen;               // log2 of the visited-node hash table size
  size_t small_hash_bitlen;          // >0 when the periodically-reset "small hash" is used
  size_t small_hash_reset_interval;  // iterations between small-hash resets
  size_t hashmap_size;               // total hashmap footprint in bytes
  uint32_t dataset_size;
  uint32_t result_buffer_size;
  uint32_t smem_size;
  uint32_t topk;
  uint32_t num_seeds;

  rmm::device_uvector<INDEX_T> hashmap;
  rmm::device_uvector<uint32_t> num_executed_iterations;  // device or managed?
  rmm::device_uvector<INDEX_T> dev_seed;

  search_plan_impl(raft::resources const& res,
                   search_params params,
                   int64_t dim,
                   int64_t graph_degree,
                   uint32_t topk)
    : search_plan_impl_base(params, dim, graph_degree, topk),
      hashmap(0, resource::get_cuda_stream(res)),
      num_executed_iterations(0, resource::get_cuda_stream(res)),
      dev_seed(0, resource::get_cuda_stream(res)),
      num_seeds(0)
  {
    adjust_search_params();
    check_params();
    calc_hashmap_params(res);
    set_max_dim_team(dim);
    num_executed_iterations.resize(max_queries, resource::get_cuda_stream(res));
    RAFT_LOG_DEBUG("# algo = %d", static_cast<int>(algo));
  }

  virtual ~search_plan_impl() {}

  // Executes one batch of queries; implemented by the derived plans.
  virtual void operator()(raft::resources const& res,
                          raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
                          raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
                          INDEX_T* const result_indices_ptr,       // [num_queries, topk]
                          DISTANCE_T* const result_distances_ptr,  // [num_queries, topk]
                          const DATA_T* const queries_ptr,         // [num_queries, dataset_dim]
                          const std::uint32_t num_queries,
                          const INDEX_T* dev_seed_ptr,  // [num_queries, num_seeds]
                          std::uint32_t* const num_executed_iterations,  // [num_queries]
                          uint32_t topk,
                          SAMPLE_FILTER_T sample_filter){};

  // Derives max_iterations when unset and rounds itopk_size up to a multiple
  // of 32 (a kernel requirement); only ever increases the user's values.
  void adjust_search_params()
  {
    uint32_t _max_iterations = max_iterations;
    if (max_iterations == 0) {
      if (algo == search_algo::MULTI_CTA) {
        _max_iterations = 1 + std::min(32 * 1.1, 32 + 10.0);  // TODO(anaruse)
      } else {
        _max_iterations =
          1 + std::min((itopk_size / search_width) * 1.1, (itopk_size / search_width) + 10.0);
      }
    }
    if (max_iterations < min_iterations) { _max_iterations = min_iterations; }
    if (max_iterations < _max_iterations) {
      RAFT_LOG_DEBUG(
        "# max_iterations is increased from %lu to %u.", max_iterations, _max_iterations);
      max_iterations = _max_iterations;
    }
    if (itopk_size % 32) {
      uint32_t itopk32 = itopk_size;
      itopk32 += 32 - (itopk_size % 32);
      RAFT_LOG_DEBUG("# internal_topk is increased from %lu to %u, as it must be multiple of 32.",
                     itopk_size,
                     itopk32);
      itopk_size = itopk32;
    }
  }

  // defines hash_bitlen, small_hash_bitlen, small_hash_reset interval, hash_size
  inline void calc_hashmap_params(raft::resources const& res)
  {
    // for multiple CTA search
    uint32_t mc_num_cta_per_query = 0;
    uint32_t mc_search_width      = 0;
    uint32_t mc_itopk_size        = 0;
    if (algo == search_algo::MULTI_CTA) {
      mc_itopk_size        = 32;
      mc_search_width      = 1;
      mc_num_cta_per_query = max(search_width, itopk_size / 32);
      RAFT_LOG_DEBUG("# mc_itopk_size: %u", mc_itopk_size);
      RAFT_LOG_DEBUG("# mc_search_width: %u", mc_search_width);
      RAFT_LOG_DEBUG("# mc_num_cta_per_query: %u", mc_num_cta_per_query);
    }

    // Determine hash size (bit length)
    hashmap_size              = 0;
    hash_bitlen               = 0;
    small_hash_bitlen         = 0;
    small_hash_reset_interval = 1024 * 1024;
    float max_fill_rate       = hashmap_max_fill_rate;
    // Single-pass "loop" used for early exit via break.
    while (hashmap_mode == hash_mode::AUTO || hashmap_mode == hash_mode::SMALL) {
      //
      // The small-hash reduces hash table size by initializing the hash table
      // for each iteration and re-registering only the nodes that should not be
      // re-visited in that iteration. Therefore, the size of small-hash should
      // be determined based on the internal topk size and the number of nodes
      // visited per iteration.
      //
      const auto max_visited_nodes = itopk_size + (search_width * graph_degree * 1);
      unsigned min_bitlen          = 8;   // 256
      unsigned max_bitlen          = 13;  // 8K
      if (min_bitlen < hashmap_min_bitlen) { min_bitlen = hashmap_min_bitlen; }
      hash_bitlen = min_bitlen;
      while (max_visited_nodes > hashmap::get_size(hash_bitlen) * max_fill_rate) {
        hash_bitlen += 1;
      }
      if (hash_bitlen > max_bitlen) {
        // Switch to normal hash if hashmap_mode is AUTO, otherwise exit.
        if (hashmap_mode == hash_mode::AUTO) {
          hash_bitlen = 0;
          break;
        } else {
          RAFT_FAIL(
            "small-hash cannot be used because the required hash size exceeds the limit (%u)",
            hashmap::get_size(max_bitlen));
        }
      }
      small_hash_bitlen = hash_bitlen;
      //
      // Since the hash table size is limited to a power of 2, the requirement,
      // the maximum fill rate, may be satisfied even if the frequency of hash
      // table reset is reduced to once every 2 or more iterations without
      // changing the hash table size. In that case, reduce the reset frequency.
      //
      small_hash_reset_interval = 1;
      while (1) {
        const auto max_visited_nodes =
          itopk_size + (search_width * graph_degree * (small_hash_reset_interval + 1));
        if (max_visited_nodes > hashmap::get_size(hash_bitlen) * max_fill_rate) { break; }
        small_hash_reset_interval += 1;
      }
      break;
    }
    if (hash_bitlen == 0) {
      //
      // The size of hash table is determined based on the maximum number of
      // nodes that may be visited before the search is completed and the
      // maximum fill rate of the hash table.
      //
      uint32_t max_visited_nodes = itopk_size + (search_width * graph_degree * max_iterations);
      if (algo == search_algo::MULTI_CTA) {
        max_visited_nodes = mc_itopk_size + (mc_search_width * graph_degree * max_iterations);
        max_visited_nodes *= mc_num_cta_per_query;
      }
      unsigned min_bitlen = 11;  // 2K
      if (min_bitlen < hashmap_min_bitlen) { min_bitlen = hashmap_min_bitlen; }
      hash_bitlen = min_bitlen;
      while (max_visited_nodes > hashmap::get_size(hash_bitlen) * max_fill_rate) {
        hash_bitlen += 1;
      }
      RAFT_EXPECTS(hash_bitlen <= 20, "hash_bitlen cannot be largen than 20 (1M)");
    }

    RAFT_LOG_DEBUG("# internal topK = %lu", itopk_size);
    RAFT_LOG_DEBUG("# parent size = %lu", search_width);
    RAFT_LOG_DEBUG("# min_iterations = %lu", min_iterations);
    RAFT_LOG_DEBUG("# max_iterations = %lu", max_iterations);
    RAFT_LOG_DEBUG("# max_queries = %lu", max_queries);
    RAFT_LOG_DEBUG("# hashmap mode = %s%s-%u",
                   (small_hash_bitlen > 0 ? "small-" : ""),
                   "hash",
                   hashmap::get_size(hash_bitlen));
    if (small_hash_bitlen > 0) {
      RAFT_LOG_DEBUG("# small_hash_reset_interval = %lu", small_hash_reset_interval);
    }
    hashmap_size = sizeof(INDEX_T) * max_queries * hashmap::get_size(hash_bitlen);
    RAFT_LOG_DEBUG("# hashmap size: %lu", hashmap_size);
    if (hashmap_size >= 1024 * 1024 * 1024) {
      RAFT_LOG_DEBUG(" (%.2f GiB)", (double)hashmap_size / (1024 * 1024 * 1024));
    } else if (hashmap_size >= 1024 * 1024) {
      RAFT_LOG_DEBUG(" (%.2f MiB)", (double)hashmap_size / (1024 * 1024));
    } else if (hashmap_size >= 1024) {
      RAFT_LOG_DEBUG(" (%.2f KiB)", (double)hashmap_size / (1024));
    }
  }

  virtual void check(const uint32_t topk)
  {
    // For single-CTA and multi kernel
    RAFT_EXPECTS(topk <= itopk_size, "topk must be smaller than itopk_size = %lu", itopk_size);
  }

  // Validates the user-facing parameters, collecting all violations into one
  // error message; also coerces SMALL hash mode to HASH where it is not
  // supported (filtering, multi-CTA).
  inline void check_params()
  {
    std::string error_message = "";

    if (itopk_size > 1024) {
      if (algo == search_algo::MULTI_CTA) {
      } else {
        error_message += std::string("- `internal_topk` (" + std::to_string(itopk_size) +
                                     ") must be smaller or equal to 1024");
      }
    }
    if (algo != search_algo::SINGLE_CTA && algo != search_algo::MULTI_CTA &&
        algo != search_algo::MULTI_KERNEL) {
      error_message += "An invalid kernel mode has been given: " + std::to_string((int)algo) + "";
    }
    if (team_size != 0 && team_size != 4 && team_size != 8 && team_size != 16 && team_size != 32) {
      error_message +=
        "`team_size` must be 0, 4, 8, 16 or 32. " + std::to_string(team_size) + " has been given.";
    }
    // NOTE(review): the check accepts 1024 but the message below says
    // "0, 64, 128, 256 or 512" - the message text should mention 1024 too.
    if (thread_block_size != 0 && thread_block_size != 64 && thread_block_size != 128 &&
        thread_block_size != 256 && thread_block_size != 512 && thread_block_size != 1024) {
      error_message += "`thread_block_size` must be 0, 64, 128, 256 or 512. " +
                       std::to_string(thread_block_size) + " has been given.";
    }
    if (hashmap_min_bitlen > 20) {
      error_message += "`hashmap_min_bitlen` must be equal to or smaller than 20. " +
                       std::to_string(hashmap_min_bitlen) + " has been given.";
    }
    if (hashmap_max_fill_rate < 0.1 || hashmap_max_fill_rate >= 0.9) {
      error_message +=
        "`hashmap_max_fill_rate` must be equal to or greater than 0.1 and smaller than 0.9. " +
        std::to_string(hashmap_max_fill_rate) + " has been given.";
    }
    if constexpr (!std::is_same<SAMPLE_FILTER_T,
                                raft::neighbors::filtering::none_cagra_sample_filter>::value) {
      if (hashmap_mode == hash_mode::SMALL) {
        error_message += "`SMALL` hash is not available when filtering";
      } else {
        hashmap_mode = hash_mode::HASH;
      }
    }
    if (algo == search_algo::MULTI_CTA) {
      if (hashmap_mode == hash_mode::SMALL) {
        error_message += "`small_hash` is not available when 'search_mode' is \"multi-cta\"";
      } else {
        hashmap_mode = hash_mode::HASH;
      }
    }

    if (error_message.length() != 0) { THROW("[CAGRA Error] %s", error_message.c_str()); }
  }
};

// template <class DATA_T, class DISTANCE_T, class INDEX_T>
// struct search_plan {
//   search_plan(raft::resources const& res,
//               search_params param,
//               int64_t dim,
//               int64_t graph_degree)
//     : plan(res, param, dim, graph_degree)
//   {
//   }
//   void check(uint32_t topk) { plan.check(topk); }

//   // private:
//   detail::search_plan_impl<DATA_T, DISTANCE_T, INDEX_T> plan;
// };

/** @} */  // end group cagra

}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/graph_core.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cassert>
#include <climits>
#include <cuda_fp16.h>
#include <float.h>
#include <iostream>
#include <memory>
#include <omp.h>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/spatial/knn/detail/ann_utils.cuh>
#include <random>
#include <sys/time.h>

#include <raft/util/bitonic_sort.cuh>
#include <raft/util/cuda_rt_essentials.hpp>

#include "utils.hpp"

namespace raft::neighbors::cagra::detail {
namespace graph {

// unnamed namespace to avoid multiple definition error
namespace {
// Wall-clock time in seconds, used for coarse timing of graph build phases.
inline double cur_time(void)
{
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return ((double)tv.tv_sec + (double)tv.tv_usec * 1e-6);
}

template <typename T>
__device__ inline void swap(T& val1, T& val2)
{
  T val0 = val1;
  val1   = val2;
  val2   = val0;
}

// Conditionally swaps (key, value) pairs to enforce the given order;
// returns true when a swap happened.
template <typename K, typename V>
__device__ inline bool swap_if_needed(K& key1, K& key2, V& val1, V& val2, bool ascending)
{
  if (key1 == key2) { return false; }
  if ((key1 > key2) == ascending) {
    swap<K>(key1, key2);
    swap<V>(val1, val2);
    return true;
  }
  return false;
}

// One warp per source node: computes the squared L2 distance from the node to
// each of its graph_degree neighbors (warp-parallel over dimensions, reduced
// with __shfl_xor_sync) and rewrites the node's neighbor list in ascending
// distance order using the warp-wide bitonic sort.
template <class DATA_T, class IdxT, int numElementsPerThread>
RAFT_KERNEL kern_sort(const DATA_T* const dataset,  // [dataset_chunk_size, dataset_dim]
                      const IdxT dataset_size,
                      const uint32_t dataset_dim,
                      IdxT* const knn_graph,  // [graph_chunk_size, graph_degree]
                      const uint32_t graph_size,
                      const uint32_t graph_degree)
{
  const IdxT srcNode = (blockDim.x * blockIdx.x + threadIdx.x) / raft::WarpSize;
  if (srcNode >= graph_size) { return; }

  const uint32_t lane_id = threadIdx.x % raft::WarpSize;

  float my_keys[numElementsPerThread];
  IdxT my_vals[numElementsPerThread];

  // Compute distance from a src node to its neighbors
  for (int k = 0; k < graph_degree; k++) {
    const IdxT dstNode = knn_graph[k + static_cast<uint64_t>(graph_degree) * srcNode];
    float dist         = 0.0;
    for (int d = lane_id; d < dataset_dim; d += raft::WarpSize) {
      float diff = spatial::knn::detail::utils::mapping<float>{}(
                     dataset[d + static_cast<uint64_t>(dataset_dim) * srcNode]) -
                   spatial::knn::detail::utils::mapping<float>{}(
                     dataset[d + static_cast<uint64_t>(dataset_dim) * dstNode]);
      dist += diff * diff;
    }
    // Warp-wide butterfly reduction of the partial sums.
    dist += __shfl_xor_sync(0xffffffff, dist, 1);
    dist += __shfl_xor_sync(0xffffffff, dist, 2);
    dist += __shfl_xor_sync(0xffffffff, dist, 4);
    dist += __shfl_xor_sync(0xffffffff, dist, 8);
    dist += __shfl_xor_sync(0xffffffff, dist, 16);
    if (lane_id == (k % raft::WarpSize)) {
      my_keys[k / raft::WarpSize] = dist;
      my_vals[k / raft::WarpSize] = dstNode;
    }
  }
  // Pad unused slots with sentinel values so the sort pushes them to the end.
  for (int k = graph_degree; k < raft::WarpSize * numElementsPerThread; k++) {
    if (lane_id == k % raft::WarpSize) {
      my_keys[k / raft::WarpSize] = utils::get_max_value<float>();
      my_vals[k / raft::WarpSize] = utils::get_max_value<IdxT>();
    }
  }

  // Sort by RAFT bitonic sort
  raft::util::bitonic<numElementsPerThread>(true).sort(my_keys, my_vals);

  // Update knn_graph
  for (int i = 0; i < numElementsPerThread; i++) {
    const int k = i * raft::WarpSize + lane_id;
    if (k < graph_degree) {
      knn_graph[k + (static_cast<uint64_t>(graph_degree) * srcNode)] = my_vals[i];
    }
  }
}

// One CTA per node A: counts, for each edge A->B, how many two-hop detours
// A->D->B exist through closer neighbors D, accumulating into shared memory.
// stats[0] receives the total number of detour-free edges, stats[1] the
// number of nodes with at least `degree` such edges.
template <int MAX_DEGREE, class IdxT>
RAFT_KERNEL kern_prune(const IdxT* const knn_graph,  // [graph_chunk_size, graph_degree]
                       const uint32_t graph_size,
                       const uint32_t graph_degree,
                       const uint32_t degree,
                       const uint32_t batch_size,
                       const uint32_t batch_id,
                       uint8_t* const detour_count,          // [graph_chunk_size, graph_degree]
                       uint32_t* const num_no_detour_edges,  // [graph_size]
                       uint64_t* const stats)
{
  __shared__ uint32_t smem_num_detour[MAX_DEGREE];
  uint64_t* const num_retain = stats;
  uint64_t* const num_full   = stats + 1;

  const uint64_t nid = blockIdx.x + (batch_size * batch_id);
  if (nid >= graph_size) { return; }
  for (uint32_t k = threadIdx.x; k < graph_degree; k += blockDim.x) {
    smem_num_detour[k] = 0;
  }
  __syncthreads();

  const uint64_t iA = nid;
  if (iA >= graph_size) { return; }

  // count number of detours (A->D->B)
  for (uint32_t kAD = 0; kAD < graph_degree - 1; kAD++) {
    const uint64_t iD = knn_graph[kAD + (graph_degree * iA)];
    for (uint32_t kDB = threadIdx.x; kDB < graph_degree; kDB += blockDim.x) {
      const uint64_t iB_candidate = knn_graph[kDB + ((uint64_t)graph_degree * iD)];
      for (uint32_t kAB = kAD + 1; kAB < graph_degree; kAB++) {
        // if ( kDB < kAB ) {
        const uint64_t iB = knn_graph[kAB + (graph_degree * iA)];
        if (iB == iB_candidate) {
          atomicAdd(smem_num_detour + kAB, 1);
          break;
        }
      }
    }
    __syncthreads();
  }

  uint32_t num_edges_no_detour = 0;
  for (uint32_t k = threadIdx.x; k < graph_degree; k += blockDim.x) {
    // Saturate the per-edge count at 255 to fit the uint8_t output.
    detour_count[k + (graph_degree * iA)] = min(smem_num_detour[k], (uint32_t)255);
    if (smem_num_detour[k] == 0) { num_edges_no_detour++; }
  }
  // Warp-wide butterfly reduction of the per-thread counts.
  num_edges_no_detour += __shfl_xor_sync(0xffffffff, num_edges_no_detour, 1);
  num_edges_no_detour += __shfl_xor_sync(0xffffffff, num_edges_no_detour, 2);
  num_edges_no_detour += __shfl_xor_sync(0xffffffff, num_edges_no_detour, 4);
  num_edges_no_detour += __shfl_xor_sync(0xffffffff, num_edges_no_detour, 8);
  num_edges_no_detour += __shfl_xor_sync(0xffffffff, num_edges_no_detour, 16);
  num_edges_no_detour = min(num_edges_no_detour, degree);

  if (threadIdx.x == 0) {
    num_no_detour_edges[iA] = num_edges_no_detour;
    atomicAdd((unsigned long long int*)num_retain, (unsigned long long int)num_edges_no_detour);
    if (num_edges_no_detour >= degree) { atomicAdd((unsigned long long int*)num_full, 1); }
  }
}

// Builds a reverse graph: for every src -> dest edge, appends src to dest's
// reverse adjacency list (capped at `degree` entries via an atomic cursor).
template <class IdxT>
RAFT_KERNEL kern_make_rev_graph(const IdxT* const dest_nodes,     // [graph_size]
                                IdxT* const rev_graph,            // [size, degree]
                                uint32_t* const rev_graph_count,  // [graph_size]
                                const uint32_t graph_size,
                                const uint32_t degree)
{
  const uint32_t tid  = threadIdx.x + (blockDim.x * blockIdx.x);
  const uint32_t tnum = blockDim.x * gridDim.x;

  for (uint32_t src_id = tid; src_id < graph_size; src_id += tnum) {
    const IdxT dest_id = dest_nodes[src_id];
    if (dest_id >= graph_size) continue;

    const uint32_t pos = atomicAdd(rev_graph_count + dest_id, 1);
    if (pos < degree) { rev_graph[pos + ((uint64_t)degree * dest_id)] = src_id; }
  }
}

// Linear search; returns `num` when `val` is absent.
template <class T>
uint64_t pos_in_array(T val, const T* array, uint64_t num)
{
  for (uint64_t i = 0; i < num; i++) {
    if (val == array[i]) { return i; }
  }
  return num;
}

// Shifts the first `num` elements right by one (makes room at index 0).
template <class T>
void shift_array(T* array, uint64_t num)
{
  for (uint64_t i = num; i > 0; i--) {
    array[i] = array[i - 1];
  }
}
}  // namespace

// Sorts every node's neighbor list of the host-resident kNN graph by
// distance: copies the dataset and graph to the device, runs kern_sort with a
// numElementsPerThread specialization chosen from the graph degree, and
// copies the sorted graph back.
template <typename DataT,
          typename IdxT = uint32_t,
          typename d_accessor =
            host_device_accessor<std::experimental::default_accessor<DataT>, memory_type::device>,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void sort_knn_graph(raft::resources const& res,
                    mdspan<const DataT, matrix_extent<int64_t>, row_major, d_accessor> dataset,
                    mdspan<IdxT, matrix_extent<int64_t>, row_major, g_accessor> knn_graph)
{
  RAFT_EXPECTS(dataset.extent(0) == knn_graph.extent(0),
               "dataset size is expected to have the same number of graph index size");
  const uint32_t dataset_size = dataset.extent(0);
  const uint32_t dataset_dim  = dataset.extent(1);
  const DataT* dataset_ptr    = dataset.data_handle();

  const IdxT graph_size             = dataset_size;
  const uint32_t input_graph_degree = knn_graph.extent(1);
  IdxT* const input_graph_ptr       = knn_graph.data_handle();

  auto d_input_graph = raft::make_device_matrix<IdxT, int64_t>(res, graph_size, input_graph_degree);

  //
  // Sorting kNN graph
  //
  const double time_sort_start = cur_time();
  RAFT_LOG_DEBUG("# Sorting kNN Graph on GPUs ");

  auto d_dataset = raft::make_device_matrix<DataT, int64_t>(res, dataset_size, dataset_dim);
  raft::copy(d_dataset.data_handle(),
             dataset_ptr,
             dataset_size * dataset_dim,
             resource::get_cuda_stream(res));

  raft::copy(d_input_graph.data_handle(),
             input_graph_ptr,
             graph_size * input_graph_degree,
             resource::get_cuda_stream(res));

  // Choose the kern_sort specialization: numElementsPerThread * WarpSize must
  // cover the graph degree.
  void (*kernel_sort)(
    const DataT* const, const IdxT, const uint32_t, IdxT* const, const uint32_t, const uint32_t);
  if (input_graph_degree <= 32) {
    constexpr int numElementsPerThread = 1;
    kernel_sort                        = kern_sort<DataT, IdxT, numElementsPerThread>;
  } else if (input_graph_degree <= 64) {
    constexpr int numElementsPerThread = 2;
    kernel_sort                        = kern_sort<DataT, IdxT, numElementsPerThread>;
  } else if (input_graph_degree <= 128) {
    constexpr int numElementsPerThread = 4;
    kernel_sort                        = kern_sort<DataT, IdxT, numElementsPerThread>;
  } else if (input_graph_degree <= 256) {
    constexpr int numElementsPerThread = 8;
    kernel_sort                        = kern_sort<DataT, IdxT, numElementsPerThread>;
  } else if (input_graph_degree <= 512) {
    constexpr int numElementsPerThread = 16;
    kernel_sort                        = kern_sort<DataT, IdxT, numElementsPerThread>;
  } else if (input_graph_degree <= 1024) {
    constexpr int numElementsPerThread = 32;
    kernel_sort                        = kern_sort<DataT, IdxT, numElementsPerThread>;
  } else {
    RAFT_FAIL(
      "The degree of input knn graph is too large (%u). "
      "It must be equal to or smaller than %d.",
      input_graph_degree,
      1024);
  }
  const auto block_size          = 256;
  const auto num_warps_per_block = block_size / raft::WarpSize;
  const auto grid_size           = (graph_size + num_warps_per_block - 1) / num_warps_per_block;

  RAFT_LOG_DEBUG(".");
  kernel_sort<<<grid_size, block_size, 0, resource::get_cuda_stream(res)>>>(
    d_dataset.data_handle(),
    dataset_size,
    dataset_dim,
    d_input_graph.data_handle(),
    graph_size,
    input_graph_degree);
  resource::sync_stream(res);
  RAFT_LOG_DEBUG(".");
  raft::copy(input_graph_ptr,
             d_input_graph.data_handle(),
             graph_size * input_graph_degree,
             resource::get_cuda_stream(res));
  RAFT_LOG_DEBUG("\n");

  const double time_sort_end = cur_time();
  RAFT_LOG_DEBUG("# Sorting kNN graph time: %.1lf sec\n", time_sort_end - time_sort_start);
}

// Prunes an input kNN graph down to new_graph's (smaller) degree.
// NOTE: definition continues beyond this chunk.
template <typename IdxT = uint32_t,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void optimize(raft::resources const& res,
              mdspan<IdxT, matrix_extent<int64_t>, row_major, g_accessor> knn_graph,
              raft::host_matrix_view<IdxT, int64_t, row_major> new_graph)
{
  RAFT_LOG_DEBUG(
    "# Pruning kNN graph (size=%lu, degree=%lu)\n", knn_graph.extent(0), knn_graph.extent(1));

  RAFT_EXPECTS(knn_graph.extent(0) == new_graph.extent(0),
               "Each input array is expected to have the same number of rows");
  RAFT_EXPECTS(new_graph.extent(1) <= knn_graph.extent(1),
               "output graph cannot have more columns than input graph");
  const uint32_t input_graph_degree  = knn_graph.extent(1);
  const uint32_t output_graph_degree = new_graph.extent(1);
  auto input_graph_ptr               = knn_graph.data_handle();
  auto output_graph_ptr              = new_graph.data_handle();
  const IdxT graph_size              = new_graph.extent(0);

  {
    //
    // Prune kNN graph
    //
    auto d_detour_count =
      raft::make_device_matrix<uint8_t, int64_t>(res, graph_size, input_graph_degree);

    RAFT_CUDA_TRY(cudaMemsetAsync(d_detour_count.data_handle(),
                                  0xff,
                                  graph_size * input_graph_degree * sizeof(uint8_t),
                                  resource::get_cuda_stream(res)));

    auto
d_num_no_detour_edges = raft::make_device_vector<uint32_t, int64_t>(res, graph_size); RAFT_CUDA_TRY(cudaMemsetAsync(d_num_no_detour_edges.data_handle(), 0x00, graph_size * sizeof(uint32_t), resource::get_cuda_stream(res))); auto dev_stats = raft::make_device_vector<uint64_t>(res, 2); auto host_stats = raft::make_host_vector<uint64_t>(2); // // Prune unimportant edges. // // The edge to be retained is determined without explicitly considering // distance or angle. Suppose the edge is the k-th edge of some node-A to // node-B (A->B). Among the edges originating at node-A, there are k-1 edges // shorter than the edge A->B. Each of these k-1 edges are connected to a // different k-1 nodes. Among these k-1 nodes, count the number of nodes with // edges to node-B, which is the number of 2-hop detours for the edge A->B. // Once the number of 2-hop detours has been counted for all edges, the // specified number of edges are picked up for each node, starting with the // edge with the lowest number of 2-hop detours. // const double time_prune_start = cur_time(); RAFT_LOG_DEBUG("# Pruning kNN Graph on GPUs\r"); // Copy input_graph_ptr over to device if necessary device_matrix_view_from_host d_input_graph( res, raft::make_host_matrix_view<IdxT, int64_t>(input_graph_ptr, graph_size, input_graph_degree)); constexpr int MAX_DEGREE = 1024; if (input_graph_degree > MAX_DEGREE) { RAFT_FAIL( "The degree of input knn graph is too large (%u). 
" "It must be equal to or smaller than %d.", input_graph_degree, 1024); } const uint32_t batch_size = std::min(static_cast<uint32_t>(graph_size), static_cast<uint32_t>(256 * 1024)); const uint32_t num_batch = (graph_size + batch_size - 1) / batch_size; const dim3 threads_prune(32, 1, 1); const dim3 blocks_prune(batch_size, 1, 1); RAFT_CUDA_TRY(cudaMemsetAsync( dev_stats.data_handle(), 0, sizeof(uint64_t) * 2, resource::get_cuda_stream(res))); for (uint32_t i_batch = 0; i_batch < num_batch; i_batch++) { kern_prune<MAX_DEGREE, IdxT> <<<blocks_prune, threads_prune, 0, resource::get_cuda_stream(res)>>>( d_input_graph.data_handle(), graph_size, input_graph_degree, output_graph_degree, batch_size, i_batch, d_detour_count.data_handle(), d_num_no_detour_edges.data_handle(), dev_stats.data_handle()); resource::sync_stream(res); RAFT_LOG_DEBUG( "# Pruning kNN Graph on GPUs (%.1lf %%)\r", (double)std::min<IdxT>((i_batch + 1) * batch_size, graph_size) / graph_size * 100); } resource::sync_stream(res); RAFT_LOG_DEBUG("\n"); host_matrix_view_from_device<uint8_t, int64_t> detour_count(res, d_detour_count.view()); raft::copy( host_stats.data_handle(), dev_stats.data_handle(), 2, resource::get_cuda_stream(res)); const auto num_keep = host_stats.data_handle()[0]; const auto num_full = host_stats.data_handle()[1]; // Create pruned kNN graph uint32_t max_detour = 0; #pragma omp parallel for reduction(max : max_detour) for (uint64_t i = 0; i < graph_size; i++) { uint64_t pk = 0; for (uint32_t num_detour = 0; num_detour < output_graph_degree; num_detour++) { if (max_detour < num_detour) { max_detour = num_detour; /* stats */ } for (uint64_t k = 0; k < input_graph_degree; k++) { if (detour_count.data_handle()[k + (input_graph_degree * i)] != num_detour) { continue; } output_graph_ptr[pk + (output_graph_degree * i)] = input_graph_ptr[k + (input_graph_degree * i)]; pk += 1; if (pk >= output_graph_degree) break; } if (pk >= output_graph_degree) break; } assert(pk == output_graph_degree); } 
// RAFT_LOG_DEBUG("# max_detour: %u\n", max_detour); const double time_prune_end = cur_time(); RAFT_LOG_DEBUG( "# Pruning time: %.1lf sec, " "avg_no_detour_edges_per_node: %.2lf/%u, " "nodes_with_no_detour_at_all_edges: %.1lf%%\n", time_prune_end - time_prune_start, (double)num_keep / graph_size, output_graph_degree, (double)num_full / graph_size * 100); } auto rev_graph = raft::make_host_matrix<IdxT, int64_t>(graph_size, output_graph_degree); auto rev_graph_count = raft::make_host_vector<uint32_t, int64_t>(graph_size); { // // Make reverse graph // const double time_make_start = cur_time(); device_matrix_view_from_host<IdxT, int64_t> d_rev_graph(res, rev_graph.view()); RAFT_CUDA_TRY(cudaMemsetAsync(d_rev_graph.data_handle(), 0xff, graph_size * output_graph_degree * sizeof(IdxT), resource::get_cuda_stream(res))); auto d_rev_graph_count = raft::make_device_vector<uint32_t, int64_t>(res, graph_size); RAFT_CUDA_TRY(cudaMemsetAsync(d_rev_graph_count.data_handle(), 0x00, graph_size * sizeof(uint32_t), resource::get_cuda_stream(res))); auto dest_nodes = raft::make_host_vector<IdxT, int64_t>(graph_size); auto d_dest_nodes = raft::make_device_vector<IdxT, int64_t>(res, graph_size); for (uint64_t k = 0; k < output_graph_degree; k++) { #pragma omp parallel for for (uint64_t i = 0; i < graph_size; i++) { dest_nodes.data_handle()[i] = output_graph_ptr[k + (output_graph_degree * i)]; } resource::sync_stream(res); raft::copy(d_dest_nodes.data_handle(), dest_nodes.data_handle(), graph_size, resource::get_cuda_stream(res)); dim3 threads(256, 1, 1); dim3 blocks(1024, 1, 1); kern_make_rev_graph<<<blocks, threads, 0, resource::get_cuda_stream(res)>>>( d_dest_nodes.data_handle(), d_rev_graph.data_handle(), d_rev_graph_count.data_handle(), graph_size, output_graph_degree); RAFT_LOG_DEBUG("# Making reverse graph on GPUs: %lu / %u \r", k, output_graph_degree); } resource::sync_stream(res); RAFT_LOG_DEBUG("\n"); if (d_rev_graph.allocated_memory()) { raft::copy(rev_graph.data_handle(), 
d_rev_graph.data_handle(), graph_size * output_graph_degree, resource::get_cuda_stream(res)); } raft::copy(rev_graph_count.data_handle(), d_rev_graph_count.data_handle(), graph_size, resource::get_cuda_stream(res)); const double time_make_end = cur_time(); RAFT_LOG_DEBUG("# Making reverse graph time: %.1lf sec", time_make_end - time_make_start); } { // // Replace some edges with reverse edges // const double time_replace_start = cur_time(); const uint64_t num_protected_edges = output_graph_degree / 2; RAFT_LOG_DEBUG("# num_protected_edges: %lu", num_protected_edges); constexpr int _omp_chunk = 1024; #pragma omp parallel for schedule(dynamic, _omp_chunk) for (uint64_t j = 0; j < graph_size; j++) { uint64_t k = std::min(rev_graph_count.data_handle()[j], output_graph_degree); while (k) { k--; uint64_t i = rev_graph.data_handle()[k + (output_graph_degree * j)]; uint64_t pos = pos_in_array<IdxT>(i, output_graph_ptr + (output_graph_degree * j), output_graph_degree); if (pos < num_protected_edges) { continue; } uint64_t num_shift = pos - num_protected_edges; if (pos == output_graph_degree) { num_shift = output_graph_degree - num_protected_edges - 1; } shift_array<IdxT>(output_graph_ptr + num_protected_edges + (output_graph_degree * j), num_shift); output_graph_ptr[num_protected_edges + (output_graph_degree * j)] = i; } if ((omp_get_thread_num() == 0) && ((j % _omp_chunk) == 0)) { RAFT_LOG_DEBUG("# Replacing reverse edges: %lu / %lu ", j, graph_size); } } RAFT_LOG_DEBUG("\n"); const double time_replace_end = cur_time(); RAFT_LOG_DEBUG("# Replacing edges time: %.1lf sec", time_replace_end - time_replace_start); /* stats */ uint64_t num_replaced_edges = 0; #pragma omp parallel for reduction(+ : num_replaced_edges) for (uint64_t i = 0; i < graph_size; i++) { for (uint64_t k = 0; k < output_graph_degree; k++) { const uint64_t j = output_graph_ptr[k + (output_graph_degree * i)]; const uint64_t pos = pos_in_array<IdxT>(j, output_graph_ptr + (output_graph_degree * i), 
output_graph_degree); if (pos == output_graph_degree) { num_replaced_edges += 1; } } } RAFT_LOG_DEBUG("# Average number of replaced edges per node: %.2f", (double)num_replaced_edges / graph_size); } } } // namespace graph } // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_multi_cta_kernel.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY #include "search_multi_cta_kernel-inl.cuh" #endif #ifdef RAFT_COMPILED #include "search_multi_cta_kernel-ext.cuh" #endif
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_single_cta_kernel-ext.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/neighbors/sample_filter_types.hpp> #include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT namespace raft::neighbors::cagra::detail { namespace single_cta_search { #ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY template <unsigned TEAM_SIZE, unsigned MAX_DATASET_DIM, typename DATA_T, typename INDEX_T, typename DISTANCE_T, typename SAMPLE_FILTER_T> void select_and_run( // raft::resources const& res, raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, INDEX_T* const topk_indices_ptr, // [num_queries, topk] DISTANCE_T* const topk_distances_ptr, // [num_queries, topk] const DATA_T* const queries_ptr, // [num_queries, dataset_dim] const uint32_t num_queries, const INDEX_T* dev_seed_ptr, // [num_queries, num_seeds] uint32_t* const num_executed_iterations, // [num_queries,] uint32_t topk, uint32_t num_itopk_candidates, uint32_t block_size, uint32_t smem_size, int64_t hash_bitlen, INDEX_T* hashmap_ptr, size_t small_hash_bitlen, size_t small_hash_reset_interval, uint32_t num_random_samplings, uint64_t rand_xor_mask, uint32_t num_seeds, size_t itopk_size, size_t search_width, size_t min_iterations, size_t max_iterations, SAMPLE_FILTER_T sample_filter, cudaStream_t stream) RAFT_EXPLICIT; #endif // RAFT_EXPLICIT_INSTANTIATE_ONLY #define instantiate_single_cta_select_and_run( \ TEAM_SIZE, 
MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T) \ extern template void \ select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>( \ raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \ raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \ INDEX_T* const topk_indices_ptr, \ DISTANCE_T* const topk_distances_ptr, \ const DATA_T* const queries_ptr, \ const uint32_t num_queries, \ const INDEX_T* dev_seed_ptr, \ uint32_t* const num_executed_iterations, \ uint32_t topk, \ uint32_t num_itopk_candidates, \ uint32_t block_size, \ uint32_t smem_size, \ int64_t hash_bitlen, \ INDEX_T* hashmap_ptr, \ size_t small_hash_bitlen, \ size_t small_hash_reset_interval, \ uint32_t num_random_samplings, \ uint64_t rand_xor_mask, \ uint32_t num_seeds, \ size_t itopk_size, \ size_t search_width, \ size_t min_iterations, \ size_t max_iterations, \ SAMPLE_FILTER_T sample_filter, \ cudaStream_t stream); instantiate_single_cta_select_and_run( 32, 1024, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 8, 128, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 16, 256, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 32, 512, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 32, 1024, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 8, 128, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 16, 256, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 32, 512, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); 
instantiate_single_cta_select_and_run( 32, 1024, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 8, 128, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 16, 256, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); instantiate_single_cta_select_and_run( 32, 512, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter); #undef instantiate_single_cta_select_and_run } // namespace single_cta_search } // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/topk_by_radix.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "topk_for_cagra/topk_core.cuh" namespace raft::neighbors::cagra::detail { namespace single_cta_search { template <unsigned MAX_INTERNAL_TOPK> struct topk_by_radix_sort_base { static constexpr std::uint32_t smem_size = MAX_INTERNAL_TOPK * 2 + 2048 + 8; static constexpr std::uint32_t state_bit_lenght = 0; static constexpr std::uint32_t vecLen = 2; // TODO }; template <unsigned MAX_INTERNAL_TOPK, class IdxT, class = void> struct topk_by_radix_sort : topk_by_radix_sort_base<MAX_INTERNAL_TOPK> {}; template <unsigned MAX_INTERNAL_TOPK, class IdxT> struct topk_by_radix_sort<MAX_INTERNAL_TOPK, IdxT, std::enable_if_t<((MAX_INTERNAL_TOPK <= 64))>> : topk_by_radix_sort_base<MAX_INTERNAL_TOPK> { __device__ void operator()(uint32_t topk, uint32_t batch_size, uint32_t len_x, const uint32_t* _x, const IdxT* _in_vals, uint32_t* _y, IdxT* _out_vals, uint32_t* work, uint32_t* _hints, bool sort, uint32_t* _smem) { std::uint8_t* const state = reinterpret_cast<std::uint8_t*>(work); topk_cta_11_core<topk_by_radix_sort_base<MAX_INTERNAL_TOPK>::state_bit_lenght, topk_by_radix_sort_base<MAX_INTERNAL_TOPK>::vecLen, 64, 32, IdxT>(topk, len_x, _x, _in_vals, _y, _out_vals, state, _hints, sort, _smem); } }; #define TOP_FUNC_PARTIAL_SPECIALIZATION(V) \ template <unsigned MAX_INTERNAL_TOPK, class IdxT> \ struct topk_by_radix_sort< \ MAX_INTERNAL_TOPK, \ IdxT, \ 
std::enable_if_t<((MAX_INTERNAL_TOPK <= V) && (2 * MAX_INTERNAL_TOPK > V))>> \ : topk_by_radix_sort_base<MAX_INTERNAL_TOPK> { \ __device__ void operator()(uint32_t topk, \ uint32_t batch_size, \ uint32_t len_x, \ const uint32_t* _x, \ const IdxT* _in_vals, \ uint32_t* _y, \ IdxT* _out_vals, \ uint32_t* work, \ uint32_t* _hints, \ bool sort, \ uint32_t* _smem) \ { \ assert(blockDim.x >= V / 4); \ std::uint8_t* state = (std::uint8_t*)work; \ topk_cta_11_core<topk_by_radix_sort_base<MAX_INTERNAL_TOPK>::state_bit_lenght, \ topk_by_radix_sort_base<MAX_INTERNAL_TOPK>::vecLen, \ V, \ V / 4, \ IdxT>( \ topk, len_x, _x, _in_vals, _y, _out_vals, state, _hints, sort, _smem); \ } \ }; TOP_FUNC_PARTIAL_SPECIALIZATION(128); TOP_FUNC_PARTIAL_SPECIALIZATION(256); TOP_FUNC_PARTIAL_SPECIALIZATION(512); TOP_FUNC_PARTIAL_SPECIALIZATION(1024); } // namespace single_cta_search } // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/fragment.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "device_common.hpp"
#include "utils.hpp"
#include <raft/core/logger.hpp>
#include <type_traits>

namespace raft::neighbors::cagra::detail {
namespace device {

namespace detail {
// Maps a byte count SIZE to the widest type usable for a single load of that
// many bytes: uint4 (16B) by default, narrower integer types for 8/4/2/1.
template <unsigned SIZE>
struct load_unit_t {
  using type = uint4;
};
template <>
struct load_unit_t<8> {
  using type = std::uint64_t;
};
template <>
struct load_unit_t<4> {
  using type = std::uint32_t;
};
template <>
struct load_unit_t<2> {
  using type = std::uint16_t;
};
template <>
struct load_unit_t<1> {
  using type = std::uint8_t;
};
}  // namespace detail

// One dataset or query vector is distributed within a warp and stored as `fragment`.
template <int DIM, class T, unsigned TEAM_SIZE, class ENABLED>
struct fragment_base {};

// Per-thread slice of a DIM-element vector shared by a team of TEAM_SIZE
// lanes. The enable_if requires DIM to be divisible by TEAM_SIZE * sizeof(T).
// The union lets callers access the slice either element-wise (x) or as wider
// load units (load_block) for vectorized memory access.
template <int DIM, class T, unsigned TEAM_SIZE = warp_size>
struct fragment
  : fragment_base<DIM,
                  T,
                  TEAM_SIZE,
                  typename std::enable_if<DIM % (TEAM_SIZE * utils::size_of<T>()) == 0>::type> {
  static constexpr unsigned num_elements = DIM / TEAM_SIZE;
  using block_t = typename detail::load_unit_t<num_elements * utils::size_of<T>()>::type;
  static constexpr unsigned num_load_blocks =
    num_elements * utils::size_of<T>() / utils::size_of<block_t>();

  union {
    T x[num_elements];
    block_t load_block[num_load_blocks];
  };
};

// Load a vector from device/shared memory
// Fast path (input_vector_length == DIM): wide, block-strided loads via
// load_block; this leaves the elements in an interleaved (non-linear) order
// within the team. Slow path: element-wise loads, zero-padding positions past
// input_vector_length. `sync` issues a __syncwarp() at the end.
template <int DIM, class T, unsigned TEAM_SIZE, class INPUT_T>
_RAFT_DEVICE void load_vector_sync(device::fragment<DIM, T, TEAM_SIZE>& frag,
                                   const INPUT_T* const input_vector_ptr,
                                   const unsigned input_vector_length,
                                   const bool sync = true)
{
  const auto lane_id = threadIdx.x % TEAM_SIZE;
  if (DIM == input_vector_length) {
    for (unsigned i = 0; i < frag.num_load_blocks; i++) {
      const auto vector_index = i * TEAM_SIZE + lane_id;
      frag.load_block[i] =
        reinterpret_cast<const typename device::fragment<DIM, T, TEAM_SIZE>::block_t*>(
          input_vector_ptr)[vector_index];
    }
  } else {
    for (unsigned i = 0; i < frag.num_elements; i++) {
      const auto vector_index = i * TEAM_SIZE + lane_id;

      INPUT_T v;
      if (vector_index < input_vector_length) {
        v = static_cast<INPUT_T>(input_vector_ptr[vector_index]);
      } else {
        v = static_cast<INPUT_T>(0);
      }

      frag.x[i] = v;
    }
  }
  if (sync) { __syncwarp(); }
}

// Compute the square of the L2 norm of two vectors
// Both fragments must use the same element layout; the partial sums are
// combined across the team with a shuffle-xor butterfly reduction, so every
// lane returns the full result.
template <class COMPUTE_T, int DIM, class T, unsigned TEAM_SIZE>
_RAFT_DEVICE COMPUTE_T norm2(const device::fragment<DIM, T, TEAM_SIZE>& a,
                             const device::fragment<DIM, T, TEAM_SIZE>& b)
{
  COMPUTE_T sum = 0;

  // Compute the thread-local norm2
  for (unsigned i = 0; i < a.num_elements; i++) {
    const auto diff = static_cast<COMPUTE_T>(a.x[i]) - static_cast<COMPUTE_T>(b.x[i]);
    sum += diff * diff;
  }

  // Compute the result norm2 summing up the thread-local norm2s.
  for (unsigned offset = TEAM_SIZE / 2; offset > 0; offset >>= 1)
    sum += __shfl_xor_sync(0xffffffff, sum, offset);

  return sum;
}

// Squared L2 distance between two fragments with each element difference
// multiplied by `scale` before squaring (the subtraction is done in float).
template <class COMPUTE_T, int DIM, class T, unsigned TEAM_SIZE>
_RAFT_DEVICE COMPUTE_T norm2(const device::fragment<DIM, T, TEAM_SIZE>& a,
                             const device::fragment<DIM, T, TEAM_SIZE>& b,
                             const float scale)
{
  COMPUTE_T sum = 0;

  // Compute the thread-local norm2
  for (unsigned i = 0; i < a.num_elements; i++) {
    const auto diff =
      static_cast<COMPUTE_T>((static_cast<float>(a.x[i]) - static_cast<float>(b.x[i])) * scale);
    sum += diff * diff;
  }

  // Compute the result norm2 summing up the thread-local norm2s.
  for (unsigned offset = TEAM_SIZE / 2; offset > 0; offset >>= 1)
    sum += __shfl_xor_sync(0xffffffff, sum, offset);

  return sum;
}

// Squared L2 distance between a fragment and a linear DIM-element array `b`.
// The index expression for j undoes the fragment's interleaved block layout
// (see load_vector_sync fast path) to address the matching element of `b`.
// Both operands are multiplied by `scale` before the subtraction.
template <class COMPUTE_T, int DIM, class T, unsigned TEAM_SIZE>
_RAFT_DEVICE COMPUTE_T norm2(const device::fragment<DIM, T, TEAM_SIZE>& a,
                             const T* b,  // [DIM]
                             const float scale)
{
  COMPUTE_T sum = 0;

  // Compute the thread-local norm2
  const unsigned chunk_size = a.num_elements / a.num_load_blocks;
  const unsigned lane_id    = threadIdx.x % TEAM_SIZE;
  for (unsigned i = 0; i < a.num_elements; i++) {
    unsigned j      = (i % chunk_size) + chunk_size * (lane_id + TEAM_SIZE * (i / chunk_size));
    const auto diff = static_cast<COMPUTE_T>(a.x[i] * scale) - static_cast<COMPUTE_T>(b[j] * scale);
    sum += diff * diff;
  }

  // Compute the result norm2 summing up the thread-local norm2s.
  for (unsigned offset = TEAM_SIZE / 2; offset > 0; offset >>= 1)
    sum += __shfl_xor_sync(0xffffffff, sum, offset);

  return sum;
}

// Squared L2 distance against a pre-converted COMPUTE_T array of runtime
// length `dim`. When dim == DIM the interleaved-layout index mapping is used;
// otherwise elements are assumed to sit in plain lane-strided order (the slow
// path of load_vector_sync) and positions past `dim` are skipped.
template <class COMPUTE_T, int DIM, class T, unsigned TEAM_SIZE>
_RAFT_DEVICE inline COMPUTE_T norm2x(const device::fragment<DIM, T, TEAM_SIZE>& a,
                                     const COMPUTE_T* b,  // [dim]
                                     const uint32_t dim,
                                     const float scale)
{
  // Compute the thread-local norm2
  COMPUTE_T sum          = 0;
  const unsigned lane_id = threadIdx.x % TEAM_SIZE;
  if (dim == DIM) {
    const unsigned chunk_size = a.num_elements / a.num_load_blocks;
    for (unsigned i = 0; i < a.num_elements; i++) {
      unsigned j      = (i % chunk_size) + chunk_size * (lane_id + TEAM_SIZE * (i / chunk_size));
      const auto diff = static_cast<COMPUTE_T>(a.x[i] * scale) - b[j];
      sum += diff * diff;
    }
  } else {
    for (unsigned i = 0; i < a.num_elements; i++) {
      unsigned j = lane_id + (TEAM_SIZE * i);
      if (j >= dim) break;
      const auto diff = static_cast<COMPUTE_T>(a.x[i] * scale) - b[j];
      sum += diff * diff;
    }
  }

  // Compute the result norm2 summing up the thread-local norm2s.
  for (unsigned offset = TEAM_SIZE / 2; offset > 0; offset >>= 1)
    sum += __shfl_xor_sync(0xffffffff, sum, offset);

  return sum;
}

// Debug helper: each lane of the team prints its local elements in turn,
// serialized by __syncwarp() so the output is ordered by lane.
template <int DIM, class T, unsigned TEAM_SIZE>
_RAFT_DEVICE void print_fragment(const device::fragment<DIM, T, TEAM_SIZE>& a)
{
  for (unsigned i = 0; i < TEAM_SIZE; i++) {
    if ((threadIdx.x % TEAM_SIZE) == i) {
      for (unsigned j = 0; j < a.num_elements; j++) {
        RAFT_LOG_DEBUG("%+e ", static_cast<float>(a.x[j]));
      }
    }
    __syncwarp();
  }
}

}  // namespace device
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/spatial/knn/detail/ann_utils.cuh>

#include <algorithm>
#include <cassert>
#include <iostream>
#include <memory>
#include <numeric>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_properties.hpp>
#include <raft/core/resources.hpp>
#include <raft/neighbors/sample_filter_types.hpp>
#include <vector>

#include "bitonic.hpp"
#include "compute_distance.hpp"
#include "device_common.hpp"
#include "hashmap.hpp"
#include "search_plan.cuh"
#include "topk_for_cagra/topk_core.cuh"  // TODO replace with raft topk if possible
#include "utils.hpp"

#include <raft/core/logger.hpp>
#include <raft/util/cuda_rt_essentials.hpp>
#include <raft/util/cudart_utils.hpp>  // RAFT_CUDA_TRY_NOT_THROW is used
// TODO(tfeher): consider moving this to cuda_rt_essentials.hpp

namespace raft::neighbors::cagra::detail {
namespace multi_cta_search {

// #define _CLK_BREAKDOWN

// Select up to `search_width` not-yet-used entries of the internal top-k list as the
// parents for the next graph-traversal step.
//
// Executed by warp 0 only (all other warps return immediately). An itopk entry whose
// most-significant index bit is set has already been used as a parent; this function
// picks the first `search_width` entries with the MSB clear, records their *positions*
// within `itopk_indices` into `next_parent_indices`, and marks them used by setting the
// MSB in place. If no new parent is found, `*terminate_flag` is set to 1.
template <class INDEX_T>
__device__ void pickup_next_parents(INDEX_T* const next_parent_indices,  // [search_width]
                                    const uint32_t search_width,
                                    INDEX_T* const itopk_indices,  // [num_itopk]
                                    const size_t num_itopk,
                                    uint32_t* const terminate_flag)
{
  constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
  const unsigned warp_id             = threadIdx.x / 32;
  if (warp_id > 0) { return; }
  const unsigned lane_id = threadIdx.x % 32;
  // Initialize all slots to the "invalid" sentinel.
  for (uint32_t i = lane_id; i < search_width; i += 32) {
    next_parent_indices[i] = utils::get_max_value<INDEX_T>();
  }
  // Round the scan length up to a multiple of 32 so every lane participates in the
  // warp-collective __ballot_sync below.
  uint32_t max_itopk = num_itopk;
  if (max_itopk % 32) { max_itopk += 32 - (max_itopk % 32); }
  uint32_t num_new_parents = 0;
  for (uint32_t j = lane_id; j < max_itopk; j += 32) {
    INDEX_T index;
    int new_parent = 0;
    if (j < num_itopk) {
      index = itopk_indices[j];
      if ((index & index_msb_1_mask) == 0) {  // check if most significant bit is set
        new_parent = 1;
      }
    }
    const uint32_t ballot_mask = __ballot_sync(0xffffffff, new_parent);
    if (new_parent) {
      // Rank of this lane among the lanes that found a new parent in this pass.
      const auto i = __popc(ballot_mask & ((1 << lane_id) - 1)) + num_new_parents;
      if (i < search_width) {
        next_parent_indices[i] = j;
        itopk_indices[j] |= index_msb_1_mask;  // set most significant bit as used node
      }
    }
    num_new_parents += __popc(ballot_mask);
    if (num_new_parents >= search_width) { break; }
  }
  if (threadIdx.x == 0 && (num_new_parents == 0)) { *terminate_flag = 1; }
}

// In-place top-k of `num_elements` (distance, index) pairs using a single-warp bitonic
// sort; the best `num_itopk` pairs are written back to the front of the arrays.
//
// Executed by warp 0 only. Each lane holds N = ceil(MAX_ELEMENTS/32) register pairs;
// out-of-range slots are padded with max-value sentinels, which places them at the tail
// after sorting (best-distance-first ordering — presumably ascending, per the sentinel
// padding; exact order is defined by bitonic::warp_sort).
template <unsigned MAX_ELEMENTS, class INDEX_T>
__device__ inline void topk_by_bitonic_sort(float* distances,  // [num_elements]
                                            INDEX_T* indices,  // [num_elements]
                                            const uint32_t num_elements,
                                            const uint32_t num_itopk  // num_itopk <= num_elements
)
{
  const unsigned warp_id = threadIdx.x / 32;
  if (warp_id > 0) { return; }
  const unsigned lane_id = threadIdx.x % 32;
  constexpr unsigned N   = (MAX_ELEMENTS + 31) / 32;
  float key[N];
  INDEX_T val[N];
  // Load strided: lane holds elements lane_id, lane_id+32, ...
  for (unsigned i = 0; i < N; i++) {
    unsigned j = lane_id + (32 * i);
    if (j < num_elements) {
      key[i] = distances[j];
      val[i] = indices[j];
    } else {
      key[i] = utils::get_max_value<float>();
      val[i] = utils::get_max_value<INDEX_T>();
    }
  }
  /* Warp Sort */
  bitonic::warp_sort<float, INDEX_T, N>(key, val);
  /* Store itopk sorted results */
  // After the sort the layout is blocked: lane owns elements N*lane_id .. N*lane_id+N-1.
  for (unsigned i = 0; i < N; i++) {
    unsigned j = (N * lane_id) + i;
    if (j < num_itopk) {
      distances[j] = key[i];
      indices[j]   = val[i];
    }
  }
}

//
// multiple CTAs per single query
//
// Graph-traversal search kernel. Grid mapping: blockIdx.y selects the query,
// blockIdx.x selects one of `num_cta_per_query` CTAs that search the same query
// independently (each starting from different random seeds); each CTA writes its own
// itopk_size results to result_indices_ptr / result_distances_ptr.
//
// Dynamic shared memory layout (see result_buffer comment below): swizzled query
// vector, result index buffer, result distance buffer, parent index buffer, and a
// single terminate flag.
template <unsigned TEAM_SIZE,
          unsigned MAX_ELEMENTS,
          unsigned MAX_DATASET_DIM,
          class DATA_T,
          class DISTANCE_T,
          class INDEX_T,
          class LOAD_T,
          class SAMPLE_FILTER_T>
__launch_bounds__(1024, 1) RAFT_KERNEL search_kernel(
  INDEX_T* const result_indices_ptr,       // [num_queries, num_cta_per_query, itopk_size]
  DISTANCE_T* const result_distances_ptr,  // [num_queries, num_cta_per_query, itopk_size]
  const DATA_T* const dataset_ptr,         // [dataset_size, dataset_dim]
  const size_t dataset_dim,
  const size_t dataset_size,
  const size_t dataset_ld,
  const DATA_T* const queries_ptr,  // [num_queries, dataset_dim]
  const INDEX_T* const knn_graph,   // [dataset_size, graph_degree]
  const uint32_t graph_degree,
  const unsigned num_distilation,
  const uint64_t rand_xor_mask,
  const INDEX_T* seed_ptr,  // [num_queries, num_seeds]
  const uint32_t num_seeds,
  INDEX_T* const visited_hashmap_ptr,  // [num_queries, 1 << hash_bitlen]
  const uint32_t hash_bitlen,
  const uint32_t itopk_size,
  const uint32_t search_width,
  const uint32_t min_iteration,
  const uint32_t max_iteration,
  uint32_t* const num_executed_iterations, /* stats */
  SAMPLE_FILTER_T sample_filter)
{
  assert(dataset_dim <= MAX_DATASET_DIM);

  const auto num_queries       = gridDim.y;
  const auto query_id          = blockIdx.y;
  const auto num_cta_per_query = gridDim.x;
  const auto cta_id            = blockIdx.x;  // local CTA ID

#ifdef _CLK_BREAKDOWN
  uint64_t clk_init                 = 0;
  uint64_t clk_compute_1st_distance = 0;
  uint64_t clk_topk                 = 0;
  uint64_t clk_pickup_parents       = 0;
  uint64_t clk_compute_distance     = 0;
  uint64_t clk_start;
#define _CLK_START() clk_start = clock64()
#define _CLK_REC(V) V += clock64() - clk_start;
#else
#define _CLK_START()
#define _CLK_REC(V)
#endif
  _CLK_START();

  extern __shared__ uint32_t smem[];

  // Layout of result_buffer
  // +----------------+------------------------------+---------+
  // | internal_top_k | neighbors of parent nodes    | padding |
  // | <itopk_size>   | <search_width * graph_degree>| upto 32 |
  // +----------------+------------------------------+---------+
  // |<---  result_buffer_size  --->|
  uint32_t result_buffer_size    = itopk_size + (search_width * graph_degree);
  uint32_t result_buffer_size_32 = result_buffer_size;
  if (result_buffer_size % 32) { result_buffer_size_32 += 32 - (result_buffer_size % 32); }
  assert(result_buffer_size_32 <= MAX_ELEMENTS);

  // Carve the dynamic shared memory into the per-CTA working buffers.
  auto query_buffer          = reinterpret_cast<float*>(smem);
  auto result_indices_buffer = reinterpret_cast<INDEX_T*>(query_buffer + MAX_DATASET_DIM);
  auto result_distances_buffer =
    reinterpret_cast<DISTANCE_T*>(result_indices_buffer + result_buffer_size_32);
  auto parent_indices_buffer =
    reinterpret_cast<INDEX_T*>(result_distances_buffer + result_buffer_size_32);
  auto terminate_flag = reinterpret_cast<uint32_t*>(parent_indices_buffer + search_width);

#if 0
    /* debug */
    for (unsigned i = threadIdx.x; i < result_buffer_size_32; i += BLOCK_SIZE) {
        result_indices_buffer[i] = utils::get_max_value<INDEX_T>();
        result_distances_buffer[i] = utils::get_max_value<DISTANCE_T>();
    }
#endif
  // Stage the query into shared memory (swizzled layout via device::swizzling),
  // zero-padding up to MAX_DATASET_DIM.
  const DATA_T* const query_ptr = queries_ptr + (dataset_dim * query_id);
  for (unsigned i = threadIdx.x; i < MAX_DATASET_DIM; i += blockDim.x) {
    unsigned j = device::swizzling(i);
    if (i < dataset_dim) {
      query_buffer[j] = spatial::knn::detail::utils::mapping<float>{}(query_ptr[i]);
    } else {
      query_buffer[j] = 0.0;
    }
  }
  if (threadIdx.x == 0) { terminate_flag[0] = 0; }
  // Visited-node hash table is shared by all CTAs working on the same query.
  INDEX_T* const local_visited_hashmap_ptr =
    visited_hashmap_ptr + (hashmap::get_size(hash_bitlen) * query_id);
  __syncthreads();
  _CLK_REC(clk_init);

  // compute distance to randomly selecting nodes
  _CLK_START();
  const INDEX_T* const local_seed_ptr = seed_ptr ? seed_ptr + (num_seeds * query_id) : nullptr;
  uint32_t block_id                   = cta_id + (num_cta_per_query * query_id);
  uint32_t num_blocks                 = num_cta_per_query * num_queries;
  device::compute_distance_to_random_nodes<TEAM_SIZE, MAX_DATASET_DIM, LOAD_T>(
    result_indices_buffer,
    result_distances_buffer,
    query_buffer,
    dataset_ptr,
    dataset_dim,
    dataset_size,
    dataset_ld,
    result_buffer_size,
    num_distilation,
    rand_xor_mask,
    local_seed_ptr,
    num_seeds,
    local_visited_hashmap_ptr,
    hash_bitlen,
    block_id,
    num_blocks);
  __syncthreads();
  _CLK_REC(clk_compute_1st_distance);

  // Main traversal loop: sort candidates, pick parents, expand their neighbors.
  uint32_t iter = 0;
  while (1) {
    // topk with bitonic sort
    _CLK_START();
    topk_by_bitonic_sort<MAX_ELEMENTS, INDEX_T>(result_distances_buffer,
                                                result_indices_buffer,
                                                itopk_size + (search_width * graph_degree),
                                                itopk_size);
    _CLK_REC(clk_topk);

    if (iter + 1 == max_iteration) {
      __syncthreads();
      break;
    }

    // pick up next parents
    _CLK_START();
    pickup_next_parents<INDEX_T>(
      parent_indices_buffer, search_width, result_indices_buffer, itopk_size, terminate_flag);
    _CLK_REC(clk_pickup_parents);

    __syncthreads();
    // Stop once no unvisited parent remains, but only after min_iteration rounds.
    if (*terminate_flag && iter >= min_iteration) { break; }

    // compute the norms between child nodes and query node
    _CLK_START();
    // constexpr unsigned max_n_frags = 16;
    constexpr unsigned max_n_frags = 0;
    device::compute_distance_to_child_nodes<TEAM_SIZE, MAX_DATASET_DIM, max_n_frags, LOAD_T>(
      result_indices_buffer + itopk_size,
      result_distances_buffer + itopk_size,
      query_buffer,
      dataset_ptr,
      dataset_dim,
      dataset_ld,
      knn_graph,
      graph_degree,
      local_visited_hashmap_ptr,
      hash_bitlen,
      parent_indices_buffer,
      result_indices_buffer,
      search_width);
    _CLK_REC(clk_compute_distance);
    __syncthreads();

    // Filtering
    if constexpr (!std::is_same<SAMPLE_FILTER_T,
                                raft::neighbors::filtering::none_cagra_sample_filter>::value) {
      constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
      const INDEX_T invalid_index        = utils::get_max_value<INDEX_T>();

      for (unsigned p = threadIdx.x; p < search_width; p += blockDim.x) {
        if (parent_indices_buffer[p] != invalid_index) {
          const auto parent_id =
            result_indices_buffer[parent_indices_buffer[p]] & ~index_msb_1_mask;
          if (!sample_filter(query_id, parent_id)) {
            // If the parent must not be in the resulting top-k list, remove from the parent list
            result_distances_buffer[parent_indices_buffer[p]] = utils::get_max_value<DISTANCE_T>();
            result_indices_buffer[parent_indices_buffer[p]]   = invalid_index;
          }
        }
      }
      __syncthreads();
    }

    iter++;
  }

  // Post process for filtering
  // Scrub any remaining filtered-out candidates from the whole buffer and re-sort so
  // the final itopk entries are all admissible.
  if constexpr (!std::is_same<SAMPLE_FILTER_T,
                              raft::neighbors::filtering::none_cagra_sample_filter>::value) {
    constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
    const INDEX_T invalid_index        = utils::get_max_value<INDEX_T>();

    for (unsigned i = threadIdx.x; i < itopk_size + search_width * graph_degree;
         i += blockDim.x) {
      const auto node_id = result_indices_buffer[i] & ~index_msb_1_mask;
      if (node_id != (invalid_index & ~index_msb_1_mask) && !sample_filter(query_id, node_id)) {
        // If the parent must not be in the resulting top-k list, remove from the parent list
        result_distances_buffer[i] = utils::get_max_value<DISTANCE_T>();
        result_indices_buffer[i]   = invalid_index;
      }
    }
    __syncthreads();
    topk_by_bitonic_sort<MAX_ELEMENTS, INDEX_T>(result_distances_buffer,
                                                result_indices_buffer,
                                                itopk_size + (search_width * graph_degree),
                                                itopk_size);
    __syncthreads();
  }

  // Write this CTA's itopk results to its slice of the global output, clearing the
  // "visited" marker bit from the indices.
  for (uint32_t i = threadIdx.x; i < itopk_size; i += blockDim.x) {
    uint32_t j = i + (itopk_size * (cta_id + (num_cta_per_query * query_id)));
    if (result_distances_ptr != nullptr) { result_distances_ptr[j] = result_distances_buffer[i]; }
    constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;

    result_indices_ptr[j] =
      result_indices_buffer[i] & ~index_msb_1_mask;  // clear most significant bit
  }

  if (threadIdx.x == 0 && cta_id == 0 && num_executed_iterations != nullptr) {
    num_executed_iterations[query_id] = iter + 1;
  }

#ifdef _CLK_BREAKDOWN
  // NOTE(review): clk_init is uint64_t but printed with %d (the later counters use
  // %lu) — harmless while _CLK_BREAKDOWN stays disabled, but worth fixing if enabled.
  if ((threadIdx.x == 0 || threadIdx.x == BLOCK_SIZE - 1) && (blockIdx.x == 0) &&
      ((query_id * 3) % gridDim.y < 3)) {
    RAFT_LOG_DEBUG(
      "query, %d, thread, %d"
      ", init, %d"
      ", 1st_distance, %lu"
      ", topk, %lu"
      ", pickup_parents, %lu"
      ", distance, %lu"
      "\n",
      query_id,
      threadIdx.x,
      clk_init,
      clk_compute_1st_distance,
      clk_topk,
      clk_pickup_parents,
      clk_compute_distance);
  }
#endif
}

// Fill `batch_size` strided rows of `count` elements each with `val`
// (row b starts at dev_ptr + ld * b). One thread per element.
template <class T>
RAFT_KERNEL set_value_batch_kernel(T* const dev_ptr,
                                   const std::size_t ld,
                                   const T val,
                                   const std::size_t count,
                                   const std::size_t batch_size)
{
  const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= count * batch_size) { return; }
  const auto batch_id              = tid / count;
  const auto elem_id               = tid % count;
  dev_ptr[elem_id + ld * batch_id] = val;
}

// Host-side launcher for set_value_batch_kernel on the given stream.
template <class T>
void set_value_batch(T* const dev_ptr,
                     const std::size_t ld,
                     const T val,
                     const std::size_t count,
                     const std::size_t batch_size,
                     cudaStream_t cuda_stream)
{
  constexpr std::uint32_t block_size = 256;
  const auto grid_size               = (count * batch_size + block_size - 1) / block_size;
  set_value_batch_kernel<T>
    <<<grid_size, block_size, 0, cuda_stream>>>(dev_ptr, ld, val, count, batch_size);
}

// Compile-time selection of the search_kernel instantiation whose MAX_ELEMENTS
// template value fits the requested result buffer size (64/128/256).
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          typename DATA_T,
          typename INDEX_T,
          typename DISTANCE_T,
          typename SAMPLE_FILTER_T>
struct search_kernel_config {
  // Search kernel function type. Note that the actual values for the template value
  // parameters do not matter, because they are not part of the function signature. The
  // second to fourth value parameters will be selected by the choose_* functions below.
  using kernel_t = decltype(&search_kernel<TEAM_SIZE,
                                           128,
                                           MAX_DATASET_DIM,
                                           DATA_T,
                                           DISTANCE_T,
                                           INDEX_T,
                                           device::LOAD_128BIT_T,
                                           SAMPLE_FILTER_T>);

  // Return the kernel instantiated with the smallest MAX_ELEMENTS (64/128/256) that
  // can hold `result_buffer_size`; throws if even 256 is too small.
  static auto choose_buffer_size(unsigned result_buffer_size, unsigned block_size) -> kernel_t
  {
    if (result_buffer_size <= 64) {
      return search_kernel<TEAM_SIZE,
                           64,
                           MAX_DATASET_DIM,
                           DATA_T,
                           DISTANCE_T,
                           INDEX_T,
                           device::LOAD_128BIT_T,
                           SAMPLE_FILTER_T>;
    } else if (result_buffer_size <= 128) {
      return search_kernel<TEAM_SIZE,
                           128,
                           MAX_DATASET_DIM,
                           DATA_T,
                           DISTANCE_T,
                           INDEX_T,
                           device::LOAD_128BIT_T,
                           SAMPLE_FILTER_T>;
    } else if (result_buffer_size <= 256) {
      return search_kernel<TEAM_SIZE,
                           256,
                           MAX_DATASET_DIM,
                           DATA_T,
                           DISTANCE_T,
                           INDEX_T,
                           device::LOAD_128BIT_T,
                           SAMPLE_FILTER_T>;
    }
    THROW("Result buffer size %u larger than max buffer size %u", result_buffer_size, 256);
  }
};

// Host entry point for the multi-CTA search: picks the kernel instantiation, raises
// its dynamic shared-memory limit, resets the visited hash tables, and launches a
// (num_cta_per_query x num_queries) grid on `stream`. Asynchronous with respect to
// the host.
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          typename DATA_T,
          typename INDEX_T,
          typename DISTANCE_T,
          typename SAMPLE_FILTER_T>
void select_and_run(  // raft::resources const& res,
  raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
  raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
  INDEX_T* const topk_indices_ptr,       // [num_queries, topk]
  DISTANCE_T* const topk_distances_ptr,  // [num_queries, topk]
  const DATA_T* const queries_ptr,       // [num_queries, dataset_dim]
  const uint32_t num_queries,
  const INDEX_T* dev_seed_ptr,              // [num_queries, num_seeds]
  uint32_t* const num_executed_iterations,  // [num_queries,]
  uint32_t topk,
  // multi_cta_search (params struct)
  uint32_t block_size,  //
  uint32_t result_buffer_size,
  uint32_t smem_size,
  int64_t hash_bitlen,
  INDEX_T* hashmap_ptr,
  uint32_t num_cta_per_query,
  uint32_t num_random_samplings,
  uint64_t rand_xor_mask,
  uint32_t num_seeds,
  size_t itopk_size,
  size_t search_width,
  size_t min_iterations,
  size_t max_iterations,
  SAMPLE_FILTER_T sample_filter,
  cudaStream_t stream)
{
  auto kernel =
    search_kernel_config<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::
      choose_buffer_size(result_buffer_size, block_size);

  RAFT_CUDA_TRY(
    cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
  // Initialize hash table
  const uint32_t hash_size = hashmap::get_size(hash_bitlen);
  set_value_batch(
    hashmap_ptr, hash_size, utils::get_max_value<INDEX_T>(), hash_size, num_queries, stream);

  dim3 block_dims(block_size, 1, 1);
  dim3 grid_dims(num_cta_per_query, num_queries, 1);
  RAFT_LOG_DEBUG("Launching kernel with %u threads, (%u, %u) blocks %u smem",
                 block_size,
                 num_cta_per_query,
                 num_queries,
                 smem_size);
  kernel<<<grid_dims, block_dims, smem_size, stream>>>(topk_indices_ptr,
                                                       topk_distances_ptr,
                                                       dataset.data_handle(),
                                                       dataset.extent(1),
                                                       dataset.extent(0),
                                                       dataset.stride(0),
                                                       queries_ptr,
                                                       graph.data_handle(),
                                                       graph.extent(1),
                                                       num_random_samplings,
                                                       rand_xor_mask,
                                                       dev_seed_ptr,
                                                       num_seeds,
                                                       hashmap_ptr,
                                                       hash_bitlen,
                                                       itopk_size,
                                                       search_width,
                                                       min_iterations,
                                                       max_iterations,
                                                       num_executed_iterations,
                                                       sample_filter);
}

}  // namespace multi_cta_search
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/utils.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <cfloat>
#include <cstdint>
#include <cuda.h>
#include <cuda_fp16.h>
#include <raft/core/detail/macros.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/util/integer_utils.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <type_traits>

namespace raft::neighbors::cagra::detail {
namespace utils {

// Map a C++ element type to the matching cudaDataType_t enumerator.
// Only the specializations below are defined; other types fail to link.
template <class DATA_T>
inline cudaDataType_t get_cuda_data_type();
template <>
inline cudaDataType_t get_cuda_data_type<float>()
{
  return CUDA_R_32F;
}
template <>
inline cudaDataType_t get_cuda_data_type<half>()
{
  return CUDA_R_16F;
}
template <>
inline cudaDataType_t get_cuda_data_type<int8_t>()
{
  return CUDA_R_8I;
}
template <>
inline cudaDataType_t get_cuda_data_type<uint8_t>()
{
  return CUDA_R_8U;
}
template <>
inline cudaDataType_t get_cuda_data_type<uint32_t>()
{
  return CUDA_R_32U;
}
template <>
inline cudaDataType_t get_cuda_data_type<uint64_t>()
{
  return CUDA_R_64U;
}

// Compile-time size (in bytes) of the listed types; usable in both host and
// device code, unlike sizeof in some constant-expression contexts here.
template <class T>
constexpr unsigned size_of();
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<std::int8_t>()
{
  return 1;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<std::uint8_t>()
{
  return 1;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<std::uint16_t>()
{
  return 2;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<std::uint32_t>()
{
  return 4;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<std::uint64_t>()
{
  return 8;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<uint4>()
{
  return 16;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<ulonglong4>()
{
  return 32;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<float>()
{
  return 4;
}
template <>
_RAFT_HOST_DEVICE constexpr unsigned size_of<half>()
{
  return 2;
}

// max values for data types
// Bit-pattern/FP pun used to build floating-point constants from raw bits.
template <class BS_T, class FP_T>
union fp_conv {
  BS_T bs;
  FP_T fp;
};
// "Maximum" sentinel value per type, used e.g. to pad sort buffers and mark
// invalid hash-table / index slots.
template <class T>
_RAFT_HOST_DEVICE inline T get_max_value();
template <>
_RAFT_HOST_DEVICE inline float get_max_value<float>()
{
  return FLT_MAX;
};
template <>
_RAFT_HOST_DEVICE inline half get_max_value<half>()
{
  // NOTE(review): 0x7aff is a large finite half but not the largest (0x7bff);
  // presumably chosen to leave headroom — confirm intent before changing.
  return fp_conv<std::uint16_t, half>{.bs = 0x7aff}.fp;
};
template <>
_RAFT_HOST_DEVICE inline std::uint32_t get_max_value<std::uint32_t>()
{
  return 0xffffffffu;
};
template <>
_RAFT_HOST_DEVICE inline std::uint64_t get_max_value<std::uint64_t>()
{
  return 0xfffffffffffffffflu;
};

// constexpr_max<A, B>::value == max(A, B), selected at compile time via the
// partial specialization below.
template <int A, int B, class = void>
struct constexpr_max {
  static const int value = A;
};

template <int A, int B>
struct constexpr_max<A, B, std::enable_if_t<(B > A), bool>> {
  static const int value = B;
};

// Mask with only the most significant bit of IdxT set; that bit is used
// elsewhere to tag an index as "already visited/used".
template <class IdxT>
struct gen_index_msb_1_mask {
  static constexpr IdxT value = static_cast<IdxT>(1) << (utils::size_of<IdxT>() * 8 - 1);
};
}  // namespace utils

/**
 * Utility to sync memory from a host_matrix_view to a device_matrix_view
 *
 * In certain situations (UVM/HMM/ATS) host memory might be directly accessible on the
 * device, and no extra allocations need to be performed. This class checks
 * if the host_matrix_view is already accessible on the device, and only creates device
 * memory and copies over if necessary. In memory limited situations this is preferable
 * to having both a host and device copy
 * TODO: once the mdbuffer changes here https://github.com/wphicks/raft/blob/fea-mdbuffer
 * have been merged, we should remove this class and switch over to using mdbuffer for this
 */
template <typename T, typename IdxT>
class device_matrix_view_from_host {
 public:
  // Queries cudaPointerGetAttributes for a device-visible alias of the host
  // buffer; only when none exists does it allocate and copy (async on the
  // resource's stream — the copy is not synchronized here).
  device_matrix_view_from_host(raft::resources const& res, host_matrix_view<T, IdxT> host_view)
    : host_view_(host_view)
  {
    cudaPointerAttributes attr;
    RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, host_view.data_handle()));
    device_ptr = reinterpret_cast<T*>(attr.devicePointer);
    if (device_ptr == NULL) {
      // allocate memory and copy over
      device_mem_.emplace(
        raft::make_device_matrix<T, IdxT>(res, host_view.extent(0), host_view.extent(1)));
      raft::copy(device_mem_->data_handle(),
                 host_view.data_handle(),
                 host_view.extent(0) * host_view.extent(1),
                 resource::get_cuda_stream(res));
      device_ptr = device_mem_->data_handle();
    }
  }

  // Device-side view of the (possibly copied) data, same extents as the host view.
  device_matrix_view<T, IdxT> view()
  {
    return make_device_matrix_view<T, IdxT>(device_ptr, host_view_.extent(0), host_view_.extent(1));
  }

  T* data_handle() { return device_ptr; }

  // True iff a separate device copy was allocated (false when the host memory
  // was already device-accessible).
  bool allocated_memory() const { return device_mem_.has_value(); }

 private:
  std::optional<device_matrix<T, IdxT>> device_mem_;
  host_matrix_view<T, IdxT> host_view_;
  T* device_ptr;
};

/**
 * Utility to sync memory from a device_matrix_view to a host_matrix_view
 *
 * In certain situations (UVM/HMM/ATS) device memory might be directly accessible on the
 * host, and no extra allocations need to be performed. This class checks
 * if the device_matrix_view is already accessible on the host, and only creates host
 * memory and copies over if necessary. In memory limited situations this is preferable
 * to having both a host and device copy
 * TODO: once the mdbuffer changes here https://github.com/wphicks/raft/blob/fea-mdbuffer
 * have been merged, we should remove this class and switch over to using mdbuffer for this
 */
template <typename T, typename IdxT>
class host_matrix_view_from_device {
 public:
  // Mirror of device_matrix_view_from_host in the opposite direction: uses the
  // hostPointer alias when available, otherwise allocates host memory and copies
  // (async on the resource's stream — caller must sync before reading).
  host_matrix_view_from_device(raft::resources const& res, device_matrix_view<T, IdxT> device_view)
    : device_view_(device_view)
  {
    cudaPointerAttributes attr;
    RAFT_CUDA_TRY(cudaPointerGetAttributes(&attr, device_view.data_handle()));
    host_ptr = reinterpret_cast<T*>(attr.hostPointer);
    if (host_ptr == NULL) {
      // allocate memory and copy over
      host_mem_.emplace(
        raft::make_host_matrix<T, IdxT>(device_view.extent(0), device_view.extent(1)));
      raft::copy(host_mem_->data_handle(),
                 device_view.data_handle(),
                 device_view.extent(0) * device_view.extent(1),
                 resource::get_cuda_stream(res));
      host_ptr = host_mem_->data_handle();
    }
  }

  // Host-side view of the (possibly copied) data, same extents as the device view.
  host_matrix_view<T, IdxT> view()
  {
    return make_host_matrix_view<T, IdxT>(host_ptr, device_view_.extent(0), device_view_.extent(1));
  }

  T* data_handle() { return host_ptr; }

  // True iff a separate host copy was allocated.
  bool allocated_memory() const { return host_mem_.has_value(); }

 private:
  std::optional<host_matrix<T, IdxT>> host_mem_;
  device_matrix_view<T, IdxT> device_view_;
  T* host_ptr;
};

// Copy matrix src to dst. pad rows with 0 if necessary to make them 16 byte aligned.
// Copy `src` into the device matrix `dst`, padding each row with zeros so the row
// pitch is a multiple of 16 bytes. `dst` is (re)allocated from `mr` when its shape
// does not already match; all copies/memsets are asynchronous on the resource's
// stream (no sync is performed here).
template <typename T, typename data_accessor>
void copy_with_padding(raft::resources const& res,
                       raft::device_matrix<T, int64_t, row_major>& dst,
                       mdspan<const T, matrix_extent<int64_t>, row_major, data_accessor> src,
                       rmm::mr::device_memory_resource* mr = nullptr)
{
  if (!mr) { mr = rmm::mr::get_current_device_resource(); }
  // Row length (in elements) rounded up so each row occupies a multiple of 16 bytes.
  size_t padded_dim = round_up_safe<size_t>(src.extent(1) * sizeof(T), 16) / sizeof(T);

  if ((dst.extent(0) != src.extent(0)) || (static_cast<size_t>(dst.extent(1)) != padded_dim)) {
    // clear existing memory before allocating to prevent OOM errors on large datasets
    if (dst.size()) { dst = make_device_matrix<T, int64_t>(res, 0, 0); }
    dst = make_device_mdarray<T>(res, mr, make_extents<int64_t>(src.extent(0), padded_dim));
  }
  if (dst.extent(1) == src.extent(1)) {
    // No padding needed: one contiguous copy.
    raft::copy(dst.data_handle(), src.data_handle(), src.size(), resource::get_cuda_stream(res));
  } else {
    // copy with padding
    // Zero the whole destination first so the padding columns read as 0, then do a
    // strided 2D copy of the real row contents (src pitch -> padded dst pitch).
    RAFT_CUDA_TRY(cudaMemsetAsync(
      dst.data_handle(), 0, dst.size() * sizeof(T), resource::get_cuda_stream(res)));
    RAFT_CUDA_TRY(cudaMemcpy2DAsync(dst.data_handle(),
                                    sizeof(T) * dst.extent(1),
                                    src.data_handle(),
                                    sizeof(T) * src.extent(1),
                                    sizeof(T) * src.extent(1),
                                    src.extent(0),
                                    cudaMemcpyDefault,
                                    resource::get_cuda_stream(res)));
  }
}
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/hashmap.hpp
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include "utils.hpp"
#include <cstdint>
#include <raft/core/detail/macros.hpp>
#include <raft/util/device_atomics.cuh>

// #pragma GCC diagnostic push
// #pragma GCC diagnostic ignored
// #pragma GCC diagnostic pop

// Device-side open-addressing hash *set* used to deduplicate visited node ids
// during graph traversal. A slot holding all-ones (utils::get_max_value<IdxT>())
// is empty; insertion is lock-free via atomicCAS.
namespace raft::neighbors::cagra::detail {
namespace hashmap {

// Table size in slots for a given bit length.
_RAFT_HOST_DEVICE inline uint32_t get_size(const uint32_t bitlen) { return 1U << bitlen; }

// Reset every slot to the empty sentinel. Cooperative: threads FIRST_TID..blockDim.x-1
// of the block share the work; threads below FIRST_TID are excluded (e.g. reserved for
// other duties by the caller).
template <class IdxT>
_RAFT_DEVICE inline void init(IdxT* const table, const unsigned bitlen, unsigned FIRST_TID = 0)
{
  if (threadIdx.x < FIRST_TID) return;
  for (unsigned i = threadIdx.x - FIRST_TID; i < get_size(bitlen); i += blockDim.x - FIRST_TID) {
    table[i] = utils::get_max_value<IdxT>();
  }
}

// Insert `key` into the table.
// Returns 1 if the key was newly inserted, 0 if it was already present (or the
// probe wrapped the whole table without finding a free slot, i.e. table full).
template <class IdxT>
_RAFT_DEVICE inline uint32_t insert(IdxT* const table, const uint32_t bitlen, const IdxT key)
{
  // Open addressing is used for collision resolution
  const uint32_t size     = get_size(bitlen);
  const uint32_t bit_mask = size - 1;
#if 1
  // Linear probing
  // Start position mixes the high bits of the key into the low bits.
  IdxT index                = (key ^ (key >> bitlen)) & bit_mask;
  constexpr uint32_t stride = 1;
#else
  // Double hashing
  uint32_t index        = key & bit_mask;
  const uint32_t stride = (key >> bitlen) * 2 + 1;
#endif
  for (unsigned i = 0; i < size; i++) {
    // Atomically claim the slot iff it still holds the empty sentinel (~0).
    const IdxT old = atomicCAS(&table[index], ~static_cast<IdxT>(0), key);
    if (old == ~static_cast<IdxT>(0)) {
      return 1;
    } else if (old == key) {
      return 0;
    }
    index = (index + stride) & bit_mask;
  }
  return 0;
}

// Team variant: only the first lane of each TEAM_SIZE-lane group performs the
// insertion; the result is then OR-reduced across the group with XOR shuffles so
// every team member returns the same value.
// NOTE(review): the 0xffffffff shuffle mask assumes all 32 lanes of the warp are
// active at this call site — confirm callers guarantee full-warp convergence.
template <unsigned TEAM_SIZE, class IdxT>
_RAFT_DEVICE inline uint32_t insert(IdxT* const table, const uint32_t bitlen, const IdxT key)
{
  IdxT ret = 0;
  if (threadIdx.x % TEAM_SIZE == 0) { ret = insert(table, bitlen, key); }
  for (unsigned offset = 1; offset < TEAM_SIZE; offset *= 2) {
    ret |= __shfl_xor_sync(0xffffffff, ret, offset);
  }
  return ret;
}

}  // namespace hashmap
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/cagra_build.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "../../cagra_types.hpp" #include "graph_core.cuh" #include <chrono> #include <cstdio> #include <raft/core/resource/cuda_stream.hpp> #include <vector> #include <raft/core/device_mdarray.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/host_device_accessor.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/host_mdspan.hpp> #include <raft/core/logger.hpp> #include <raft/core/resource/detail/device_memory_resource.hpp> #include <raft/distance/distance_types.hpp> #include <raft/spatial/knn/detail/ann_utils.cuh> #include <raft/neighbors/detail/refine.cuh> #include <raft/neighbors/ivf_pq.cuh> #include <raft/neighbors/ivf_pq_types.hpp> #include <raft/neighbors/nn_descent.cuh> #include <raft/neighbors/refine.cuh> namespace raft::neighbors::cagra::detail { template <typename DataT, typename IdxT, typename accessor> void build_knn_graph(raft::resources const& res, mdspan<const DataT, matrix_extent<int64_t>, row_major, accessor> dataset, raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph, std::optional<float> refine_rate = std::nullopt, std::optional<ivf_pq::index_params> build_params = std::nullopt, std::optional<ivf_pq::search_params> search_params = std::nullopt) { resource::detail::warn_non_pool_workspace(res, "raft::neighbors::cagra::build"); RAFT_EXPECTS(!build_params || build_params->metric == distance::DistanceType::L2Expanded, 
"Currently only L2Expanded metric is supported"); uint32_t node_degree = knn_graph.extent(1); common::nvtx::range<common::nvtx::domain::raft> fun_scope("cagra::build_graph(%zu, %zu, %u)", size_t(dataset.extent(0)), size_t(dataset.extent(1)), node_degree); if (!build_params) { build_params = ivf_pq::index_params{}; build_params->n_lists = dataset.extent(0) < 4 * 2500 ? 4 : (uint32_t)(dataset.extent(0) / 2500); build_params->pq_dim = raft::Pow2<8>::roundUp(dataset.extent(1) / 2); build_params->pq_bits = 8; build_params->kmeans_trainset_fraction = dataset.extent(0) < 10000 ? 1 : 10; build_params->kmeans_n_iters = 25; build_params->add_data_on_build = true; } // Make model name const std::string model_name = [&]() { char model_name[1024]; sprintf(model_name, "%s-%lux%lu.cluster_%u.pq_%u.%ubit.itr_%u.metric_%u.pqcenter_%u", "IVF-PQ", static_cast<size_t>(dataset.extent(0)), static_cast<size_t>(dataset.extent(1)), build_params->n_lists, build_params->pq_dim, build_params->pq_bits, build_params->kmeans_n_iters, build_params->metric, static_cast<uint32_t>(build_params->codebook_kind)); return std::string(model_name); }(); RAFT_LOG_DEBUG("# Building IVF-PQ index %s", model_name.c_str()); auto index = ivf_pq::build<DataT, int64_t>( res, *build_params, dataset.data_handle(), dataset.extent(0), dataset.extent(1)); // // search top (k + 1) neighbors // if (!search_params) { search_params = ivf_pq::search_params{}; search_params->n_probes = std::min<IdxT>(dataset.extent(1) * 2, build_params->n_lists); search_params->lut_dtype = CUDA_R_8U; search_params->internal_distance_dtype = CUDA_R_32F; } const auto top_k = node_degree + 1; uint32_t gpu_top_k = node_degree * refine_rate.value_or(2.0f); gpu_top_k = std::min<IdxT>(std::max(gpu_top_k, top_k), dataset.extent(0)); const auto num_queries = dataset.extent(0); const auto max_batch_size = 1024; RAFT_LOG_DEBUG( "IVF-PQ search node_degree: %d, top_k: %d, gpu_top_k: %d, max_batch_size:: %d, n_probes: %u", node_degree, top_k, gpu_top_k, 
max_batch_size, search_params->n_probes); auto distances = raft::make_device_matrix<float, int64_t>(res, max_batch_size, gpu_top_k); auto neighbors = raft::make_device_matrix<int64_t, int64_t>(res, max_batch_size, gpu_top_k); auto refined_distances = raft::make_device_matrix<float, int64_t>(res, max_batch_size, top_k); auto refined_neighbors = raft::make_device_matrix<int64_t, int64_t>(res, max_batch_size, top_k); auto neighbors_host = raft::make_host_matrix<int64_t, int64_t>(max_batch_size, gpu_top_k); auto queries_host = raft::make_host_matrix<DataT, int64_t>(max_batch_size, dataset.extent(1)); auto refined_neighbors_host = raft::make_host_matrix<int64_t, int64_t>(max_batch_size, top_k); auto refined_distances_host = raft::make_host_matrix<float, int64_t>(max_batch_size, top_k); // TODO(tfeher): batched search with multiple GPUs std::size_t num_self_included = 0; bool first = true; const auto start_clock = std::chrono::system_clock::now(); rmm::mr::device_memory_resource* device_memory = nullptr; auto pool_guard = raft::get_pool_memory_resource(device_memory, 1024 * 1024); if (pool_guard) { RAFT_LOG_DEBUG("ivf_pq using pool memory resource"); } raft::spatial::knn::detail::utils::batch_load_iterator<DataT> vec_batches( dataset.data_handle(), dataset.extent(0), dataset.extent(1), max_batch_size, resource::get_cuda_stream(res), device_memory); size_t next_report_offset = 0; size_t d_report_offset = dataset.extent(0) / 100; // Report progress in 1% steps. for (const auto& batch : vec_batches) { // Map int64_t to uint32_t because ivf_pq requires the latter. 
// Wrap the batch buffers in 32-bit-indexed device views for ivf_pq::search.
// TODO(tfeher): remove this mapping once ivf_pq accepts mdspan with int64_t index type
auto queries_view = raft::make_device_matrix_view<const DataT, uint32_t>(
  batch.data(), batch.size(), batch.row_width());
auto neighbors_view = make_device_matrix_view<int64_t, uint32_t>(
  neighbors.data_handle(), batch.size(), neighbors.extent(1));
auto distances_view = make_device_matrix_view<float, uint32_t>(
  distances.data_handle(), batch.size(), distances.extent(1));

ivf_pq::search(res, *search_params, index, queries_view, neighbors_view, distances_view);

if constexpr (is_host_mdspan_v<decltype(dataset)>) {
  // Host-resident dataset: copy the IVF-PQ candidates and queries back to host
  // and run the exact-distance refinement on the CPU.
  raft::copy(neighbors_host.data_handle(),
             neighbors.data_handle(),
             neighbors_view.size(),
             resource::get_cuda_stream(res));
  raft::copy(queries_host.data_handle(),
             batch.data(),
             queries_view.size(),
             resource::get_cuda_stream(res));
  auto queries_host_view = make_host_matrix_view<const DataT, int64_t>(
    queries_host.data_handle(), batch.size(), batch.row_width());
  auto neighbors_host_view = make_host_matrix_view<const int64_t, int64_t>(
    neighbors_host.data_handle(), batch.size(), neighbors.extent(1));
  auto refined_neighbors_host_view = make_host_matrix_view<int64_t, int64_t>(
    refined_neighbors_host.data_handle(), batch.size(), top_k);
  auto refined_distances_host_view = make_host_matrix_view<float, int64_t>(
    refined_distances_host.data_handle(), batch.size(), top_k);
  // The async copies above must land before refine_host reads the host buffers.
  resource::sync_stream(res);

  raft::neighbors::detail::refine_host<int64_t, DataT, float, int64_t>(
    dataset,
    queries_host_view,
    neighbors_host_view,
    refined_neighbors_host_view,
    refined_distances_host_view,
    build_params->metric);
} else {
  // Device-resident dataset: refine on the GPU and copy only the final
  // top_k neighbor ids to the host.
  auto neighbor_candidates_view = make_device_matrix_view<const int64_t, uint64_t>(
    neighbors.data_handle(), batch.size(), gpu_top_k);
  auto refined_neighbors_view = make_device_matrix_view<int64_t, int64_t>(
    refined_neighbors.data_handle(), batch.size(), top_k);
  auto refined_distances_view = make_device_matrix_view<float, int64_t>(
    refined_distances.data_handle(), batch.size(), top_k);

  auto dataset_view = make_device_matrix_view<const DataT, int64_t>(
    dataset.data_handle(), dataset.extent(0), dataset.extent(1));
  raft::neighbors::detail::refine_device<int64_t, DataT, float, int64_t>(
    res,
    dataset_view,
    queries_view,
    neighbor_candidates_view,
    refined_neighbors_view,
    refined_distances_view,
    build_params->metric);
  raft::copy(refined_neighbors_host.data_handle(),
             refined_neighbors_view.data_handle(),
             refined_neighbors_view.size(),
             resource::get_cuda_stream(res));
  resource::sync_stream(res);
}

// omit itself & write out
// TODO(tfeher): do this in parallel with GPU processing of next batch
for (std::size_t i = 0; i < batch.size(); i++) {
  size_t vec_idx = i + batch.offset();
  for (std::size_t j = 0, num_added = 0; j < top_k && num_added < node_degree; j++) {
    const auto v = refined_neighbors_host(i, j);
    // Skip self-edges; count them so the progress log can report the rate.
    if (static_cast<size_t>(v) == vec_idx) {
      num_self_included++;
      continue;
    }
    knn_graph(vec_idx, num_added) = v;
    num_added++;
  }
}

size_t num_queries_done = batch.offset() + batch.size();
const auto end_clock    = std::chrono::system_clock::now();
// Periodic progress report (throughput, ETA, self-inclusion rate).
if (batch.offset() > next_report_offset) {
  next_report_offset += d_report_offset;
  const auto time =
    std::chrono::duration_cast<std::chrono::microseconds>(end_clock - start_clock).count() * 1e-6;
  const auto throughput = num_queries_done / time;

  RAFT_LOG_DEBUG(
    "# Search %12lu / %12lu (%3.2f %%), %e queries/sec, %.2f minutes ETA, self included = "
    "%3.2f %% \r",
    num_queries_done,
    dataset.extent(0),
    num_queries_done / static_cast<double>(dataset.extent(0)) * 100,
    throughput,
    (num_queries - num_queries_done) / throughput / 60,
    static_cast<double>(num_self_included) / num_queries_done * 100.);
}
first = false;
}

if (!first) RAFT_LOG_DEBUG("# Finished building kNN graph");
}

/**
 * Build the intermediate kNN graph using nn-descent instead of IVF-PQ.
 *
 * @param res raft resource handle
 * @param dataset host- or device-resident dataset [n_rows, dim]
 * @param knn_graph output host matrix receiving the neighbor ids
 * @param build_params nn-descent build parameters
 */
template <typename DataT, typename IdxT, typename accessor>
void build_knn_graph(raft::resources const& res,
                     mdspan<const DataT, matrix_extent<int64_t>, row_major, accessor> dataset,
                     raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph,
                     experimental::nn_descent::index_params build_params)
{
  // nn-descent writes its result directly into knn_graph via this index wrapper.
  auto nn_descent_idx = experimental::nn_descent::index<IdxT>(res, knn_graph);
  experimental::nn_descent::build<DataT, IdxT>(res, build_params, dataset, nn_descent_idx);

  using internal_IdxT = typename std::make_unsigned<IdxT>::type;
  using g_accessor    = typename decltype(nn_descent_idx.graph())::accessor_type;
  using g_accessor_internal =
    host_device_accessor<std::experimental::default_accessor<internal_IdxT>, g_accessor::mem_type>;

  // Reinterpret the graph's indices as unsigned for sort_knn_graph.
  auto knn_graph_internal =
    mdspan<internal_IdxT, matrix_extent<int64_t>, row_major, g_accessor_internal>(
      reinterpret_cast<internal_IdxT*>(nn_descent_idx.graph().data_handle()),
      nn_descent_idx.graph().extent(0),
      nn_descent_idx.graph().extent(1));

  graph::sort_knn_graph(res, dataset, knn_graph_internal);
}

/**
 * Prune/reorder the intermediate kNN graph into the final CAGRA graph.
 *
 * @param res raft resource handle
 * @param knn_graph intermediate graph [n_rows, intermediate_degree]
 * @param new_graph output graph [n_rows, graph_degree]
 */
template <typename IdxT = uint32_t,
          typename g_accessor =
            host_device_accessor<std::experimental::default_accessor<IdxT>, memory_type::host>>
void optimize(raft::resources const& res,
              mdspan<IdxT, matrix_extent<int64_t>, row_major, g_accessor> knn_graph,
              raft::host_matrix_view<IdxT, int64_t, row_major> new_graph)
{
  using internal_IdxT = typename std::make_unsigned<IdxT>::type;

  // graph::optimize operates on unsigned indices; reinterpret both views.
  auto new_graph_internal = raft::make_host_matrix_view<internal_IdxT, int64_t>(
    reinterpret_cast<internal_IdxT*>(new_graph.data_handle()),
    new_graph.extent(0),
    new_graph.extent(1));

  using g_accessor_internal =
    host_device_accessor<std::experimental::default_accessor<internal_IdxT>, memory_type::host>;
  auto knn_graph_internal =
    mdspan<internal_IdxT, matrix_extent<int64_t>, row_major, g_accessor_internal>(
      reinterpret_cast<internal_IdxT*>(knn_graph.data_handle()),
      knn_graph.extent(0),
      knn_graph.extent(1));

  cagra::detail::graph::optimize(res, knn_graph_internal, new_graph_internal);
}

/**
 * Build a CAGRA index: construct an intermediate kNN graph (IVF-PQ or
 * nn-descent, per params.build_algo), optimize it down to graph_degree,
 * and assemble the index.
 *
 * @param res raft resource handle
 * @param params CAGRA index build parameters
 * @param dataset host- or device-resident dataset [n_rows, dim]
 * @param nn_descent_params optional nn-descent parameters (defaulted if unset)
 * @param refine_rate optional IVF-PQ refinement rate
 * @param pq_build_params optional IVF-PQ build parameters
 * @param search_params optional IVF-PQ search parameters
 * @return the constructed CAGRA index
 */
template <typename T,
          typename IdxT = uint32_t,
          typename Accessor =
            host_device_accessor<std::experimental::default_accessor<T>, memory_type::host>>
index<T, IdxT> build(
  raft::resources const& res,
  const index_params& params,
  mdspan<const T, matrix_extent<int64_t>, row_major, Accessor> dataset,
  std::optional<experimental::nn_descent::index_params> nn_descent_params = std::nullopt,
  std::optional<float> refine_rate                                       = std::nullopt,
  std::optional<ivf_pq::index_params> pq_build_params                    = std::nullopt,
  std::optional<ivf_pq::search_params> search_params                     = std::nullopt)
{
  size_t intermediate_degree = params.intermediate_graph_degree;
  size_t graph_degree        = params.graph_degree;
  // Clamp degrees: intermediate < n_rows, and graph_degree <= intermediate.
  if (intermediate_degree >= static_cast<size_t>(dataset.extent(0))) {
    RAFT_LOG_WARN(
      "Intermediate graph degree cannot be larger than dataset size, reducing it to %lu",
      dataset.extent(0));
    intermediate_degree = dataset.extent(0) - 1;
  }
  if (intermediate_degree < graph_degree) {
    RAFT_LOG_WARN(
      "Graph degree (%lu) cannot be larger than intermediate graph degree (%lu), reducing "
      "graph_degree.",
      graph_degree,
      intermediate_degree);
    graph_degree = intermediate_degree;
  }

  // Held in an optional so the (large) intermediate graph can be freed early.
  std::optional<raft::host_matrix<IdxT, int64_t>> knn_graph(
    raft::make_host_matrix<IdxT, int64_t>(dataset.extent(0), intermediate_degree));

  if (params.build_algo == graph_build_algo::IVF_PQ) {
    build_knn_graph(res, dataset, knn_graph->view(), refine_rate, pq_build_params, search_params);
  } else {
    // Use nn-descent to build CAGRA knn graph
    if (!nn_descent_params) {
      nn_descent_params                            = experimental::nn_descent::index_params();
      nn_descent_params->graph_degree              = intermediate_degree;
      nn_descent_params->intermediate_graph_degree = 1.5 * intermediate_degree;
      nn_descent_params->max_iterations            = params.nn_descent_niter;
    }
    build_knn_graph<T, IdxT>(res, dataset, knn_graph->view(), *nn_descent_params);
  }

  auto cagra_graph = raft::make_host_matrix<IdxT, int64_t>(dataset.extent(0), graph_degree);

  optimize<IdxT>(res, knn_graph->view(), cagra_graph.view());

  // free intermediate graph before trying to create the index
  knn_graph.reset();

  // Construct an index from dataset and optimized knn graph.
  return index<T, IdxT>(res, params.metric, dataset, raft::make_const_mdspan(cagra_graph.view()));
}

}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_multi_cta_kernel-ext.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <raft/neighbors/sample_filter_types.hpp>  // none_cagra_sample_filter
#include <raft/util/raft_explicit.hpp>             // RAFT_EXPLICIT

namespace raft::neighbors::cagra::detail {
namespace multi_cta_search {

#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY

// Declaration only (RAFT_EXPLICIT): the definitions live in the translation
// units that compile the combinations listed via instantiate_kernel_selection
// below; any other combination is a link error by design.
template <unsigned TEAM_SIZE,
          unsigned MAX_DATASET_DIM,
          class DATA_T,
          class INDEX_T,
          class DISTANCE_T,
          class SAMPLE_FILTER_T>
void select_and_run(raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
                    raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
                    INDEX_T* const topk_indices_ptr,
                    DISTANCE_T* const topk_distances_ptr,
                    const DATA_T* const queries_ptr,
                    const uint32_t num_queries,
                    const INDEX_T* dev_seed_ptr,
                    uint32_t* const num_executed_iterations,
                    uint32_t topk,
                    uint32_t block_size,
                    uint32_t result_buffer_size,
                    uint32_t smem_size,
                    int64_t hash_bitlen,
                    INDEX_T* hashmap_ptr,
                    uint32_t num_cta_per_query,
                    uint32_t num_random_samplings,
                    uint64_t rand_xor_mask,
                    uint32_t num_seeds,
                    size_t itopk_size,
                    size_t search_width,
                    size_t min_iterations,
                    size_t max_iterations,
                    SAMPLE_FILTER_T sample_filter,
                    cudaStream_t stream) RAFT_EXPLICIT;

#endif  // RAFT_EXPLICIT_INSTANTIATE_ONLY

// Declares an extern template instantiation for one parameter combination.
#define instantiate_kernel_selection(                                                       \
  TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T)                 \
  extern template void                                                                      \
  select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>( \
    raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,                 \
    raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,                      \
    INDEX_T* const topk_indices_ptr,                                                        \
    DISTANCE_T* const topk_distances_ptr,                                                   \
    const DATA_T* const queries_ptr,                                                        \
    const uint32_t num_queries,                                                             \
    const INDEX_T* dev_seed_ptr,                                                            \
    uint32_t* const num_executed_iterations,                                                \
    uint32_t topk,                                                                          \
    uint32_t block_size,                                                                    \
    uint32_t result_buffer_size,                                                            \
    uint32_t smem_size,                                                                     \
    int64_t hash_bitlen,                                                                    \
    INDEX_T* hashmap_ptr,                                                                   \
    uint32_t num_cta_per_query,                                                             \
    uint32_t num_random_samplings,                                                          \
    uint64_t rand_xor_mask,                                                                 \
    uint32_t num_seeds,                                                                     \
    size_t itopk_size,                                                                      \
    size_t search_width,                                                                    \
    size_t min_iterations,                                                                  \
    size_t max_iterations,                                                                  \
    SAMPLE_FILTER_T sample_filter,                                                          \
    cudaStream_t stream);

// Supported (TEAM_SIZE, MAX_DATASET_DIM) pairs for float/int8/uint8 data with
// uint32 indices and float distances, unfiltered.
instantiate_kernel_selection(
  32, 1024, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  8, 128, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  16, 256, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 512, float, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 1024, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  8, 128, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  16, 256, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 512, int8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 1024, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  8, 128, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  16, 256, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);
instantiate_kernel_selection(
  32, 512, uint8_t, uint32_t, float, raft::neighbors::filtering::none_cagra_sample_filter);

#undef instantiate_kernel_selection

}  // namespace multi_cta_search
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/cagra_serialize.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cstddef>
#include <cstdint>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/serialize.hpp>
#include <raft/neighbors/cagra_types.hpp>

#include <fstream>
#include <type_traits>

namespace raft::neighbors::cagra::detail {

// Bumped whenever the on-disk layout below changes; checked by deserialize.
constexpr int serialization_version = 3;

/**
 * Write the index to an output stream.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] res the raft resource handle
 * @param[in] os the output stream to write the index to
 * @param[in] index_ CAGRA index
 * @param[in] include_dataset whether to write the (de-padded) dataset as well
 *
 */
template <typename T, typename IdxT>
void serialize(raft::resources const& res,
               std::ostream& os,
               const index<T, IdxT>& index_,
               bool include_dataset)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("cagra::serialize");

  RAFT_LOG_DEBUG(
    "Saving CAGRA index, size %zu, dim %u", static_cast<size_t>(index_.size()), index_.dim());

  // Leading 4-byte tag: the (truncated) numpy dtype string of T.
  std::string dtype_string = raft::detail::numpy_serializer::get_numpy_dtype<T>().to_string();
  dtype_string.resize(4);
  os << dtype_string;

  serialize_scalar(res, os, serialization_version);
  serialize_scalar(res, os, index_.size());
  serialize_scalar(res, os, index_.dim());
  serialize_scalar(res, os, index_.graph_degree());
  serialize_scalar(res, os, index_.metric());
  serialize_mdspan(res, os, index_.graph());

  serialize_scalar(res, os, include_dataset);
  if (include_dataset) {
    auto dataset = index_.dataset();
    // Remove padding before saving the dataset: the device copy may be
    // strided (stride(0) >= extent(1)), the host copy is dense.
    auto host_dataset = make_host_matrix<T, int64_t>(dataset.extent(0), dataset.extent(1));
    RAFT_CUDA_TRY(cudaMemcpy2DAsync(host_dataset.data_handle(),
                                    sizeof(T) * host_dataset.extent(1),
                                    dataset.data_handle(),
                                    sizeof(T) * dataset.stride(0),
                                    sizeof(T) * host_dataset.extent(1),
                                    dataset.extent(0),
                                    cudaMemcpyDefault,
                                    resource::get_cuda_stream(res)));
    resource::sync_stream(res);
    serialize_mdspan(res, os, host_dataset.view());
  }
}

/**
 * Save the index to file (convenience wrapper over the stream overload).
 *
 * @param[in] res the raft resource handle
 * @param[in] filename the file name for saving the index
 * @param[in] index_ CAGRA index
 * @param[in] include_dataset whether to write the dataset as well
 */
template <typename T, typename IdxT>
void serialize(raft::resources const& res,
               const std::string& filename,
               const index<T, IdxT>& index_,
               bool include_dataset)
{
  std::ofstream of(filename, std::ios::out | std::ios::binary);
  if (!of) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  detail::serialize(res, of, index_, include_dataset);

  of.close();
  if (!of) { RAFT_FAIL("Error writing output %s", filename.c_str()); }
}

/**
 * Write the CAGRA graph + dataset as a single-layer hnswlib index.
 *
 * NOTE(review): the field widths below hard-code sizeof(index_t) == 4 and
 * 4-byte data elements (int8/uint8 rows are widened to int on write) —
 * confirm against the hnswlib reader before changing T/IdxT coverage.
 *
 * @param[in] res the raft resource handle
 * @param[in] os the output stream to write to
 * @param[in] index_ CAGRA index
 */
template <typename T, typename IdxT>
void serialize_to_hnswlib(raft::resources const& res,
                          std::ostream& os,
                          const index<T, IdxT>& index_)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("cagra::serialize_to_hnswlib");
  RAFT_LOG_DEBUG("Saving CAGRA index to hnswlib format, size %zu, dim %u",
                 static_cast<size_t>(index_.size()),
                 index_.dim());

  // offset_level_0
  std::size_t offset_level_0 = 0;
  os.write(reinterpret_cast<char*>(&offset_level_0), sizeof(std::size_t));
  // max_element
  std::size_t max_element = index_.size();
  os.write(reinterpret_cast<char*>(&max_element), sizeof(std::size_t));
  // curr_element_count
  std::size_t curr_element_count = index_.size();
  os.write(reinterpret_cast<char*>(&curr_element_count), sizeof(std::size_t));
  // Example:M: 16, dim = 128, data_t = float, index_t = uint32_t, list_size_type = uint32_t,
  // labeltype: size_t size_data_per_element_ = M * 2 * sizeof(index_t) + sizeof(list_size_type) +
  // dim * sizeof(data_t) + sizeof(labeltype)
  auto size_data_per_element =
    static_cast<std::size_t>(index_.graph_degree() * 4 + 4 + index_.dim() * 4 + 8);
  os.write(reinterpret_cast<char*>(&size_data_per_element), sizeof(std::size_t));
  // label_offset
  std::size_t label_offset = size_data_per_element - 8;
  os.write(reinterpret_cast<char*>(&label_offset), sizeof(std::size_t));
  // offset_data
  auto offset_data = static_cast<std::size_t>(index_.graph_degree() * 4 + 4);
  os.write(reinterpret_cast<char*>(&offset_data), sizeof(std::size_t));
  // max_level
  int max_level = 1;
  os.write(reinterpret_cast<char*>(&max_level), sizeof(int));
  // entrypoint_node: arbitrary valid node (middle of the dataset).
  auto entrypoint_node = static_cast<int>(index_.size() / 2);
  os.write(reinterpret_cast<char*>(&entrypoint_node), sizeof(int));
  // max_M
  auto max_M = static_cast<std::size_t>(index_.graph_degree() / 2);
  os.write(reinterpret_cast<char*>(&max_M), sizeof(std::size_t));
  // max_M0
  std::size_t max_M0 = index_.graph_degree();
  os.write(reinterpret_cast<char*>(&max_M0), sizeof(std::size_t));
  // M
  auto M = static_cast<std::size_t>(index_.graph_degree() / 2);
  os.write(reinterpret_cast<char*>(&M), sizeof(std::size_t));
  // mult, can be anything
  double mult = 0.42424242;
  os.write(reinterpret_cast<char*>(&mult), sizeof(double));
  // efConstruction, can be anything
  std::size_t efConstruction = 500;
  os.write(reinterpret_cast<char*>(&efConstruction), sizeof(std::size_t));

  auto dataset = index_.dataset();
  // Remove padding before saving the dataset
  auto host_dataset = make_host_matrix<T, int64_t>(dataset.extent(0), dataset.extent(1));
  RAFT_CUDA_TRY(cudaMemcpy2DAsync(host_dataset.data_handle(),
                                  sizeof(T) * host_dataset.extent(1),
                                  dataset.data_handle(),
                                  sizeof(T) * dataset.stride(0),
                                  sizeof(T) * host_dataset.extent(1),
                                  dataset.extent(0),
                                  cudaMemcpyDefault,
                                  resource::get_cuda_stream(res)));
  resource::sync_stream(res);

  auto graph = index_.graph();
  auto host_graph =
    raft::make_host_matrix<IdxT, int64_t, raft::row_major>(graph.extent(0), graph.extent(1));
  raft::copy(host_graph.data_handle(),
             graph.data_handle(),
             graph.size(),
             raft::resource::get_cuda_stream(res));
  resource::sync_stream(res);

  // Write one dataset and graph row at a time
  for (std::size_t i = 0; i < index_.size(); i++) {
    // Per-row neighbor-list length (always the full graph degree).
    auto graph_degree = static_cast<int>(index_.graph_degree());
    os.write(reinterpret_cast<char*>(&graph_degree), sizeof(int));

    for (std::size_t j = 0; j < index_.graph_degree(); ++j) {
      auto graph_elem = host_graph(i, j);
      os.write(reinterpret_cast<char*>(&graph_elem), sizeof(IdxT));
    }

    // NOTE(review): data_row is unused below (rows are read via host_dataset(i, j)).
    auto data_row = host_dataset.data_handle() + (index_.dim() * i);
    if constexpr (std::is_same_v<T, float>) {
      for (std::size_t j = 0; j < index_.dim(); ++j) {
        auto data_elem = host_dataset(i, j);
        os.write(reinterpret_cast<char*>(&data_elem), sizeof(T));
      }
    } else if constexpr (std::is_same_v<T, std::int8_t> or std::is_same_v<T, std::uint8_t>) {
      for (std::size_t j = 0; j < index_.dim(); ++j) {
        auto data_elem = static_cast<int>(host_dataset(i, j));
        os.write(reinterpret_cast<char*>(&data_elem), sizeof(int));
      }
    }

    // Label: the row index itself.
    os.write(reinterpret_cast<char*>(&i), sizeof(std::size_t));
  }

  // Empty upper-layer link lists (one 4-byte zero per element).
  for (std::size_t i = 0; i < index_.size(); i++) {
    // zeroes
    auto zero = 0;
    os.write(reinterpret_cast<char*>(&zero), sizeof(int));
  }
  // delete [] host_graph;
}

/**
 * Save the index as an hnswlib base-layer file (wrapper over the stream overload).
 *
 * @param[in] res the raft resource handle
 * @param[in] filename destination file name
 * @param[in] index_ CAGRA index
 */
template <typename T, typename IdxT>
void serialize_to_hnswlib(raft::resources const& res,
                          const std::string& filename,
                          const index<T, IdxT>& index_)
{
  std::ofstream of(filename, std::ios::out | std::ios::binary);
  if (!of) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  detail::serialize_to_hnswlib<T, IdxT>(res, of, index_);

  of.close();
  if (!of) { RAFT_FAIL("Error writing output %s", filename.c_str()); }
}

/** Load an index from an input stream.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] res the raft resource handle
 * @param[in] is the input stream holding a serialized index
 * @return the deserialized CAGRA index
 *
 */
template <typename T, typename IdxT>
auto deserialize(raft::resources const& res, std::istream& is) -> index<T, IdxT>
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("cagra::deserialize");

  // 4-byte dtype tag written by serialize; read but not validated here.
  char dtype_string[4];
  is.read(dtype_string, 4);

  auto ver = deserialize_scalar<int>(res, is);
  if (ver != serialization_version) {
    RAFT_FAIL("serialization version mismatch, expected %d, got %d ", serialization_version, ver);
  }
  auto n_rows       = deserialize_scalar<IdxT>(res, is);
  auto dim          = deserialize_scalar<std::uint32_t>(res, is);
  auto graph_degree = deserialize_scalar<std::uint32_t>(res, is);
  auto metric       = deserialize_scalar<raft::distance::DistanceType>(res, is);

  auto graph = raft::make_host_matrix<IdxT, int64_t>(n_rows, graph_degree);
  deserialize_mdspan(res, is, graph.view());

  bool has_dataset = deserialize_scalar<bool>(res, is);
  if (has_dataset) {
    auto dataset = raft::make_host_matrix<T, int64_t>(n_rows, dim);
    deserialize_mdspan(res, is, dataset.view());
    return index<T, IdxT>(
      res, metric, raft::make_const_mdspan(dataset.view()), raft::make_const_mdspan(graph.view()));
  } else {
    // create a new index with no dataset - the user must supply via update_dataset themselves
    // later (this avoids allocating GPU memory in the meantime)
    index<T, IdxT> idx(res, metric);
    idx.update_graph(res, raft::make_const_mdspan(graph.view()));
    return idx;
  }
}

/**
 * Load an index from file (wrapper over the stream overload).
 *
 * @param[in] res the raft resource handle
 * @param[in] filename the name of the file that stores the index
 * @return the deserialized CAGRA index
 */
template <typename T, typename IdxT>
auto deserialize(raft::resources const& res, const std::string& filename) -> index<T, IdxT>
{
  std::ifstream is(filename, std::ios::in | std::ios::binary);

  if (!is) { RAFT_FAIL("Cannot open file %s", filename.c_str()); }

  auto index = detail::deserialize<T, IdxT>(res, is);

  is.close();

  return index;
}
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/search_multi_kernel.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <raft/spatial/knn/detail/ann_utils.cuh> #include <algorithm> #include <cassert> #include <iostream> #include <memory> #include <numeric> #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/neighbors/sample_filter_types.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <vector> #include "compute_distance.hpp" #include "device_common.hpp" #include "fragment.hpp" #include "hashmap.hpp" #include "search_plan.cuh" #include "topk_for_cagra/topk_core.cuh" //todo replace with raft kernel #include "utils.hpp" #include <raft/core/logger.hpp> #include <raft/util/cuda_rt_essentials.hpp> #include <raft/util/cudart_utils.hpp> // RAFT_CUDA_TRY_NOT_THROW is used TODO(tfeher): consider moving this to cuda_rt_essentials.hpp namespace raft::neighbors::cagra::detail { namespace multi_kernel_search { template <class T> RAFT_KERNEL set_value_kernel(T* const dev_ptr, const T val) { *dev_ptr = val; } template <class T> RAFT_KERNEL set_value_kernel(T* const dev_ptr, const T val, const std::size_t count) { const auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= count) { return; } dev_ptr[tid] = val; } template <class T> void set_value(T* const dev_ptr, const T val, cudaStream_t cuda_stream) { set_value_kernel<T><<<1, 1, 0, cuda_stream>>>(dev_ptr, val); } template 
<class T> void set_value(T* const dev_ptr, const T val, const std::size_t count, cudaStream_t cuda_stream) { constexpr std::uint32_t block_size = 256; const auto grid_size = (count + block_size - 1) / block_size; set_value_kernel<T><<<grid_size, block_size, 0, cuda_stream>>>(dev_ptr, val, count); } template <class T> RAFT_KERNEL get_value_kernel(T* const host_ptr, const T* const dev_ptr) { *host_ptr = *dev_ptr; } template <class T> void get_value(T* const host_ptr, const T* const dev_ptr, cudaStream_t cuda_stream) { get_value_kernel<T><<<1, 1, 0, cuda_stream>>>(host_ptr, dev_ptr); } // MAX_DATASET_DIM : must equal to or greater than dataset_dim template <unsigned TEAM_SIZE, unsigned MAX_DATASET_DIM, class DATA_T, class DISTANCE_T, class INDEX_T> RAFT_KERNEL random_pickup_kernel(const DATA_T* const dataset_ptr, // [dataset_size, dataset_dim] const std::size_t dataset_dim, const std::size_t dataset_size, const std::size_t dataset_ld, const DATA_T* const queries_ptr, // [num_queries, dataset_dim] const std::size_t num_pickup, const unsigned num_distilation, const uint64_t rand_xor_mask, const INDEX_T* seed_ptr, // [num_queries, num_seeds] const uint32_t num_seeds, INDEX_T* const result_indices_ptr, // [num_queries, ldr] DISTANCE_T* const result_distances_ptr, // [num_queries, ldr] const std::uint32_t ldr, // (*) ldr >= num_pickup INDEX_T* const visited_hashmap_ptr, // [num_queries, 1 << bitlen] const std::uint32_t hash_bitlen) { const auto ldb = hashmap::get_size(hash_bitlen); const auto global_team_index = (blockIdx.x * blockDim.x + threadIdx.x) / TEAM_SIZE; const uint32_t query_id = blockIdx.y; if (global_team_index >= num_pickup) { return; } // Load a query device::fragment<MAX_DATASET_DIM, DATA_T, TEAM_SIZE> query_frag; device::load_vector_sync(query_frag, queries_ptr + query_id * dataset_dim, dataset_dim); INDEX_T best_index_team_local; DISTANCE_T best_norm2_team_local = utils::get_max_value<DISTANCE_T>(); for (unsigned i = 0; i < num_distilation; i++) { INDEX_T 
seed_index; if (seed_ptr && (global_team_index < num_seeds)) { seed_index = seed_ptr[global_team_index + (num_seeds * query_id)]; } else { // Chose a seed node randomly seed_index = device::xorshift64((global_team_index ^ rand_xor_mask) * (i + 1)) % dataset_size; } device::fragment<MAX_DATASET_DIM, DATA_T, TEAM_SIZE> random_data_frag; device::load_vector_sync( random_data_frag, dataset_ptr + (dataset_ld * seed_index), dataset_dim); // Compute the norm of two data const auto norm2 = device::norm2<DISTANCE_T>( query_frag, random_data_frag, static_cast<float>(1.0 / spatial::knn::detail::utils::config<DATA_T>::kDivisor) /*, scale*/ ); if (norm2 < best_norm2_team_local) { best_norm2_team_local = norm2; best_index_team_local = seed_index; } } const auto store_gmem_index = global_team_index + (ldr * query_id); if (threadIdx.x % TEAM_SIZE == 0) { if (hashmap::insert( visited_hashmap_ptr + (ldb * query_id), hash_bitlen, best_index_team_local)) { result_distances_ptr[store_gmem_index] = best_norm2_team_local; result_indices_ptr[store_gmem_index] = best_index_team_local; } else { result_distances_ptr[store_gmem_index] = utils::get_max_value<DISTANCE_T>(); result_indices_ptr[store_gmem_index] = utils::get_max_value<INDEX_T>(); } } } // MAX_DATASET_DIM : must be equal to or greater than dataset_dim template <unsigned TEAM_SIZE, unsigned MAX_DATASET_DIM, class DATA_T, class DISTANCE_T, class INDEX_T> void random_pickup(const DATA_T* const dataset_ptr, // [dataset_size, dataset_dim] const std::size_t dataset_dim, const std::size_t dataset_size, const std::size_t dataset_ld, const DATA_T* const queries_ptr, // [num_queries, dataset_dim] const std::size_t num_queries, const std::size_t num_pickup, const unsigned num_distilation, const uint64_t rand_xor_mask, const INDEX_T* seed_ptr, // [num_queries, num_seeds] const uint32_t num_seeds, INDEX_T* const result_indices_ptr, // [num_queries, ldr] DISTANCE_T* const result_distances_ptr, // [num_queries, ldr] const std::size_t ldr, // (*) 
ldr >= num_pickup INDEX_T* const visited_hashmap_ptr, // [num_queries, 1 << bitlen] const std::uint32_t hash_bitlen, cudaStream_t const cuda_stream = 0) { const auto block_size = 256u; const auto num_teams_per_threadblock = block_size / TEAM_SIZE; const dim3 grid_size((num_pickup + num_teams_per_threadblock - 1) / num_teams_per_threadblock, num_queries); random_pickup_kernel<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, DISTANCE_T, INDEX_T> <<<grid_size, block_size, 0, cuda_stream>>>(dataset_ptr, dataset_dim, dataset_size, dataset_ld, queries_ptr, num_pickup, num_distilation, rand_xor_mask, seed_ptr, num_seeds, result_indices_ptr, result_distances_ptr, ldr, visited_hashmap_ptr, hash_bitlen); } template <class INDEX_T> RAFT_KERNEL pickup_next_parents_kernel( INDEX_T* const parent_candidates_ptr, // [num_queries, lds] const std::size_t lds, // (*) lds >= parent_candidates_size const std::uint32_t parent_candidates_size, // INDEX_T* const visited_hashmap_ptr, // [num_queries, 1 << hash_bitlen] const std::size_t hash_bitlen, const std::uint32_t small_hash_bitlen, INDEX_T* const parent_list_ptr, // [num_queries, ldd] const std::size_t ldd, // (*) ldd >= parent_list_size const std::size_t parent_list_size, // std::uint32_t* const terminate_flag) { constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value; const std::size_t ldb = hashmap::get_size(hash_bitlen); const uint32_t query_id = blockIdx.x; if (threadIdx.x < 32) { // pickup next parents with single warp for (std::uint32_t i = threadIdx.x; i < parent_list_size; i += 32) { parent_list_ptr[i + (ldd * query_id)] = utils::get_max_value<INDEX_T>(); } std::uint32_t parent_candidates_size_max = parent_candidates_size; if (parent_candidates_size % 32) { parent_candidates_size_max += 32 - (parent_candidates_size % 32); } std::uint32_t num_new_parents = 0; for (std::uint32_t j = threadIdx.x; j < parent_candidates_size_max; j += 32) { INDEX_T index; int new_parent = 0; if (j < parent_candidates_size) { index = 
parent_candidates_ptr[j + (lds * query_id)]; if ((index & index_msb_1_mask) == 0) { // check most significant bit new_parent = 1; } } const std::uint32_t ballot_mask = __ballot_sync(0xffffffff, new_parent); if (new_parent) { const auto i = __popc(ballot_mask & ((1 << threadIdx.x) - 1)) + num_new_parents; if (i < parent_list_size) { parent_list_ptr[i + (ldd * query_id)] = j; parent_candidates_ptr[j + (lds * query_id)] |= index_msb_1_mask; // set most significant bit as used node } } num_new_parents += __popc(ballot_mask); if (num_new_parents >= parent_list_size) { break; } } if ((num_new_parents > 0) && (threadIdx.x == 0)) { *terminate_flag = 0; } } else if (small_hash_bitlen) { // reset small-hash hashmap::init(visited_hashmap_ptr + (ldb * query_id), hash_bitlen, 32); } if (small_hash_bitlen) { __syncthreads(); // insert internal-topk indices into small-hash for (unsigned i = threadIdx.x; i < parent_candidates_size; i += blockDim.x) { auto key = parent_candidates_ptr[i + (lds * query_id)] & ~index_msb_1_mask; // clear most significant bit hashmap::insert(visited_hashmap_ptr + (ldb * query_id), hash_bitlen, key); } } } template <class INDEX_T> void pickup_next_parents(INDEX_T* const parent_candidates_ptr, // [num_queries, lds] const std::size_t lds, // (*) lds >= parent_candidates_size const std::size_t parent_candidates_size, // const std::size_t num_queries, INDEX_T* const visited_hashmap_ptr, // [num_queries, 1 << hash_bitlen] const std::size_t hash_bitlen, const std::size_t small_hash_bitlen, INDEX_T* const parent_list_ptr, // [num_queries, ldd] const std::size_t ldd, // (*) ldd >= parent_list_size const std::size_t parent_list_size, // std::uint32_t* const terminate_flag, cudaStream_t cuda_stream = 0) { std::uint32_t block_size = 32; if (small_hash_bitlen) { block_size = 128; while (parent_candidates_size > block_size) { block_size *= 2; } block_size = min(block_size, (uint32_t)512); } pickup_next_parents_kernel<INDEX_T> <<<num_queries, block_size, 0, 
cuda_stream>>>(parent_candidates_ptr, lds, parent_candidates_size, visited_hashmap_ptr, hash_bitlen, small_hash_bitlen, parent_list_ptr, ldd, parent_list_size, terminate_flag); } template <unsigned TEAM_SIZE, unsigned MAX_DATASET_DIM, class DATA_T, class INDEX_T, class DISTANCE_T, class SAMPLE_FILTER_T> RAFT_KERNEL compute_distance_to_child_nodes_kernel( const INDEX_T* const parent_node_list, // [num_queries, search_width] INDEX_T* const parent_candidates_ptr, // [num_queries, search_width] DISTANCE_T* const parent_distance_ptr, // [num_queries, search_width] const std::size_t lds, const std::uint32_t search_width, const DATA_T* const dataset_ptr, // [dataset_size, data_dim] const std::uint32_t data_dim, const std::uint32_t dataset_size, const std::uint32_t dataset_ld, const INDEX_T* const neighbor_graph_ptr, // [dataset_size, graph_degree] const std::uint32_t graph_degree, const DATA_T* query_ptr, // [num_queries, data_dim] INDEX_T* const visited_hashmap_ptr, // [num_queries, 1 << hash_bitlen] const std::uint32_t hash_bitlen, INDEX_T* const result_indices_ptr, // [num_queries, ldd] DISTANCE_T* const result_distances_ptr, // [num_queries, ldd] const std::uint32_t ldd, // (*) ldd >= search_width * graph_degree SAMPLE_FILTER_T sample_filter) { const uint32_t ldb = hashmap::get_size(hash_bitlen); const auto tid = threadIdx.x + blockDim.x * blockIdx.x; const auto global_team_id = tid / TEAM_SIZE; const auto query_id = blockIdx.y; if (global_team_id >= search_width * graph_degree) { return; } const std::size_t parent_list_index = parent_node_list[global_team_id / graph_degree + (search_width * blockIdx.y)]; if (parent_list_index == utils::get_max_value<INDEX_T>()) { return; } constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value; const auto parent_index = parent_candidates_ptr[parent_list_index + (lds * query_id)] & ~index_msb_1_mask; if (parent_index == utils::get_max_value<INDEX_T>()) { result_distances_ptr[ldd * blockIdx.y + global_team_id] 
= utils::get_max_value<DISTANCE_T>(); return; } const auto neighbor_list_head_ptr = neighbor_graph_ptr + (graph_degree * parent_index); const std::size_t child_id = neighbor_list_head_ptr[global_team_id % graph_degree]; if (hashmap::insert<TEAM_SIZE, INDEX_T>( visited_hashmap_ptr + (ldb * blockIdx.y), hash_bitlen, child_id)) { device::fragment<MAX_DATASET_DIM, DATA_T, TEAM_SIZE> frag_target; device::load_vector_sync(frag_target, dataset_ptr + (dataset_ld * child_id), data_dim); device::fragment<MAX_DATASET_DIM, DATA_T, TEAM_SIZE> frag_query; device::load_vector_sync(frag_query, query_ptr + blockIdx.y * data_dim, data_dim); const auto norm2 = device::norm2<DISTANCE_T>( frag_target, frag_query, static_cast<float>(1.0 / spatial::knn::detail::utils::config<DATA_T>::kDivisor)); if (threadIdx.x % TEAM_SIZE == 0) { result_indices_ptr[ldd * blockIdx.y + global_team_id] = child_id; result_distances_ptr[ldd * blockIdx.y + global_team_id] = norm2; } } else { if (threadIdx.x % TEAM_SIZE == 0) { result_distances_ptr[ldd * blockIdx.y + global_team_id] = utils::get_max_value<DISTANCE_T>(); } } if constexpr (!std::is_same<SAMPLE_FILTER_T, raft::neighbors::filtering::none_cagra_sample_filter>::value) { if (!sample_filter(query_id, parent_index)) { parent_candidates_ptr[parent_list_index + (lds * query_id)] = utils::get_max_value<INDEX_T>(); parent_distance_ptr[parent_list_index + (lds * query_id)] = utils::get_max_value<DISTANCE_T>(); } } } template <unsigned TEAM_SIZE, unsigned MAX_DATASET_DIM, class DATA_T, class INDEX_T, class DISTANCE_T, class SAMPLE_FILTER_T> void compute_distance_to_child_nodes( const INDEX_T* const parent_node_list, // [num_queries, search_width] INDEX_T* const parent_candidates_ptr, // [num_queries, search_width] DISTANCE_T* const parent_distance_ptr, // [num_queries, search_width] const std::size_t lds, const uint32_t search_width, const DATA_T* const dataset_ptr, // [dataset_size, data_dim] const std::uint32_t data_dim, const std::uint32_t dataset_size, 
  const std::uint32_t dataset_ld,
  const INDEX_T* const neighbor_graph_ptr,  // [dataset_size, graph_degree]
  const std::uint32_t graph_degree,
  const DATA_T* query_ptr,  // [num_queries, data_dim]
  const std::uint32_t num_queries,
  INDEX_T* const visited_hashmap_ptr,  // [num_queries, 1 << hash_bitlen]
  const std::uint32_t hash_bitlen,
  INDEX_T* const result_indices_ptr,       // [num_queries, ldd]
  DISTANCE_T* const result_distances_ptr,  // [num_queries, ldd]
  const std::uint32_t ldd,                 // (*) ldd >= search_width * graph_degree
  SAMPLE_FILTER_T sample_filter,
  cudaStream_t cuda_stream = 0)
{
  const auto block_size = 128;
  // grid.x: enough blocks so every (parent, neighbor) pair gets a team of
  // TEAM_SIZE threads; grid.y: one row per query.
  const dim3 grid_size(
    (search_width * graph_degree + (block_size / TEAM_SIZE) - 1) / (block_size / TEAM_SIZE),
    num_queries);
  compute_distance_to_child_nodes_kernel<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>
    <<<grid_size, block_size, 0, cuda_stream>>>(parent_node_list,
                                                parent_candidates_ptr,
                                                parent_distance_ptr,
                                                lds,
                                                search_width,
                                                dataset_ptr,
                                                data_dim,
                                                dataset_size,
                                                dataset_ld,
                                                neighbor_graph_ptr,
                                                graph_degree,
                                                query_ptr,
                                                visited_hashmap_ptr,
                                                hash_bitlen,
                                                result_indices_ptr,
                                                result_distances_ptr,
                                                ldd,
                                                sample_filter);
}

// Clears the MSB "used as parent" flag from the top-k index lists of all
// queries. One block per query; threads stride over the num_topk entries.
template <class INDEX_T>
RAFT_KERNEL remove_parent_bit_kernel(const std::uint32_t num_queries,
                                     const std::uint32_t num_topk,
                                     INDEX_T* const topk_indices_ptr,  // [ld, num_queries]
                                     const std::uint32_t ld)
{
  constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;

  uint32_t i_query = blockIdx.x;
  if (i_query >= num_queries) return;

  for (unsigned i = threadIdx.x; i < num_topk; i += blockDim.x) {
    topk_indices_ptr[i + (ld * i_query)] &= ~index_msb_1_mask;  // clear most significant bit
  }
}

// Host-side launcher for remove_parent_bit_kernel.
template <class INDEX_T>
void remove_parent_bit(const std::uint32_t num_queries,
                       const std::uint32_t num_topk,
                       INDEX_T* const topk_indices_ptr,  // [ld, num_queries]
                       const std::uint32_t ld,
                       cudaStream_t cuda_stream = 0)
{
  const std::size_t grid_size  = num_queries;
  const std::size_t block_size = 256;
  remove_parent_bit_kernel<<<grid_size, block_size, 0, cuda_stream>>>(
    num_queries, num_topk, topk_indices_ptr, ld);
}

// This function called after the `remove_parent_bit` function
// (after that call a still-invalid entry holds ~index_msb_1_mask, i.e. the
// max value with the MSB cleared — which is what the comparison below skips).
// Invalidates every result entry rejected by the sample filter by writing the
// max-value sentinels into both the index and distance buffers.
template <class INDEX_T, class DISTANCE_T, class SAMPLE_FILTER_T>
RAFT_KERNEL apply_filter_kernel(INDEX_T* const result_indices_ptr,
                                DISTANCE_T* const result_distances_ptr,
                                const std::size_t lds,
                                const std::uint32_t result_buffer_size,
                                const std::uint32_t num_queries,
                                const INDEX_T query_id_offset,
                                SAMPLE_FILTER_T sample_filter)
{
  constexpr INDEX_T index_msb_1_mask = utils::gen_index_msb_1_mask<INDEX_T>::value;
  const auto tid                     = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= result_buffer_size * num_queries) { return; }
  const auto i     = tid % result_buffer_size;  // position within one query's buffer
  const auto j     = tid / result_buffer_size;  // query index
  const auto index = i + j * lds;

  if (result_indices_ptr[index] != ~index_msb_1_mask &&
      !sample_filter(query_id_offset + j, result_indices_ptr[index])) {
    result_indices_ptr[index]   = utils::get_max_value<INDEX_T>();
    result_distances_ptr[index] = utils::get_max_value<DISTANCE_T>();
  }
}

// Host-side launcher for apply_filter_kernel; one thread per result entry.
template <class INDEX_T, class DISTANCE_T, class SAMPLE_FILTER_T>
void apply_filter(INDEX_T* const result_indices_ptr,
                  DISTANCE_T* const result_distances_ptr,
                  const std::size_t lds,
                  const std::uint32_t result_buffer_size,
                  const std::uint32_t num_queries,
                  const INDEX_T query_id_offset,
                  SAMPLE_FILTER_T sample_filter,
                  cudaStream_t cuda_stream)
{
  const std::uint32_t block_size = 256;
  const std::uint32_t grid_size  = ceildiv(num_queries * result_buffer_size, block_size);

  apply_filter_kernel<<<grid_size, block_size, 0, cuda_stream>>>(result_indices_ptr,
                                                                 result_distances_ptr,
                                                                 lds,
                                                                 result_buffer_size,
                                                                 num_queries,
                                                                 query_id_offset,
                                                                 sample_filter);
}

// Strided batched copy: for each of batch_size rows, copy `count` elements
// from src (leading dimension ld_src) to dst (leading dimension ld_dst).
// One thread per element.
template <class T>
RAFT_KERNEL batched_memcpy_kernel(T* const dst,  // [batch_size, ld_dst]
                                  const uint64_t ld_dst,
                                  const T* const src,  // [batch_size, ld_src]
                                  const uint64_t ld_src,
                                  const uint64_t count,
                                  const uint64_t batch_size)
{
  const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= count *
batch_size) { return; } const auto i = tid % count; const auto j = tid / count; dst[i + (ld_dst * j)] = src[i + (ld_src * j)]; } template <class T> void batched_memcpy(T* const dst, // [batch_size, ld_dst] const uint64_t ld_dst, const T* const src, // [batch_size, ld_src] const uint64_t ld_src, const uint64_t count, const uint64_t batch_size, cudaStream_t cuda_stream) { assert(ld_dst >= count); assert(ld_src >= count); constexpr uint32_t block_size = 256; const auto grid_size = (batch_size * count + block_size - 1) / block_size; batched_memcpy_kernel<T> <<<grid_size, block_size, 0, cuda_stream>>>(dst, ld_dst, src, ld_src, count, batch_size); } template <class T> RAFT_KERNEL set_value_batch_kernel(T* const dev_ptr, const std::size_t ld, const T val, const std::size_t count, const std::size_t batch_size) { const auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid >= count * batch_size) { return; } const auto batch_id = tid / count; const auto elem_id = tid % count; dev_ptr[elem_id + ld * batch_id] = val; } template <class T> void set_value_batch(T* const dev_ptr, const std::size_t ld, const T val, const std::size_t count, const std::size_t batch_size, cudaStream_t cuda_stream) { constexpr std::uint32_t block_size = 256; const auto grid_size = (count * batch_size + block_size - 1) / block_size; set_value_batch_kernel<T> <<<grid_size, block_size, 0, cuda_stream>>>(dev_ptr, ld, val, count, batch_size); } // result_buffer (work buffer) for "multi-kernel" // +--------------------+------------------------------+-------------------+ // | internal_top_k (A) | neighbors of internal_top_k | internal_topk (B) | // | <itopk_size> | <search_width * graph_degree> | <itopk_size> | // +--------------------+------------------------------+-------------------+ // |<--- result_buffer_allocation_size --->| // |<--- result_buffer_size --->| // Double buffer (A) // |<--- result_buffer_size --->| // Double buffer (B) template <unsigned TEAM_SIZE, unsigned MAX_DATASET_DIM, typename 
          DATA_T,
          typename INDEX_T,
          typename DISTANCE_T,
          typename SAMPLE_FILTER_T>
// Multi-kernel CAGRA search plan: each stage of an iteration (top-k, parent
// pickup, child-distance computation) runs as a separate kernel launch,
// double-buffering results between the (A) and (B) halves of result_buffer.
struct search : search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T> {
  // Pull the tuning knobs and derived sizes from the base search plan.
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_queries;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::itopk_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::algo;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::team_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::search_width;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::min_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::thread_block_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_mode;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_min_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_max_fill_rate;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_random_samplings;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::rand_xor_mask;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::max_dim;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dim;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::graph_degree;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::topk;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hash_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::small_hash_bitlen;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::small_hash_reset_interval;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T,
                         SAMPLE_FILTER_T>::dataset_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::result_buffer_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::smem_size;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::hashmap;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_executed_iterations;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::dev_seed;
  using search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>::num_seeds;

  // Total per-query length of the work buffer, incl. the second (B) half of
  // the double buffer; see the layout diagram above the struct.
  size_t result_buffer_allocation_size;
  rmm::device_uvector<INDEX_T> result_indices;  // results_indices_buffer
  rmm::device_uvector<float> result_distances;  // result_distances_buffer
  rmm::device_uvector<INDEX_T> parent_node_list;
  rmm::device_uvector<uint32_t> topk_hint;
  rmm::device_scalar<uint32_t> terminate_flag;  // dev_terminate_flag, host_terminate_flag.;
  rmm::device_uvector<uint32_t> topk_workspace;

  // Builds the base plan, then sizes all device-side work buffers.
  search(raft::resources const& res,
         search_params params,
         int64_t dim,
         int64_t graph_degree,
         uint32_t topk)
    : search_plan_impl<DATA_T, INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>(
        res, params, dim, graph_degree, topk),
      result_indices(0, resource::get_cuda_stream(res)),
      result_distances(0, resource::get_cuda_stream(res)),
      parent_node_list(0, resource::get_cuda_stream(res)),
      topk_hint(0, resource::get_cuda_stream(res)),
      topk_workspace(0, resource::get_cuda_stream(res)),
      terminate_flag(resource::get_cuda_stream(res))
  {
    set_params(res);
  }

  void set_params(raft::resources const& res)
  {
    //
    // Allocate memory for intermediate buffer and workspace.
    //
    result_buffer_size            = itopk_size + (search_width * graph_degree);
    result_buffer_allocation_size = result_buffer_size + itopk_size;
    result_indices.resize(result_buffer_allocation_size * max_queries,
                          resource::get_cuda_stream(res));
    result_distances.resize(result_buffer_allocation_size * max_queries,
                            resource::get_cuda_stream(res));
    parent_node_list.resize(max_queries * search_width, resource::get_cuda_stream(res));
    topk_hint.resize(max_queries, resource::get_cuda_stream(res));

    // Scratch space for the batched top-k primitive.
    size_t topk_workspace_size = _cuann_find_topk_bufferSize(
      itopk_size, max_queries, result_buffer_size, utils::get_cuda_data_type<DATA_T>());
    RAFT_LOG_DEBUG("# topk_workspace_size: %lu", topk_workspace_size);
    topk_workspace.resize(topk_workspace_size, resource::get_cuda_stream(res));

    hashmap.resize(hashmap_size, resource::get_cuda_stream(res));
  }

  ~search() {}

  // Runs the full multi-kernel search for a batch of queries.
  // Results are written to topk_indices_ptr / topk_distances_ptr.
  void operator()(raft::resources const& res,
                  raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset,
                  raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph,
                  INDEX_T* const topk_indices_ptr,       // [num_queries, topk]
                  DISTANCE_T* const topk_distances_ptr,  // [num_queries, topk]
                  const DATA_T* const queries_ptr,       // [num_queries, dataset_dim]
                  const uint32_t num_queries,
                  const INDEX_T* dev_seed_ptr,              // [num_queries, num_seeds]
                  uint32_t* const num_executed_iterations,  // [num_queries,]
                  uint32_t topk,
                  SAMPLE_FILTER_T sample_filter)
  {
    // Init hashmap
    cudaStream_t stream      = resource::get_cuda_stream(res);
    const uint32_t hash_size = hashmap::get_size(hash_bitlen);
    set_value_batch(
      hashmap.data(), hash_size, utils::get_max_value<INDEX_T>(), hash_size, num_queries, stream);
    // Init topk_hint
    if (topk_hint.size() > 0) { set_value(topk_hint.data(), 0xffffffffu, num_queries, stream); }

    // Choose initial entry point candidates at random
    random_pickup<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, DISTANCE_T, INDEX_T>(
      dataset.data_handle(),
      dataset.extent(1),
      dataset.extent(0),
      dataset.stride(0),
      queries_ptr,
      num_queries,
      result_buffer_size,
      num_random_samplings,
      rand_xor_mask,
      dev_seed_ptr,
      num_seeds,
      result_indices.data(),
      result_distances.data(),
      result_buffer_allocation_size,
      hashmap.data(),
      hash_bitlen,
      stream);

    unsigned iter = 0;
    while (1) {
      // Make an index list of internal top-k nodes
      // (iter parity selects which half of the double buffer is read/written).
      _cuann_find_topk(itopk_size,
                       num_queries,
                       result_buffer_size,
                       result_distances.data() + (iter & 0x1) * itopk_size,
                       result_buffer_allocation_size,
                       result_indices.data() + (iter & 0x1) * itopk_size,
                       result_buffer_allocation_size,
                       result_distances.data() + (1 - (iter & 0x1)) * result_buffer_size,
                       result_buffer_allocation_size,
                       result_indices.data() + (1 - (iter & 0x1)) * result_buffer_size,
                       result_buffer_allocation_size,
                       topk_workspace.data(),
                       true,
                       topk_hint.data(),
                       stream);

      // termination (1): hard cap on the number of iterations.
      if ((iter + 1 == max_iterations)) {
        iter++;
        break;
      }

      // Optimistically raise the flag; pickup_next_parents clears it if any
      // query still has a usable parent.
      if (iter + 1 >= min_iterations) { set_value<uint32_t>(terminate_flag.data(), 1, stream); }

      // pickup parent nodes
      uint32_t _small_hash_bitlen = 0;
      // Periodically reset the small hashmap by passing its bitlen.
      if ((iter + 1) % small_hash_reset_interval == 0) { _small_hash_bitlen = small_hash_bitlen; }
      pickup_next_parents(result_indices.data() + (1 - (iter & 0x1)) * result_buffer_size,
                          result_buffer_allocation_size,
                          itopk_size,
                          num_queries,
                          hashmap.data(),
                          hash_bitlen,
                          _small_hash_bitlen,
                          parent_node_list.data(),
                          search_width,
                          search_width,
                          terminate_flag.data(),
                          stream);

      // termination (2): no query produced a new parent this iteration.
      if (iter + 1 >= min_iterations && terminate_flag.value(stream)) {
        iter++;
        break;
      }

      // Compute distance to child nodes that are adjacent to the parent node
      compute_distance_to_child_nodes<TEAM_SIZE, MAX_DATASET_DIM>(
        parent_node_list.data(),
        result_indices.data() + (1 - (iter & 0x1)) * result_buffer_size,
        result_distances.data() + (1 - (iter & 0x1)) * result_buffer_size,
        result_buffer_allocation_size,
        search_width,
        dataset.data_handle(),
        dataset.extent(1),
        dataset.extent(0),
        dataset.stride(0),
        graph.data_handle(),
        graph.extent(1),
        queries_ptr,
        num_queries,
        hashmap.data(),
        hash_bitlen,
        result_indices.data() + itopk_size,
        result_distances.data() + itopk_size,
        result_buffer_allocation_size,
        sample_filter,
        stream);

      iter++;
    }  // while ( 1 )
    auto result_indices_ptr   = result_indices.data() + (iter & 0x1) * result_buffer_size;
    auto result_distances_ptr = result_distances.data() + (iter & 0x1) * result_buffer_size;

    if constexpr (!std::is_same<SAMPLE_FILTER_T,
                                raft::neighbors::filtering::none_cagra_sample_filter>::value) {
      // Remove parent bit in search results
      remove_parent_bit(num_queries,
                        result_buffer_size,
                        result_indices.data() + (iter & 0x1) * itopk_size,
                        result_buffer_allocation_size,
                        stream);

      // Drop any remaining filtered-out entries, then re-run top-k so the
      // final output contains only surviving candidates.
      apply_filter<INDEX_T, DISTANCE_T, SAMPLE_FILTER_T>(
        result_indices.data() + (iter & 0x1) * itopk_size,
        result_distances.data() + (iter & 0x1) * itopk_size,
        result_buffer_allocation_size,
        result_buffer_size,
        num_queries,
        0,
        sample_filter,
        stream);

      result_indices_ptr   = result_indices.data() + (1 - (iter & 0x1)) * result_buffer_size;
      result_distances_ptr = result_distances.data() + (1 - (iter & 0x1)) * result_buffer_size;
      _cuann_find_topk(itopk_size,
                       num_queries,
                       result_buffer_size,
                       result_distances.data() + (iter & 0x1) * itopk_size,
                       result_buffer_allocation_size,
                       result_indices.data() + (iter & 0x1) * itopk_size,
                       result_buffer_allocation_size,
                       result_distances_ptr,
                       result_buffer_allocation_size,
                       result_indices_ptr,
                       result_buffer_allocation_size,
                       topk_workspace.data(),
                       true,
                       topk_hint.data(),
                       stream);
    } else {
      // Remove parent bit in search results
      remove_parent_bit(
        num_queries, itopk_size, result_indices_ptr, result_buffer_allocation_size, stream);
    }

    // Copy results from working buffer to final buffer
    batched_memcpy(topk_indices_ptr,
                   topk,
                   result_indices_ptr,
                   result_buffer_allocation_size,
                   topk,
                   num_queries,
                   stream);
    if (topk_distances_ptr) {
      batched_memcpy(topk_distances_ptr,
                     topk,
                     result_distances_ptr,
                     result_buffer_allocation_size,
                     topk,
                     num_queries,
                     stream);
    }

    if (num_executed_iterations) {
      // Every query in the batch runs the same number of iterations here.
      for (std::uint32_t i = 0; i < num_queries; i++) {
        num_executed_iterations[i] = iter;
      }
    }
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
};
}  // namespace multi_kernel_search
}  // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/topk_for_cagra/topk.h
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_fp16.h> #include <stdint.h> namespace raft::neighbors::cagra::detail { // size_t _cuann_find_topk_bufferSize(uint32_t topK, uint32_t sizeBatch, uint32_t numElements, cudaDataType_t sampleDtype = CUDA_R_32F); // template <class ValT> void _cuann_find_topk(uint32_t topK, uint32_t sizeBatch, uint32_t numElements, const float* inputKeys, // [sizeBatch, ldIK,] uint32_t ldIK, // (*) ldIK >= numElements const ValT* inputVals, // [sizeBatch, ldIV,] uint32_t ldIV, // (*) ldIV >= numElements float* outputKeys, // [sizeBatch, ldOK,] uint32_t ldOK, // (*) ldOK >= topK ValT* outputVals, // [sizeBatch, ldOV,] uint32_t ldOV, // (*) ldOV >= topK void* workspace, bool sort = false, uint32_t* hint = NULL, cudaStream_t stream = 0); #ifdef __CUDA_ARCH__ #define CUDA_DEVICE_HOST_FUNC __device__ #else #define CUDA_DEVICE_HOST_FUNC #endif // CUDA_DEVICE_HOST_FUNC inline size_t _cuann_aligned(size_t size, size_t unit = 128) { if (size % unit) { size += unit - (size % unit); } return size; } } // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/cagra/topk_for_cagra/topk_core.cuh
/*
 * Copyright (c) 2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once
#include "topk.h"

#include <assert.h>
#include <cub/cub.cuh>
#include <float.h>
#include <stdint.h>
#include <stdio.h>

namespace raft::neighbors::cagra::detail {
// Maps raw 32-bit key bits to a monotonically ordered unsigned integer:
// if the sign bit is set, flip all bits; otherwise flip only the sign bit
// (the classic radix-sort key transform).
__device__ inline uint32_t convert(uint32_t x)
{
  if (x & 0x80000000) {
    return x ^ 0xffffffff;
  } else {
    return x ^ 0x80000000;
  }
}

// 16-bit variant of the key transform above.
__device__ inline uint16_t convert(uint16_t x)
{
  if (x & 0x8000) {
    return x ^ 0xffff;
  } else {
    return x ^ 0x8000;
  }
}

// Union-like scratch for vectorized loads of 1/2/4/8 32-bit elements.
struct u32_vector {
  uint1 x1;
  uint2 x2;
  uint4 x4;
  ulonglong4 x8;
};

// Union-like scratch for vectorized loads of 1/2/4/8 16-bit elements.
struct u16_vector {
  ushort1 x1;
  ushort2 x2;
  ushort4 x4;
  uint4 x8;
};

// Loads vecLen consecutive uint32_t starting at x[i] into `vec`.
template <int vecLen>
__device__ inline void load_u32_vector(struct u32_vector& vec, const uint32_t* x, int i)
{
  if (vecLen == 1) {
    vec.x1 = ((uint1*)(x + i))[0];
  } else if (vecLen == 2) {
    vec.x2 = ((uint2*)(x + i))[0];
  } else if (vecLen == 4) {
    vec.x4 = ((uint4*)(x + i))[0];
  } else if (vecLen == 8) {
    vec.x8 = ((ulonglong4*)(x + i))[0];
  }
}

// Loads vecLen consecutive uint16_t starting at x[i] into `vec`.
template <int vecLen>
__device__ inline void load_u16_vector(struct u16_vector& vec, const uint16_t* x, int i)
{
  if (vecLen == 1) {
    vec.x1 = ((ushort1*)(x + i))[0];
  } else if (vecLen == 2) {
    vec.x2 = ((ushort2*)(x + i))[0];
  } else if (vecLen == 4) {
    vec.x4 = ((ushort4*)(x + i))[0];
  } else if (vecLen == 8) {
    vec.x8 = ((uint4*)(x + i))[0];
  }
}

// Extracts element i from a loaded u32_vector, applying the key transform.
template <int vecLen>
__device__ inline uint32_t get_element_from_u32_vector(struct u32_vector& vec, int i)
{
  uint32_t xi;
  if (vecLen == 1) {
    xi = convert(vec.x1.x);
  } else if (vecLen == 2) {
    if (i == 0)
      xi = convert(vec.x2.x);
    else
      xi = convert(vec.x2.y);
  } else if (vecLen == 4) {
    if (i == 0)
      xi = convert(vec.x4.x);
    else if (i == 1)
      xi = convert(vec.x4.y);
    else if (i == 2)
      xi = convert(vec.x4.z);
    else
      xi = convert(vec.x4.w);
  } else if (vecLen == 8) {
    // 8-wide loads pack two 32-bit elements per 64-bit lane.
    if (i == 0)
      xi = convert((uint32_t)(vec.x8.x & 0xffffffff));
    else if (i == 1)
      xi = convert((uint32_t)(vec.x8.x >> 32));
    else if (i == 2)
      xi = convert((uint32_t)(vec.x8.y & 0xffffffff));
    else if (i == 3)
      xi = convert((uint32_t)(vec.x8.y >> 32));
    else if (i == 4)
      xi = convert((uint32_t)(vec.x8.z & 0xffffffff));
    else if (i == 5)
      xi = convert((uint32_t)(vec.x8.z >> 32));
    else if (i == 6)
      xi = convert((uint32_t)(vec.x8.w & 0xffffffff));
    else
      xi = convert((uint32_t)(vec.x8.w >> 32));
  }
  return xi;
}

// Extracts element i from a loaded u16_vector, applying the key transform.
template <int vecLen>
__device__ inline uint16_t get_element_from_u16_vector(struct u16_vector& vec, int i)
{
  uint16_t xi;
  if (vecLen == 1) {
    xi = convert(vec.x1.x);
  } else if (vecLen == 2) {
    if (i == 0)
      xi = convert(vec.x2.x);
    else
      xi = convert(vec.x2.y);
  } else if (vecLen == 4) {
    if (i == 0)
      xi = convert(vec.x4.x);
    else if (i == 1)
      xi = convert(vec.x4.y);
    else if (i == 2)
      xi = convert(vec.x4.z);
    else
      xi = convert(vec.x4.w);
  } else if (vecLen == 8) {
    // 8-wide loads pack two 16-bit elements per 32-bit lane.
    if (i == 0)
      xi = convert((uint16_t)(vec.x8.x & 0xffff));
    else if (i == 1)
      xi = convert((uint16_t)(vec.x8.x >> 16));
    else if (i == 2)
      xi = convert((uint16_t)(vec.x8.y & 0xffff));
    else if (i == 3)
      xi = convert((uint16_t)(vec.x8.y >> 16));
    else if (i == 4)
      xi = convert((uint16_t)(vec.x8.z & 0xffff));
    else if (i == 5)
      xi = convert((uint16_t)(vec.x8.z >> 16));
    else if (i == 6)
      xi = convert((uint16_t)(vec.x8.w & 0xffff));
    else
      xi = convert((uint16_t)(vec.x8.w >> 16));
  }
  return xi;
}

// Block-wide inclusive prefix sum, dispatching to a cub::BlockScan
// instantiation matching the runtime block size (cub requires the block size
// as a compile-time parameter).
template <typename T>
__device__ inline void block_scan(const T input, T& output)
{
  switch (blockDim.x) {
    case 32: {
      typedef cub::BlockScan<T, 32> BlockScanT;
      __shared__ typename BlockScanT::TempStorage temp_storage;
      BlockScanT(temp_storage).InclusiveSum(input, output);
    } break;
    case 64: {
      typedef cub::BlockScan<T, 64> BlockScanT;
      __shared__ typename BlockScanT::TempStorage temp_storage;
      BlockScanT(temp_storage).InclusiveSum(input, output);
    } break;
    case 128: {
      typedef cub::BlockScan<T, 128> BlockScanT;
      __shared__ typename BlockScanT::TempStorage temp_storage;
      BlockScanT(temp_storage).InclusiveSum(input, output);
    } break;
    case 256: {
      typedef cub::BlockScan<T, 256> BlockScanT;
      __shared__ typename BlockScanT::TempStorage temp_storage;
      BlockScanT(temp_storage).InclusiveSum(input, output);
    } break;
    case 512: {
      typedef cub::BlockScan<T, 512> BlockScanT;
      __shared__ typename BlockScanT::TempStorage temp_storage;
      BlockScanT(temp_storage).InclusiveSum(input, output);
    } break;
    case 1024: {
      typedef cub::BlockScan<T, 1024> BlockScanT;
      __shared__ typename BlockScanT::TempStorage temp_storage;
      BlockScanT(temp_storage).InclusiveSum(input, output);
    } break;
    default: break;
  }
}

// One pass of the multi-pass radix-style threshold refinement: builds a
// histogram of a bit-slice of the (transformed) keys relative to the current
// threshold. Elements already known to be below the threshold are emitted to
// `output`; elements known to be out of range are marked done in the per-
// element `state` bitmap (when stateBitLen == 8) so later passes skip them.
template <typename T, int stateBitLen, int vecLen>
__device__ inline void update_histogram(int itr,
                                        uint32_t thread_id,
                                        uint32_t num_threads,
                                        uint32_t hint,
                                        uint32_t threshold,
                                        uint32_t& num_bins,
                                        uint32_t& shift,
                                        const T* x,  // [nx,]
                                        uint32_t nx,
                                        uint32_t* hist,  // [num_bins]
                                        uint8_t* state,
                                        uint32_t* output,  // [topk]
                                        uint32_t* output_count)
{
  if (sizeof(T) == 4) {
    // 32-bit (uint32_t)
    // itr:0, calculate histogram with 11 bits from bit-21 to bit-31
    // itr:1, calculate histogram with 11 bits from bit-10 to bit-20
    // itr:2, calculate histogram with 10 bits from bit-0 to bit-9
    if (itr == 0) {
      shift    = 21;
      num_bins = 2048;
    } else if (itr == 1) {
      shift    = 10;
      num_bins = 2048;
    } else {
      shift    = 0;
      num_bins = 1024;
    }
  } else if (sizeof(T) == 2) {
    // 16-bit (uint16_t)
    // itr:0, calculate histogram with 8 bits from bit-8 to bit-15
    // itr:1, calculate histogram with 8 bits from bit-0 to bit-7
    if (itr == 0) {
      shift    = 8;
      num_bins = 256;
    } else {
      shift    = 0;
      num_bins = 256;
    }
  } else {
    return;
  }
  if (itr > 0) {
    // Zero the histogram left over from the previous pass.
    for (int i = threadIdx.x; i < num_bins; i += blockDim.x) {
      hist[i] = 0;
    }
    __syncthreads();
  }
  // (*) Note that 'thread_id' may be different from 'threadIdx.x',
  // and 'num_threads' may be different from 'blockDim.x'
  int ii = 0;
  for (int i = thread_id * vecLen; i < nx; i += num_threads * max(vecLen, stateBitLen), ii++) {
    uint8_t iState = 0;
    if ((stateBitLen == 8) && (itr > 0)) {
      iState = state[thread_id + (num_threads * ii)];
      // All 8 elements tracked by this state byte are already resolved.
      if (iState == (uint8_t)0xff) continue;
    }
#pragma unroll
    for (int v = 0; v < max(vecLen, stateBitLen); v += vecLen) {
      const int iv = i + (num_threads * v);
      if (iv >= nx) break;

      struct u32_vector x_u32_vec;
      struct u16_vector x_u16_vec;
      if (sizeof(T) == 4) {
        load_u32_vector<vecLen>(x_u32_vec, (const uint32_t*)x, iv);
      } else {
        load_u16_vector<vecLen>(x_u16_vec, (const uint16_t*)x, iv);
      }
#pragma unroll
      for (int u = 0; u < vecLen; u++) {
        const int ivu = iv + u;
        if (ivu >= nx) break;

        uint8_t mask = (uint8_t)0x1 << (v + u);
        if ((stateBitLen == 8) && (iState & mask)) continue;

        uint32_t xi;
        if (sizeof(T) == 4) {
          xi = get_element_from_u32_vector<vecLen>(x_u32_vec, u);
        } else {
          xi = get_element_from_u16_vector<vecLen>(x_u16_vec, u);
        }
        if ((xi > hint) && (itr == 0)) {
          // Above the caller-provided hint: can never be in the top-k.
          if (stateBitLen == 8) { iState |= mask; }
        } else if (xi < threshold) {
          if (stateBitLen == 8) {
            // If the condition is already met, record the index.
            output[atomicAdd(output_count, 1)] = ivu;
            iState |= mask;
          }
        } else {
          const uint32_t k = (xi - threshold) >> shift;  // 0 <= k
          if (k >= num_bins) {
            if (stateBitLen == 8) { iState |= mask; }
          } else if (k + 1 < num_bins) {
            // Update histogram
            atomicAdd(&(hist[k + 1]), 1);
          }
        }
      }
    }
    if (stateBitLen == 8) { state[thread_id + (num_threads * ii)] = iState; }
  }
  __syncthreads();
}

// Per-thread portion of the threshold search when num_bins exceeds the block
// size: each thread scans n_data = num_bins / blockDim_x consecutive bins of
// the block-wide inclusive prefix sum and keeps the largest bin index whose
// cumulative count still fits inside topk.
template <unsigned blockDim_x>
__device__ inline void select_best_index_for_next_threshold_core(uint32_t& my_index,
                                                                 uint32_t& my_csum,
                                                                 const unsigned num_bins,
                                                                 const uint32_t* const hist,
                                                                 const uint32_t nx_below_threshold,
                                                                 const uint32_t max_threshold,
                                                                 const uint32_t threshold,
                                                                 const uint32_t shift,
                                                                 const uint32_t topk)
{
  typedef cub::BlockScan<uint32_t, blockDim_x> BlockScanT;
  __shared__ typename BlockScanT::TempStorage temp_storage;
  if (num_bins == 2048) {
    constexpr int n_data = 2048 / blockDim_x;
    uint32_t csum[n_data];
    for (int i = 0; i < n_data; i++) {
      csum[i] = hist[i + (n_data * threadIdx.x)];
    }
    BlockScanT(temp_storage).InclusiveSum(csum, csum);
    // Scan from the highest local bin down; the first acceptable one wins.
    for (int i = n_data - 1; i >= 0; i--) {
      if (nx_below_threshold + csum[i] > topk) continue;
      const uint32_t index = i + (n_data * threadIdx.x);
      if (threshold + (index << shift) > max_threshold) continue;
      my_index = index;
      my_csum  = csum[i];
      break;
    }
  } else if (num_bins == 1024) {
    constexpr int n_data = 1024 / blockDim_x;
    uint32_t csum[n_data];
    for (int i = 0; i < n_data; i++) {
      csum[i] = hist[i + (n_data * threadIdx.x)];
    }
    BlockScanT(temp_storage).InclusiveSum(csum, csum);
    for (int i = n_data - 1; i >= 0; i--) {
      if (nx_below_threshold + csum[i] > topk) continue;
      const uint32_t index = i + (n_data * threadIdx.x);
      if (threshold + (index << shift) > max_threshold) continue;
      my_index = index;
      my_csum  = csum[i];
      break;
    }
  }
}

// Picks the histogram bin that refines the threshold for the next pass
// (parameter list continues below).
__device__ inline void select_best_index_for_next_threshold(
  const uint32_t topk,
  const uint32_t threshold,
  const uint32_t max_threshold,
  const uint32_t nx_below_threshold,
  const uint32_t num_bins,
  const uint32_t shift,
  const uint32_t*
    const hist,  // [num_bins]
  uint32_t* const best_index,
  uint32_t* const best_csum)
{
  // Scan the histogram ('hist') and compute csum. Then, find the largest
  // index under the condition that the sum of the number of elements found
  // so far ('nx_below_threshold') and the csum value does not exceed the
  // topk value.
  uint32_t my_index = 0xffffffff;
  uint32_t my_csum  = 0;
  if (num_bins <= blockDim.x) {
    // One bin per thread: a single block-wide scan suffices.
    uint32_t csum = 0;
    if (threadIdx.x < num_bins) { csum = hist[threadIdx.x]; }
    detail::block_scan(csum, csum);
    if (threadIdx.x < num_bins) {
      const uint32_t index = threadIdx.x;
      if ((nx_below_threshold + csum <= topk) && (threshold + (index << shift) <= max_threshold)) {
        my_index = index;
        my_csum  = csum;
      }
    }
  } else {
    // Multiple bins per thread: dispatch on the (compile-time) block size.
    switch (blockDim.x) {
      case 64:
        select_best_index_for_next_threshold_core<64>(
          my_index, my_csum, num_bins, hist, nx_below_threshold, max_threshold, threshold, shift, topk);
        break;
      case 128:
        select_best_index_for_next_threshold_core<128>(
          my_index, my_csum, num_bins, hist, nx_below_threshold, max_threshold, threshold, shift, topk);
        break;
      case 256:
        select_best_index_for_next_threshold_core<256>(
          my_index, my_csum, num_bins, hist, nx_below_threshold, max_threshold, threshold, shift, topk);
        break;
      case 512:
        select_best_index_for_next_threshold_core<512>(
          my_index, my_csum, num_bins, hist, nx_below_threshold, max_threshold, threshold, shift, topk);
        break;
      case 1024:
        select_best_index_for_next_threshold_core<1024>(
          my_index, my_csum, num_bins, hist, nx_below_threshold, max_threshold, threshold, shift, topk);
        break;
    }
  }
  if (threadIdx.x < num_bins) {
    // Reduce per-warp: the highest lane holding a candidate publishes it via
    // atomicMax into the block-level best_index / best_csum.
    const int laneid = 31 - __clz(__ballot_sync(0xffffffff, (my_index != 0xffffffff)));
    if ((threadIdx.x & 0x1f) == laneid) {
      const uint32_t old_index = atomicMax(best_index, my_index);
      if (old_index < my_index) { atomicMax(best_csum, my_csum); }
    }
  }
  __syncthreads();
}

// After the final threshold is fixed, emit the indices of all elements whose
// key is strictly below it, plus just enough elements equal to it to fill
// the requested topk.
template <typename T, int stateBitLen, int vecLen>
__device__ inline void output_index_below_threshold(const uint32_t topk,
                                                    const uint32_t thread_id,
                                                    const uint32_t num_threads,
                                                    const uint32_t threshold,
                                                    const uint32_t nx_below_threshold,
                                                    const T* const x,  // [nx,]
                                                    const uint32_t nx,
                                                    const uint8_t* state,
                                                    uint32_t* const output,  // [topk]
                                                    uint32_t* const output_count,
                                                    uint32_t* const output_count_eq)
{
  int ii = 0;
  for (int i = thread_id * vecLen; i < nx; i += num_threads * max(vecLen, stateBitLen), ii++) {
    uint8_t iState = 0;
    if (stateBitLen == 8) {
      iState = state[thread_id + (num_threads * ii)];
      // All 8 tracked elements already resolved in earlier passes.
      if (iState == (uint8_t)0xff) continue;
    }
#pragma unroll
    for (int v = 0; v < max(vecLen, stateBitLen); v += vecLen) {
      const int iv = i + (num_threads * v);
      if (iv >= nx) break;

      struct u32_vector u32_vec;
      struct u16_vector u16_vec;
      if (sizeof(T) == 4) {
        load_u32_vector<vecLen>(u32_vec, (const uint32_t*)x, iv);
      } else {
        load_u16_vector<vecLen>(u16_vec, (const uint16_t*)x, iv);
      }
#pragma unroll
      for (int u = 0; u < vecLen; u++) {
        const int ivu = iv + u;
        if (ivu >= nx) break;

        const uint8_t mask = (uint8_t)0x1 << (v + u);
        if ((stateBitLen == 8) && (iState & mask)) continue;

        uint32_t xi;
        if (sizeof(T) == 4) {
          xi = get_element_from_u32_vector<vecLen>(u32_vec, u);
        } else {
          xi = get_element_from_u16_vector<vecLen>(u16_vec, u);
        }
        if (xi < threshold) {
          output[atomicAdd(output_count, 1)] = ivu;
        } else if (xi == threshold) {
          // (*) If the value is equal to the threshold, the index
          // processed first is recorded. Cause of non-determinism.
          if (nx_below_threshold + atomicAdd(output_count_eq, 1) < topk) {
            output[atomicAdd(output_count, 1)] = ivu;
          }
        }
      }
    }
  }
}

// Exchanges two values.
template <typename T>
__device__ inline void swap(T& val1, T& val2)
{
  const T val0 = val1;
  val1         = val2;
  val2         = val0;
}

// Orders key1 <= key2; returns true if a swap occurred.
template <typename K>
__device__ inline bool swap_if_needed(K& key1, K& key2)
{
  if (key1 > key2) {
    swap<K>(key1, key2);
    return true;
  }
  return false;
}

// Orders key1 <= key2, carrying the associated values along.
template <typename K, typename V>
__device__ inline bool swap_if_needed(K& key1, K& key2, V& val1, V& val2)
{
  if (key1 > key2) {
    swap<K>(key1, key2);
    swap<V>(val1, val2);
    return true;
  }
  return false;
}

// Direction-aware compare-and-swap used by the bitonic sort stages:
// `ascending` selects the desired order of (key1, key2).
template <typename K, typename V>
__device__ inline bool swap_if_needed(K& key1, K& key2, V& val1, V& val2, bool ascending)
{
  if (key1 == key2) { return false; }
  if ((key1 > key2) == ascending) {
    swap<K>(key1, key2);
    swap<V>(val1, val2);
    return true;
  }
  return false;
}

// Sentinel "maximum" padding value for the sort, per key type.
template <typename T>
__device__ inline T max_value_of();
template <>
__device__ inline float max_value_of<float>()
{
  return FLT_MAX;
}
template <>
__device__ inline uint32_t max_value_of<uint32_t>()
{
  return ~0u;
}

// Number of state bytes required to track len_x elements (8 elements per
// byte when stateBitLen == 8, otherwise no state is used). On the host the
// block size must be supplied via the BLOCK_SIZE template parameter.
template <int stateBitLen, unsigned BLOCK_SIZE = 0>
__device__ __host__ inline uint32_t get_state_size(uint32_t len_x)
{
#ifdef __CUDA_ARCH__
  const uint32_t num_threads = blockDim.x;
#else
  const uint32_t num_threads = BLOCK_SIZE;
#endif
  if (stateBitLen == 8) {
    uint32_t numElements_perThread = (len_x + num_threads - 1) / num_threads;
    uint32_t numState_perThread    = (numElements_perThread + stateBitLen - 1) / stateBitLen;
    return numState_perThread * num_threads;
  }
  return 0;
}

// Cooperative (one CTA) top-k over len_x keys; body continues below this
// chunk.
template <int stateBitLen, int vecLen, int maxTopk, int numSortThreads, class ValT>
__device__ inline void topk_cta_11_core(uint32_t topk,
                                        uint32_t len_x,
                                        const uint32_t* _x,    // [size_batch, ld_x,]
                                        const ValT* _in_vals,  // [size_batch, ld_iv,]
                                        uint32_t* _y,          // [size_batch, ld_y,]
                                        ValT* _out_vals,       // [size_batch, ld_ov,]
                                        uint8_t* _state,       // [size_batch, ...,]
                                        uint32_t* _hint,
                                        bool sort,
                                        uint32_t* _smem)
{
  uint32_t* const
smem_out_vals = _smem; uint32_t* const hist = &(_smem[2 * maxTopk]); uint32_t* const best_index = &(_smem[2 * maxTopk + 2048]); uint32_t* const best_csum = &(_smem[2 * maxTopk + 2048 + 3]); const uint32_t num_threads = blockDim.x; const uint32_t thread_id = threadIdx.x; uint32_t nx = len_x; const uint32_t* const x = _x; const ValT* in_vals = NULL; if (_in_vals) { in_vals = _in_vals; } uint32_t* y = NULL; if (_y) { y = _y; } ValT* out_vals = NULL; if (_out_vals) { out_vals = _out_vals; } uint8_t* state = _state; const uint32_t hint = (_hint == NULL ? ~0u : *_hint); // Initialize shared memory for (int i = 2 * maxTopk + thread_id; i < 2 * maxTopk + 2048 + 8; i += num_threads) { _smem[i] = 0; } uint32_t* const output_count = &(_smem[2 * maxTopk + 2048 + 6]); uint32_t* const output_count_eq = &(_smem[2 * maxTopk + 2048 + 7]); uint32_t threshold = 0; uint32_t nx_below_threshold = 0; __syncthreads(); // // Search for the maximum threshold that satisfies "(x < threshold).sum() <= topk". // #pragma unroll for (int j = 0; j < 3; j += 1) { uint32_t num_bins; uint32_t shift; update_histogram<uint32_t, stateBitLen, vecLen>(j, thread_id, num_threads, hint, threshold, num_bins, shift, x, nx, hist, state, smem_out_vals, output_count); select_best_index_for_next_threshold(topk, threshold, hint, nx_below_threshold, num_bins, shift, hist, best_index + j, best_csum + j); threshold += (best_index[j] << shift); nx_below_threshold += best_csum[j]; if (nx_below_threshold == topk) break; } if ((_hint != NULL) && (thread_id == 0)) { *_hint = min(threshold, hint); } // // Output index that satisfies "x[i] < threshold". 
// output_index_below_threshold<uint32_t, stateBitLen, vecLen>(topk, thread_id, num_threads, threshold, nx_below_threshold, x, nx, state, smem_out_vals, output_count, output_count_eq); __syncthreads(); #ifdef CUANN_DEBUG if (thread_id == 0 && output_count[0] < topk) { RAFT_LOG_DEBUG( "# i_batch:%d, topk:%d, output_count:%d, nx_below_threshold:%d, threshold:%08x\n", i_batch, topk, output_count[0], nx_below_threshold, threshold); } #endif if (!sort) { for (int k = thread_id; k < topk; k += blockDim.x) { const uint32_t i = smem_out_vals[k]; if (y) { y[k] = x[i]; } if (out_vals) { if (in_vals) { out_vals[k] = in_vals[i]; } else { out_vals[k] = i; } } } return; } constexpr int numTopkPerThread = maxTopk / numSortThreads; float my_keys[numTopkPerThread]; ValT my_vals[numTopkPerThread]; // Read keys and values to registers if (thread_id < numSortThreads) { for (int i = 0; i < numTopkPerThread; i++) { const int k = thread_id + (numSortThreads * i); if (k < topk) { const int j = smem_out_vals[k]; my_keys[i] = ((float*)x)[j]; if (in_vals) { my_vals[i] = in_vals[j]; } else { my_vals[i] = j; } } else { my_keys[i] = FLT_MAX; my_vals[i] = ~static_cast<ValT>(0); } } } uint32_t mask = 1; // Sorting by thread if (thread_id < numSortThreads) { const bool ascending = ((thread_id & mask) == 0); if (numTopkPerThread == 3) { swap_if_needed<float, ValT>(my_keys[0], my_keys[1], my_vals[0], my_vals[1], ascending); swap_if_needed<float, ValT>(my_keys[0], my_keys[2], my_vals[0], my_vals[2], ascending); swap_if_needed<float, ValT>(my_keys[1], my_keys[2], my_vals[1], my_vals[2], ascending); } else { for (int j = 0; j < numTopkPerThread / 2; j += 1) { #pragma unroll for (int i = 0; i < numTopkPerThread; i += 2) { swap_if_needed<float, ValT>( my_keys[i], my_keys[i + 1], my_vals[i], my_vals[i + 1], ascending); } #pragma unroll for (int i = 1; i < numTopkPerThread - 1; i += 2) { swap_if_needed<float, ValT>( my_keys[i], my_keys[i + 1], my_vals[i], my_vals[i + 1], ascending); } } } } // Bitonic 
Sorting while (mask < numSortThreads) { uint32_t next_mask = mask << 1; for (uint32_t curr_mask = mask; curr_mask > 0; curr_mask >>= 1) { const bool ascending = ((thread_id & curr_mask) == 0) == ((thread_id & next_mask) == 0); if (curr_mask >= 32) { // inter warp ValT* const smem_vals = reinterpret_cast<ValT*>(_smem); // [maxTopk] float* const smem_keys = reinterpret_cast<float*>(smem_vals + maxTopk); // [numTopkPerThread, numSortThreads] __syncthreads(); if (thread_id < numSortThreads) { #pragma unroll for (int i = 0; i < numTopkPerThread; i++) { smem_keys[thread_id + (numSortThreads * i)] = my_keys[i]; smem_vals[thread_id + (numSortThreads * i)] = my_vals[i]; } } __syncthreads(); if (thread_id < numSortThreads) { #pragma unroll for (int i = 0; i < numTopkPerThread; i++) { float opp_key = smem_keys[(thread_id ^ curr_mask) + (numSortThreads * i)]; ValT opp_val = smem_vals[(thread_id ^ curr_mask) + (numSortThreads * i)]; swap_if_needed<float, ValT>(my_keys[i], opp_key, my_vals[i], opp_val, ascending); } } } else { // intra warp if (thread_id < numSortThreads) { #pragma unroll for (int i = 0; i < numTopkPerThread; i++) { float opp_key = __shfl_xor_sync(0xffffffff, my_keys[i], curr_mask); ValT opp_val = __shfl_xor_sync(0xffffffff, my_vals[i], curr_mask); swap_if_needed<float, ValT>(my_keys[i], opp_key, my_vals[i], opp_val, ascending); } } } } if (thread_id < numSortThreads) { const bool ascending = ((thread_id & next_mask) == 0); if (numTopkPerThread == 3) { swap_if_needed<float, ValT>(my_keys[0], my_keys[1], my_vals[0], my_vals[1], ascending); swap_if_needed<float, ValT>(my_keys[0], my_keys[2], my_vals[0], my_vals[2], ascending); swap_if_needed<float, ValT>(my_keys[1], my_keys[2], my_vals[1], my_vals[2], ascending); } else { #pragma unroll for (uint32_t curr_mask = numTopkPerThread / 2; curr_mask > 0; curr_mask >>= 1) { #pragma unroll for (int i = 0; i < numTopkPerThread; i++) { const int j = i ^ curr_mask; if (i > j) continue; swap_if_needed<float, ValT>(my_keys[i], 
my_keys[j], my_vals[i], my_vals[j], ascending); } } } } mask = next_mask; } // Write sorted keys and values if (thread_id < numSortThreads) { for (int i = 0; i < numTopkPerThread; i++) { const int k = i + (numTopkPerThread * thread_id); if (k < topk) { if (y) { y[k] = reinterpret_cast<uint32_t*>(my_keys)[i]; } if (out_vals) { out_vals[k] = my_vals[i]; } } } } } namespace { // constexpr std::uint32_t NUM_THREADS = 1024; // DO NOT CHANGE constexpr std::uint32_t STATE_BIT_LENGTH = 8; // 0: state not used, 8: state used constexpr std::uint32_t MAX_VEC_LENGTH = 4; // 1, 2, 4 or 8 // // int _get_vecLen(uint32_t maxSamples, int maxVecLen = MAX_VEC_LENGTH) { int vecLen = min(maxVecLen, (int)MAX_VEC_LENGTH); while ((maxSamples % vecLen) != 0) { vecLen /= 2; } return vecLen; } } // unnamed namespace template <int stateBitLen, int vecLen, int maxTopk, int numSortThreads, class ValT> __launch_bounds__(1024, 1) RAFT_KERNEL kern_topk_cta_11(uint32_t topk, uint32_t size_batch, uint32_t len_x, const uint32_t* _x, // [size_batch, ld_x,] uint32_t ld_x, const ValT* _in_vals, // [size_batch, ld_iv,] uint32_t ld_iv, uint32_t* _y, // [size_batch, ld_y,] uint32_t ld_y, ValT* _out_vals, // [size_batch, ld_ov,] uint32_t ld_ov, uint8_t* _state, // [size_batch, ...,] uint32_t* _hints, // [size_batch,] bool sort) { const uint32_t i_batch = blockIdx.x; if (i_batch >= size_batch) return; constexpr uint32_t smem_len = 2 * maxTopk + 2048 + 8; static_assert(maxTopk * (1 + utils::size_of<ValT>() / utils::size_of<uint32_t>()) <= smem_len, "maxTopk * sizeof(ValT) must be smaller or equal to 8192 byte"); __shared__ uint32_t _smem[smem_len]; topk_cta_11_core<stateBitLen, vecLen, maxTopk, numSortThreads, ValT>( topk, len_x, (_x == NULL ? NULL : _x + i_batch * ld_x), (_in_vals == NULL ? NULL : _in_vals + i_batch * ld_iv), (_y == NULL ? NULL : _y + i_batch * ld_y), (_out_vals == NULL ? NULL : _out_vals + i_batch * ld_ov), (_state == NULL ? 
NULL : _state + i_batch * get_state_size<stateBitLen>(len_x)), (_hints == NULL ? NULL : _hints + i_batch), sort, _smem); } // size_t inline _cuann_find_topk_bufferSize(uint32_t topK, uint32_t sizeBatch, uint32_t numElements, cudaDataType_t sampleDtype) { constexpr int numThreads = NUM_THREADS; constexpr int stateBitLen = STATE_BIT_LENGTH; assert(stateBitLen == 0 || stateBitLen == 8); size_t workspaceSize = 1; // state if (stateBitLen == 8) { workspaceSize = _cuann_aligned( sizeof(uint8_t) * get_state_size<stateBitLen, numThreads>(numElements) * sizeBatch); } return workspaceSize; } template <class ValT> inline void _cuann_find_topk(uint32_t topK, uint32_t sizeBatch, uint32_t numElements, const float* inputKeys, // [sizeBatch, ldIK,] uint32_t ldIK, // (*) ldIK >= numElements const ValT* inputVals, // [sizeBatch, ldIV,] uint32_t ldIV, // (*) ldIV >= numElements float* outputKeys, // [sizeBatch, ldOK,] uint32_t ldOK, // (*) ldOK >= topK ValT* outputVals, // [sizeBatch, ldOV,] uint32_t ldOV, // (*) ldOV >= topK void* workspace, bool sort, uint32_t* hints, cudaStream_t stream) { assert(ldIK >= numElements); assert(ldIV >= numElements); assert(ldOK >= topK); assert(ldOV >= topK); constexpr int numThreads = NUM_THREADS; constexpr int stateBitLen = STATE_BIT_LENGTH; assert(stateBitLen == 0 || stateBitLen == 8); uint8_t* state = NULL; if (stateBitLen == 8) { state = (uint8_t*)workspace; } dim3 threads(numThreads, 1, 1); dim3 blocks(sizeBatch, 1, 1); void (*cta_kernel)(uint32_t, uint32_t, uint32_t, const uint32_t*, uint32_t, const ValT*, uint32_t, uint32_t*, uint32_t, ValT*, uint32_t, uint8_t*, uint32_t*, bool) = nullptr; // V:vecLen, K:maxTopk, T:numSortThreads #define SET_KERNEL_VKT(V, K, T, ValT) \ do { \ assert(numThreads >= T); \ assert((K % T) == 0); \ assert((K / T) <= 4); \ cta_kernel = kern_topk_cta_11<stateBitLen, V, K, T, ValT>; \ } while (0) // V: vecLen #define SET_KERNEL_V(V, ValT) \ do { \ if (topK <= 32) { \ SET_KERNEL_VKT(V, 32, 32, ValT); \ } else if (topK 
<= 64) { \ SET_KERNEL_VKT(V, 64, 32, ValT); \ } else if (topK <= 96) { \ SET_KERNEL_VKT(V, 96, 32, ValT); \ } else if (topK <= 128) { \ SET_KERNEL_VKT(V, 128, 32, ValT); \ } else if (topK <= 192) { \ SET_KERNEL_VKT(V, 192, 64, ValT); \ } else if (topK <= 256) { \ SET_KERNEL_VKT(V, 256, 64, ValT); \ } else if (topK <= 384) { \ SET_KERNEL_VKT(V, 384, 128, ValT); \ } else if (topK <= 512) { \ SET_KERNEL_VKT(V, 512, 128, ValT); \ } else if (topK <= 768) { \ SET_KERNEL_VKT(V, 768, 256, ValT); \ } else if (topK <= 1024) { \ SET_KERNEL_VKT(V, 1024, 256, ValT); \ } \ /* else if (topK <= 1536) { SET_KERNEL_VKT(V, 1536, 512); } */ \ /* else if (topK <= 2048) { SET_KERNEL_VKT(V, 2048, 512); } */ \ /* else if (topK <= 3072) { SET_KERNEL_VKT(V, 3072, 1024); } */ \ /* else if (topK <= 4096) { SET_KERNEL_VKT(V, 4096, 1024); } */ \ else { \ RAFT_FAIL("topk must be lower than or equal to 1024"); \ } \ } while (0) int _vecLen = _get_vecLen(ldIK, 2); if (_vecLen == 2) { SET_KERNEL_V(2, ValT); } else if (_vecLen == 1) { SET_KERNEL_V(1, ValT); } cta_kernel<<<blocks, threads, 0, stream>>>(topK, sizeBatch, numElements, (const uint32_t*)inputKeys, ldIK, inputVals, ldIV, (uint32_t*)outputKeys, ldOK, outputVals, ldOV, state, hints, sort); return; } } // namespace raft::neighbors::cagra::detail
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/Select.cuh
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

#include <raft/neighbors/detail/faiss_select/Comparators.cuh>
#include <raft/neighbors/detail/faiss_select/MergeNetworkBlock.cuh>
#include <raft/neighbors/detail/faiss_select/MergeNetworkWarp.cuh>

#include <raft/core/kvp.hpp>
#include <raft/util/cuda_utils.cuh>

namespace raft::neighbors::detail::faiss_select {

// Specialization for block-wide monotonic merges producing a merge sort
// since what we really want is a constexpr loop expansion
template <int NumWarps,
          int NumThreads,
          typename K,
          typename V,
          int NumWarpQ,
          bool Dir,
          typename Comp>
struct FinalBlockMerge {};

template <int NumThreads, typename K, typename V, int NumWarpQ, bool Dir, typename Comp>
struct FinalBlockMerge<1, NumThreads, K, V, NumWarpQ, Dir, Comp> {
  static inline __device__ void merge(K* sharedK, V* sharedV)
  {
    // no merge required; single warp
  }
};

template <int NumThreads, typename K, typename V, int NumWarpQ, bool Dir, typename Comp>
struct FinalBlockMerge<2, NumThreads, K, V, NumWarpQ, Dir, Comp> {
  static inline __device__ void merge(K* sharedK, V* sharedV)
  {
    // Final merge doesn't need to fully merge the second list
    blockMerge<NumThreads, K, V, NumThreads / (WarpSize * 2), NumWarpQ, !Dir, Comp, false>(
      sharedK, sharedV);
  }
};

template <int NumThreads, typename K, typename V, int NumWarpQ, bool Dir, typename Comp>
struct FinalBlockMerge<4, NumThreads, K, V, NumWarpQ, Dir, Comp> {
  static inline __device__ void merge(K* sharedK, V* sharedV)
  {
    blockMerge<NumThreads, K, V, NumThreads / (WarpSize * 2), NumWarpQ, !Dir, Comp>(sharedK,
                                                                                    sharedV);
    // Final merge doesn't need to fully merge the second list
    blockMerge<NumThreads, K, V, NumThreads / (WarpSize * 4), NumWarpQ * 2, !Dir, Comp, false>(
      sharedK, sharedV);
  }
};

template <int NumThreads, typename K, typename V, int NumWarpQ, bool Dir, typename Comp>
struct FinalBlockMerge<8, NumThreads, K, V, NumWarpQ, Dir, Comp> {
  static inline __device__ void merge(K* sharedK, V* sharedV)
  {
    blockMerge<NumThreads, K, V, NumThreads / (WarpSize * 2), NumWarpQ, !Dir, Comp>(sharedK,
                                                                                    sharedV);
    blockMerge<NumThreads, K, V, NumThreads / (WarpSize * 4), NumWarpQ * 2, !Dir, Comp>(sharedK,
                                                                                        sharedV);
    // Final merge doesn't need to fully merge the second list
    blockMerge<NumThreads, K, V, NumThreads / (WarpSize * 8), NumWarpQ * 4, !Dir, Comp, false>(
      sharedK, sharedV);
  }
};

// Block-wide k-selection: each thread keeps a small insertion queue
// (threadK/threadV); overflowing queues are merged into a per-warp shared
// queue, and reduce() merges all warp queues into a single sorted list.
// `Dir` true, produce largest values.
// `Dir` false, produce smallest values.
template <typename K,
          typename V,
          bool Dir,
          typename Comp,
          int NumWarpQ,
          int NumThreadQ,
          int ThreadsPerBlock>
struct BlockSelect {
  static constexpr int kNumWarps          = ThreadsPerBlock / WarpSize;
  static constexpr int kTotalWarpSortSize = NumWarpQ;

  __device__ inline BlockSelect(K initKVal, V initVVal, K* smemK, V* smemV, int k)
    : initK(initKVal),
      initV(initVVal),
      numVals(0),
      warpKTop(initKVal),
      sharedK(smemK),
      sharedV(smemV),
      kMinus1(k - 1)
  {
    static_assert(utils::isPowerOf2(ThreadsPerBlock), "threads must be a power-of-2");
    static_assert(utils::isPowerOf2(NumWarpQ), "warp queue must be power-of-2");

    // Fill the per-thread queue keys with the default value
#pragma unroll
    for (int i = 0; i < NumThreadQ; ++i) {
      threadK[i] = initK;
      threadV[i] = initV;
    }

    int laneId = raft::laneId();
    int warpId = threadIdx.x / WarpSize;
    warpK      = sharedK + warpId * kTotalWarpSortSize;
    warpV      = sharedV + warpId * kTotalWarpSortSize;

    // Fill warp queue (only the actual queue space is fine, not where
    // we write the per-thread queues for merging)
    for (int i = laneId; i < NumWarpQ; i += WarpSize) {
      warpK[i] = initK;
      warpV[i] = initV;
    }

    warpFence();
  }

  // Insert (k, v) into this thread's queue if it beats the current k-th best.
  __device__ inline void addThreadQ(K k, V v)
  {
    if (Dir ? Comp::gt(k, warpKTop) : Comp::lt(k, warpKTop)) {
      // Rotate right
#pragma unroll
      for (int i = NumThreadQ - 1; i > 0; --i) {
        threadK[i] = threadK[i - 1];
        threadV[i] = threadV[i - 1];
      }

      threadK[0] = k;
      threadV[0] = v;
      ++numVals;
    }
  }

  // If any lane's thread queue is full, merge all thread queues into the warp
  // queue and reset them. Must be called warp-synchronously.
  __device__ inline void checkThreadQ()
  {
    bool needSort = (numVals == NumThreadQ);
#if CUDA_VERSION >= 9000
    needSort = __any_sync(0xffffffff, needSort);
#else
    needSort = __any(needSort);
#endif

    if (!needSort) {
      // no lanes have triggered a sort
      return;
    }

    // This has a trailing warpFence
    mergeWarpQ();

    // Any top-k elements have been merged into the warp queue; we're
    // free to reset the thread queues
    numVals = 0;

#pragma unroll
    for (int i = 0; i < NumThreadQ; ++i) {
      threadK[i] = initK;
      threadV[i] = initV;
    }

    // We have to beat at least this element
    warpKTop = warpK[kMinus1];

    warpFence();
  }

  /// This function handles sorting and merging together the
  /// per-thread queues with the warp-wide queue, creating a sorted
  /// list across both
  __device__ inline void mergeWarpQ()
  {
    int laneId = raft::laneId();

    // Sort all of the per-thread queues
    warpSortAnyRegisters<K, V, NumThreadQ, !Dir, Comp>(threadK, threadV);

    constexpr int kNumWarpQRegisters = NumWarpQ / WarpSize;
    K warpKRegisters[kNumWarpQRegisters];
    V warpVRegisters[kNumWarpQRegisters];

#pragma unroll
    for (int i = 0; i < kNumWarpQRegisters; ++i) {
      warpKRegisters[i] = warpK[i * WarpSize + laneId];
      warpVRegisters[i] = warpV[i * WarpSize + laneId];
    }

    warpFence();

    // The warp queue is already sorted, and now that we've sorted the
    // per-thread queue, merge both sorted lists together, producing
    // one sorted list
    warpMergeAnyRegisters<K, V, kNumWarpQRegisters, NumThreadQ, !Dir, Comp, false>(
      warpKRegisters, warpVRegisters, threadK, threadV);

    // Write back out the warp queue
#pragma unroll
    for (int i = 0; i < kNumWarpQRegisters; ++i) {
      warpK[i * WarpSize + laneId] = warpKRegisters[i];
      warpV[i * WarpSize + laneId] = warpVRegisters[i];
    }

    warpFence();
  }

  /// WARNING: all threads in a warp must participate in this.
  /// Otherwise, you must call the constituent parts separately.
  __device__ inline void add(K k, V v)
  {
    addThreadQ(k, v);
    checkThreadQ();
  }

  // Produce the final block-wide sorted result in sharedK/sharedV.
  __device__ inline void reduce()
  {
    // Have all warps dump and merge their queues; this will produce
    // the final per-warp results
    mergeWarpQ();

    // block-wide dep; thus far, all warps have been completely
    // independent
    __syncthreads();

    // All warp queues are contiguous in smem.
    // Now, we have kNumWarps lists of NumWarpQ elements.
    // This is a power of 2.
    FinalBlockMerge<kNumWarps, ThreadsPerBlock, K, V, NumWarpQ, Dir, Comp>::merge(sharedK,
                                                                                  sharedV);

    // The block-wide merge has a trailing syncthreads
  }

  // Default element key
  const K initK;

  // Default element value
  const V initV;

  // Number of valid elements in our thread queue
  int numVals;

  // The k-th highest (Dir) or lowest (!Dir) element
  K warpKTop;

  // Thread queue values
  K threadK[NumThreadQ];
  V threadV[NumThreadQ];

  // Queues for all warps
  K* sharedK;
  V* sharedV;

  // Our warp's queue (points into sharedK/sharedV)
  // warpK[0] is highest (Dir) or lowest (!Dir)
  K* warpK;
  V* warpV;

  // This is a cached k-1 value
  int kMinus1;
};

/// Specialization for k == 1 (NumWarpQ == 1)
template <typename K, typename V, bool Dir, typename Comp, int NumThreadQ, int ThreadsPerBlock>
struct BlockSelect<K, V, Dir, Comp, 1, NumThreadQ, ThreadsPerBlock> {
  static constexpr int kNumWarps = ThreadsPerBlock / WarpSize;

  __device__ inline BlockSelect(K initK, V initV, K* smemK, V* smemV, int k)
    : threadK(initK), threadV(initV), sharedK(smemK), sharedV(smemV)
  {
  }

  // Keep only the single best element seen by this thread.
  __device__ inline void addThreadQ(K k, V v)
  {
    bool swap = Dir ? Comp::gt(k, threadK) : Comp::lt(k, threadK);
    threadK   = swap ? k : threadK;
    threadV   = swap ? v : threadV;
  }

  __device__ inline void checkThreadQ()
  {
    // We don't need to do anything here, since the warp doesn't
    // cooperate until the end
  }

  __device__ inline void add(K k, V v) { addThreadQ(k, v); }

  // Block-wide reduction of the single best (key, value); result lands in
  // sharedK[0]/sharedV[0].
  __device__ inline void reduce()
  {
    // Reduce within the warp
    KeyValuePair<K, V> pair(threadK, threadV);

    if (Dir) {
      pair = warpReduce(pair, max_op{});
    } else {
      pair = warpReduce(pair, min_op{});
    }

    // Each warp writes out a single value
    int laneId = raft::laneId();
    int warpId = threadIdx.x / WarpSize;

    if (laneId == 0) {
      sharedK[warpId] = pair.key;
      sharedV[warpId] = pair.value;
    }

    __syncthreads();

    // We typically use this for small blocks (<= 128), just having the
    // first thread in the block perform the reduction across warps is
    // faster
    if (threadIdx.x == 0) {
      threadK = sharedK[0];
      threadV = sharedV[0];

#pragma unroll
      for (int i = 1; i < kNumWarps; ++i) {
        K k = sharedK[i];
        V v = sharedV[i];

        bool swap = Dir ? Comp::gt(k, threadK) : Comp::lt(k, threadK);
        threadK   = swap ? k : threadK;
        threadV   = swap ? v : threadV;
      }

      // Hopefully a thread's smem reads/writes are ordered wrt
      // itself, so no barrier needed :)
      sharedK[0] = threadK;
      sharedV[0] = threadV;
    }

    // In case other threads wish to read this value
    __syncthreads();
  }

  // threadK is lowest (Dir) or highest (!Dir)
  K threadK;
  V threadV;

  // Where we reduce in smem
  K* sharedK;
  V* sharedV;
};

//
// per-warp WarpSelect
//

// Warp-scoped variant of the selection above: the warp queue is held entirely
// in registers (kNumWarpQRegisters per lane) instead of shared memory.
// `Dir` true, produce largest values.
// `Dir` false, produce smallest values.
template <typename K,
          typename V,
          bool Dir,
          typename Comp,
          int NumWarpQ,
          int NumThreadQ,
          int ThreadsPerBlock>
struct WarpSelect {
  static constexpr int kNumWarpQRegisters = NumWarpQ / WarpSize;

  __device__ inline WarpSelect(K initKVal, V initVVal, int k)
    : initK(initKVal), initV(initVVal), numVals(0), warpKTop(initKVal), kLane((k - 1) % WarpSize)
  {
    static_assert(utils::isPowerOf2(ThreadsPerBlock), "threads must be a power-of-2");
    static_assert(utils::isPowerOf2(NumWarpQ), "warp queue must be power-of-2");

    // Fill the per-thread queue keys with the default value
#pragma unroll
    for (int i = 0; i < NumThreadQ; ++i) {
      threadK[i] = initK;
      threadV[i] = initV;
    }

    // Fill the warp queue with the default value
#pragma unroll
    for (int i = 0; i < kNumWarpQRegisters; ++i) {
      warpK[i] = initK;
      warpV[i] = initV;
    }
  }

  // Insert (k, v) into this thread's queue if it beats the current k-th best.
  __device__ inline void addThreadQ(K k, V v)
  {
    if (Dir ? Comp::gt(k, warpKTop) : Comp::lt(k, warpKTop)) {
      // Rotate right
#pragma unroll
      for (int i = NumThreadQ - 1; i > 0; --i) {
        threadK[i] = threadK[i - 1];
        threadV[i] = threadV[i - 1];
      }

      threadK[0] = k;
      threadV[0] = v;
      ++numVals;
    }
  }

  // If any lane's thread queue is full, merge all thread queues into the
  // register-resident warp queue and reset them. Warp-synchronous.
  __device__ inline void checkThreadQ()
  {
    bool needSort = (numVals == NumThreadQ);
#if CUDA_VERSION >= 9000
    needSort = __any_sync(0xffffffff, needSort);
#else
    needSort = __any(needSort);
#endif

    if (!needSort) {
      // no lanes have triggered a sort
      return;
    }

    mergeWarpQ();

    // Any top-k elements have been merged into the warp queue; we're
    // free to reset the thread queues
    numVals = 0;

#pragma unroll
    for (int i = 0; i < NumThreadQ; ++i) {
      threadK[i] = initK;
      threadV[i] = initV;
    }

    // We have to beat at least this element
    warpKTop = shfl(warpK[kNumWarpQRegisters - 1], kLane);
  }

  /// This function handles sorting and merging together the
  /// per-thread queues with the warp-wide queue, creating a sorted
  /// list across both
  __device__ inline void mergeWarpQ()
  {
    // Sort all of the per-thread queues
    warpSortAnyRegisters<K, V, NumThreadQ, !Dir, Comp>(threadK, threadV);

    // The warp queue is already sorted, and now that we've sorted the
    // per-thread queue, merge both sorted lists together, producing
    // one sorted list
    warpMergeAnyRegisters<K, V, kNumWarpQRegisters, NumThreadQ, !Dir, Comp, false>(
      warpK, warpV, threadK, threadV);
  }

  /// WARNING: all threads in a warp must participate in this.
  /// Otherwise, you must call the constituent parts separately.
  __device__ inline void add(K k, V v)
  {
    addThreadQ(k, v);
    checkThreadQ();
  }

  __device__ inline void reduce()
  {
    // Have all warps dump and merge their queues; this will produce
    // the final per-warp results
    mergeWarpQ();
  }

  /// Dump final k selected values for this warp out
  __device__ inline void writeOut(K* outK, V* outV, int k)
  {
    int laneId = raft::laneId();

#pragma unroll
    for (int i = 0; i < kNumWarpQRegisters; ++i) {
      int idx = i * WarpSize + laneId;

      if (idx < k) {
        outK[idx] = warpK[i];
        outV[idx] = warpV[i];
      }
    }
  }

  // Default element key
  const K initK;

  // Default element value
  const V initV;

  // Number of valid elements in our thread queue
  int numVals;

  // The k-th highest (Dir) or lowest (!Dir) element
  K warpKTop;

  // Thread queue values
  K threadK[NumThreadQ];
  V threadV[NumThreadQ];

  // warpK[0] is highest (Dir) or lowest (!Dir)
  K warpK[kNumWarpQRegisters];
  V warpV[kNumWarpQRegisters];

  // This is what lane we should load an approximation (>=k) to the
  // kth element from the last register in the warp queue (i.e.,
  // warpK[kNumWarpQRegisters - 1]).
  int kLane;
};

/// Specialization for k == 1 (NumWarpQ == 1)
template <typename K, typename V, bool Dir, typename Comp, int NumThreadQ, int ThreadsPerBlock>
struct WarpSelect<K, V, Dir, Comp, 1, NumThreadQ, ThreadsPerBlock> {
  static constexpr int kNumWarps = ThreadsPerBlock / WarpSize;

  __device__ inline WarpSelect(K initK, V initV, int k) : threadK(initK), threadV(initV) {}

  // Keep only the single best element seen by this thread.
  __device__ inline void addThreadQ(K k, V v)
  {
    bool swap = Dir ? Comp::gt(k, threadK) : Comp::lt(k, threadK);
    threadK   = swap ? k : threadK;
    threadV   = swap ? v : threadV;
  }

  __device__ inline void checkThreadQ()
  {
    // We don't need to do anything here, since the warp doesn't
    // cooperate until the end
  }

  __device__ inline void add(K k, V v) { addThreadQ(k, v); }

  __device__ inline void reduce()
  {
    // Reduce within the warp
    KeyValuePair<K, V> pair(threadK, threadV);

    if (Dir) {
      pair = warpReduce(pair, max_op{});
    } else {
      pair = warpReduce(pair, min_op{});
    }

    threadK = pair.key;
    threadV = pair.value;
  }

  /// Dump final k selected values for this warp out
  __device__ inline void writeOut(K* outK, V* outV, int k)
  {
    if (raft::laneId() == 0) {
      *outK = threadK;
      *outV = threadV;
    }
  }

  // threadK is lowest (Dir) or highest (!Dir)
  K threadK;
  V threadV;
};

}  // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/StaticUtils.h
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

#include <cuda.h>

// allow usage for non-CUDA files
#ifndef __host__
#define __host__
#define __device__
#endif

namespace raft::neighbors::detail::faiss_select::utils {

/// True iff `v` is a non-zero power of two (clears exactly the lowest set bit).
template <typename T>
constexpr __host__ __device__ bool isPowerOf2(T v)
{
  return v != 0 && (v & (v - 1)) == 0;
}

static_assert(isPowerOf2(2048), "isPowerOf2");
static_assert(!isPowerOf2(3333), "isPowerOf2");

/// Smallest power of two strictly greater than `v`; exact powers of two are doubled.
template <typename T>
constexpr __host__ __device__ T nextHighestPowerOf2(T v)
{
  if (isPowerOf2(v)) { return v * (T)2; }
  return (T)1 << (log2(v) + 1);
}

static_assert(nextHighestPowerOf2(1) == 2, "nextHighestPowerOf2");
static_assert(nextHighestPowerOf2(2) == 4, "nextHighestPowerOf2");
static_assert(nextHighestPowerOf2(3) == 4, "nextHighestPowerOf2");
static_assert(nextHighestPowerOf2(4) == 8, "nextHighestPowerOf2");

static_assert(nextHighestPowerOf2(15) == 16, "nextHighestPowerOf2");
static_assert(nextHighestPowerOf2(16) == 32, "nextHighestPowerOf2");
static_assert(nextHighestPowerOf2(17) == 32, "nextHighestPowerOf2");

static_assert(nextHighestPowerOf2(1536000000u) == 2147483648u, "nextHighestPowerOf2");
static_assert(nextHighestPowerOf2((size_t)2147483648ULL) == (size_t)4294967296ULL,
              "nextHighestPowerOf2");

}  // namespace raft::neighbors::detail::faiss_select::utils
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/DistanceUtils.h
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

// Previously this header relied on transitive includes for std::min and
// size_t; include what we use so it stands alone.
#include <algorithm>
#include <cstddef>

namespace raft::neighbors::detail::faiss_select {

// If the inner size (dim) of the vectors is small, we want a larger query tile
// size, like 1024
/**
 * Choose a (tileRows x tileCols) tile shape for the batched distance
 * computation, sized so that two in-flight tiles fit a memory budget derived
 * from the GPU's total memory.
 *
 * @param numQueries   total number of query rows
 * @param numCentroids total number of centroid columns
 * @param dim          inner dimension of the vectors
 * @param elementSize  bytes per matrix element
 * @param totalMem     total device memory in bytes (selects the usage target)
 * @param[out] tileRows chosen tile height (<= numQueries)
 * @param[out] tileCols chosen tile width (<= numCentroids)
 */
inline void chooseTileSize(size_t numQueries,
                           size_t numCentroids,
                           size_t dim,
                           size_t elementSize,
                           size_t totalMem,
                           size_t& tileRows,
                           size_t& tileCols)
{
  // The matrix multiplication should be large enough to be efficient, but if
  // it is too large, we seem to lose efficiency as opposed to
  // double-streaming. Each tile size here defines 1/2 of the memory use due
  // to double streaming. We ignore available temporary memory, as that is
  // adjusted independently by the user and can thus meet these requirements
  // (or not). For <= 4 GB GPUs, prefer 512 MB of usage. For <= 8 GB GPUs,
  // prefer 768 MB of usage. Otherwise, prefer 1 GB of usage.
  size_t targetUsage = 0;

  if (totalMem <= ((size_t)4) * 1024 * 1024 * 1024) {
    targetUsage = 512 * 1024 * 1024;
  } else if (totalMem <= ((size_t)8) * 1024 * 1024 * 1024) {
    targetUsage = 768 * 1024 * 1024;
  } else {
    targetUsage = 1024 * 1024 * 1024;
  }

  // Convert the byte budget into an element budget for one of the two
  // double-streamed tiles.
  targetUsage /= 2 * elementSize;

  // 512 seems to be a batch size sweetspot for float32.
  // If we are on float16, increase to 512.
  // If the k size (vec dim) of the matrix multiplication is small (<= 32),
  // increase to 1024.
  size_t preferredTileRows = 512;
  if (dim <= 32) { preferredTileRows = 1024; }

  tileRows = std::min(preferredTileRows, numQueries);

  // tileCols is the remainder size
  tileCols = std::min(targetUsage / preferredTileRows, numCentroids);
}
}  // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/MergeNetworkUtils.cuh
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

namespace raft::neighbors::detail::faiss_select {

// Exchange x and y iff `swap` is true. Written in select (ternary) form rather
// than an `if` — presumably to encourage branchless/predicated codegen in the
// warp-uniform sorting networks that call it; confirm before restructuring.
template <typename T>
inline __device__ void swap(bool swap, T& x, T& y)
{
  T tmp = x;
  x     = swap ? y : x;
  y     = swap ? tmp : y;
}

// Overwrite x with y iff `assign` is true (same select-form rationale as above).
template <typename T>
inline __device__ void assign(bool assign, T& x, T y)
{
  x = assign ? y : x;
}
}  // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/MergeNetworkWarp.cuh
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

#include <raft/neighbors/detail/faiss_select/MergeNetworkUtils.cuh>
#include <raft/neighbors/detail/faiss_select/StaticUtils.h>

#include <raft/util/cuda_utils.cuh>

namespace raft::neighbors::detail::faiss_select {

//
// This file contains functions to:
//
// -perform bitonic merges on pairs of sorted lists, held in
// registers. Each list contains N * WarpSize (multiple of 32)
// elements for some N.
// The bitonic merge is implemented for arbitrary sizes;
// sorted list A of size N1 * WarpSize registers
// sorted list B of size N2 * WarpSize registers =>
// sorted list C if size (N1 + N2) * WarpSize registers. N1 and N2
// are >= 1 and don't have to be powers of 2.
//
// -perform bitonic sorts on a set of N * WarpSize key/value pairs
// held in registers, by using the above bitonic merge as a
// primitive.
// N can be an arbitrary N >= 1; i.e., the bitonic sort here supports
// odd sizes and doesn't require the input to be a power of 2.
//
// The sort or merge network is completely statically instantiated via
// template specialization / expansion and constexpr, and it uses warp
// shuffles to exchange values between warp lanes.
//
// A note about comparisons:
//
// For a sorting network of keys only, we only need one
// comparison (a < b). However, what we really need to know is
// if one lane chooses to exchange a value, then the
// corresponding lane should also do the exchange.
// Thus, if one just uses the negation !(x < y) in the higher
// lane, this will also include the case where (x == y). Thus, one
// lane in fact performs an exchange and the other doesn't, but
// because the only value being exchanged is equivalent, nothing has
// changed.
// So, you can get away with just one comparison and its negation.
//
// If we're sorting keys and values, where equivalent keys can
// exist, then this is a problem, since we want to treat (x, v1)
// as not equivalent to (x, v2).
//
// To remedy this, you can either compare with a lexicographic
// ordering (a.k < b.k || (a.k == b.k && a.v < b.v)), which since
// we're predicating all of the choices results in 3 comparisons
// being executed, or we can invert the selection so that there is no
// middle choice of equality; the other lane will likewise
// check that (b.k > a.k) (the higher lane has the values
// swapped). Then, the first lane swaps if and only if the
// second lane swaps; if both lanes have equivalent keys, no
// swap will be performed. This results in only two comparisons
// being executed.
//
// If you don't consider values as well, then this does not produce a
// consistent ordering among (k, v) pairs with equivalent keys but
// different values; for us, we don't really care about ordering or
// stability here.
//
// I have tried both re-arranging the order in the higher lane to get
// away with one comparison or adding the value to the check; both
// result in greater register consumption or lower speed than just
// performing both < and > comparisons with the variables, so I just
// stick with this.

// This function merges WarpSize / 2L lists in parallel using warp
// shuffles.
// It works on at most size-16 lists, as we need 32 threads for this
// shuffle merge.
//
// If IsBitonic is false, the first stage is reversed, so we don't
// need to sort directionally. It's still technically a bitonic sort.
//
// Template parameters:
//   K, V      : key and value types held in registers (one element per lane)
//   L         : length of each sorted sub-list in lanes; must be a power of 2, <= WarpSize / 2
//   Dir       : true => produce descending order, false => ascending
//               (direction follows the Comp::gt / Comp::lt selection below)
//   Comp      : comparator providing static lt()/gt() (see Comparators.cuh)
//   IsBitonic : true if the input is already a bitonic sequence; when false,
//               the first exchange stage is reversed to form one
template <typename K, typename V, int L, bool Dir, typename Comp, bool IsBitonic>
inline __device__ void warpBitonicMergeLE16(K& k, V& v)
{
  static_assert(utils::isPowerOf2(L), "L must be a power-of-2");
  static_assert(L <= WarpSize / 2, "merge list size must be <= 16");

  int laneId = raft::laneId();

  if (!IsBitonic) {
    // Reverse the first comparison stage.
    // For example, merging a list of size 8 has the exchanges:
    // 0 <-> 15, 1 <-> 14, ...
    K otherK = shfl_xor(k, 2 * L - 1);
    V otherV = shfl_xor(v, 2 * L - 1);

    // Whether we are the lesser thread in the exchange
    bool small = !(laneId & L);

    if (Dir) {
      // See the comment above how performing both of these
      // comparisons in the warp seems to win out over the
      // alternatives in practice
      bool s = small ? Comp::gt(k, otherK) : Comp::lt(k, otherK);
      assign(s, k, otherK);
      assign(s, v, otherV);

    } else {
      bool s = small ? Comp::lt(k, otherK) : Comp::gt(k, otherK);
      assign(s, k, otherK);
      assign(s, v, otherV);
    }
  }

  // Standard bitonic merge stages: exchange distance halves each step.
#pragma unroll
  for (int stride = IsBitonic ? L : L / 2; stride > 0; stride /= 2) {
    K otherK = shfl_xor(k, stride);
    V otherV = shfl_xor(v, stride);

    // Whether we are the lesser thread in the exchange
    bool small = !(laneId & stride);

    if (Dir) {
      bool s = small ? Comp::gt(k, otherK) : Comp::lt(k, otherK);
      assign(s, k, otherK);
      assign(s, v, otherV);

    } else {
      bool s = small ? Comp::lt(k, otherK) : Comp::gt(k, otherK);
      assign(s, k, otherK);
      assign(s, v, otherV);
    }
  }
}

// Template for performing a bitonic merge of an arbitrary set of
// registers.
// N is the number of registers per lane; Low/Pow2 select the recursion
// specialization below (Low = whether this is the low half of a split,
// Pow2 = whether N is a power of 2).
template <typename K, typename V, int N, bool Dir, typename Comp, bool Low, bool Pow2>
struct BitonicMergeStep {};

//
// Power-of-2 merge specialization
//

// All merges eventually call this
template <typename K, typename V, bool Dir, typename Comp, bool Low>
struct BitonicMergeStep<K, V, 1, Dir, Comp, Low, true> {
  static inline __device__ void merge(K k[1], V v[1])
  {
    // Use warp shuffles
    warpBitonicMergeLE16<K, V, 16, Dir, Comp, true>(k[0], v[0]);
  }
};

// Power-of-2 N > 1: one exchange pass between the two halves, then
// recurse independently into each half.
template <typename K, typename V, int N, bool Dir, typename Comp, bool Low>
struct BitonicMergeStep<K, V, N, Dir, Comp, Low, true> {
  static inline __device__ void merge(K k[N], V v[N])
  {
    static_assert(utils::isPowerOf2(N), "must be power of 2");
    static_assert(N > 1, "must be N > 1");

#pragma unroll
    for (int i = 0; i < N / 2; ++i) {
      K& ka = k[i];
      V& va = v[i];

      K& kb = k[i + N / 2];
      V& vb = v[i + N / 2];

      bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      swap(s, ka, kb);
      swap(s, va, vb);
    }

    {
      // Recurse on the low half (copied to a local array so the
      // recursive template sees an exactly-sized register array).
      K newK[N / 2];
      V newV[N / 2];

#pragma unroll
      for (int i = 0; i < N / 2; ++i) {
        newK[i] = k[i];
        newV[i] = v[i];
      }

      BitonicMergeStep<K, V, N / 2, Dir, Comp, true, true>::merge(newK, newV);

#pragma unroll
      for (int i = 0; i < N / 2; ++i) {
        k[i] = newK[i];
        v[i] = newV[i];
      }
    }

    {
      // Recurse on the high half.
      K newK[N / 2];
      V newV[N / 2];

#pragma unroll
      for (int i = 0; i < N / 2; ++i) {
        newK[i] = k[i + N / 2];
        newV[i] = v[i + N / 2];
      }

      BitonicMergeStep<K, V, N / 2, Dir, Comp, false, true>::merge(newK, newV);

#pragma unroll
      for (int i = 0; i < N / 2; ++i) {
        k[i + N / 2] = newK[i];
        v[i + N / 2] = newV[i];
      }
    }
  }
};

//
// Non-power-of-2 merge specialization
//

// Low recursion
template <typename K, typename V, int N, bool Dir, typename Comp>
struct BitonicMergeStep<K, V, N, Dir, Comp, true, false> {
  static inline __device__ void merge(K k[N], V v[N])
  {
    static_assert(!utils::isPowerOf2(N), "must be non-power-of-2");
    static_assert(N >= 3, "must be N >= 3");

    constexpr int kNextHighestPowerOf2 = utils::nextHighestPowerOf2(N);

    // Exchange only the overlapping prefix against the high half;
    // the remainder is already in place for this split.
#pragma unroll
    for (int i = 0; i < N - kNextHighestPowerOf2 / 2; ++i) {
      K& ka = k[i];
      V& va = v[i];

      K& kb = k[i + kNextHighestPowerOf2 / 2];
      V& vb = v[i + kNextHighestPowerOf2 / 2];

      bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      swap(s, ka, kb);
      swap(s, va, vb);
    }

    constexpr int kLowSize  = N - kNextHighestPowerOf2 / 2;
    constexpr int kHighSize = kNextHighestPowerOf2 / 2;
    {
      K newK[kLowSize];
      V newV[kLowSize];

#pragma unroll
      for (int i = 0; i < kLowSize; ++i) {
        newK[i] = k[i];
        newV[i] = v[i];
      }

      constexpr bool kLowIsPowerOf2 = utils::isPowerOf2(N - kNextHighestPowerOf2 / 2);
      // FIXME: compiler doesn't like this expression? compiler bug?
      //            constexpr bool kLowIsPowerOf2 = utils::isPowerOf2(kLowSize);
      BitonicMergeStep<K,
                       V,
                       kLowSize,
                       Dir,
                       Comp,
                       true,  // low
                       kLowIsPowerOf2>::merge(newK, newV);

#pragma unroll
      for (int i = 0; i < kLowSize; ++i) {
        k[i] = newK[i];
        v[i] = newV[i];
      }
    }

    {
      K newK[kHighSize];
      V newV[kHighSize];

#pragma unroll
      for (int i = 0; i < kHighSize; ++i) {
        newK[i] = k[i + kLowSize];
        newV[i] = v[i + kLowSize];
      }

      constexpr bool kHighIsPowerOf2 = utils::isPowerOf2(kNextHighestPowerOf2 / 2);
      // FIXME: compiler doesn't like this expression? compiler bug?
      //            constexpr bool kHighIsPowerOf2 =
      //            utils::isPowerOf2(kHighSize);
      BitonicMergeStep<K,
                       V,
                       kHighSize,
                       Dir,
                       Comp,
                       false,  // high
                       kHighIsPowerOf2>::merge(newK, newV);

#pragma unroll
      for (int i = 0; i < kHighSize; ++i) {
        k[i + kLowSize] = newK[i];
        v[i + kLowSize] = newV[i];
      }
    }
  }
};

// High recursion
template <typename K, typename V, int N, bool Dir, typename Comp>
struct BitonicMergeStep<K, V, N, Dir, Comp, false, false> {
  static inline __device__ void merge(K k[N], V v[N])
  {
    static_assert(!utils::isPowerOf2(N), "must be non-power-of-2");
    static_assert(N >= 3, "must be N >= 3");

    constexpr int kNextHighestPowerOf2 = utils::nextHighestPowerOf2(N);

#pragma unroll
    for (int i = 0; i < N - kNextHighestPowerOf2 / 2; ++i) {
      K& ka = k[i];
      V& va = v[i];

      K& kb = k[i + kNextHighestPowerOf2 / 2];
      V& vb = v[i + kNextHighestPowerOf2 / 2];

      bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      swap(s, ka, kb);
      swap(s, va, vb);
    }

    // NOTE: the low/high size split is mirrored relative to the "low"
    // specialization above; here the low half takes the power-of-2 size.
    constexpr int kLowSize  = kNextHighestPowerOf2 / 2;
    constexpr int kHighSize = N - kNextHighestPowerOf2 / 2;
    {
      K newK[kLowSize];
      V newV[kLowSize];

#pragma unroll
      for (int i = 0; i < kLowSize; ++i) {
        newK[i] = k[i];
        newV[i] = v[i];
      }

      constexpr bool kLowIsPowerOf2 = utils::isPowerOf2(kNextHighestPowerOf2 / 2);
      // FIXME: compiler doesn't like this expression? compiler bug?
      //            constexpr bool kLowIsPowerOf2 = utils::isPowerOf2(kLowSize);
      BitonicMergeStep<K,
                       V,
                       kLowSize,
                       Dir,
                       Comp,
                       true,  // low
                       kLowIsPowerOf2>::merge(newK, newV);

#pragma unroll
      for (int i = 0; i < kLowSize; ++i) {
        k[i] = newK[i];
        v[i] = newV[i];
      }
    }

    {
      K newK[kHighSize];
      V newV[kHighSize];

#pragma unroll
      for (int i = 0; i < kHighSize; ++i) {
        newK[i] = k[i + kLowSize];
        newV[i] = v[i + kLowSize];
      }

      constexpr bool kHighIsPowerOf2 = utils::isPowerOf2(N - kNextHighestPowerOf2 / 2);
      // FIXME: compiler doesn't like this expression? compiler bug?
      //            constexpr bool kHighIsPowerOf2 =
      //            utils::isPowerOf2(kHighSize);
      BitonicMergeStep<K,
                       V,
                       kHighSize,
                       Dir,
                       Comp,
                       false,  // high
                       kHighIsPowerOf2>::merge(newK, newV);

#pragma unroll
      for (int i = 0; i < kHighSize; ++i) {
        k[i + kLowSize] = newK[i];
        v[i + kLowSize] = newV[i];
      }
    }
  }
};

/// Merges two sets of registers across the warp of any size;
/// i.e., merges a sorted k/v list of size WarpSize * N1 with a
/// sorted k/v list of size WarpSize * N2, where N1 and N2 are any
/// value >= 1.
/// If FullMerge is false, only (k1, v1) is guaranteed fully merged on
/// return; (k2, v2) is left partially processed.
template <typename K,
          typename V,
          int N1,
          int N2,
          bool Dir,
          typename Comp,
          bool FullMerge = true>
inline __device__ void warpMergeAnyRegisters(K k1[N1], V v1[N1], K k2[N2], V v2[N2])
{
  constexpr int kSmallestN = N1 < N2 ? N1 : N2;

#pragma unroll
  for (int i = 0; i < kSmallestN; ++i) {
    // Pair the tail of list 1 with the head of list 2 (list 2 is
    // traversed in reverse across the warp via the lane-reversing
    // shuffle below).
    K& ka = k1[N1 - 1 - i];
    V& va = v1[N1 - 1 - i];

    K& kb = k2[i];
    V& vb = v2[i];

    K otherKa;
    V otherVa;

    if (FullMerge) {
      // We need the other values
      otherKa = shfl_xor(ka, WarpSize - 1);
      otherVa = shfl_xor(va, WarpSize - 1);
    }

    K otherKb = shfl_xor(kb, WarpSize - 1);
    V otherVb = shfl_xor(vb, WarpSize - 1);

    // ka is always first in the list, so we needn't use our lane
    // in this comparison
    bool swapa = Dir ? Comp::gt(ka, otherKb) : Comp::lt(ka, otherKb);
    assign(swapa, ka, otherKb);
    assign(swapa, va, otherVb);

    // kb is always second in the list, so we needn't use our lane
    // in this comparison
    if (FullMerge) {
      bool swapb = Dir ? Comp::lt(kb, otherKa) : Comp::gt(kb, otherKa);
      assign(swapb, kb, otherKa);
      assign(swapb, vb, otherVa);

    } else {
      // We don't care about updating elements in the second list
    }
  }

  BitonicMergeStep<K, V, N1, Dir, Comp, true, utils::isPowerOf2(N1)>::merge(k1, v1);
  if (FullMerge) {
    // Only if we care about N2 do we need to bother merging it fully
    BitonicMergeStep<K, V, N2, Dir, Comp, false, utils::isPowerOf2(N2)>::merge(k2, v2);
  }
}

// Recursive template that uses the above bitonic merge to perform a
// bitonic sort: split roughly in half, sort both halves recursively,
// then merge them.
template <typename K, typename V, int N, bool Dir, typename Comp>
struct BitonicSortStep {
  static inline __device__ void sort(K k[N], V v[N])
  {
    static_assert(N > 1, "did not hit specialized case");

    // Sort recursively
    constexpr int kSizeA = N / 2;
    constexpr int kSizeB = N - kSizeA;

    K aK[kSizeA];
    V aV[kSizeA];

#pragma unroll
    for (int i = 0; i < kSizeA; ++i) {
      aK[i] = k[i];
      aV[i] = v[i];
    }

    BitonicSortStep<K, V, kSizeA, Dir, Comp>::sort(aK, aV);

    K bK[kSizeB];
    V bV[kSizeB];

#pragma unroll
    for (int i = 0; i < kSizeB; ++i) {
      bK[i] = k[i + kSizeA];
      bV[i] = v[i + kSizeA];
    }

    BitonicSortStep<K, V, kSizeB, Dir, Comp>::sort(bK, bV);

    // Merge halves
    warpMergeAnyRegisters<K, V, kSizeA, kSizeB, Dir, Comp>(aK, aV, bK, bV);

#pragma unroll
    for (int i = 0; i < kSizeA; ++i) {
      k[i] = aK[i];
      v[i] = aV[i];
    }

#pragma unroll
    for (int i = 0; i < kSizeB; ++i) {
      k[i + kSizeA] = bK[i];
      v[i + kSizeA] = bV[i];
    }
  }
};

// Single warp (N == 1) sorting specialization: a full 32-element
// bitonic sort built from successively larger shuffle merges.
template <typename K, typename V, bool Dir, typename Comp>
struct BitonicSortStep<K, V, 1, Dir, Comp> {
  static inline __device__ void sort(K k[1], V v[1])
  {
    // Update this code if this changes
    // should go from 1 -> WarpSize in multiples of 2
    static_assert(WarpSize == 32, "unexpected warp size");

    warpBitonicMergeLE16<K, V, 1, Dir, Comp, false>(k[0], v[0]);
    warpBitonicMergeLE16<K, V, 2, Dir, Comp, false>(k[0], v[0]);
    warpBitonicMergeLE16<K, V, 4, Dir, Comp, false>(k[0], v[0]);
    warpBitonicMergeLE16<K, V, 8, Dir, Comp, false>(k[0], v[0]);
    warpBitonicMergeLE16<K, V, 16, Dir, Comp, false>(k[0], v[0]);
  }
};

/// Sort a list of WarpSize * N elements in registers, where N is an
/// arbitrary >= 1
template <typename K, typename V, int N, bool Dir, typename Comp>
inline __device__ void warpSortAnyRegisters(K k[N], V v[N])
{
  BitonicSortStep<K, V, N, Dir, Comp>::sort(k, v);
}

}  // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/Comparators.cuh
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file thirdparty/LICENSES/LICENSE.faiss */ #pragma once #include <cuda.h> #include <cuda_fp16.h> namespace raft::neighbors::detail::faiss_select { template <typename T> struct Comparator { __device__ static inline bool lt(T a, T b) { return a < b; } __device__ static inline bool gt(T a, T b) { return a > b; } }; template <> struct Comparator<half> { __device__ static inline bool lt(half a, half b) { return __hlt(a, b); } __device__ static inline bool gt(half a, half b) { return __hgt(a, b); } }; } // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/key_value_block_select.cuh
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

#include <raft/neighbors/detail/faiss_select/MergeNetworkUtils.cuh>
#include <raft/neighbors/detail/faiss_select/Select.cuh>

// TODO: Need to think further about the impact (and new boundaries created) on the registers
// because this will change the max k that can be processed. One solution might be to break
// up k into multiple batches for larger k.

namespace raft::neighbors::detail::faiss_select {

// Block-wide top-k selection over (key, KeyValuePair) elements.
// Each thread keeps a small unsorted queue (NumThreadQ); each warp keeps a
// sorted queue of NumWarpQ elements in shared memory. Thread queues are
// periodically merged into the warp queue, and reduce() merges all warp
// queues into the final block-wide result in shared memory.
//
// `Dir` true, produce largest values.
// `Dir` false, produce smallest values.
template <typename K,
          typename V,
          bool Dir,
          typename Comp,
          int NumWarpQ,
          int NumThreadQ,
          int ThreadsPerBlock>
struct KeyValueBlockSelect {
  static constexpr int kNumWarps          = ThreadsPerBlock / WarpSize;
  static constexpr int kTotalWarpSortSize = NumWarpQ;

  // smemK/smemV must provide kNumWarps * NumWarpQ elements each.
  // initKVal is the sentinel key (the "worst" value for direction Dir);
  // initVKey/initVVal are the sentinel members of the value pair.
  __device__ inline KeyValueBlockSelect(
    K initKVal, K initVKey, V initVVal, K* smemK, KeyValuePair<K, V>* smemV, int k)
    : initK(initKVal),
      initVk(initVKey),
      initVv(initVVal),
      numVals(0),
      warpKTop(initKVal),
      warpKTopRDist(initKVal),
      sharedK(smemK),
      sharedV(smemV),
      kMinus1(k - 1)
  {
    static_assert(utils::isPowerOf2(ThreadsPerBlock), "threads must be a power-of-2");
    static_assert(utils::isPowerOf2(NumWarpQ), "warp queue must be power-of-2");

    // Fill the per-thread queue keys with the default value
#pragma unroll
    for (int i = 0; i < NumThreadQ; ++i) {
      threadK[i]       = initK;
      threadV[i].key   = initVk;
      threadV[i].value = initVv;
    }

    int laneId = raft::laneId();
    int warpId = threadIdx.x / WarpSize;
    warpK      = sharedK + warpId * kTotalWarpSortSize;
    warpV      = sharedV + warpId * kTotalWarpSortSize;

    // Fill warp queue (only the actual queue space is fine, not where
    // we write the per-thread queues for merging)
    for (int i = laneId; i < NumWarpQ; i += WarpSize) {
      warpK[i]       = initK;
      warpV[i].key   = initVk;
      warpV[i].value = initVv;
    }

    warpFence();
  }

  // Push (k, {vk, vv}) into this thread's queue if it beats the current
  // warp-queue threshold (warpKTop). The thread queue acts as a small
  // rotating buffer of candidates.
  __device__ inline void addThreadQ(K k, K vk, V vv)
  {
    if (Dir ? Comp::gt(k, warpKTop) : Comp::lt(k, warpKTop)) {
      // Rotate right
#pragma unroll
      for (int i = NumThreadQ - 1; i > 0; --i) {
        threadK[i]       = threadK[i - 1];
        threadV[i].key   = threadV[i - 1].key;
        threadV[i].value = threadV[i - 1].value;
      }

      threadK[0]       = k;
      threadV[0].key   = vk;
      threadV[0].value = vv;
      ++numVals;
    }
  }

  // If any lane's thread queue is full, merge all thread queues into the
  // warp queue and reset them; otherwise do nothing.
  __device__ inline void checkThreadQ()
  {
    bool needSort = (numVals == NumThreadQ);

#if CUDA_VERSION >= 9000
    needSort = __any_sync(0xffffffff, needSort);
#else
    needSort = __any(needSort);
#endif

    if (!needSort) {
      // no lanes have triggered a sort
      return;
    }

    // This has a trailing warpFence
    mergeWarpQ();

    // Any top-k elements have been merged into the warp queue; we're
    // free to reset the thread queues
    numVals = 0;

#pragma unroll
    for (int i = 0; i < NumThreadQ; ++i) {
      threadK[i]       = initK;
      threadV[i].key   = initVk;
      threadV[i].value = initVv;
    }

    // We have to beat at least this element
    warpKTop      = warpK[kMinus1];
    warpKTopRDist = warpV[kMinus1].key;

    warpFence();
  }

  /// This function handles sorting and merging together the
  /// per-thread queues with the warp-wide queue, creating a sorted
  /// list across both
  __device__ inline void mergeWarpQ()
  {
    int laneId = raft::laneId();

    // Sort all of the per-thread queues
    warpSortAnyRegisters<K, KeyValuePair<K, V>, NumThreadQ, !Dir, Comp>(threadK, threadV);

    constexpr int kNumWarpQRegisters = NumWarpQ / WarpSize;
    K warpKRegisters[kNumWarpQRegisters];
    KeyValuePair<K, V> warpVRegisters[kNumWarpQRegisters];

#pragma unroll
    for (int i = 0; i < kNumWarpQRegisters; ++i) {
      warpKRegisters[i]       = warpK[i * WarpSize + laneId];
      warpVRegisters[i].key   = warpV[i * WarpSize + laneId].key;
      warpVRegisters[i].value = warpV[i * WarpSize + laneId].value;
    }

    warpFence();

    // The warp queue is already sorted, and now that we've sorted the
    // per-thread queue, merge both sorted lists together, producing
    // one sorted list
    warpMergeAnyRegisters<K, KeyValuePair<K, V>, kNumWarpQRegisters, NumThreadQ, !Dir, Comp, false>(
      warpKRegisters, warpVRegisters, threadK, threadV);

    // Write back out the warp queue
#pragma unroll
    for (int i = 0; i < kNumWarpQRegisters; ++i) {
      warpK[i * WarpSize + laneId]       = warpKRegisters[i];
      warpV[i * WarpSize + laneId].key   = warpVRegisters[i].key;
      warpV[i * WarpSize + laneId].value = warpVRegisters[i].value;
    }

    warpFence();
  }

  /// WARNING: all threads in a warp must participate in this.
  /// Otherwise, you must call the constituent parts separately.
  __device__ inline void add(K k, K vk, V vv)
  {
    addThreadQ(k, vk, vv);
    checkThreadQ();
  }

  // Finalize selection across the block; after this, sharedK/sharedV hold
  // the block-wide merged result.
  __device__ inline void reduce()
  {
    // Have all warps dump and merge their queues; this will produce
    // the final per-warp results
    mergeWarpQ();

    // block-wide dep; thus far, all warps have been completely
    // independent
    __syncthreads();

    // All warp queues are contiguous in smem.
    // Now, we have kNumWarps lists of NumWarpQ elements.
    // This is a power of 2.
    FinalBlockMerge<kNumWarps, ThreadsPerBlock, K, KeyValuePair<K, V>, NumWarpQ, Dir, Comp>::merge(
      sharedK, sharedV);

    // The block-wide merge has a trailing syncthreads
  }

  // Default element key
  const K initK;

  // Default element value
  const K initVk;
  const V initVv;

  // Number of valid elements in our thread queue
  int numVals;

  // The k-th highest (Dir) or lowest (!Dir) element
  K warpKTop;
  K warpKTopRDist;

  // Thread queue values
  K threadK[NumThreadQ];
  KeyValuePair<K, V> threadV[NumThreadQ];

  // Queues for all warps
  K* sharedK;
  KeyValuePair<K, V>* sharedV;

  // Our warp's queue (points into sharedK/sharedV)
  // warpK[0] is highest (Dir) or lowest (!Dir)
  K* warpK;
  KeyValuePair<K, V>* warpV;

  // This is a cached k-1 value
  int kMinus1;
};

}  // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail
rapidsai_public_repos/raft/cpp/include/raft/neighbors/detail/faiss_select/MergeNetworkBlock.cuh
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file thirdparty/LICENSES/LICENSE.faiss
 */

#pragma once

#include <cuda.h>
#include <raft/neighbors/detail/faiss_select/MergeNetworkUtils.cuh>
#include <raft/neighbors/detail/faiss_select/StaticUtils.h>

namespace raft::neighbors::detail::faiss_select {

// Merge pairs of lists smaller than blockDim.x (NumThreads).
// listK/listV point to N pairs of contiguous sorted lists of length L each
// (i.e. a region of N * 2 * L elements). AllThreads indicates whether every
// thread in the block participates (otherwise only N * L threads do work).
template <int NumThreads,
          typename K,
          typename V,
          int N,
          int L,
          bool AllThreads,
          bool Dir,
          typename Comp,
          bool FullMerge>
inline __device__ void blockMergeSmall(K* listK, V* listV)
{
  static_assert(utils::isPowerOf2(L), "L must be a power-of-2");
  static_assert(utils::isPowerOf2(NumThreads), "NumThreads must be a power-of-2");
  static_assert(L <= NumThreads, "merge list size must be <= NumThreads");

  // Which pair of lists we are merging
  int mergeId = threadIdx.x / L;

  // Which thread we are within the merge
  int tid = threadIdx.x % L;

  // listK points to a region of size N * 2 * L
  listK += 2 * L * mergeId;
  listV += 2 * L * mergeId;

  // It's not a bitonic merge, both lists are in the same direction,
  // so handle the first swap assuming the second list is reversed
  int pos    = L - 1 - tid;
  int stride = 2 * tid + 1;

  if (AllThreads || (threadIdx.x < N * L)) {
    K ka = listK[pos];
    K kb = listK[pos + stride];

    bool swap            = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
    listK[pos]           = swap ? kb : ka;
    listK[pos + stride]  = swap ? ka : kb;

    V va = listV[pos];
    V vb = listV[pos + stride];

    listV[pos]          = swap ? vb : va;
    listV[pos + stride] = swap ? va : vb;

    // FIXME: is this a CUDA 9 compiler bug?
    // K& ka = listK[pos];
    // K& kb = listK[pos + stride];

    // bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
    // swap(s, ka, kb);

    // V& va = listV[pos];
    // V& vb = listV[pos + stride];
    // swap(s, va, vb);
  }

  __syncthreads();

  // Remaining bitonic merge stages through shared/global memory.
#pragma unroll
  for (int stride = L / 2; stride > 0; stride /= 2) {
    int pos = 2 * tid - (tid & (stride - 1));

    if (AllThreads || (threadIdx.x < N * L)) {
      K ka = listK[pos];
      K kb = listK[pos + stride];

      bool swap           = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      listK[pos]          = swap ? kb : ka;
      listK[pos + stride] = swap ? ka : kb;

      V va = listV[pos];
      V vb = listV[pos + stride];

      listV[pos]          = swap ? vb : va;
      listV[pos + stride] = swap ? va : vb;

      // FIXME: is this a CUDA 9 compiler bug?
      // K& ka = listK[pos];
      // K& kb = listK[pos + stride];

      // bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      // swap(s, ka, kb);

      // V& va = listV[pos];
      // V& vb = listV[pos + stride];
      // swap(s, va, vb);
    }

    __syncthreads();
  }
}

// Merge pairs of sorted lists larger than blockDim.x (NumThreads).
// Each thread handles L / NumThreads elements per stage.
template <int NumThreads, typename K, typename V, int L, bool Dir, typename Comp, bool FullMerge>
inline __device__ void blockMergeLarge(K* listK, V* listV)
{
  static_assert(utils::isPowerOf2(L), "L must be a power-of-2");
  static_assert(L >= WarpSize, "merge list size must be >= 32");
  static_assert(utils::isPowerOf2(NumThreads), "NumThreads must be a power-of-2");
  static_assert(L >= NumThreads, "merge list size must be >= NumThreads");

  // For L > NumThreads, each thread has to perform more work
  // per each stride.
  constexpr int kLoopPerThread = L / NumThreads;

  // It's not a bitonic merge, both lists are in the same direction,
  // so handle the first swap assuming the second list is reversed
#pragma unroll
  for (int loop = 0; loop < kLoopPerThread; ++loop) {
    int tid    = loop * NumThreads + threadIdx.x;
    int pos    = L - 1 - tid;
    int stride = 2 * tid + 1;

    K ka = listK[pos];
    K kb = listK[pos + stride];

    bool swap           = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
    listK[pos]          = swap ? kb : ka;
    listK[pos + stride] = swap ? ka : kb;

    V va = listV[pos];
    V vb = listV[pos + stride];

    listV[pos]          = swap ? vb : va;
    listV[pos + stride] = swap ? va : vb;

    // FIXME: is this a CUDA 9 compiler bug?
    // K& ka = listK[pos];
    // K& kb = listK[pos + stride];

    // bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
    // swap(s, ka, kb);

    // V& va = listV[pos];
    // V& vb = listV[pos + stride];
    // swap(s, va, vb);
  }

  __syncthreads();

  // If we only care about the first output list, the second half of the
  // work per stage can be skipped.
  constexpr int kSecondLoopPerThread = FullMerge ? kLoopPerThread : kLoopPerThread / 2;

#pragma unroll
  for (int stride = L / 2; stride > 0; stride /= 2) {
#pragma unroll
    for (int loop = 0; loop < kSecondLoopPerThread; ++loop) {
      int tid = loop * NumThreads + threadIdx.x;
      int pos = 2 * tid - (tid & (stride - 1));

      K ka = listK[pos];
      K kb = listK[pos + stride];

      bool swap           = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      listK[pos]          = swap ? kb : ka;
      listK[pos + stride] = swap ? ka : kb;

      V va = listV[pos];
      V vb = listV[pos + stride];

      listV[pos]          = swap ? vb : va;
      listV[pos + stride] = swap ? va : vb;

      // FIXME: is this a CUDA 9 compiler bug?
      // K& ka = listK[pos];
      // K& kb = listK[pos + stride];

      // bool s = Dir ? Comp::gt(ka, kb) : Comp::lt(ka, kb);
      // swap(s, ka, kb);

      // V& va = listV[pos];
      // V& vb = listV[pos + stride];
      // swap(s, va, vb);
    }

    __syncthreads();
  }
}

/// Class template to prevent static_assert from firing for
/// mixing smaller/larger than block cases
template <int NumThreads,
          typename K,
          typename V,
          int N,
          int L,
          bool Dir,
          typename Comp,
          bool SmallerThanBlock,
          bool FullMerge>
struct BlockMerge {};

/// Merging lists smaller than a block
template <int NumThreads, typename K, typename V, int N, int L, bool Dir, typename Comp, bool FullMerge>
struct BlockMerge<NumThreads, K, V, N, L, Dir, Comp, true, FullMerge> {
  static inline __device__ void merge(K* listK, V* listV)
  {
    constexpr int kNumParallelMerges = NumThreads / L;
    constexpr int kNumIterations     = N / kNumParallelMerges;

    static_assert(L <= NumThreads, "list must be <= NumThreads");
    static_assert((N < kNumParallelMerges) || (kNumIterations * kNumParallelMerges == N),
                  "improper selection of N and L");

    if (N < kNumParallelMerges) {
      // We only need L threads per each list to perform the merge
      blockMergeSmall<NumThreads, K, V, N, L, false, Dir, Comp, FullMerge>(listK, listV);
    } else {
      // All threads participate
#pragma unroll
      for (int i = 0; i < kNumIterations; ++i) {
        int start = i * kNumParallelMerges * 2 * L;

        blockMergeSmall<NumThreads, K, V, N, L, true, Dir, Comp, FullMerge>(listK + start,
                                                                            listV + start);
      }
    }
  }
};

/// Merging lists larger than a block
template <int NumThreads, typename K, typename V, int N, int L, bool Dir, typename Comp, bool FullMerge>
struct BlockMerge<NumThreads, K, V, N, L, Dir, Comp, false, FullMerge> {
  static inline __device__ void merge(K* listK, V* listV)
  {
    // Each pair of lists is merged sequentially
#pragma unroll
    for (int i = 0; i < N; ++i) {
      int start = i * 2 * L;

      blockMergeLarge<NumThreads, K, V, L, Dir, Comp, FullMerge>(listK + start, listV + start);
    }
  }
};

// Entry point: dispatches to the smaller-than-block or larger-than-block
// merge implementation based on L vs NumThreads.
template <int NumThreads,
          typename K,
          typename V,
          int N,
          int L,
          bool Dir,
          typename Comp,
          bool FullMerge = true>
inline __device__ void blockMerge(K* listK, V* listV)
{
  constexpr bool kSmallerThanBlock = (L <= NumThreads);

  BlockMerge<NumThreads, K, V, N, L, Dir, Comp, kSmallerThanBlock, FullMerge>::merge(listK, listV);
}

}  // namespace raft::neighbors::detail::faiss_select
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/fused_l2_knn.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/ivf_flat.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/ivf_pq.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/refine.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/ball_cover.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/brute_force.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/detail/ivf_pq_compute_similarity.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations
rapidsai_public_repos/raft/cpp/include/raft/neighbors/specializations/detail/ball_cover_lowdim.hpp
/*
 * Copyright (c) 2021-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdint>
#include <raft/spatial/knn/detail/ball_cover/common.cuh>
#include <raft/spatial/knn/detail/ball_cover/registers.cuh>

namespace raft {
namespace spatial {
namespace knn {
namespace detail {

// extern template declarations suppressing implicit instantiation of the
// random-ball-cover low-dimensional kernels for (int64 index, float math,
// uint32 int) at dims 2 and 3; the definitions are compiled in a separate
// translation unit.

// dim = 2, pass one
extern template void rbc_low_dim_pass_one<std::int64_t, float, std::uint32_t, 2>(
  raft::resources const& handle,
  const BallCoverIndex<std::int64_t, float, std::uint32_t>& index,
  const float* query,
  const std::uint32_t n_query_rows,
  std::uint32_t k,
  const std::int64_t* R_knn_inds,
  const float* R_knn_dists,
  DistFunc<float, std::uint32_t>& dfunc,
  std::int64_t* inds,
  float* dists,
  float weight,
  std::uint32_t* dists_counter);

// dim = 2, pass two
extern template void rbc_low_dim_pass_two<std::int64_t, float, std::uint32_t, 2>(
  raft::resources const& handle,
  const BallCoverIndex<std::int64_t, float, std::uint32_t>& index,
  const float* query,
  const std::uint32_t n_query_rows,
  std::uint32_t k,
  const std::int64_t* R_knn_inds,
  const float* R_knn_dists,
  DistFunc<float, std::uint32_t>& dfunc,
  std::int64_t* inds,
  float* dists,
  float weight,
  std::uint32_t* post_dists_counter);

// dim = 3, pass one
extern template void rbc_low_dim_pass_one<std::int64_t, float, std::uint32_t, 3>(
  raft::resources const& handle,
  const BallCoverIndex<std::int64_t, float, std::uint32_t>& index,
  const float* query,
  const std::uint32_t n_query_rows,
  std::uint32_t k,
  const std::int64_t* R_knn_inds,
  const float* R_knn_dists,
  DistFunc<float, std::uint32_t>& dfunc,
  std::int64_t* inds,
  float* dists,
  float weight,
  std::uint32_t* dists_counter);

// dim = 3, pass two
extern template void rbc_low_dim_pass_two<std::int64_t, float, std::uint32_t, 3>(
  raft::resources const& handle,
  const BallCoverIndex<std::int64_t, float, std::uint32_t>& index,
  const float* query,
  const std::uint32_t n_query_rows,
  std::uint32_t k,
  const std::int64_t* R_knn_inds,
  const float* R_knn_dists,
  DistFunc<float, std::uint32_t>& dfunc,
  std::int64_t* inds,
  float* dists,
  float weight,
  std::uint32_t* post_dists_counter);

};  // namespace detail
};  // namespace knn
};  // namespace spatial
};  // namespace raft
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/contingency_matrix.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __CONTINGENCY_MATRIX_H
#define __CONTINGENCY_MATRIX_H

#pragma once

#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/detail/contingencyMatrix.cuh>

namespace raft {
namespace stats {

/**
 * @brief use this to allocate output matrix size
 * size of matrix = (maxLabel - minLabel + 1)^2 * sizeof(int)
 * @param groundTruth: device 1-d array for ground truth (num of rows)
 * @param nSamples: number of elements in input array
 * @param stream: cuda stream for execution
 * @param minLabel: [out] calculated min value in input array
 * @param maxLabel: [out] calculated max value in input array
 */
template <typename T>
void getInputClassCardinality(
  const T* groundTruth, const int nSamples, cudaStream_t stream, T& minLabel, T& maxLabel)
{
  // Thin forwarding wrapper around the detail implementation.
  detail::getInputClassCardinality(groundTruth, nSamples, stream, minLabel, maxLabel);
}

/**
 * @brief Calculate workspace size for running contingency matrix calculations
 * @tparam T label type
 * @tparam OutT output matrix type
 * @param nSamples: number of elements in input array
 * @param groundTruth: device 1-d array for ground truth (num of rows)
 * @param stream: cuda stream for execution
 * @param minLabel: Optional, min value in input array
 * @param maxLabel: Optional, max value in input array
 *
 * @note `std::numeric_limits<T>::max()` is used as a sentinel meaning
 * "not provided"; when left at the sentinel, the bounds are computed from
 * the data by the detail implementation.
 */
template <typename T, typename OutT = int>
size_t getContingencyMatrixWorkspaceSize(int nSamples,
                                         const T* groundTruth,
                                         cudaStream_t stream,
                                         T minLabel = std::numeric_limits<T>::max(),
                                         T maxLabel = std::numeric_limits<T>::max())
{
  return detail::getContingencyMatrixWorkspaceSize(
    nSamples, groundTruth, stream, minLabel, maxLabel);
}

/**
 * @brief construct contingency matrix given input ground truth and prediction
 *        labels. Users should call function getInputClassCardinality to find
 *        and allocate memory for output. Similarly workspace requirements
 *        should be checked using function getContingencyMatrixWorkspaceSize
 * @tparam T label type
 * @tparam OutT output matrix type
 * @param groundTruth: device 1-d array for ground truth (num of rows)
 * @param predictedLabel: device 1-d array for prediction (num of columns)
 * @param nSamples: number of elements in input array
 * @param outMat: output buffer for contingency matrix
 * @param stream: cuda stream for execution
 * @param workspace: Optional, workspace memory allocation
 * @param workspaceSize: Optional, size of workspace memory
 * @param minLabel: Optional, min value in input ground truth array
 * @param maxLabel: Optional, max value in input ground truth array
 */
template <typename T, typename OutT = int>
void contingencyMatrix(const T* groundTruth,
                       const T* predictedLabel,
                       int nSamples,
                       OutT* outMat,
                       cudaStream_t stream,
                       void* workspace     = nullptr,
                       size_t workspaceSize = 0,
                       T minLabel          = std::numeric_limits<T>::max(),
                       T maxLabel          = std::numeric_limits<T>::max())
{
  detail::contingencyMatrix<T, OutT>(groundTruth,
                                     predictedLabel,
                                     nSamples,
                                     outMat,
                                     stream,
                                     workspace,
                                     workspaceSize,
                                     minLabel,
                                     maxLabel);
}

/**
 * @defgroup contingency_matrix Contingency Matrix
 * @{
 */

/**
 * @brief use this to allocate output matrix size
 * size of matrix = (maxLabel - minLabel + 1)^2 * sizeof(int)
 * @tparam value_t label type
 * @tparam idx_t Index type of matrix extent.
 * @param[in] handle: the raft handle.
 * @param[in] groundTruth: device 1-d array for ground truth (num of rows)
 * @param[out] minLabel: calculated min value in input array
 * @param[out] maxLabel: calculated max value in input array
 */
template <typename value_t, typename idx_t>
void get_input_class_cardinality(raft::resources const& handle,
                                 raft::device_vector_view<const value_t, idx_t> groundTruth,
                                 raft::host_scalar_view<value_t> minLabel,
                                 raft::host_scalar_view<value_t> maxLabel)
{
  RAFT_EXPECTS(minLabel.data_handle() != nullptr, "Invalid minLabel pointer");
  RAFT_EXPECTS(maxLabel.data_handle() != nullptr, "Invalid maxLabel pointer");
  // The detail API writes the bounds through references to host scalars.
  detail::getInputClassCardinality(groundTruth.data_handle(),
                                   groundTruth.extent(0),
                                   resource::get_cuda_stream(handle),
                                   *minLabel.data_handle(),
                                   *maxLabel.data_handle());
}

/**
 * @brief construct contingency matrix given input ground truth and prediction
 *        labels. Users should call function getInputClassCardinality to find
 *        and allocate memory for output. Similarly workspace requirements
 *        should be checked using function getContingencyMatrixWorkspaceSize
 * @tparam value_t label type
 * @tparam out_t output matrix type
 * @tparam idx_t Index type of matrix extent.
 * @tparam layout_t Layout type of the input data.
 * @tparam opt_min_label_t std::optional<value_t> @c opt_min_label
 * @tparam opt_max_label_t std::optional<value_t> @c opt_max_label
 * @param[in] handle: the raft handle.
 * @param[in] ground_truth: device 1-d array for ground truth (num of rows)
 * @param[in] predicted_label: device 1-d array for prediction (num of columns)
 * @param[out] out_mat: output buffer for contingency matrix
 * @param[in] opt_min_label: std::optional, min value in input ground truth array
 * @param[in] opt_max_label: std::optional, max value in input ground truth array
 */
template <typename value_t,
          typename out_t,
          typename idx_t,
          typename layout_t,
          typename opt_min_label_t,
          typename opt_max_label_t>
void contingency_matrix(raft::resources const& handle,
                        raft::device_vector_view<const value_t, idx_t> ground_truth,
                        raft::device_vector_view<const value_t, idx_t> predicted_label,
                        raft::device_matrix_view<out_t, idx_t, layout_t> out_mat,
                        opt_min_label_t&& opt_min_label,
                        opt_max_label_t&& opt_max_label)
{
  // Normalize the forwarded optional-like arguments into std::optional.
  std::optional<value_t> min_label = std::forward<opt_min_label_t>(opt_min_label);
  std::optional<value_t> max_label = std::forward<opt_max_label_t>(opt_max_label);

  RAFT_EXPECTS(ground_truth.size() == predicted_label.size(), "Size mismatch");
  RAFT_EXPECTS(ground_truth.is_exhaustive(), "ground_truth must be contiguous");
  RAFT_EXPECTS(predicted_label.is_exhaustive(), "predicted_label must be contiguous");
  RAFT_EXPECTS(out_mat.is_exhaustive(), "out_mat must be contiguous");

  // numeric_limits::max() is the "not provided" sentinel understood by the
  // detail layer (see getContingencyMatrixWorkspaceSize above).
  value_t min_label_value = std::numeric_limits<value_t>::max();
  value_t max_label_value = std::numeric_limits<value_t>::max();
  if (min_label.has_value()) { min_label_value = min_label.value(); }
  if (max_label.has_value()) { max_label_value = max_label.value(); }

  // Allocate the exact scratch space the detail implementation asks for.
  auto workspace_sz = detail::getContingencyMatrixWorkspaceSize(ground_truth.extent(0),
                                                                ground_truth.data_handle(),
                                                                resource::get_cuda_stream(handle),
                                                                min_label_value,
                                                                max_label_value);
  auto workspace    = raft::make_device_vector<char>(handle, workspace_sz);

  detail::contingencyMatrix<value_t, out_t>(ground_truth.data_handle(),
                                            predicted_label.data_handle(),
                                            ground_truth.extent(0),
                                            out_mat.data_handle(),
                                            resource::get_cuda_stream(handle),
                                            workspace.data_handle(),
                                            workspace_sz,
                                            min_label_value,
                                            max_label_value);
}

/** @} */  // end group contingency_matrix

/**
 * @brief Overload of `contingency_matrix` to help the
 *   compiler find the above overload, in case users pass in
 *   `std::nullopt` for the optional arguments.
 *
 * Please see above for documentation of `contingency_matrix`.
 *
 * @note Restricted via enable_if to exactly four arguments so it does not
 * shadow the six-argument overload; the optionals are defaulted to nullopt.
 */
template <typename... Args, typename = std::enable_if_t<sizeof...(Args) == 4>>
void contingency_matrix(Args... args)
{
  contingency_matrix(std::forward<Args>(args)..., std::nullopt, std::nullopt);
}

};  // namespace stats
};  // namespace raft

#endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/mean.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __MEAN_H #define __MEAN_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/stats/detail/mean.cuh> namespace raft { namespace stats { /** * @brief Compute mean of the input matrix * * Mean operation is assumed to be performed on a given column. * * @tparam Type: the data type * @tparam IdxType Integer type used to for addressing * @param mu: the output mean vector * @param data: the input matrix * @param D: number of columns of data * @param N: number of rows of data * @param sample: whether to evaluate sample mean or not. In other words, * whether * to normalize the output using N-1 or N, for true or false, respectively * @param rowMajor: whether the input data is row or col major * @param stream: cuda stream */ template <typename Type, typename IdxType = int> void mean( Type* mu, const Type* data, IdxType D, IdxType N, bool sample, bool rowMajor, cudaStream_t stream) { detail::mean(mu, data, D, N, sample, rowMajor, stream); } /** * @defgroup stats_mean Mean * @{ */ /** * @brief Compute mean of the input matrix * * Mean operation is assumed to be performed on a given column. * * @tparam value_t the data type * @tparam idx_t index type * @tparam layout_t Layout type of the input matrix. 
* @param[in] handle the raft handle * @param[in] data: the input matrix * @param[out] mu: the output mean vector * @param[in] sample: whether to evaluate sample mean or not. In other words, whether * to normalize the output using N-1 or N, for true or false, respectively */ template <typename value_t, typename idx_t, typename layout_t> void mean(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> data, raft::device_vector_view<value_t, idx_t> mu, bool sample) { static_assert( std::is_same_v<layout_t, raft::row_major> || std::is_same_v<layout_t, raft::col_major>, "Data layout not supported"); RAFT_EXPECTS(data.extent(1) == mu.extent(0), "Size mismatch between data and mu"); RAFT_EXPECTS(mu.is_exhaustive(), "mu must be contiguous"); RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous"); detail::mean(mu.data_handle(), data.data_handle(), data.extent(1), data.extent(0), sample, std::is_same_v<layout_t, raft::row_major>, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_mean }; // namespace stats }; // namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/cov.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __COV_H #define __COV_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/cov.cuh> namespace raft { namespace stats { /** * @brief Compute covariance of the input matrix * * Mean operation is assumed to be performed on a given column. * * @tparam Type the data type * @param covar the output covariance matrix * @param data the input matrix (this will get mean-centered at the end!) * @param mu mean vector of the input matrix * @param D number of columns of data * @param N number of rows of data * @param sample whether to evaluate sample covariance or not. In other words, * whether to normalize the output using N-1 or N, for true or false, * respectively * @param rowMajor whether the input data is row or col major * @param stable whether to run the slower-but-numerically-stable version or not * @param handle cublas handle * @param stream cuda stream * @note if stable=true, then the input data will be mean centered after this * function returns! 
*/ template <typename Type> void cov(raft::resources const& handle, Type* covar, Type* data, const Type* mu, std::size_t D, std::size_t N, bool sample, bool rowMajor, bool stable, cudaStream_t stream) { detail::cov(handle, covar, data, mu, D, N, sample, rowMajor, stable, stream); } /** * @defgroup stats_cov Covariance Matrix Construction * @{ */ /** * @brief Compute covariance of the input matrix * * Mean operation is assumed to be performed on a given column. * * @tparam value_t the data type * @tparam idx_t the index type * @tparam layout_t Layout type of the input data. * @param[in] handle the raft handle * @param[in] data the input matrix (this will get mean-centered at the end!) * (length = nrows * ncols) * @param[in] mu mean vector of the input matrix (length = ncols) * @param[out] covar the output covariance matrix (length = ncols * ncols) * @param[in] sample whether to evaluate sample covariance or not. In other words, * whether to normalize the output using N-1 or N, for true or false, * respectively * @param[in] stable whether to run the slower-but-numerically-stable version or not * @note if stable=true, then the input data will be mean centered after this * function returns! 
*/ template <typename value_t, typename idx_t, typename layout_t> void cov(raft::resources const& handle, raft::device_matrix_view<value_t, idx_t, layout_t> data, raft::device_vector_view<const value_t, idx_t> mu, raft::device_matrix_view<value_t, idx_t, layout_t> covar, bool sample, bool stable) { static_assert( std::is_same_v<layout_t, raft::row_major> || std::is_same_v<layout_t, raft::col_major>, "Data layout not supported"); RAFT_EXPECTS(data.extent(1) == covar.extent(0) && data.extent(1) == covar.extent(1), "Size mismatch"); RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous"); RAFT_EXPECTS(covar.is_exhaustive(), "covar must be contiguous"); RAFT_EXPECTS(mu.is_exhaustive(), "mu must be contiguous"); detail::cov(handle, covar.data_handle(), data.data_handle(), mu.data_handle(), data.extent(1), data.extent(0), std::is_same_v<layout_t, raft::row_major>, sample, stable, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_cov }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/trustworthiness_score.cuh
/* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __TRUSTWORTHINESS_SCORE_H #define __TRUSTWORTHINESS_SCORE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resources.hpp> #include <raft/stats/detail/trustworthiness_score.cuh> namespace raft { namespace stats { /** * @brief Compute the trustworthiness score * @param[in] h: raft handle * @param[in] X: Data in original dimension * @param[in] X_embedded: Data in target dimension (embedding) * @param[in] n: Number of samples * @param[in] m: Number of features in high/original dimension * @param[in] d: Number of features in low/embedded dimension * @param[in] n_neighbors Number of neighbors considered by trustworthiness score * @param[in] batchSize Batch size * @return[out] Trustworthiness score */ template <typename math_t, raft::distance::DistanceType distance_type> double trustworthiness_score(const raft::resources& h, const math_t* X, math_t* X_embedded, int n, int m, int d, int n_neighbors, int batchSize = 512) { return detail::trustworthiness_score<math_t, distance_type>( h, X, X_embedded, n, m, d, n_neighbors, batchSize); } /** * @defgroup stats_trustworthiness Trustworthiness * @{ */ /** * @brief Compute the trustworthiness score * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @param[in] handle the raft handle * @param[in] X: Data in original dimension * @param[in] X_embedded: Data in target dimension 
(embedding) * @param[in] n_neighbors Number of neighbors considered by trustworthiness score * @param[in] batch_size Batch size * @return Trustworthiness score * @note The constness of the data in X_embedded is currently casted away and the data is slightly * modified. */ template <raft::distance::DistanceType distance_type, typename value_t, typename idx_t> double trustworthiness_score( raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, raft::row_major> X, raft::device_matrix_view<const value_t, idx_t, raft::row_major> X_embedded, int n_neighbors, int batch_size = 512) { RAFT_EXPECTS(X.extent(0) == X_embedded.extent(0), "Size mismatch between X and X_embedded"); RAFT_EXPECTS(std::is_integral_v<idx_t> && X.extent(0) <= std::numeric_limits<int>::max(), "Index type not supported"); // TODO: Change the underlying implementation to remove the need to const_cast X_embedded. return detail::trustworthiness_score<value_t, distance_type>( handle, X.data_handle(), const_cast<value_t*>(X_embedded.data_handle()), X.extent(0), X.extent(1), X_embedded.extent(1), n_neighbors, batch_size); } /** @} */ // end group stats_trustworthiness } // namespace stats } // namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/v_measure.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __V_MEASURE_H #define __V_MEASURE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/stats/detail/v_measure.cuh> namespace raft { namespace stats { /** * @brief Function to calculate the v-measure between two clusters * * @param truthClusterArray: the array of truth classes of type T * @param predClusterArray: the array of predicted classes of type T * @param size: the size of the data points of type int * @param lowerLabelRange: the lower bound of the range of labels * @param upperLabelRange: the upper bound of the range of labels * @param stream: the cudaStream object * @param beta: v_measure parameter */ template <typename T> double v_measure(const T* truthClusterArray, const T* predClusterArray, int size, T lowerLabelRange, T upperLabelRange, cudaStream_t stream, double beta = 1.0) { return detail::v_measure( truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream, beta); } /** * @defgroup stats_vmeasure V-Measure * @{ */ /** * @brief Function to calculate the v-measure between two clusters * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @param[in] handle the raft handle * @param[in] truth_cluster_array: the array of truth classes of type T * @param[in] pred_cluster_array: the array of predicted classes of 
type T * @param[in] lower_label_range: the lower bound of the range of labels * @param[in] upper_label_range: the upper bound of the range of labels * @param[in] beta: v_measure parameter * @return the v-measure between the two clusters */ template <typename value_t, typename idx_t> double v_measure(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> truth_cluster_array, raft::device_vector_view<const value_t, idx_t> pred_cluster_array, value_t lower_label_range, value_t upper_label_range, double beta = 1.0) { RAFT_EXPECTS(truth_cluster_array.extent(0) == pred_cluster_array.extent(0), "Size mismatch between truth_cluster_array and pred_cluster_array"); RAFT_EXPECTS(truth_cluster_array.is_exhaustive(), "truth_cluster_array must be contiguous"); RAFT_EXPECTS(pred_cluster_array.is_exhaustive(), "pred_cluster_array must be contiguous"); return detail::v_measure(truth_cluster_array.data_handle(), pred_cluster_array.data_handle(), truth_cluster_array.extent(0), lower_label_range, upper_label_range, resource::get_cuda_stream(handle), beta); } /** @} */ // end group stats_vmeasure }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/accuracy.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __STATS_ACCURACY_H #define __STATS_ACCURACY_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/scores.cuh> namespace raft { namespace stats { /** * @brief Compute accuracy of predictions. Useful for classification. * @tparam math_t: data type for predictions (e.g., int for classification) * @param[in] predictions: array of predictions (GPU pointer). * @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer). * @param[in] n: number of elements in each of predictions, ref_predictions. * @param[in] stream: cuda stream. * @return: Accuracy score in [0, 1]; higher is better. */ template <typename math_t> float accuracy(const math_t* predictions, const math_t* ref_predictions, int n, cudaStream_t stream) { return detail::accuracy_score(predictions, ref_predictions, n, stream); } /** * @defgroup stats_accuracy Accuracy Score * @{ */ /** * @brief Compute accuracy of predictions. Useful for classification. * @tparam value_t: data type for predictions (e.g., int for classification) * @tparam idx_t Index type of matrix extent. * @param[in] handle: the raft handle. * @param[in] predictions: array of predictions (GPU pointer). * @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer). * @return: Accuracy score in [0, 1]; higher is better. 
*/ template <typename value_t, typename idx_t> float accuracy(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> predictions, raft::device_vector_view<const value_t, idx_t> ref_predictions) { RAFT_EXPECTS(predictions.size() == ref_predictions.size(), "Size mismatch"); RAFT_EXPECTS(predictions.is_exhaustive(), "predictions must be contiguous"); RAFT_EXPECTS(ref_predictions.is_exhaustive(), "ref_predictions must be contiguous"); return detail::accuracy_score(predictions.data_handle(), ref_predictions.data_handle(), predictions.extent(0), resource::get_cuda_stream(handle)); } /** @} */ // end group stats_accuracy } // namespace stats } // namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/rand_index.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __RAND_INDEX_H #define __RAND_INDEX_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/stats/detail/rand_index.cuh> namespace raft { namespace stats { /** * @brief Function to calculate RandIndex * <a href="https://en.wikipedia.org/wiki/Rand_index">more info on rand index</a> * @param firstClusterArray: the array of classes of type T * @param secondClusterArray: the array of classes of type T * @param size: the size of the data points of type uint64_t * @param stream: the cudaStream object */ template <typename T> double rand_index(T* firstClusterArray, T* secondClusterArray, uint64_t size, cudaStream_t stream) { return detail::compute_rand_index(firstClusterArray, secondClusterArray, size, stream); } /** * @defgroup stats_rand_index Rand Index * @{ */ /** * @brief Function to calculate RandIndex * <a href="https://en.wikipedia.org/wiki/Rand_index">more info on rand index</a> * @tparam value_t the data type * @tparam idx_t index type * @param[in] handle the raft handle * @param[in] first_cluster_array: the array of classes of type value_t * @param[in] second_cluster_array: the array of classes of type value_t * @return: The RandIndex value. 
*/ template <typename value_t, typename idx_t> double rand_index(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> first_cluster_array, raft::device_vector_view<const value_t, idx_t> second_cluster_array) { RAFT_EXPECTS(first_cluster_array.extent(0) == second_cluster_array.extent(0), "Size mismatch between first_cluster_array and second_cluster_array"); RAFT_EXPECTS(first_cluster_array.is_exhaustive(), "first_cluster_array must be contiguous"); RAFT_EXPECTS(second_cluster_array.is_exhaustive(), "second_cluster_array must be contiguous"); return detail::compute_rand_index(first_cluster_array.data_handle(), second_cluster_array.data_handle(), second_cluster_array.extent(0), resource::get_cuda_stream(handle)); } /** @} */ // end group stats_rand_index }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/completeness_score.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __COMPLETENESS_SCORE_H #define __COMPLETENESS_SCORE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/homogeneity_score.cuh> namespace raft { namespace stats { /** * @brief Function to calculate the completeness score between two clusters * * @param truthClusterArray: the array of truth classes of type T * @param predClusterArray: the array of predicted classes of type T * @param size: the size of the data points of type int * @param lower_label_range: the lower bound of the range of labels * @param upper_label_range: the upper bound of the range of labels * @param stream: the cudaStream object */ template <typename T> double completeness_score(const T* truthClusterArray, const T* predClusterArray, int size, T lower_label_range, T upper_label_range, cudaStream_t stream) { return detail::homogeneity_score( predClusterArray, truthClusterArray, size, lower_label_range, upper_label_range, stream); } /** * @defgroup stats_completeness Completeness Score * @{ */ /** * @brief Function to calculate the completeness score between two clusters * * @tparam value_t the data type * @tparam idx_t Index type of matrix extent. * @param[in] handle: the raft handle. 
* @param[in] truth_cluster_array: the array of truth classes of type value_t * @param[in] pred_cluster_array: the array of predicted classes of type value_t * @param[in] lower_label_range: the lower bound of the range of labels * @param[in] upper_label_range: the upper bound of the range of labels * @return the cluster completeness score */ template <typename value_t, typename idx_t> double completeness_score(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> truth_cluster_array, raft::device_vector_view<const value_t, idx_t> pred_cluster_array, value_t lower_label_range, value_t upper_label_range) { RAFT_EXPECTS(truth_cluster_array.size() == pred_cluster_array.size(), "Size mismatch"); RAFT_EXPECTS(truth_cluster_array.is_exhaustive(), "truth_cluster_array must be contiguous"); RAFT_EXPECTS(pred_cluster_array.is_exhaustive(), "pred_cluster_array must be contiguous"); return detail::homogeneity_score(pred_cluster_array.data_handle(), truth_cluster_array.data_handle(), truth_cluster_array.extent(0), lower_label_range, upper_label_range, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_completeness }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/r2_score.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __R2_SCORE_H #define __R2_SCORE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/scores.cuh> namespace raft { namespace stats { /** * Calculates the "Coefficient of Determination" (R-Squared) score * normalizing the sum of squared errors by the total sum of squares. * * This score indicates the proportionate amount of variation in an * expected response variable is explained by the independent variables * in a linear regression model. The larger the R-squared value, the * more variability is explained by the linear regression model. * * @param y: Array of ground-truth response variables * @param y_hat: Array of predicted response variables * @param n: Number of elements in y and y_hat * @param stream: cuda stream * @return: The R-squared value. */ template <typename math_t> math_t r2_score(math_t* y, math_t* y_hat, int n, cudaStream_t stream) { return detail::r2_score(y, y_hat, n, stream); } /** * @defgroup stats_r2_score Regression R2 Score * @{ */ /** * Calculates the "Coefficient of Determination" (R-Squared) score * normalizing the sum of squared errors by the total sum of squares. * * This score indicates the proportionate amount of variation in an * expected response variable is explained by the independent variables * in a linear regression model. 
The larger the R-squared value, the * more variability is explained by the linear regression model. * * @tparam value_t the data type * @tparam idx_t index type * @param[in] handle the raft handle * @param[in] y: Array of ground-truth response variables * @param[in] y_hat: Array of predicted response variables * @return: The R-squared value. * @note The constness of y and y_hat is currently casted away. */ template <typename value_t, typename idx_t> value_t r2_score(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> y, raft::device_vector_view<const value_t, idx_t> y_hat) { RAFT_EXPECTS(y.extent(0) == y_hat.extent(0), "Size mismatch between y and y_hat"); RAFT_EXPECTS(y.is_exhaustive(), "y must be contiguous"); RAFT_EXPECTS(y_hat.is_exhaustive(), "y_hat must be contiguous"); // TODO: Change the underlying implementation to remove the need to const_cast return detail::r2_score(const_cast<value_t*>(y.data_handle()), const_cast<value_t*>(y_hat.data_handle()), y.extent(0), resource::get_cuda_stream(handle)); } /** @} */ // end group stats_r2_score } // namespace stats } // namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/adjusted_rand_index.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file adjusted_rand_index.cuh * @brief The adjusted Rand index is the corrected-for-chance version of the Rand index. * Such a correction for chance establishes a baseline by using the expected similarity * of all pair-wise comparisons between clusterings specified by a random model. */ #ifndef __ADJUSTED_RAND_INDEX_H #define __ADJUSTED_RAND_INDEX_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/adjusted_rand_index.cuh> namespace raft { namespace stats { /** * @brief Function to calculate Adjusted RandIndex * @see https://en.wikipedia.org/wiki/Rand_index * @tparam T data-type for input label arrays * @tparam MathT integral data-type used for computing n-choose-r * @param firstClusterArray: the array of classes * @param secondClusterArray: the array of classes * @param size: the size of the data points of type int * @param stream: the cudaStream object */ template <typename T, typename MathT = int> double adjusted_rand_index(const T* firstClusterArray, const T* secondClusterArray, int size, cudaStream_t stream) { return detail::compute_adjusted_rand_index(firstClusterArray, secondClusterArray, size, stream); } /** * @defgroup stats_adj_rand_index Adjusted Rand Index * @{ */ /** * @brief Function to calculate Adjusted RandIndex * @see https://en.wikipedia.org/wiki/Rand_index * @tparam value_t 
data-type for input label arrays * @tparam math_t integral data-type used for computing n-choose-r * @tparam idx_t Index type of matrix extent. * @param[in] handle: the raft handle. * @param[in] first_cluster_array: the array of classes * @param[in] second_cluster_array: the array of classes * @return the Adjusted RandIndex */ template <typename value_t, typename math_t, typename idx_t> double adjusted_rand_index(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> first_cluster_array, raft::device_vector_view<const value_t, idx_t> second_cluster_array) { RAFT_EXPECTS(first_cluster_array.size() == second_cluster_array.size(), "Size mismatch"); RAFT_EXPECTS(first_cluster_array.is_exhaustive(), "first_cluster_array must be contiguous"); RAFT_EXPECTS(second_cluster_array.is_exhaustive(), "second_cluster_array must be contiguous"); return detail::compute_adjusted_rand_index<value_t, math_t>(first_cluster_array.data_handle(), second_cluster_array.data_handle(), first_cluster_array.extent(0), resource::get_cuda_stream(handle)); } /** @} */ // end group stats_adj_rand_index }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/kl_divergence.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __KL_DIVERGENCE_H #define __KL_DIVERGENCE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/kl_divergence.cuh> namespace raft { namespace stats { /** * @brief Function to calculate KL Divergence * <a href="https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence">more info on KL * Divergence</a> * * @tparam DataT: Data type of the input array * @param modelPDF: the model array of probability density functions of type DataT * @param candidatePDF: the candidate array of probability density functions of type DataT * @param size: the size of the data points of type int * @param stream: the cudaStream object */ template <typename DataT> DataT kl_divergence(const DataT* modelPDF, const DataT* candidatePDF, int size, cudaStream_t stream) { return detail::kl_divergence(modelPDF, candidatePDF, size, stream); } /** * @defgroup kl_divergence Kullback-Leibler Divergence * @{ */ /** * @brief Function to calculate KL Divergence * <a href="https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence">more info on KL * Divergence</a> * * @tparam value_t: Data type of the input array * @tparam idx_t index type * @param[in] handle the raft handle * @param[in] modelPDF: the model array of probability density functions of type value_t * @param[in] candidatePDF: the candidate array of probability density 
functions of type value_t * @return the KL Divergence value */ template <typename value_t, typename idx_t> value_t kl_divergence(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> modelPDF, raft::device_vector_view<const value_t, idx_t> candidatePDF) { RAFT_EXPECTS(modelPDF.size() == candidatePDF.size(), "Size mismatch"); RAFT_EXPECTS(modelPDF.is_exhaustive(), "modelPDF must be contiguous"); RAFT_EXPECTS(candidatePDF.is_exhaustive(), "candidatePDF must be contiguous"); return detail::kl_divergence(modelPDF.data_handle(), candidatePDF.data_handle(), modelPDF.extent(0), resource::get_cuda_stream(handle)); } /** @} */ // end group kl_divergence }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/histogram.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __HISTOGRAM_H #define __HISTOGRAM_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/histogram.cuh> #include <raft/stats/stats_types.hpp> // This file is a shameless amalgamation of independent works done by // Lars Nyland and Andy Adinets ///@todo: add cub's histogram as another option namespace raft { namespace stats { /** * Default mapper which just returns the value of the data itself */ template <typename DataT, typename IdxT> struct IdentityBinner : public detail::IdentityBinner<DataT, IdxT> { IdentityBinner() : detail::IdentityBinner<DataT, IdxT>() {} }; /** * @brief Perform histogram on the input data. It chooses the right load size * based on the input data vector length. It also supports large-bin cases * using a specialized smem-based hashing technique. 
* @tparam DataT input data type * @tparam IdxT data type used to compute indices * @tparam BinnerOp takes the input data and computes its bin index * @param type histogram implementation type to choose * @param bins the output bins (length = ncols * nbins) * @param nbins number of bins * @param data input data (length = ncols * nrows) * @param nrows data array length in each column (or batch) * @param ncols number of columns (or batch size) * @param stream cuda stream * @param binner the operation that computes the bin index of the input data * * @note signature of BinnerOp is `int func(DataT, IdxT);` */ template <typename DataT, typename IdxT = int, typename BinnerOp = IdentityBinner<DataT, IdxT>> void histogram(HistType type, int* bins, IdxT nbins, const DataT* data, IdxT nrows, IdxT ncols, cudaStream_t stream, BinnerOp binner = IdentityBinner<DataT, IdxT>()) { detail::histogram<DataT, IdxT, BinnerOp>(type, bins, nbins, data, nrows, ncols, stream, binner); } /** * @defgroup stats_histogram Histogram * @{ */ /** * @brief Perform histogram on the input data. It chooses the right load size * based on the input data vector length. It also supports large-bin cases * using a specialized smem-based hashing technique. 
* @tparam value_t input data type * @tparam idx_t data type used to compute indices * @tparam binner_op takes the input data and computes its bin index * @param[in] handle the raft handle * @param[in] type histogram implementation type to choose * @param[in] data input data col-major (length = nrows * ncols) * @param[out] bins the output bins col-major (length = nbins * ncols) * @param[in] binner the operation that computes the bin index of the input data * * @note signature of binner_op is `int func(value_t, IdxT);` */ template <typename value_t, typename idx_t, typename binner_op = IdentityBinner<value_t, idx_t>> void histogram(raft::resources const& handle, HistType type, raft::device_matrix_view<const value_t, idx_t, raft::col_major> data, raft::device_matrix_view<int, idx_t, raft::col_major> bins, binner_op binner = IdentityBinner<value_t, idx_t>()) { RAFT_EXPECTS(std::is_integral_v<idx_t> && data.extent(0) <= std::numeric_limits<int>::max(), "Index type not supported"); RAFT_EXPECTS(bins.extent(1) == data.extent(1), "Size mismatch"); RAFT_EXPECTS(bins.is_exhaustive(), "bins must be contiguous"); RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous"); detail::histogram<value_t, idx_t, binner_op>(type, bins.data_handle(), bins.extent(0), data.data_handle(), data.extent(0), data.extent(1), resource::get_cuda_stream(handle), binner); } /** @} */ // end group stats_histogram }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/weighted_mean.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __WEIGHTED_MEAN_H #define __WEIGHTED_MEAN_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/weighted_mean.cuh> namespace raft { namespace stats { /** * @brief Compute the weighted mean of the input matrix with a * vector of weights, along rows or along columns * * @tparam Type the data type * @tparam IdxType Integer type used to for addressing * @param mu the output mean vector * @param data the input matrix * @param weights weight of size D if along_row is true, else of size N * @param D number of columns of data * @param N number of rows of data * @param row_major data input matrix is row-major or not * @param along_rows whether to reduce along rows or columns * @param stream cuda stream to launch work on */ template <typename Type, typename IdxType = int> void weightedMean(Type* mu, const Type* data, const Type* weights, IdxType D, IdxType N, bool row_major, bool along_rows, cudaStream_t stream) { detail::weightedMean(mu, data, weights, D, N, row_major, along_rows, stream); } /** * @brief Compute the row-wise weighted mean of the input matrix with a * vector of column weights * * @tparam Type the data type * @tparam IdxType Integer type used to for addressing * @param mu the output mean vector * @param data the input matrix (assumed to be row-major) * @param weights per-column means * @param D 
number of columns of data * @param N number of rows of data * @param stream cuda stream to launch work on */ template <typename Type, typename IdxType = int> void rowWeightedMean( Type* mu, const Type* data, const Type* weights, IdxType D, IdxType N, cudaStream_t stream) { weightedMean(mu, data, weights, D, N, true, true, stream); } /** * @brief Compute the column-wise weighted mean of the input matrix with a * vector of row weights * * @tparam Type the data type * @tparam IdxType Integer type used to for addressing * @param mu the output mean vector * @param data the input matrix (assumed to be row-major) * @param weights per-row means * @param D number of columns of data * @param N number of rows of data * @param stream cuda stream to launch work on */ template <typename Type, typename IdxType = int> void colWeightedMean( Type* mu, const Type* data, const Type* weights, IdxType D, IdxType N, cudaStream_t stream) { weightedMean(mu, data, weights, D, N, true, false, stream); } /** * @defgroup stats_weighted_mean Weighted Mean * @{ */ /** * @brief Compute the weighted mean of the input matrix with a * vector of weights, along rows or along columns * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @tparam layout_t Layout type of the input matrix. 
* @param[in] handle the raft handle * @param[in] data the input matrix of size nrows * ncols * @param[in] weights weight of size ncols if along_row is true, else of size nrows * @param[out] mu the output mean vector of size nrows if along_row is true, else of size ncols * @param[in] along_rows whether to reduce along rows or columns */ template <typename value_t, typename idx_t, typename layout_t> void weighted_mean(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> data, raft::device_vector_view<const value_t, idx_t> weights, raft::device_vector_view<value_t, idx_t> mu, bool along_rows) { constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>; constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>; static_assert(is_row_major || is_col_major, "weighted_mean: Layout must be either " "raft::row_major or raft::col_major (or one of their aliases)"); auto mean_vec_size = along_rows ? data.extent(0) : data.extent(1); auto weight_size = along_rows ? data.extent(1) : data.extent(0); RAFT_EXPECTS(weights.extent(0) == weight_size, "Size mismatch between weights and expected weight_size"); RAFT_EXPECTS(mu.extent(0) == mean_vec_size, "Size mismatch between mu and expected mean_vec_size"); detail::weightedMean(mu.data_handle(), data.data_handle(), weights.data_handle(), data.extent(1), data.extent(0), is_row_major, along_rows, resource::get_cuda_stream(handle)); } /** * @brief Compute the row-wise weighted mean of the input matrix with a * vector of column weights * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @tparam layout_t Layout type of the input matrix. 
* @param[in] handle the raft handle * @param[in] data the input matrix of size nrows * ncols * @param[in] weights weight vector of size ncols * @param[out] mu the output mean vector of size nrows */ template <typename value_t, typename idx_t, typename layout_t> void row_weighted_mean(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> data, raft::device_vector_view<const value_t, idx_t> weights, raft::device_vector_view<value_t, idx_t> mu) { weighted_mean(handle, data, weights, mu, true); } /** * @brief Compute the column-wise weighted mean of the input matrix with a * vector of row weights * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @tparam layout_t Layout type of the input matrix. * @param[in] handle the raft handle * @param[in] data the input matrix of size nrows * ncols * @param[in] weights weight vector of size nrows * @param[out] mu the output mean vector of size ncols */ template <typename value_t, typename idx_t, typename layout_t> void col_weighted_mean(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> data, raft::device_vector_view<const value_t, idx_t> weights, raft::device_vector_view<value_t, idx_t> mu) { weighted_mean(handle, data, weights, mu, false); } /** @} */ // end group stats_weighted_mean }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/homogeneity_score.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __HOMOGENEITY_SCORE_H #define __HOMOGENEITY_SCORE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/homogeneity_score.cuh> namespace raft { namespace stats { /** * @brief Function to calculate the homogeneity score between two clusters * <a href="https://en.wikipedia.org/wiki/Homogeneity_(statistics)">more info on mutual * information</a> * @param truthClusterArray: the array of truth classes of type T * @param predClusterArray: the array of predicted classes of type T * @param size: the size of the data points of type int * @param lowerLabelRange: the lower bound of the range of labels * @param upperLabelRange: the upper bound of the range of labels * @param stream: the cudaStream object */ template <typename T> double homogeneity_score(const T* truthClusterArray, const T* predClusterArray, int size, T lowerLabelRange, T upperLabelRange, cudaStream_t stream) { return detail::homogeneity_score( truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream); } /** * @defgroup stats_homogeneity_score Homogeneity Score * @{ */ /** * @brief Function to calculate the homogeneity score between two clusters * <a href="https://en.wikipedia.org/wiki/Homogeneity_(statistics)">more info on mutual * information</a> * * @tparam value_t data type * @tparam idx_t index type * @param[in] 
handle the raft handle * @param[in] truth_cluster_array: the array of truth classes of type value_t * @param[in] pred_cluster_array: the array of predicted classes of type value_t * @param[in] lower_label_range: the lower bound of the range of labels * @param[in] upper_label_range: the upper bound of the range of labels * @return the homogeneity score */ template <typename value_t, typename idx_t> double homogeneity_score(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> truth_cluster_array, raft::device_vector_view<const value_t, idx_t> pred_cluster_array, value_t lower_label_range, value_t upper_label_range) { RAFT_EXPECTS(truth_cluster_array.size() == pred_cluster_array.size(), "Size mismatch"); RAFT_EXPECTS(truth_cluster_array.is_exhaustive(), "truth_cluster_array must be contiguous"); RAFT_EXPECTS(pred_cluster_array.is_exhaustive(), "pred_cluster_array must be contiguous"); return detail::homogeneity_score(truth_cluster_array.data_handle(), pred_cluster_array.data_handle(), truth_cluster_array.extent(0), lower_label_range, upper_label_range, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_homogeneity_score }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/information_criterion.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file information_criterion.cuh * @brief These information criteria are used to evaluate the quality of models * by balancing the quality of the fit and the number of parameters. * * See: * - AIC: https://en.wikipedia.org/wiki/Akaike_information_criterion * - AICc: https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc * - BIC: https://en.wikipedia.org/wiki/Bayesian_information_criterion */ #ifndef __INFORMATION_CRIT_H #define __INFORMATION_CRIT_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/stats/detail/batched/information_criterion.cuh> #include <raft/stats/stats_types.hpp> namespace raft { namespace stats { /** * Compute the given type of information criterion * * @note: it is safe to do the computation in-place (i.e give same pointer * as input and output) * * @param[out] d_ic Information criterion to be returned for each * series (device) * @param[in] d_loglikelihood Log-likelihood for each series (device) * @param[in] ic_type Type of criterion to compute. 
See IC_Type * @param[in] n_params Number of parameters in the model * @param[in] batch_size Number of series in the batch * @param[in] n_samples Number of samples in each series * @param[in] stream CUDA stream */ template <typename ScalarT, typename IdxT> void information_criterion_batched(ScalarT* d_ic, const ScalarT* d_loglikelihood, IC_Type ic_type, IdxT n_params, IdxT batch_size, IdxT n_samples, cudaStream_t stream) { batched::detail::information_criterion( d_ic, d_loglikelihood, ic_type, n_params, batch_size, n_samples, stream); } /** * @defgroup stats_information_criterion Information Criterion * @{ */ /** * Compute the given type of information criterion * * @note: it is safe to do the computation in-place (i.e give same pointer * as input and output) * See: * - AIC: https://en.wikipedia.org/wiki/Akaike_information_criterion * - AICc: https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc * - BIC: https://en.wikipedia.org/wiki/Bayesian_information_criterion * * @tparam value_t data type * @tparam idx_t index type * @param[in] handle the raft handle * @param[in] d_loglikelihood Log-likelihood for each series (device) length: batch_size * @param[out] d_ic Information criterion to be returned for each * series (device) length: batch_size * @param[in] ic_type Type of criterion to compute. 
See IC_Type * @param[in] n_params Number of parameters in the model * @param[in] n_samples Number of samples in each series */ template <typename value_t, typename idx_t> void information_criterion_batched(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> d_loglikelihood, raft::device_vector_view<value_t, idx_t> d_ic, IC_Type ic_type, idx_t n_params, idx_t n_samples) { RAFT_EXPECTS(d_ic.size() == d_loglikelihood.size(), "Size mismatch"); RAFT_EXPECTS(d_ic.is_exhaustive(), "d_ic must be contiguous"); RAFT_EXPECTS(d_loglikelihood.is_exhaustive(), "d_loglikelihood must be contiguous"); batched::detail::information_criterion(d_ic.data_handle(), d_loglikelihood.data_handle(), ic_type, n_params, d_ic.extent(0), n_samples, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_information_criterion } // namespace stats } // namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/stddev.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __STDDEV_H #define __STDDEV_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <raft/stats/detail/stddev.cuh> namespace raft { namespace stats { /** * @brief Compute stddev of the input matrix * * Stddev operation is assumed to be performed on a given column. * * @tparam Type the data type * @tparam IdxType Integer type used to for addressing * @param std the output stddev vector * @param data the input matrix * @param mu the mean vector * @param D number of columns of data * @param N number of rows of data * @param sample whether to evaluate sample stddev or not. In other words, * whether * to normalize the output using N-1 or N, for true or false, respectively * @param rowMajor whether the input data is row or col major * @param stream cuda stream where to launch work */ template <typename Type, typename IdxType = int> void stddev(Type* std, const Type* data, const Type* mu, IdxType D, IdxType N, bool sample, bool rowMajor, cudaStream_t stream) { detail::stddev(std, data, mu, D, N, sample, rowMajor, stream); } /** * @brief Compute variance of the input matrix * * Variance operation is assumed to be performed on a given column. 
* * @tparam Type the data type * @tparam IdxType Integer type used to for addressing * @param var the output stddev vector * @param data the input matrix * @param mu the mean vector * @param D number of columns of data * @param N number of rows of data * @param sample whether to evaluate sample stddev or not. In other words, * whether * to normalize the output using N-1 or N, for true or false, respectively * @param rowMajor whether the input data is row or col major * @param stream cuda stream where to launch work */ template <typename Type, typename IdxType = int> void vars(Type* var, const Type* data, const Type* mu, IdxType D, IdxType N, bool sample, bool rowMajor, cudaStream_t stream) { detail::vars(var, data, mu, D, N, sample, rowMajor, stream); } /** * @defgroup stats_stddev Standard Deviation * @{ */ /** * @brief Compute stddev of the input matrix * * Stddev operation is assumed to be performed on a given column. * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @tparam layout_t Layout type of the input matrix. * @param[in] handle the raft handle * @param[in] data the input matrix * @param[in] mu the mean vector * @param[out] std the output stddev vector * @param[in] sample whether to evaluate sample stddev or not. 
In other words, * whether * to normalize the output using N-1 or N, for true or false, respectively */ template <typename value_t, typename idx_t, typename layout_t> void stddev(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> data, raft::device_vector_view<const value_t, idx_t> mu, raft::device_vector_view<value_t, idx_t> std, bool sample) { constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>; constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>; static_assert(is_row_major || is_col_major, "stddev: Layout must be either " "raft::row_major or raft::col_major (or one of their aliases)"); RAFT_EXPECTS(mu.size() == std.size(), "Size mismatch between mu and std"); RAFT_EXPECTS(mu.extent(0) == data.extent(1), "Size mismatch between data and mu"); detail::stddev(std.data_handle(), data.data_handle(), mu.data_handle(), data.extent(1), data.extent(0), sample, is_row_major, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_stddev /** * @defgroup stats_variance Variance * @{ */ /** * @brief Compute variance of the input matrix * * Variance operation is assumed to be performed on a given column. * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @tparam layout_t Layout type of the input matrix. * @param[in] handle the raft handle * @param[in] data the input matrix * @param[in] mu the mean vector * @param[out] var the output stddev vector * @param[in] sample whether to evaluate sample stddev or not. 
In other words, * whether * to normalize the output using N-1 or N, for true or false, respectively */ template <typename value_t, typename idx_t, typename layout_t> void vars(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> data, raft::device_vector_view<const value_t, idx_t> mu, raft::device_vector_view<value_t, idx_t> var, bool sample) { constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>; constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>; static_assert(is_row_major || is_col_major, "vars: Layout must be either " "raft::row_major or raft::col_major (or one of their aliases)"); RAFT_EXPECTS(mu.size() == var.size(), "Size mismatch between mu and std"); RAFT_EXPECTS(mu.extent(0) == data.extent(1), "Size mismatch between data and mu"); detail::vars(var.data_handle(), data.data_handle(), mu.data_handle(), data.extent(1), data.extent(0), sample, is_row_major, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_variance }; // namespace stats }; // namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/specializations.cuh
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #pragma message( \ __FILE__ \ " is deprecated and will be removed." \ " Including specializations is not necessary any more." \ " For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/mean_center.cuh
/*
 * Copyright (c) 2018-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __MEAN_CENTER_H
#define __MEAN_CENTER_H

#pragma once

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/mean_center.cuh>

namespace raft {
namespace stats {

/**
 * @brief Center the input matrix wrt its mean (out[i][j] = data[i][j] - mu[...]).
 * Legacy raw-pointer API; prefer the mdspan overload `mean_center` below.
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block of the cuda kernel launched
 * @param out the output mean-centered matrix
 * @param data input matrix
 * @param mu the mean vector
 * @param D number of columns of data
 * @param N number of rows of data
 * @param rowMajor whether input is row or col major
 * @param bcastAlongRows whether to broadcast vector along rows or columns
 * @param stream cuda stream where to launch work
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void meanCenter(Type* out,
                const Type* data,
                const Type* mu,
                IdxType D,
                IdxType N,
                bool rowMajor,
                bool bcastAlongRows,
                cudaStream_t stream)
{
  detail::meanCenter<Type, IdxType, TPB>(out, data, mu, D, N, rowMajor, bcastAlongRows, stream);
}

/**
 * @brief Add the mean vector to the input matrix (out[i][j] = data[i][j] + mu[...]),
 * the inverse of meanCenter. Legacy raw-pointer API; prefer `mean_add` below.
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block of the cuda kernel launched
 * @param out the output mean-added matrix
 * @param data input matrix
 * @param mu the mean vector
 * @param D number of columns of data
 * @param N number of rows of data
 * @param rowMajor whether input is row or col major
 * @param bcastAlongRows whether to broadcast vector along rows or columns
 * @param stream cuda stream where to launch work
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void meanAdd(Type* out,
             const Type* data,
             const Type* mu,
             IdxType D,
             IdxType N,
             bool rowMajor,
             bool bcastAlongRows,
             cudaStream_t stream)
{
  detail::meanAdd<Type, IdxType, TPB>(out, data, mu, D, N, rowMajor, bcastAlongRows, stream);
}

/**
 * @defgroup stats_mean_center Mean Center
 * @{
 */

/**
 * @brief Center the input matrix wrt its mean
 * @tparam value_t the data type
 * @tparam idx_t index type
 * @tparam layout_t Layout type of the input matrix.
 * @param[in] handle the raft handle
 * @param[in] data input matrix of size nrows * ncols
 * @param[in] mu the mean vector of size ncols if bcast_along_rows else nrows
 * @param[out] out the output mean-centered matrix
 * @param[in] bcast_along_rows whether to broadcast vector along rows or columns
 */
template <typename value_t, typename idx_t, typename layout_t>
void mean_center(raft::resources const& handle,
                 raft::device_matrix_view<const value_t, idx_t, layout_t> data,
                 raft::device_vector_view<const value_t, idx_t> mu,
                 raft::device_matrix_view<value_t, idx_t, layout_t> out,
                 bool bcast_along_rows)
{
  static_assert(
    std::is_same_v<layout_t, raft::row_major> || std::is_same_v<layout_t, raft::col_major>,
    "Data layout not supported");
  // The broadcast direction determines which extent the mean vector must match.
  auto mean_vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
  RAFT_EXPECTS(out.extents() == data.extents(), "Size mismatch");
  RAFT_EXPECTS(mean_vec_size == mu.extent(0), "Size mismatch between data and mu");
  RAFT_EXPECTS(out.is_exhaustive(), "out must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  detail::meanCenter<value_t, idx_t>(out.data_handle(),
                                     data.data_handle(),
                                     mu.data_handle(),
                                     data.extent(1),
                                     data.extent(0),
                                     std::is_same_v<layout_t, raft::row_major>,
                                     bcast_along_rows,
                                     resource::get_cuda_stream(handle));
}

/**
 * @brief Add the mean vector to the input matrix; the inverse of mean_center
 * @tparam value_t the data type
 * @tparam idx_t index type
 * @tparam layout_t Layout type of the input matrix.
 * @param[in] handle the raft handle
 * @param[in] data input matrix of size nrows * ncols
 * @param[in] mu the mean vector of size ncols if bcast_along_rows else nrows
 * @param[out] out the output mean-added matrix
 * @param[in] bcast_along_rows whether to broadcast vector along rows or columns
 */
template <typename value_t, typename idx_t, typename layout_t>
void mean_add(raft::resources const& handle,
              raft::device_matrix_view<const value_t, idx_t, layout_t> data,
              raft::device_vector_view<const value_t, idx_t> mu,
              raft::device_matrix_view<value_t, idx_t, layout_t> out,
              bool bcast_along_rows)
{
  static_assert(
    std::is_same_v<layout_t, raft::row_major> || std::is_same_v<layout_t, raft::col_major>,
    "Data layout not supported");
  // The broadcast direction determines which extent the mean vector must match.
  auto mean_vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
  RAFT_EXPECTS(out.extents() == data.extents(), "Size mismatch");
  RAFT_EXPECTS(mean_vec_size == mu.extent(0), "Size mismatch between data and mu");
  RAFT_EXPECTS(out.is_exhaustive(), "out must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  detail::meanAdd<value_t, idx_t>(out.data_handle(),
                                  data.data_handle(),
                                  mu.data_handle(),
                                  data.extent(1),
                                  data.extent(0),
                                  std::is_same_v<layout_t, raft::row_major>,
                                  bcast_along_rows,
                                  resource::get_cuda_stream(handle));
}

/** @} */  // end group stats_mean_center

};  // end namespace stats
};  // end namespace raft

#endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/entropy.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __ENTROPY_H #define __ENTROPY_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/entropy.cuh> namespace raft { namespace stats { /** * @brief Function to calculate entropy * <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">more info on entropy</a> * * @tparam T data type * @param clusterArray: the array of classes of type T * @param size: the size of the data points of type int * @param lowerLabelRange: the lower bound of the range of labels * @param upperLabelRange: the upper bound of the range of labels * @param stream: the cudaStream object * @return the entropy score */ template <typename T> double entropy(const T* clusterArray, const int size, const T lowerLabelRange, const T upperLabelRange, cudaStream_t stream) { return detail::entropy(clusterArray, size, lowerLabelRange, upperLabelRange, stream); } /** * @defgroup stats_entropy Entropy * @{ */ /** * @brief Function to calculate entropy * <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">more info on entropy</a> * * @tparam value_t data type * @tparam idx_t index type * @param[in] handle the raft handle * @param[in] cluster_array: the array of classes of type value_t * @param[in] lower_label_range: the lower bound of the range of labels * @param[in] upper_label_range: the upper bound of the range of 
labels * @return the entropy score */ template <typename value_t, typename idx_t> double entropy(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> cluster_array, const value_t lower_label_range, const value_t upper_label_range) { RAFT_EXPECTS(cluster_array.is_exhaustive(), "cluster_array must be contiguous"); return detail::entropy(cluster_array.data_handle(), cluster_array.extent(0), lower_label_range, upper_label_range, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_entropy }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/neighborhood_recall.cuh
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "detail/neighborhood_recall.cuh" #include <raft/core/device_mdarray.hpp> #include <raft/core/device_mdspan.hpp> #include <raft/core/error.hpp> #include <raft/core/host_mdarray.hpp> #include <raft/core/host_mdspan.hpp> #include <raft/core/mdspan_types.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/core/resources.hpp> #include <optional> namespace raft::stats { /** * @defgroup stats_neighborhood_recall Neighborhood Recall Score * @{ */ /** * @brief Calculate Neighborhood Recall score on the device for indices, distances computed by any * Nearest Neighbors Algorithm against reference indices, distances. Recall score is calculated by * comparing the total number of matching indices and dividing that value by the total size of the * indices matrix of dimensions (D, k). If distance matrices are provided, then non-matching indices * could be considered a match if abs(dist, ref_dist) < eps. 
* * Usage example: * @code{.cpp} * raft::device_resources res; * // assume D rows and N column dataset * auto k = 64; * auto indices = raft::make_device_matrix<int>(res, D, k); * auto distances = raft::make_device_matrix<float>(res, D, k); * // run ANN algorithm of choice * * auto ref_indices = raft::make_device_matrix<int>(res, D, k); * auto ref_distances = raft::make_device_matrix<float>(res, D, k); * // run brute-force KNN for reference * * auto scalar = 0.0f; * auto recall_score = raft::make_device_scalar(res, scalar); * * raft::stats::neighborhood_recall(res, raft::make_const_mdspan(indices.view()), raft::make_const_mdspan(ref_indices.view()), recall_score.view(), raft::make_const_mdspan(distances.view()), raft::make_const_mdspan(ref_distances.view())); * @endcode * * @tparam IndicesValueType data-type of the indices * @tparam IndexType data-type to index all matrices * @tparam ScalarType data-type to store recall score * @tparam DistanceValueType data-type of the distances * @param res raft::resources object to manage resources * @param[in] indices raft::device_matrix_view indices of neighbors * @param[in] ref_indices raft::device_matrix_view reference indices of neighbors * @param[out] recall_score raft::device_scalar_view output recall score * @param[in] distances (optional) raft::device_matrix_view distances of neighbors * @param[in] ref_distances (optional) raft::device_matrix_view reference distances of neighbors * @param[in] eps (optional, default = 0.001) value within which distances are considered matching */ template <typename IndicesValueType, typename IndexType, typename ScalarType, typename DistanceValueType = float> void neighborhood_recall( raft::resources const& res, raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices, raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices, raft::device_scalar_view<ScalarType> recall_score, std::optional<raft::device_matrix_view<const 
DistanceValueType, IndexType, raft::row_major>> distances = std::nullopt, std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>> ref_distances = std::nullopt, std::optional<raft::host_scalar_view<const DistanceValueType>> eps = std::nullopt) { RAFT_EXPECTS(indices.extent(0) == ref_indices.extent(0), "The number of rows in indices and reference indices should be equal"); RAFT_EXPECTS(indices.extent(1) == ref_indices.extent(1), "The number of columns in indices and reference indices should be equal"); if (distances.has_value() or ref_distances.has_value()) { RAFT_EXPECTS(distances.has_value() and ref_distances.has_value(), "Both distances and reference distances should have values"); RAFT_EXPECTS(distances.value().extent(0) == ref_distances.value().extent(0), "The number of rows in distances and reference distances should be equal"); RAFT_EXPECTS(distances.value().extent(1) == ref_distances.value().extent(1), "The number of columns in indices and reference indices should be equal"); RAFT_EXPECTS(indices.extent(0) == distances.value().extent(0), "The number of rows in indices and distances should be equal"); RAFT_EXPECTS(indices.extent(1) == distances.value().extent(1), "The number of columns in indices and distances should be equal"); } DistanceValueType eps_val = 0.001; if (eps.has_value()) { eps_val = *eps.value().data_handle(); } detail::neighborhood_recall( res, indices, ref_indices, distances, ref_distances, recall_score, eps_val); } /** * @brief Calculate Neighborhood Recall score on the host for indices, distances computed by any * Nearest Neighbors Algorithm against reference indices, distances. Recall score is calculated by * comparing the total number of matching indices and dividing that value by the total size of the * indices matrix of dimensions (D, k). If distance matrices are provided, then non-matching indices * could be considered a match if abs(dist, ref_dist) < eps. 
* * Usage example: * @code{.cpp} * raft::device_resources res; * // assume D rows and N column dataset * auto k = 64; * auto indices = raft::make_device_matrix<int>(res, D, k); * auto distances = raft::make_device_matrix<float>(res, D, k); * // run ANN algorithm of choice * * auto ref_indices = raft::make_device_matrix<int>(res, D, k); * auto ref_distances = raft::make_device_matrix<float>(res, D, k); * // run brute-force KNN for reference * * auto scalar = 0.0f; * auto recall_score = raft::make_host_scalar(scalar); * * raft::stats::neighborhood_recall(res, raft::make_const_mdspan(indices.view()), raft::make_const_mdspan(ref_indices.view()), recall_score.view(), raft::make_const_mdspan(distances.view()), raft::make_const_mdspan(ref_distances.view())); * @endcode * * @tparam IndicesValueType data-type of the indices * @tparam IndexType data-type to index all matrices * @tparam ScalarType data-type to store recall score * @tparam DistanceValueType data-type of the distances * @param res raft::resources object to manage resources * @param[in] indices raft::device_matrix_view indices of neighbors * @param[in] ref_indices raft::device_matrix_view reference indices of neighbors * @param[out] recall_score raft::host_scalar_view output recall score * @param[in] distances (optional) raft::device_matrix_view distances of neighbors * @param[in] ref_distances (optional) raft::device_matrix_view reference distances of neighbors * @param[in] eps (optional, default = 0.001) value within which distances are considered matching */ template <typename IndicesValueType, typename IndexType, typename ScalarType, typename DistanceValueType = float> void neighborhood_recall( raft::resources const& res, raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices, raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices, raft::host_scalar_view<ScalarType> recall_score, std::optional<raft::device_matrix_view<const DistanceValueType, 
IndexType, raft::row_major>> distances = std::nullopt, std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>> ref_distances = std::nullopt, std::optional<raft::host_scalar_view<const DistanceValueType>> eps = std::nullopt) { auto recall_score_d = raft::make_device_scalar(res, *recall_score.data_handle()); neighborhood_recall( res, indices, ref_indices, recall_score_d.view(), distances, ref_distances, eps); raft::update_host(recall_score.data_handle(), recall_score_d.data_handle(), 1, raft::resource::get_cuda_stream(res)); raft::resource::sync_stream(res); } /** @} */ // end group stats_recall } // end namespace raft::stats
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/regression_metrics.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __REGRESSION_METRICS_H
#define __REGRESSION_METRICS_H

#pragma once

#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/detail/scores.cuh>

namespace raft {
namespace stats {

/**
 * @brief Compute regression metrics mean absolute error, mean squared error, median absolute error.
 * Legacy raw-pointer API; prefer the mdspan overload below.
 * @tparam T: data type for predictions (e.g., float or double for regression).
 * @param[in] predictions: array of predictions (GPU pointer).
 * @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer).
 * @param[in] n: number of elements in each of predictions, ref_predictions. Should be > 0.
 * @param[in] stream: cuda stream.
 * @param[out] mean_abs_error: Mean Absolute Error. Sum over n of (|predictions[i] -
 * ref_predictions[i]|) / n.
 * @param[out] mean_squared_error: Mean Squared Error. Sum over n of ((predictions[i] -
 * ref_predictions[i])^2) / n.
 * @param[out] median_abs_error: Median Absolute Error. Median of |predictions[i] -
 * ref_predictions[i]| for i in [0, n).
 */
template <typename T>
void regression_metrics(const T* predictions,
                        const T* ref_predictions,
                        int n,
                        cudaStream_t stream,
                        double& mean_abs_error,
                        double& mean_squared_error,
                        double& median_abs_error)
{
  detail::regression_metrics(
    predictions, ref_predictions, n, stream, mean_abs_error, mean_squared_error, median_abs_error);
}

/**
 * @defgroup stats_regression_metrics Regression Metrics
 * @{
 */

/**
 * @brief Compute regression metrics mean absolute error, mean squared error, median absolute error.
 * All three metrics are written to host-side scalar views.
 * @tparam value_t the data type for predictions (e.g., float or double for regression).
 * @tparam idx_t index type
 * @param[in] handle the raft handle
 * @param[in] predictions: array of predictions.
 * @param[in] ref_predictions: array of reference (ground-truth) predictions.
 * @param[out] mean_abs_error: Mean Absolute Error. Sum over n of (|predictions[i] -
 * ref_predictions[i]|) / n.
 * @param[out] mean_squared_error: Mean Squared Error. Sum over n of ((predictions[i] -
 * ref_predictions[i])^2) / n.
 * @param[out] median_abs_error: Median Absolute Error. Median of |predictions[i] -
 * ref_predictions[i]| for i in [0, n).
 */
template <typename value_t, typename idx_t>
void regression_metrics(raft::resources const& handle,
                        raft::device_vector_view<const value_t, idx_t> predictions,
                        raft::device_vector_view<const value_t, idx_t> ref_predictions,
                        raft::host_scalar_view<double> mean_abs_error,
                        raft::host_scalar_view<double> mean_squared_error,
                        raft::host_scalar_view<double> median_abs_error)
{
  RAFT_EXPECTS(predictions.extent(0) == ref_predictions.extent(0),
               "Size mismatch between predictions and ref_predictions");
  RAFT_EXPECTS(predictions.is_exhaustive(), "predictions must be contiguous");
  RAFT_EXPECTS(ref_predictions.is_exhaustive(), "ref_predictions must be contiguous");
  // All three output views must be backed by real storage since the detail
  // implementation writes through their pointers.
  RAFT_EXPECTS(mean_abs_error.data_handle() != nullptr, "mean_abs_error view must not be empty");
  RAFT_EXPECTS(mean_squared_error.data_handle() != nullptr,
               "mean_squared_error view must not be empty");
  RAFT_EXPECTS(median_abs_error.data_handle() != nullptr,
               "median_abs_error view must not be empty");
  detail::regression_metrics(predictions.data_handle(),
                             ref_predictions.data_handle(),
                             predictions.extent(0),
                             resource::get_cuda_stream(handle),
                             *mean_abs_error.data_handle(),
                             *mean_squared_error.data_handle(),
                             *median_abs_error.data_handle());
}

/** @} */  // end group stats_regression_metrics

}  // namespace stats
}  // namespace raft

#endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/silhouette_score.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __SILHOUETTE_SCORE_H
#define __SILHOUETTE_SCORE_H

#pragma once

#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/batched/silhouette_score.cuh>
#include <raft/stats/detail/silhouette_score.cuh>

namespace raft {
namespace stats {

/**
 * @brief main function that returns the average silhouette score for a given set of data and its
 * clusterings. Legacy raw-pointer API; prefer the mdspan overload below.
 * @tparam DataT: type of the data samples
 * @tparam LabelT: type of the labels
 * @param handle: raft handle for managing expensive resources
 * @param X_in: pointer to the input Data samples array (nRows x nCols)
 * @param nRows: number of data samples
 * @param nCols: number of features
 * @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
 * @param nLabels: number of Labels
 * @param silhouette_scorePerSample: pointer to the array that is optionally taken in as input and
 * is populated with the silhouette score for every sample (1 x nRows)
 * @param stream: the cuda stream where to launch this kernel
 * @param metric: the numerical value that maps to the type of distance metric to be used in the
 * calculations
 */
template <typename DataT, typename LabelT>
DataT silhouette_score(
  raft::resources const& handle,
  DataT* X_in,
  int nRows,
  int nCols,
  LabelT* labels,
  int nLabels,
  DataT* silhouette_scorePerSample,
  cudaStream_t stream,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  return detail::silhouette_score(
    handle, X_in, nRows, nCols, labels, nLabels, silhouette_scorePerSample, stream, metric);
}

/**
 * @brief Chunked/batched variant of silhouette_score that processes the pairwise
 * distances `chunk` rows at a time to bound memory use.
 * Legacy raw-pointer API; prefer the mdspan overload `silhouette_score_batched` below.
 * @tparam value_t: type of the data samples
 * @tparam value_idx: index type for the data
 * @tparam label_idx: index type for the labels
 * @param handle: raft handle for managing expensive resources
 * @param X: pointer to the input data samples array (n_rows x n_cols)
 * @param n_rows: number of data samples
 * @param n_cols: number of features
 * @param y: pointer to the labels array (length n_rows)
 * @param n_labels: number of distinct labels
 * @param scores: optional output array of per-sample scores (length n_rows); may be nullptr
 * @param chunk: number of samples processed per batch
 * @param metric: the distance metric to use in the calculations
 * @return: The silhouette score.
 */
template <typename value_t, typename value_idx, typename label_idx>
value_t silhouette_score_batched(
  raft::resources const& handle,
  value_t* X,
  value_idx n_rows,
  value_idx n_cols,
  label_idx* y,
  label_idx n_labels,
  value_t* scores,
  value_idx chunk,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  return batched::detail::silhouette_score(
    handle, X, n_rows, n_cols, y, n_labels, scores, chunk, metric);
}

/**
 * @defgroup stats_silhouette_score Silhouette Score
 * @{
 */

/**
 * @brief main function that returns the average silhouette score for a given set of data and its
 * clusterings
 * @tparam value_t: type of the data samples
 * @tparam label_t: type of the labels
 * @tparam idx_t index type
 * @param[in] handle: raft handle for managing expensive resources
 * @param[in] X_in: input matrix Data in row-major format (nRows x nCols)
 * @param[in] labels: the pointer to the array containing labels for every data sample (length:
 * nRows)
 * @param[out] silhouette_score_per_sample: optional array populated with the silhouette score
 * for every sample (length: nRows)
 * @param[in] n_unique_labels: number of unique labels in the labels array
 * @param[in] metric: the numerical value that maps to the type of distance metric to be used in
 * the calculations
 * @return: The silhouette score.
 */
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X_in,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::optional<raft::device_vector_view<value_t, idx_t>> silhouette_score_per_sample,
  idx_t n_unique_labels,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  RAFT_EXPECTS(labels.extent(0) == X_in.extent(0), "Size mismatch between labels and data");

  // nullptr signals "no per-sample output" to the detail implementation.
  value_t* silhouette_score_per_sample_ptr = nullptr;
  if (silhouette_score_per_sample.has_value()) {
    silhouette_score_per_sample_ptr = silhouette_score_per_sample.value().data_handle();
    RAFT_EXPECTS(silhouette_score_per_sample.value().extent(0) == X_in.extent(0),
                 "Size mismatch between silhouette_score_per_sample and data");
  }
  return detail::silhouette_score(handle,
                                  X_in.data_handle(),
                                  X_in.extent(0),
                                  X_in.extent(1),
                                  labels.data_handle(),
                                  n_unique_labels,
                                  silhouette_score_per_sample_ptr,
                                  resource::get_cuda_stream(handle),
                                  metric);
}

/**
 * @brief function that returns the average silhouette score for a given set of data and its
 * clusterings
 * @tparam value_t: type of the data samples
 * @tparam label_t: type of the labels
 * @tparam idx_t index type
 * @param[in] handle: raft handle for managing expensive resources
 * @param[in] X: input matrix Data in row-major format (nRows x nCols)
 * @param[in] labels: the pointer to the array containing labels for every data sample (length:
 * nRows)
 * @param[out] silhouette_score_per_sample: optional array populated with the silhouette score
 * for every sample (length: nRows)
 * @param[in] n_unique_labels: number of unique labels in the labels array
 * @param[in] batch_size: number of samples per batch
 * @param[in] metric: the numerical value that maps to the type of distance metric to be used in
 * the calculations
 * @return: The silhouette score.
 */
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score_batched(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::optional<raft::device_vector_view<value_t, idx_t>> silhouette_score_per_sample,
  idx_t n_unique_labels,
  idx_t batch_size,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  static_assert(std::is_integral_v<idx_t>,
                "silhouette_score_batched: The index type "
                "of each mdspan argument must be an integral type.");
  static_assert(std::is_integral_v<label_t>,
                "silhouette_score_batched: The label type must be an integral type.");
  RAFT_EXPECTS(labels.extent(0) == X.extent(0), "Size mismatch between labels and data");

  // nullptr signals "no per-sample output" to the detail implementation.
  value_t* scores_ptr = nullptr;
  if (silhouette_score_per_sample.has_value()) {
    scores_ptr = silhouette_score_per_sample.value().data_handle();
    RAFT_EXPECTS(silhouette_score_per_sample.value().extent(0) == X.extent(0),
                 "Size mismatch between silhouette_score_per_sample and data");
  }
  return batched::detail::silhouette_score(handle,
                                           X.data_handle(),
                                           X.extent(0),
                                           X.extent(1),
                                           labels.data_handle(),
                                           n_unique_labels,
                                           scores_ptr,
                                           batch_size,
                                           metric);
}

/** @} */  // end group stats_silhouette_score

/**
 * @brief Overload of `silhouette_score` to help the
 * compiler find the above overload, in case users pass in
 * `std::nullopt` for the optional arguments.
 *
 * Please see above for documentation of `silhouette_score`.
 */
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X_in,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::nullopt_t silhouette_score_per_sample,
  idx_t n_unique_labels,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  std::optional<raft::device_vector_view<value_t, idx_t>> opt_scores = silhouette_score_per_sample;
  return silhouette_score(handle, X_in, labels, opt_scores, n_unique_labels, metric);
}

/**
 * @brief Overload of `silhouette_score_batched` to help the
 * compiler find the above overload, in case users pass in
 * `std::nullopt` for the optional arguments.
 *
 * Please see above for documentation of `silhouette_score_batched`.
 */
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score_batched(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::nullopt_t silhouette_score_per_sample,
  idx_t n_unique_labels,
  idx_t batch_size,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Unexpanded)
{
  std::optional<raft::device_vector_view<value_t, idx_t>> opt_scores = silhouette_score_per_sample;
  return silhouette_score_batched(
    handle, X, labels, opt_scores, n_unique_labels, batch_size, metric);
}

};  // namespace stats
};  // namespace raft

#endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/dispersion.cuh
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __DISPERSION_H
#define __DISPERSION_H

#pragma once

#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/dispersion.cuh>

namespace raft {
namespace stats {

/**
 * @brief Compute cluster dispersion metric. This is very useful for
 * automatically finding the 'k' (in kmeans) that improves this metric.
 * Legacy raw-pointer API; prefer `cluster_dispersion` below.
 * @tparam DataT data type
 * @tparam IdxT index type
 * @tparam TPB threads block for kernels launched
 * @param centroids the cluster centroids. This is assumed to be row-major
 *   and of dimension (nClusters x dim)
 * @param clusterSizes number of points in the dataset which belong to each
 *   cluster. This is of length nClusters
 * @param globalCentroid compute the global weighted centroid of all cluster
 *   centroids. This is of length dim. Pass a nullptr if this is not needed
 * @param nClusters number of clusters
 * @param nPoints number of points in the dataset
 * @param dim dataset dimensionality
 * @param stream cuda stream
 * @return the cluster dispersion value
 */
template <typename DataT, typename IdxT = int, int TPB = 256>
DataT dispersion(const DataT* centroids,
                 const IdxT* clusterSizes,
                 DataT* globalCentroid,
                 IdxT nClusters,
                 IdxT nPoints,
                 IdxT dim,
                 cudaStream_t stream)
{
  return detail::dispersion<DataT, IdxT, TPB>(
    centroids, clusterSizes, globalCentroid, nClusters, nPoints, dim, stream);
}

/**
 * @defgroup stats_cluster_dispersion Cluster Dispersion Metric
 * @{
 */

/**
 * @brief Compute cluster dispersion metric. This is very useful for
 * automatically finding the 'k' (in kmeans) that improves this metric.
 * The cluster dispersion metric is defined as the square root of the sum of the
 * squared distances between the cluster centroids and the global centroid
 * @tparam value_t data type
 * @tparam idx_t index type
 * @param[in] handle the raft handle
 * @param[in] centroids the cluster centroids. This is assumed to be row-major
 *   and of dimension (n_clusters x dim)
 * @param[in] cluster_sizes number of points in the dataset which belong to each
 *   cluster. This is of length n_clusters
 * @param[out] global_centroid compute the global weighted centroid of all cluster
 *   centroids. This is of length dim. Use std::nullopt to not return it.
 * @param[in] n_points number of points in the dataset
 * @return the cluster dispersion value
 */
template <typename value_t, typename idx_t>
value_t cluster_dispersion(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> centroids,
  raft::device_vector_view<const idx_t, idx_t> cluster_sizes,
  std::optional<raft::device_vector_view<value_t, idx_t>> global_centroid,
  const idx_t n_points)
{
  RAFT_EXPECTS(cluster_sizes.extent(0) == centroids.extent(0), "Size mismatch");
  RAFT_EXPECTS(cluster_sizes.is_exhaustive(), "cluster_sizes must be contiguous");

  // nullptr signals "do not compute/return the global centroid" to detail::dispersion.
  value_t* global_centroid_ptr = nullptr;
  if (global_centroid.has_value()) {
    RAFT_EXPECTS(global_centroid.value().extent(0) == centroids.extent(1),
                 "Size mismatch between global_centroid and centroids");
    RAFT_EXPECTS(global_centroid.value().is_exhaustive(), "global_centroid must be contiguous");
    global_centroid_ptr = global_centroid.value().data_handle();
  }
  return detail::dispersion<value_t, idx_t>(centroids.data_handle(),
                                            cluster_sizes.data_handle(),
                                            global_centroid_ptr,
                                            centroids.extent(0),
                                            n_points,
                                            centroids.extent(1),
                                            resource::get_cuda_stream(handle));
}

/** @} */  // end group stats_cluster_dispersion

/**
 * @brief Overload of `cluster_dispersion` to help the
 * compiler find the above overload, in case users pass in
 * `std::nullopt` for the optional arguments.
 *
 * Please see above for documentation of `cluster_dispersion`.
 */
template <typename value_t, typename idx_t>
value_t cluster_dispersion(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> centroids,
  raft::device_vector_view<const idx_t, idx_t> cluster_sizes,
  std::nullopt_t global_centroid,
  const idx_t n_points)
{
  std::optional<raft::device_vector_view<value_t, idx_t>> opt_centroid = global_centroid;
  return cluster_dispersion(handle, centroids, cluster_sizes, opt_centroid, n_points);
}

}  // end namespace stats
}  // end namespace raft

#endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/sum.cuh
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __SUM_H #define __SUM_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/sum.cuh> #include <raft/util/cudart_utils.hpp> namespace raft { namespace stats { /** * @brief Compute sum of the input matrix * * Sum operation is assumed to be performed on a given column. * * @tparam Type the data type * @tparam IdxType Integer type used to for addressing * @param output the output mean vector * @param input the input matrix * @param D number of columns of data * @param N number of rows of data * @param rowMajor whether the input data is row or col major * @param stream cuda stream where to launch work */ template <typename Type, typename IdxType = int> void sum(Type* output, const Type* input, IdxType D, IdxType N, bool rowMajor, cudaStream_t stream) { detail::sum(output, input, D, N, rowMajor, stream); } /** * @defgroup stats_sum Sum * @{ */ /** * @brief Compute sum of the input matrix * * Sum operation is assumed to be performed on a given column. * * @tparam value_t the data type * @tparam idx_t Integer type used to for addressing * @tparam layout_t Layout type of the input matrix. 
* @param[in] handle the raft handle * @param[in] input the input matrix * @param[out] output the output mean vector */ template <typename value_t, typename idx_t, typename layout_t> void sum(raft::resources const& handle, raft::device_matrix_view<const value_t, idx_t, layout_t> input, raft::device_vector_view<value_t, idx_t> output) { constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>; constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>; static_assert(is_row_major || is_col_major, "sum: Layout must be either " "raft::row_major or raft::col_major (or one of their aliases)"); RAFT_EXPECTS(input.extent(1) == output.extent(0), "Size mismatch between input and output"); detail::sum(output.data_handle(), input.data_handle(), input.extent(1), input.extent(0), is_row_major, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_sum }; // end namespace stats }; // end namespace raft #endif
0
rapidsai_public_repos/raft/cpp/include/raft
rapidsai_public_repos/raft/cpp/include/raft/stats/mutual_info_score.cuh
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __MUTUAL_INFO_SCORE_H #define __MUTUAL_INFO_SCORE_H #pragma once #include <raft/core/device_mdspan.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/stats/detail/mutual_info_score.cuh> namespace raft { namespace stats { /** * @brief Function to calculate the mutual information between two clusters * <a href="https://en.wikipedia.org/wiki/Mutual_information">more info on mutual information</a> * @param firstClusterArray: the array of classes of type T * @param secondClusterArray: the array of classes of type T * @param size: the size of the data points of type int * @param lowerLabelRange: the lower bound of the range of labels * @param upperLabelRange: the upper bound of the range of labels * @param stream: the cudaStream object */ template <typename T> double mutual_info_score(const T* firstClusterArray, const T* secondClusterArray, int size, T lowerLabelRange, T upperLabelRange, cudaStream_t stream) { return detail::mutual_info_score( firstClusterArray, secondClusterArray, size, lowerLabelRange, upperLabelRange, stream); } /** * @defgroup stats_mutual_info Mutual Information * @{ */ /** * @brief Function to calculate the mutual information between two clusters * <a href="https://en.wikipedia.org/wiki/Mutual_information">more info on mutual information</a> * @tparam value_t the data type * @tparam idx_t index type * @param[in] handle the raft handle * 
@param[in] first_cluster_array: the array of classes of type value_t * @param[in] second_cluster_array: the array of classes of type value_t * @param[in] lower_label_range: the lower bound of the range of labels * @param[in] upper_label_range: the upper bound of the range of labels * @return the mutual information score */ template <typename value_t, typename idx_t> double mutual_info_score(raft::resources const& handle, raft::device_vector_view<const value_t, idx_t> first_cluster_array, raft::device_vector_view<const value_t, idx_t> second_cluster_array, value_t lower_label_range, value_t upper_label_range) { RAFT_EXPECTS(first_cluster_array.extent(0) == second_cluster_array.extent(0), "Size mismatch between first_cluster_array and second_cluster_array"); RAFT_EXPECTS(first_cluster_array.is_exhaustive(), "first_cluster_array must be contiguous"); RAFT_EXPECTS(second_cluster_array.is_exhaustive(), "second_cluster_array must be contiguous"); return detail::mutual_info_score(first_cluster_array.data_handle(), second_cluster_array.data_handle(), first_cluster_array.extent(0), lower_label_range, upper_label_range, resource::get_cuda_stream(handle)); } /** @} */ // end group stats_mutual_info }; // end namespace stats }; // end namespace raft #endif
0