repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/kl_divergence.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __KL_DIVERGENCE_H
#define __KL_DIVERGENCE_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/kl_divergence.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Function to calculate KL Divergence
* <a href="https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence">more info on KL
* Divergence</a>
*
* @tparam DataT: Data type of the input array
* @param modelPDF: the model array of probability density functions of type DataT
* @param candidatePDF: the candidate array of probability density functions of type DataT
* @param size: the size of the data points of type int
* @param stream: the cudaStream object
*/
template <typename DataT>
DataT kl_divergence(const DataT* modelPDF, const DataT* candidatePDF, int size, cudaStream_t stream)
{
  // Explicit raft:: qualification: this wrapper lives in cuvs::stats, so an
  // unqualified `detail::` cannot resolve to raft::stats::detail (the
  // implementation pulled in via <raft/stats/detail/kl_divergence.cuh>).
  return raft::stats::detail::kl_divergence(modelPDF, candidatePDF, size, stream);
}
/**
* @defgroup kl_divergence Kullback-Leibler Divergence
* @{
*/
/**
* @brief Function to calculate KL Divergence
* <a href="https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence">more info on KL
* Divergence</a>
*
* @tparam value_t: Data type of the input array
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] modelPDF: the model array of probability density functions of type value_t
* @param[in] candidatePDF: the candidate array of probability density functions of type value_t
* @return the KL Divergence value
*/
template <typename value_t, typename idx_t>
value_t kl_divergence(raft::resources const& handle,
                      raft::device_vector_view<const value_t, idx_t> modelPDF,
                      raft::device_vector_view<const value_t, idx_t> candidatePDF)
{
  RAFT_EXPECTS(modelPDF.size() == candidatePDF.size(), "Size mismatch");
  RAFT_EXPECTS(modelPDF.is_exhaustive(), "modelPDF must be contiguous");
  RAFT_EXPECTS(candidatePDF.is_exhaustive(), "candidatePDF must be contiguous");
  // `detail` and `resource` are raft namespaces; they are not visible
  // unqualified from inside cuvs::stats, hence the explicit raft:: prefix.
  return raft::stats::detail::kl_divergence(modelPDF.data_handle(),
                                            candidatePDF.data_handle(),
                                            modelPDF.extent(0),
                                            raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group kl_divergence
}; // end namespace stats
}; // namespace cuvs
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/histogram.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __HISTOGRAM_H
#define __HISTOGRAM_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/histogram.cuh>
#include <raft/stats/stats_types.hpp>
// This file is a shameless amalgamation of independent works done by
// Lars Nyland and Andy Adinets
///@todo: add cub's histogram as another option
namespace cuvs {
namespace stats {
/**
* Default mapper which just returns the value of the data itself
*/
template <typename DataT, typename IdxT>
// Thin alias-style wrapper over the raft implementation; the base must be
// fully qualified because cuvs::stats has no `detail` namespace of its own.
struct IdentityBinner : public raft::stats::detail::IdentityBinner<DataT, IdxT> {
  IdentityBinner() : raft::stats::detail::IdentityBinner<DataT, IdxT>() {}
};
/**
* @brief Perform histogram on the input data. It chooses the right load size
* based on the input data vector length. It also supports large-bin cases
* using a specialized smem-based hashing technique.
* @tparam DataT input data type
* @tparam IdxT data type used to compute indices
* @tparam BinnerOp takes the input data and computes its bin index
* @param type histogram implementation type to choose
* @param bins the output bins (length = ncols * nbins)
* @param nbins number of bins
* @param data input data (length = ncols * nrows)
* @param nrows data array length in each column (or batch)
* @param ncols number of columns (or batch size)
* @param stream cuda stream
* @param binner the operation that computes the bin index of the input data
*
* @note signature of BinnerOp is `int func(DataT, IdxT);`
*/
template <typename DataT, typename IdxT = int, typename BinnerOp = IdentityBinner<DataT, IdxT>>
void histogram(raft::stats::HistType type,
               int* bins,
               IdxT nbins,
               const DataT* data,
               IdxT nrows,
               IdxT ncols,
               cudaStream_t stream,
               BinnerOp binner = IdentityBinner<DataT, IdxT>())
{
  // HistType and the implementation live in raft::stats; qualify explicitly
  // since unqualified lookup from cuvs::stats cannot find them.
  raft::stats::detail::histogram<DataT, IdxT, BinnerOp>(
    type, bins, nbins, data, nrows, ncols, stream, binner);
}
/**
* @defgroup stats_histogram Histogram
* @{
*/
/**
* @brief Perform histogram on the input data. It chooses the right load size
* based on the input data vector length. It also supports large-bin cases
* using a specialized smem-based hashing technique.
* @tparam value_t input data type
* @tparam idx_t data type used to compute indices
* @tparam binner_op takes the input data and computes its bin index
* @param[in] handle the raft handle
* @param[in] type histogram implementation type to choose
* @param[in] data input data col-major (length = nrows * ncols)
* @param[out] bins the output bins col-major (length = nbins * ncols)
* @param[in] binner the operation that computes the bin index of the input data
*
* @note signature of binner_op is `int func(value_t, IdxT);`
*/
template <typename value_t, typename idx_t, typename binner_op = IdentityBinner<value_t, idx_t>>
void histogram(raft::resources const& handle,
               raft::stats::HistType type,
               raft::device_matrix_view<const value_t, idx_t, raft::col_major> data,
               raft::device_matrix_view<int, idx_t, raft::col_major> bins,
               binner_op binner = IdentityBinner<value_t, idx_t>())
{
  // The detail kernels index with `int`, so the row count must fit in one.
  RAFT_EXPECTS(std::is_integral_v<idx_t> && data.extent(0) <= std::numeric_limits<int>::max(),
               "Index type not supported");
  RAFT_EXPECTS(bins.extent(1) == data.extent(1), "Size mismatch");
  RAFT_EXPECTS(bins.is_exhaustive(), "bins must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  // Fully qualify raft names: neither `detail` nor `resource` is reachable
  // unqualified from cuvs::stats.
  raft::stats::detail::histogram<value_t, idx_t, binner_op>(
    type,
    bins.data_handle(),
    bins.extent(0),
    data.data_handle(),
    data.extent(0),
    data.extent(1),
    raft::resource::get_cuda_stream(handle),
    binner);
}
/** @} */ // end group stats_histogram
}; // end namespace stats
}; // namespace cuvs
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/weighted_mean.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __WEIGHTED_MEAN_H
#define __WEIGHTED_MEAN_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/weighted_mean.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Compute the weighted mean of the input matrix with a
* vector of weights, along rows or along columns
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param mu the output mean vector
* @param data the input matrix
* @param weights weight of size D if along_row is true, else of size N
* @param D number of columns of data
* @param N number of rows of data
* @param row_major data input matrix is row-major or not
* @param along_rows whether to reduce along rows or columns
* @param stream cuda stream to launch work on
*/
template <typename Type, typename IdxType = int>
void weightedMean(Type* mu,
                  const Type* data,
                  const Type* weights,
                  IdxType D,
                  IdxType N,
                  bool row_major,
                  bool along_rows,
                  cudaStream_t stream)
{
  // Explicit raft:: qualification: unqualified `detail::` does not resolve
  // from cuvs::stats to the raft implementation included above.
  raft::stats::detail::weightedMean(mu, data, weights, D, N, row_major, along_rows, stream);
}
/**
* @brief Compute the row-wise weighted mean of the input matrix with a
* vector of column weights
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param mu the output mean vector
* @param data the input matrix (assumed to be row-major)
* @param weights per-column means
* @param D number of columns of data
* @param N number of rows of data
* @param stream cuda stream to launch work on
*/
// Convenience wrapper: row-wise weighted mean of a row-major matrix,
// weighting each of the D columns. Output `mu` has one entry per row.
template <typename Type, typename IdxType = int>
void rowWeightedMean(
  Type* mu, const Type* data, const Type* weights, IdxType D, IdxType N, cudaStream_t stream)
{
  // Forwards with row_major = true, along_rows = true.
  weightedMean(mu, data, weights, D, N, true, true, stream);
}
/**
* @brief Compute the column-wise weighted mean of the input matrix with a
* vector of row weights
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param mu the output mean vector
* @param data the input matrix (assumed to be row-major)
* @param weights per-row means
* @param D number of columns of data
* @param N number of rows of data
* @param stream cuda stream to launch work on
*/
// Convenience wrapper: column-wise weighted mean of a row-major matrix,
// weighting each of the N rows. Output `mu` has one entry per column.
template <typename Type, typename IdxType = int>
void colWeightedMean(
  Type* mu, const Type* data, const Type* weights, IdxType D, IdxType N, cudaStream_t stream)
{
  // Forwards with row_major = true, along_rows = false (input is still
  // assumed row-major; only the reduction direction changes).
  weightedMean(mu, data, weights, D, N, true, false, stream);
}
/**
* @defgroup stats_weighted_mean Weighted Mean
* @{
*/
/**
* @brief Compute the weighted mean of the input matrix with a
* vector of weights, along rows or along columns
*
* @tparam value_t the data type
* @tparam idx_t Integer type used to for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix of size nrows * ncols
* @param[in] weights weight of size ncols if along_row is true, else of size nrows
* @param[out] mu the output mean vector of size nrows if along_row is true, else of size ncols
* @param[in] along_rows whether to reduce along rows or columns
*/
template <typename value_t, typename idx_t, typename layout_t>
void weighted_mean(raft::resources const& handle,
                   raft::device_matrix_view<const value_t, idx_t, layout_t> data,
                   raft::device_vector_view<const value_t, idx_t> weights,
                   raft::device_vector_view<value_t, idx_t> mu,
                   bool along_rows)
{
  constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>;
  static_assert(is_row_major || is_col_major,
                "weighted_mean: Layout must be either "
                "raft::row_major or raft::col_major (or one of their aliases)");
  // Reducing along rows produces one mean per row and consumes one weight per
  // column; reducing along columns is the opposite.
  auto mean_vec_size = along_rows ? data.extent(0) : data.extent(1);
  auto weight_size   = along_rows ? data.extent(1) : data.extent(0);
  RAFT_EXPECTS(weights.extent(0) == weight_size,
               "Size mismatch between weights and expected weight_size");
  RAFT_EXPECTS(mu.extent(0) == mean_vec_size,
               "Size mismatch between mu and expected mean_vec_size");
  // Fully qualified: `detail` / `resource` are raft namespaces, not visible
  // unqualified from cuvs::stats.
  raft::stats::detail::weightedMean(mu.data_handle(),
                                    data.data_handle(),
                                    weights.data_handle(),
                                    data.extent(1),
                                    data.extent(0),
                                    is_row_major,
                                    along_rows,
                                    raft::resource::get_cuda_stream(handle));
}
/**
* @brief Compute the row-wise weighted mean of the input matrix with a
* vector of column weights
*
* @tparam value_t the data type
* @tparam idx_t Integer type used to for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix of size nrows * ncols
* @param[in] weights weight vector of size ncols
* @param[out] mu the output mean vector of size nrows
*/
// Convenience wrapper: per-row weighted mean using column weights
// (along_rows = true). Shape checks are delegated to weighted_mean.
template <typename value_t, typename idx_t, typename layout_t>
void row_weighted_mean(raft::resources const& handle,
                       raft::device_matrix_view<const value_t, idx_t, layout_t> data,
                       raft::device_vector_view<const value_t, idx_t> weights,
                       raft::device_vector_view<value_t, idx_t> mu)
{
  weighted_mean(handle, data, weights, mu, true);
}
/**
* @brief Compute the column-wise weighted mean of the input matrix with a
* vector of row weights
*
* @tparam value_t the data type
* @tparam idx_t Integer type used to for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix of size nrows * ncols
* @param[in] weights weight vector of size nrows
* @param[out] mu the output mean vector of size ncols
*/
// Convenience wrapper: per-column weighted mean using row weights
// (along_rows = false). Shape checks are delegated to weighted_mean.
template <typename value_t, typename idx_t, typename layout_t>
void col_weighted_mean(raft::resources const& handle,
                       raft::device_matrix_view<const value_t, idx_t, layout_t> data,
                       raft::device_vector_view<const value_t, idx_t> weights,
                       raft::device_vector_view<value_t, idx_t> mu)
{
  weighted_mean(handle, data, weights, mu, false);
}
/** @} */ // end group stats_weighted_mean
}; // end namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/homogeneity_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __HOMOGENEITY_SCORE_H
#define __HOMOGENEITY_SCORE_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/homogeneity_score.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Function to calculate the homogeneity score between two clusters
* <a href="https://en.wikipedia.org/wiki/Homogeneity_(statistics)">more info on mutual
* information</a>
* @param truthClusterArray: the array of truth classes of type T
* @param predClusterArray: the array of predicted classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
*/
template <typename T>
double homogeneity_score(const T* truthClusterArray,
                         const T* predClusterArray,
                         int size,
                         T lowerLabelRange,
                         T upperLabelRange,
                         cudaStream_t stream)
{
  // Explicit raft:: qualification: unqualified `detail::` does not resolve
  // from cuvs::stats to the raft implementation included above.
  return raft::stats::detail::homogeneity_score(
    truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream);
}
/**
* @defgroup stats_homogeneity_score Homogeneity Score
* @{
*/
/**
* @brief Function to calculate the homogeneity score between two clusters
* <a href="https://en.wikipedia.org/wiki/Homogeneity_(statistics)">more info on mutual
* information</a>
*
* @tparam value_t data type
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] truth_cluster_array: the array of truth classes of type value_t
* @param[in] pred_cluster_array: the array of predicted classes of type value_t
* @param[in] lower_label_range: the lower bound of the range of labels
* @param[in] upper_label_range: the upper bound of the range of labels
* @return the homogeneity score
*/
template <typename value_t, typename idx_t>
double homogeneity_score(raft::resources const& handle,
                         raft::device_vector_view<const value_t, idx_t> truth_cluster_array,
                         raft::device_vector_view<const value_t, idx_t> pred_cluster_array,
                         value_t lower_label_range,
                         value_t upper_label_range)
{
  RAFT_EXPECTS(truth_cluster_array.size() == pred_cluster_array.size(), "Size mismatch");
  RAFT_EXPECTS(truth_cluster_array.is_exhaustive(), "truth_cluster_array must be contiguous");
  RAFT_EXPECTS(pred_cluster_array.is_exhaustive(), "pred_cluster_array must be contiguous");
  // Fully qualified: `detail` / `resource` are raft namespaces, not visible
  // unqualified from cuvs::stats.
  return raft::stats::detail::homogeneity_score(truth_cluster_array.data_handle(),
                                                pred_cluster_array.data_handle(),
                                                truth_cluster_array.extent(0),
                                                lower_label_range,
                                                upper_label_range,
                                                raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_homogeneity_score
}; // end namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/information_criterion.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file information_criterion.cuh
* @brief These information criteria are used to evaluate the quality of models
* by balancing the quality of the fit and the number of parameters.
*
* See:
* - AIC: https://en.wikipedia.org/wiki/Akaike_information_criterion
* - AICc: https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
* - BIC: https://en.wikipedia.org/wiki/Bayesian_information_criterion
*/
#ifndef __INFORMATION_CRIT_H
#define __INFORMATION_CRIT_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/detail/batched/information_criterion.cuh>
#include <raft/stats/stats_types.hpp>
namespace cuvs {
namespace stats {
/**
* Compute the given type of information criterion
*
* @note: it is safe to do the computation in-place (i.e give same pointer
* as input and output)
*
* @param[out] d_ic Information criterion to be returned for each
* series (device)
* @param[in] d_loglikelihood Log-likelihood for each series (device)
* @param[in] ic_type Type of criterion to compute. See IC_Type
* @param[in] n_params Number of parameters in the model
* @param[in] batch_size Number of series in the batch
* @param[in] n_samples Number of samples in each series
* @param[in] stream CUDA stream
*/
template <typename ScalarT, typename IdxT>
void information_criterion_batched(ScalarT* d_ic,
                                   const ScalarT* d_loglikelihood,
                                   raft::stats::IC_Type ic_type,
                                   IdxT n_params,
                                   IdxT batch_size,
                                   IdxT n_samples,
                                   cudaStream_t stream)
{
  // IC_Type and the batched implementation live under raft::stats; from
  // cuvs::stats they must be named with full qualification.
  raft::stats::batched::detail::information_criterion(
    d_ic, d_loglikelihood, ic_type, n_params, batch_size, n_samples, stream);
}
/**
* @defgroup stats_information_criterion Information Criterion
* @{
*/
/**
* Compute the given type of information criterion
*
* @note: it is safe to do the computation in-place (i.e give same pointer
* as input and output)
* See:
* - AIC: https://en.wikipedia.org/wiki/Akaike_information_criterion
* - AICc: https://en.wikipedia.org/wiki/Akaike_information_criterion#AICc
* - BIC: https://en.wikipedia.org/wiki/Bayesian_information_criterion
*
* @tparam value_t data type
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] d_loglikelihood Log-likelihood for each series (device) length: batch_size
* @param[out] d_ic Information criterion to be returned for each
* series (device) length: batch_size
* @param[in] ic_type Type of criterion to compute. See IC_Type
* @param[in] n_params Number of parameters in the model
* @param[in] n_samples Number of samples in each series
*/
template <typename value_t, typename idx_t>
void information_criterion_batched(raft::resources const& handle,
                                   raft::device_vector_view<const value_t, idx_t> d_loglikelihood,
                                   raft::device_vector_view<value_t, idx_t> d_ic,
                                   raft::stats::IC_Type ic_type,
                                   idx_t n_params,
                                   idx_t n_samples)
{
  RAFT_EXPECTS(d_ic.size() == d_loglikelihood.size(), "Size mismatch");
  RAFT_EXPECTS(d_ic.is_exhaustive(), "d_ic must be contiguous");
  RAFT_EXPECTS(d_loglikelihood.is_exhaustive(), "d_loglikelihood must be contiguous");
  // Fully qualified: `batched::detail` and `resource` are raft namespaces,
  // not visible unqualified from cuvs::stats.
  raft::stats::batched::detail::information_criterion(d_ic.data_handle(),
                                                      d_loglikelihood.data_handle(),
                                                      ic_type,
                                                      n_params,
                                                      d_ic.extent(0),
                                                      n_samples,
                                                      raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_information_criterion
} // namespace stats
} // namespace cuvs
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/stddev.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __STDDEV_H
#define __STDDEV_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/detail/stddev.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Compute stddev of the input matrix
*
* Stddev operation is assumed to be performed on a given column.
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param std the output stddev vector
* @param data the input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param sample whether to evaluate sample stddev or not. In other words,
* whether
* to normalize the output using N-1 or N, for true or false, respectively
* @param rowMajor whether the input data is row or col major
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int>
void stddev(Type* std,
            const Type* data,
            const Type* mu,
            IdxType D,
            IdxType N,
            bool sample,
            bool rowMajor,
            cudaStream_t stream)
{
  // Explicit raft:: qualification: unqualified `detail::` does not resolve
  // from cuvs::stats to the raft implementation included above.
  raft::stats::detail::stddev(std, data, mu, D, N, sample, rowMajor, stream);
}
/**
* @brief Compute variance of the input matrix
*
* Variance operation is assumed to be performed on a given column.
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @param var the output stddev vector
* @param data the input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param sample whether to evaluate sample stddev or not. In other words,
* whether
* to normalize the output using N-1 or N, for true or false, respectively
* @param rowMajor whether the input data is row or col major
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int>
void vars(Type* var,
          const Type* data,
          const Type* mu,
          IdxType D,
          IdxType N,
          bool sample,
          bool rowMajor,
          cudaStream_t stream)
{
  // Explicit raft:: qualification: unqualified `detail::` does not resolve
  // from cuvs::stats to the raft implementation included above.
  raft::stats::detail::vars(var, data, mu, D, N, sample, rowMajor, stream);
}
/**
* @defgroup stats_stddev Standard Deviation
* @{
*/
/**
* @brief Compute stddev of the input matrix
*
* Stddev operation is assumed to be performed on a given column.
*
* @tparam value_t the data type
* @tparam idx_t Integer type used to for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix
* @param[in] mu the mean vector
* @param[out] std the output stddev vector
* @param[in] sample whether to evaluate sample stddev or not. In other words,
* whether
* to normalize the output using N-1 or N, for true or false, respectively
*/
template <typename value_t, typename idx_t, typename layout_t>
void stddev(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t, layout_t> data,
            raft::device_vector_view<const value_t, idx_t> mu,
            raft::device_vector_view<value_t, idx_t> std,
            bool sample)
{
  constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>;
  static_assert(is_row_major || is_col_major,
                "stddev: Layout must be either "
                "raft::row_major or raft::col_major (or one of their aliases)");
  // Column-wise reduction: one mean/stddev entry per column of `data`.
  RAFT_EXPECTS(mu.size() == std.size(), "Size mismatch between mu and std");
  RAFT_EXPECTS(mu.extent(0) == data.extent(1), "Size mismatch between data and mu");
  // Fully qualified: `detail` / `resource` are raft namespaces, not visible
  // unqualified from cuvs::stats.
  raft::stats::detail::stddev(std.data_handle(),
                              data.data_handle(),
                              mu.data_handle(),
                              data.extent(1),
                              data.extent(0),
                              sample,
                              is_row_major,
                              raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_stddev
/**
* @defgroup stats_variance Variance
* @{
*/
/**
* @brief Compute variance of the input matrix
*
* Variance operation is assumed to be performed on a given column.
*
* @tparam value_t the data type
* @tparam idx_t Integer type used to for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix
* @param[in] mu the mean vector
* @param[out] var the output stddev vector
* @param[in] sample whether to evaluate sample stddev or not. In other words,
* whether
* to normalize the output using N-1 or N, for true or false, respectively
*/
template <typename value_t, typename idx_t, typename layout_t>
void vars(raft::resources const& handle,
          raft::device_matrix_view<const value_t, idx_t, layout_t> data,
          raft::device_vector_view<const value_t, idx_t> mu,
          raft::device_vector_view<value_t, idx_t> var,
          bool sample)
{
  constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>;
  static_assert(is_row_major || is_col_major,
                "vars: Layout must be either "
                "raft::row_major or raft::col_major (or one of their aliases)");
  // Column-wise reduction: one mean/variance entry per column of `data`.
  RAFT_EXPECTS(mu.size() == var.size(), "Size mismatch between mu and std");
  RAFT_EXPECTS(mu.extent(0) == data.extent(1), "Size mismatch between data and mu");
  // Fully qualified: `detail` / `resource` are raft namespaces, not visible
  // unqualified from cuvs::stats.
  raft::stats::detail::vars(var.data_handle(),
                            data.data_handle(),
                            mu.data_handle(),
                            data.extent(1),
                            data.extent(0),
                            sample,
                            is_row_major,
                            raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_variance
}; // namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/specializations.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/mean_center.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MEAN_CENTER_H
#define __MEAN_CENTER_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/mean_center.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Center the input matrix wrt its mean
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @tparam TPB threads per block of the cuda kernel launched
* @param out the output mean-centered matrix
* @param data input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param rowMajor whether input is row or col major
* @param bcastAlongRows whether to broadcast vector along rows or columns
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int, int TPB = 256>
void meanCenter(Type* out,
                const Type* data,
                const Type* mu,
                IdxType D,
                IdxType N,
                bool rowMajor,
                bool bcastAlongRows,
                cudaStream_t stream)
{
  // Explicit raft:: qualification: unqualified `detail::` does not resolve
  // from cuvs::stats to the raft implementation included above.
  raft::stats::detail::meanCenter<Type, IdxType, TPB>(
    out, data, mu, D, N, rowMajor, bcastAlongRows, stream);
}
/**
* @brief Add the input matrix wrt its mean
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
* @tparam TPB threads per block of the cuda kernel launched
* @param out the output mean-added matrix
* @param data input matrix
* @param mu the mean vector
* @param D number of columns of data
* @param N number of rows of data
* @param rowMajor whether input is row or col major
* @param bcastAlongRows whether to broadcast vector along rows or columns
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int, int TPB = 256>
void meanAdd(Type* out,
             const Type* data,
             const Type* mu,
             IdxType D,
             IdxType N,
             bool rowMajor,
             bool bcastAlongRows,
             cudaStream_t stream)
{
  // Explicit raft:: qualification: unqualified `detail::` does not resolve
  // from cuvs::stats to the raft implementation included above.
  raft::stats::detail::meanAdd<Type, IdxType, TPB>(
    out, data, mu, D, N, rowMajor, bcastAlongRows, stream);
}
/**
* @defgroup stats_mean_center Mean Center
* @{
*/
/**
* @brief Center the input matrix wrt its mean
* @tparam value_t the data type
* @tparam idx_t index type
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data input matrix of size nrows * ncols
* @param[in] mu the mean vector of size ncols if bcast_along_rows else nrows
* @param[out] out the output mean-centered matrix
* @param[in] bcast_along_rows whether to broadcast vector along rows or columns
*/
template <typename value_t, typename idx_t, typename layout_t>
void mean_center(raft::resources const& handle,
                 raft::device_matrix_view<const value_t, idx_t, layout_t> data,
                 raft::device_vector_view<const value_t, idx_t> mu,
                 raft::device_matrix_view<value_t, idx_t, layout_t> out,
                 bool bcast_along_rows)
{
  static_assert(
    std::is_same_v<layout_t, raft::row_major> || std::is_same_v<layout_t, raft::col_major>,
    "Data layout not supported");
  // Broadcasting along rows consumes one mean per column, and vice versa.
  auto mean_vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
  RAFT_EXPECTS(out.extents() == data.extents(), "Size mismatch");
  RAFT_EXPECTS(mean_vec_size == mu.extent(0), "Size mismatch between data and mu");
  RAFT_EXPECTS(out.is_exhaustive(), "out must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  // Fully qualified: `detail` / `resource` are raft namespaces, not visible
  // unqualified from cuvs::stats.
  raft::stats::detail::meanCenter<value_t, idx_t>(out.data_handle(),
                                                  data.data_handle(),
                                                  mu.data_handle(),
                                                  data.extent(1),
                                                  data.extent(0),
                                                  std::is_same_v<layout_t, raft::row_major>,
                                                  bcast_along_rows,
                                                  raft::resource::get_cuda_stream(handle));
}
/**
 * @brief Add the mean vector to the input matrix
 * @tparam value_t the data type
 * @tparam idx_t index type
 * @tparam layout_t Layout type of the input matrix.
 * @param[in] handle the raft handle
 * @param[in] data input matrix of size nrows * ncols
 * @param[in] mu the mean vector of size ncols if bcast_along_rows else nrows
 * @param[out] out the output mean-added matrix
 * @param[in] bcast_along_rows whether to broadcast vector along rows or columns
 */
template <typename value_t, typename idx_t, typename layout_t>
void mean_add(raft::resources const& handle,
              raft::device_matrix_view<const value_t, idx_t, layout_t> data,
              raft::device_vector_view<const value_t, idx_t> mu,
              raft::device_matrix_view<value_t, idx_t, layout_t> out,
              bool bcast_along_rows)
{
  static_assert(
    std::is_same_v<layout_t, raft::row_major> || std::is_same_v<layout_t, raft::col_major>,
    "Data layout not supported");
  // The mean vector spans the broadcast dimension: one entry per column when
  // broadcasting along rows, one entry per row otherwise.
  auto mean_vec_size = bcast_along_rows ? data.extent(1) : data.extent(0);
  RAFT_EXPECTS(out.extents() == data.extents(), "Size mismatch");
  RAFT_EXPECTS(mean_vec_size == mu.extent(0), "Size mismatch between data and mu");
  RAFT_EXPECTS(out.is_exhaustive(), "out must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  // Fully qualify raft symbols: unqualified `detail::` / `resource::` do not
  // resolve to the raft namespaces from inside namespace cuvs::stats.
  raft::stats::detail::meanAdd<value_t, idx_t>(out.data_handle(),
                                               data.data_handle(),
                                               mu.data_handle(),
                                               data.extent(1),
                                               data.extent(0),
                                               std::is_same_v<layout_t, raft::row_major>,
                                               bcast_along_rows,
                                               raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_mean_center
}; // end namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/entropy.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __ENTROPY_H
#define __ENTROPY_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/entropy.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Function to calculate entropy
* <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">more info on entropy</a>
*
* @tparam T data type
* @param clusterArray: the array of classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
* @return the entropy score
*/
template <typename T>
double entropy(const T* clusterArray,
               const int size,
               const T lowerLabelRange,
               const T upperLabelRange,
               cudaStream_t stream)
{
  // NOTE(review): qualify explicitly — unqualified `detail::` does not resolve to
  // raft::stats::detail from inside namespace cuvs::stats.
  return raft::stats::detail::entropy(
    clusterArray, size, lowerLabelRange, upperLabelRange, stream);
}
/**
* @defgroup stats_entropy Entropy
* @{
*/
/**
* @brief Function to calculate entropy
* <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">more info on entropy</a>
*
* @tparam value_t data type
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] cluster_array: the array of classes of type value_t
* @param[in] lower_label_range: the lower bound of the range of labels
* @param[in] upper_label_range: the upper bound of the range of labels
* @return the entropy score
*/
template <typename value_t, typename idx_t>
double entropy(raft::resources const& handle,
               raft::device_vector_view<const value_t, idx_t> cluster_array,
               const value_t lower_label_range,
               const value_t upper_label_range)
{
  RAFT_EXPECTS(cluster_array.is_exhaustive(), "cluster_array must be contiguous");
  // Fully qualify raft symbols: unqualified `detail::` / `resource::` do not
  // resolve to the raft namespaces from inside namespace cuvs::stats.
  return raft::stats::detail::entropy(cluster_array.data_handle(),
                                      cluster_array.extent(0),
                                      lower_label_range,
                                      upper_label_range,
                                      raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_entropy
}; // end namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/neighborhood_recall.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "detail/neighborhood_recall.cuh"
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <optional>
namespace raft::stats {
/**
* @defgroup stats_neighborhood_recall Neighborhood Recall Score
* @{
*/
/**
* @brief Calculate Neighborhood Recall score on the device for indices, distances computed by any
* Nearest Neighbors Algorithm against reference indices, distances. Recall score is calculated by
* comparing the total number of matching indices and dividing that value by the total size of the
* indices matrix of dimensions (D, k). If distance matrices are provided, then non-matching indices
 * could be considered a match if abs(dist - ref_dist) < eps.
*
* Usage example:
* @code{.cpp}
* raft::device_resources res;
* // assume D rows and N column dataset
* auto k = 64;
* auto indices = raft::make_device_matrix<int>(res, D, k);
* auto distances = raft::make_device_matrix<float>(res, D, k);
* // run ANN algorithm of choice
*
* auto ref_indices = raft::make_device_matrix<int>(res, D, k);
* auto ref_distances = raft::make_device_matrix<float>(res, D, k);
* // run brute-force KNN for reference
*
* auto scalar = 0.0f;
* auto recall_score = raft::make_device_scalar(res, scalar);
*
* raft::stats::neighborhood_recall(res,
raft::make_const_mdspan(indices.view()),
raft::make_const_mdspan(ref_indices.view()),
recall_score.view(),
raft::make_const_mdspan(distances.view()),
raft::make_const_mdspan(ref_distances.view()));
* @endcode
*
* @tparam IndicesValueType data-type of the indices
* @tparam IndexType data-type to index all matrices
* @tparam ScalarType data-type to store recall score
* @tparam DistanceValueType data-type of the distances
* @param res raft::resources object to manage resources
* @param[in] indices raft::device_matrix_view indices of neighbors
* @param[in] ref_indices raft::device_matrix_view reference indices of neighbors
* @param[out] recall_score raft::device_scalar_view output recall score
* @param[in] distances (optional) raft::device_matrix_view distances of neighbors
* @param[in] ref_distances (optional) raft::device_matrix_view reference distances of neighbors
* @param[in] eps (optional, default = 0.001) value within which distances are considered matching
*/
template <typename IndicesValueType,
          typename IndexType,
          typename ScalarType,
          typename DistanceValueType = float>
void neighborhood_recall(
  raft::resources const& res,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices,
  raft::device_scalar_view<ScalarType> recall_score,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    distances = std::nullopt,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    ref_distances = std::nullopt,
  std::optional<raft::host_scalar_view<const DistanceValueType>> eps = std::nullopt)
{
  RAFT_EXPECTS(indices.extent(0) == ref_indices.extent(0),
               "The number of rows in indices and reference indices should be equal");
  RAFT_EXPECTS(indices.extent(1) == ref_indices.extent(1),
               "The number of columns in indices and reference indices should be equal");
  // Distance matrices are optional, but must be supplied (and shape-consistent) as a pair.
  if (distances.has_value() or ref_distances.has_value()) {
    RAFT_EXPECTS(distances.has_value() and ref_distances.has_value(),
                 "Both distances and reference distances should have values");
    RAFT_EXPECTS(distances.value().extent(0) == ref_distances.value().extent(0),
                 "The number of rows in distances and reference distances should be equal");
    // Fixed error message: this check compares distance matrices, not index matrices.
    RAFT_EXPECTS(distances.value().extent(1) == ref_distances.value().extent(1),
                 "The number of columns in distances and reference distances should be equal");
    RAFT_EXPECTS(indices.extent(0) == distances.value().extent(0),
                 "The number of rows in indices and distances should be equal");
    RAFT_EXPECTS(indices.extent(1) == distances.value().extent(1),
                 "The number of columns in indices and distances should be equal");
  }
  // Default matching tolerance used when the caller does not provide eps.
  DistanceValueType eps_val = 0.001;
  if (eps.has_value()) { eps_val = *eps.value().data_handle(); }
  detail::neighborhood_recall(
    res, indices, ref_indices, distances, ref_distances, recall_score, eps_val);
}
/**
* @brief Calculate Neighborhood Recall score on the host for indices, distances computed by any
* Nearest Neighbors Algorithm against reference indices, distances. Recall score is calculated by
* comparing the total number of matching indices and dividing that value by the total size of the
* indices matrix of dimensions (D, k). If distance matrices are provided, then non-matching indices
 * could be considered a match if abs(dist - ref_dist) < eps.
*
* Usage example:
* @code{.cpp}
* raft::device_resources res;
* // assume D rows and N column dataset
* auto k = 64;
* auto indices = raft::make_device_matrix<int>(res, D, k);
* auto distances = raft::make_device_matrix<float>(res, D, k);
* // run ANN algorithm of choice
*
* auto ref_indices = raft::make_device_matrix<int>(res, D, k);
* auto ref_distances = raft::make_device_matrix<float>(res, D, k);
* // run brute-force KNN for reference
*
* auto scalar = 0.0f;
* auto recall_score = raft::make_host_scalar(scalar);
*
* raft::stats::neighborhood_recall(res,
raft::make_const_mdspan(indices.view()),
raft::make_const_mdspan(ref_indices.view()),
recall_score.view(),
raft::make_const_mdspan(distances.view()),
raft::make_const_mdspan(ref_distances.view()));
* @endcode
*
* @tparam IndicesValueType data-type of the indices
* @tparam IndexType data-type to index all matrices
* @tparam ScalarType data-type to store recall score
* @tparam DistanceValueType data-type of the distances
* @param res raft::resources object to manage resources
* @param[in] indices raft::device_matrix_view indices of neighbors
* @param[in] ref_indices raft::device_matrix_view reference indices of neighbors
* @param[out] recall_score raft::host_scalar_view output recall score
* @param[in] distances (optional) raft::device_matrix_view distances of neighbors
* @param[in] ref_distances (optional) raft::device_matrix_view reference distances of neighbors
* @param[in] eps (optional, default = 0.001) value within which distances are considered matching
*/
template <typename IndicesValueType,
          typename IndexType,
          typename ScalarType,
          typename DistanceValueType = float>
void neighborhood_recall(
  raft::resources const& res,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices,
  raft::host_scalar_view<ScalarType> recall_score,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    distances = std::nullopt,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    ref_distances = std::nullopt,
  std::optional<raft::host_scalar_view<const DistanceValueType>> eps = std::nullopt)
{
  // Stage the score in device memory, delegate to the device-scalar overload,
  // then copy the result back into the caller-provided host scalar.
  auto device_score = raft::make_device_scalar(res, *recall_score.data_handle());
  neighborhood_recall(
    res, indices, ref_indices, device_score.view(), distances, ref_distances, eps);
  raft::update_host(recall_score.data_handle(),
                    device_score.data_handle(),
                    1,
                    raft::resource::get_cuda_stream(res));
  // Block until the device-to-host copy completes so the value is valid on return.
  raft::resource::sync_stream(res);
}
/** @} */  // end group stats_neighborhood_recall
} // end namespace raft::stats
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/regression_metrics.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __REGRESSION_METRICS_H
#define __REGRESSION_METRICS_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/detail/scores.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Compute regression metrics mean absolute error, mean squared error, median absolute error
* @tparam T: data type for predictions (e.g., float or double for regression).
* @param[in] predictions: array of predictions (GPU pointer).
* @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer).
* @param[in] n: number of elements in each of predictions, ref_predictions. Should be > 0.
* @param[in] stream: cuda stream.
* @param[out] mean_abs_error: Mean Absolute Error. Sum over n of (|predictions[i] -
* ref_predictions[i]|) / n.
* @param[out] mean_squared_error: Mean Squared Error. Sum over n of ((predictions[i] -
* ref_predictions[i])^2) / n.
* @param[out] median_abs_error: Median Absolute Error. Median of |predictions[i] -
* ref_predictions[i]| for i in [0, n).
*/
template <typename T>
void regression_metrics(const T* predictions,
                        const T* ref_predictions,
                        int n,
                        cudaStream_t stream,
                        double& mean_abs_error,
                        double& mean_squared_error,
                        double& median_abs_error)
{
  // NOTE(review): qualify explicitly — unqualified `detail::` does not resolve to
  // raft::stats::detail from inside namespace cuvs::stats.
  raft::stats::detail::regression_metrics(
    predictions, ref_predictions, n, stream, mean_abs_error, mean_squared_error, median_abs_error);
}
/**
* @defgroup stats_regression_metrics Regression Metrics
* @{
*/
/**
* @brief Compute regression metrics mean absolute error, mean squared error, median absolute error
* @tparam value_t the data type for predictions (e.g., float or double for regression).
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] predictions: array of predictions.
* @param[in] ref_predictions: array of reference (ground-truth) predictions.
* @param[out] mean_abs_error: Mean Absolute Error. Sum over n of (|predictions[i] -
* ref_predictions[i]|) / n.
* @param[out] mean_squared_error: Mean Squared Error. Sum over n of ((predictions[i] -
* ref_predictions[i])^2) / n.
* @param[out] median_abs_error: Median Absolute Error. Median of |predictions[i] -
* ref_predictions[i]| for i in [0, n).
*/
template <typename value_t, typename idx_t>
void regression_metrics(raft::resources const& handle,
                        raft::device_vector_view<const value_t, idx_t> predictions,
                        raft::device_vector_view<const value_t, idx_t> ref_predictions,
                        raft::host_scalar_view<double> mean_abs_error,
                        raft::host_scalar_view<double> mean_squared_error,
                        raft::host_scalar_view<double> median_abs_error)
{
  RAFT_EXPECTS(predictions.extent(0) == ref_predictions.extent(0),
               "Size mismatch between predictions and ref_predictions");
  RAFT_EXPECTS(predictions.is_exhaustive(), "predictions must be contiguous");
  RAFT_EXPECTS(ref_predictions.is_exhaustive(), "ref_predictions must be contiguous");
  // The three output scalars are written through host references; reject empty views
  // up front instead of dereferencing null.
  RAFT_EXPECTS(mean_abs_error.data_handle() != nullptr, "mean_abs_error view must not be empty");
  RAFT_EXPECTS(mean_squared_error.data_handle() != nullptr,
               "mean_squared_error view must not be empty");
  RAFT_EXPECTS(median_abs_error.data_handle() != nullptr,
               "median_abs_error view must not be empty");
  // Fully qualify raft symbols: unqualified `detail::` / `resource::` do not
  // resolve to the raft namespaces from inside namespace cuvs::stats.
  raft::stats::detail::regression_metrics(predictions.data_handle(),
                                          ref_predictions.data_handle(),
                                          predictions.extent(0),
                                          raft::resource::get_cuda_stream(handle),
                                          *mean_abs_error.data_handle(),
                                          *mean_squared_error.data_handle(),
                                          *median_abs_error.data_handle());
}
/** @} */ // end group stats_regression_metrics
} // namespace stats
} // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/silhouette_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SILHOUETTE_SCORE_H
#define __SILHOUETTE_SCORE_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/batched/silhouette_score.cuh>
#include <raft/stats/detail/silhouette_score.cuh>
namespace cuvs {
namespace stats {
/**
* @brief main function that returns the average silhouette score for a given set of data and its
* clusterings
* @tparam DataT: type of the data samples
* @tparam LabelT: type of the labels
* @param handle: raft handle for managing expensive resources
* @param X_in: pointer to the input Data samples array (nRows x nCols)
* @param nRows: number of data samples
* @param nCols: number of features
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param nLabels: number of Labels
* @param silhouette_scorePerSample: pointer to the array that is optionally taken in as input and
* is populated with the silhouette score for every sample (1 x nRows)
* @param stream: the cuda stream where to launch this kernel
* @param metric: the numerical value that maps to the type of distance metric to be used in the
* calculations
*/
template <typename DataT, typename LabelT>
DataT silhouette_score(
  raft::resources const& handle,
  DataT* X_in,
  int nRows,
  int nCols,
  LabelT* labels,
  int nLabels,
  DataT* silhouette_scorePerSample,
  cudaStream_t stream,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  // NOTE(review): qualify explicitly — unqualified `detail::` does not resolve to
  // raft::stats::detail from inside namespace cuvs::stats. Also verify that the raft
  // detail implementation accepts cuvs::distance::DistanceType for `metric` — TODO confirm.
  return raft::stats::detail::silhouette_score(
    handle, X_in, nRows, nCols, labels, nLabels, silhouette_scorePerSample, stream, metric);
}
/**
 * @brief Batched version of silhouette_score: computes the average silhouette score
 * while processing the samples in chunks of `chunk` rows to bound memory usage.
 * @tparam value_t: type of the data samples
 * @tparam value_idx: index type for rows/columns
 * @tparam label_idx: type of the labels
 * @param handle: raft handle for managing expensive resources
 * @param X: pointer to the input data samples array (n_rows x n_cols, row-major)
 * @param n_rows: number of data samples
 * @param n_cols: number of features
 * @param y: pointer to the array containing labels for every data sample (1 x n_rows)
 * @param n_labels: number of labels
 * @param scores: optional output array with the silhouette score per sample (1 x n_rows)
 * @param chunk: number of samples processed per batch
 * @param metric: the distance metric to use in the calculations
 * @return the average silhouette score
 */
template <typename value_t, typename value_idx, typename label_idx>
value_t silhouette_score_batched(
  raft::resources const& handle,
  value_t* X,
  value_idx n_rows,
  value_idx n_cols,
  label_idx* y,
  label_idx n_labels,
  value_t* scores,
  value_idx chunk,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  // NOTE(review): qualify explicitly — unqualified `batched::detail::` does not resolve
  // to raft::stats::batched::detail from inside namespace cuvs::stats.
  return raft::stats::batched::detail::silhouette_score(
    handle, X, n_rows, n_cols, y, n_labels, scores, chunk, metric);
}
/**
* @defgroup stats_silhouette_score Silhouette Score
* @{
*/
/**
* @brief main function that returns the average silhouette score for a given set of data and its
* clusterings
* @tparam value_t: type of the data samples
* @tparam label_t: type of the labels
* @tparam idx_t index type
* @param[in] handle: raft handle for managing expensive resources
* @param[in] X_in: input matrix Data in row-major format (nRows x nCols)
* @param[in] labels: the pointer to the array containing labels for every data sample (length:
* nRows)
* @param[out] silhouette_score_per_sample: optional array populated with the silhouette score
* for every sample (length: nRows)
* @param[in] n_unique_labels: number of unique labels in the labels array
* @param[in] metric: the numerical value that maps to the type of distance metric to be used in
* the calculations
* @return: The silhouette score.
*/
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X_in,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::optional<raft::device_vector_view<value_t, idx_t>> silhouette_score_per_sample,
  idx_t n_unique_labels,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  RAFT_EXPECTS(labels.extent(0) == X_in.extent(0), "Size mismatch between labels and data");
  // Per-sample output is optional; pass nullptr to the implementation when absent.
  value_t* silhouette_score_per_sample_ptr = nullptr;
  if (silhouette_score_per_sample.has_value()) {
    silhouette_score_per_sample_ptr = silhouette_score_per_sample.value().data_handle();
    RAFT_EXPECTS(silhouette_score_per_sample.value().extent(0) == X_in.extent(0),
                 "Size mismatch between silhouette_score_per_sample and data");
  }
  // Fully qualify raft symbols: unqualified `detail::` / `resource::` do not
  // resolve to the raft namespaces from inside namespace cuvs::stats.
  return raft::stats::detail::silhouette_score(handle,
                                               X_in.data_handle(),
                                               X_in.extent(0),
                                               X_in.extent(1),
                                               labels.data_handle(),
                                               n_unique_labels,
                                               silhouette_score_per_sample_ptr,
                                               raft::resource::get_cuda_stream(handle),
                                               metric);
}
/**
* @brief function that returns the average silhouette score for a given set of data and its
* clusterings
* @tparam value_t: type of the data samples
* @tparam label_t: type of the labels
* @tparam idx_t index type
* @param[in] handle: raft handle for managing expensive resources
* @param[in] X: input matrix Data in row-major format (nRows x nCols)
* @param[in] labels: the pointer to the array containing labels for every data sample (length:
* nRows)
* @param[out] silhouette_score_per_sample: optional array populated with the silhouette score
* for every sample (length: nRows)
* @param[in] n_unique_labels: number of unique labels in the labels array
* @param[in] batch_size: number of samples per batch
* @param[in] metric: the numerical value that maps to the type of distance metric to be used in
* the calculations
* @return: The silhouette score.
*/
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score_batched(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::optional<raft::device_vector_view<value_t, idx_t>> silhouette_score_per_sample,
  idx_t n_unique_labels,
  idx_t batch_size,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  static_assert(std::is_integral_v<idx_t>,
                "silhouette_score_batched: The index type "
                "of each mdspan argument must be an integral type.");
  static_assert(std::is_integral_v<label_t>,
                "silhouette_score_batched: The label type must be an integral type.");
  RAFT_EXPECTS(labels.extent(0) == X.extent(0), "Size mismatch between labels and data");
  // Per-sample output is optional; pass nullptr to the implementation when absent.
  value_t* scores_ptr = nullptr;
  if (silhouette_score_per_sample.has_value()) {
    scores_ptr = silhouette_score_per_sample.value().data_handle();
    RAFT_EXPECTS(silhouette_score_per_sample.value().extent(0) == X.extent(0),
                 "Size mismatch between silhouette_score_per_sample and data");
  }
  // NOTE(review): qualify explicitly — unqualified `batched::detail::` does not resolve
  // to raft::stats::batched::detail from inside namespace cuvs::stats.
  return raft::stats::batched::detail::silhouette_score(handle,
                                                        X.data_handle(),
                                                        X.extent(0),
                                                        X.extent(1),
                                                        labels.data_handle(),
                                                        n_unique_labels,
                                                        scores_ptr,
                                                        batch_size,
                                                        metric);
}
/** @} */ // end group stats_silhouette_score
/**
* @brief Overload of `silhouette_score` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for the optional arguments.
*
* Please see above for documentation of `silhouette_score`.
*/
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X_in,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::nullopt_t silhouette_score_per_sample,
  idx_t n_unique_labels,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  // Forward to the std::optional overload with an empty per-sample scores view.
  using scores_view_t = raft::device_vector_view<value_t, idx_t>;
  std::optional<scores_view_t> no_scores{silhouette_score_per_sample};
  return silhouette_score(handle, X_in, labels, no_scores, n_unique_labels, metric);
}
/**
* @brief Overload of `silhouette_score_batched` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for the optional arguments.
*
* Please see above for documentation of `silhouette_score_batched`.
*/
template <typename value_t, typename label_t, typename idx_t>
value_t silhouette_score_batched(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> X,
  raft::device_vector_view<const label_t, idx_t> labels,
  std::nullopt_t silhouette_score_per_sample,
  idx_t n_unique_labels,
  idx_t batch_size,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  // Forward to the std::optional overload with an empty per-sample scores view.
  using scores_view_t = raft::device_vector_view<value_t, idx_t>;
  std::optional<scores_view_t> no_scores{silhouette_score_per_sample};
  return silhouette_score_batched(
    handle, X, labels, no_scores, n_unique_labels, batch_size, metric);
}
}; // namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/dispersion.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __DISPERSION_H
#define __DISPERSION_H
#pragma once
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/dispersion.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Compute cluster dispersion metric. This is very useful for
* automatically finding the 'k' (in kmeans) that improves this metric.
* @tparam DataT data type
* @tparam IdxT index type
* @tparam TPB threads block for kernels launched
* @param centroids the cluster centroids. This is assumed to be row-major
* and of dimension (nClusters x dim)
* @param clusterSizes number of points in the dataset which belong to each
* cluster. This is of length nClusters
* @param globalCentroid compute the global weighted centroid of all cluster
* centroids. This is of length dim. Pass a nullptr if this is not needed
* @param nClusters number of clusters
* @param nPoints number of points in the dataset
* @param dim dataset dimensionality
* @param stream cuda stream
* @return the cluster dispersion value
*/
template <typename DataT, typename IdxT = int, int TPB = 256>
DataT dispersion(const DataT* centroids,
                 const IdxT* clusterSizes,
                 DataT* globalCentroid,
                 IdxT nClusters,
                 IdxT nPoints,
                 IdxT dim,
                 cudaStream_t stream)
{
  // NOTE(review): qualify explicitly — unqualified `detail::` does not resolve to
  // raft::stats::detail from inside namespace cuvs::stats.
  return raft::stats::detail::dispersion<DataT, IdxT, TPB>(
    centroids, clusterSizes, globalCentroid, nClusters, nPoints, dim, stream);
}
/**
* @defgroup stats_cluster_dispersion Cluster Dispersion Metric
* @{
*/
/**
* @brief Compute cluster dispersion metric. This is very useful for
* automatically finding the 'k' (in kmeans) that improves this metric.
* The cluster dispersion metric is defined as the square root of the sum of the
* squared distances between the cluster centroids and the global centroid
* @tparam value_t data type
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] centroids the cluster centroids. This is assumed to be row-major
* and of dimension (n_clusters x dim)
* @param[in] cluster_sizes number of points in the dataset which belong to each
* cluster. This is of length n_clusters
* @param[out] global_centroid compute the global weighted centroid of all cluster
* centroids. This is of length dim. Use std::nullopt to not return it.
* @param[in] n_points number of points in the dataset
* @return the cluster dispersion value
*/
template <typename value_t, typename idx_t>
value_t cluster_dispersion(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> centroids,
  raft::device_vector_view<const idx_t, idx_t> cluster_sizes,
  std::optional<raft::device_vector_view<value_t, idx_t>> global_centroid,
  const idx_t n_points)
{
  RAFT_EXPECTS(cluster_sizes.extent(0) == centroids.extent(0), "Size mismatch");
  RAFT_EXPECTS(cluster_sizes.is_exhaustive(), "cluster_sizes must be contiguous");
  // The global centroid output is optional; pass nullptr when it is not requested.
  value_t* global_centroid_ptr = nullptr;
  if (global_centroid.has_value()) {
    RAFT_EXPECTS(global_centroid.value().extent(0) == centroids.extent(1),
                 "Size mismatch between global_centroid and centroids");
    RAFT_EXPECTS(global_centroid.value().is_exhaustive(), "global_centroid must be contiguous");
    global_centroid_ptr = global_centroid.value().data_handle();
  }
  // Fully qualify raft symbols: unqualified `detail::` / `resource::` do not
  // resolve to the raft namespaces from inside namespace cuvs::stats.
  return raft::stats::detail::dispersion<value_t, idx_t>(centroids.data_handle(),
                                                         cluster_sizes.data_handle(),
                                                         global_centroid_ptr,
                                                         centroids.extent(0),
                                                         n_points,
                                                         centroids.extent(1),
                                                         raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_cluster_dispersion
/**
* @brief Overload of `cluster_dispersion` to help the
* compiler find the above overload, in case users pass in
* `std::nullopt` for the optional arguments.
*
* Please see above for documentation of `cluster_dispersion`.
*/
template <typename value_t, typename idx_t>
value_t cluster_dispersion(
  raft::resources const& handle,
  raft::device_matrix_view<const value_t, idx_t, raft::row_major> centroids,
  raft::device_vector_view<const idx_t, idx_t> cluster_sizes,
  std::nullopt_t global_centroid,
  const idx_t n_points)
{
  // Forward to the std::optional overload with an empty global-centroid view.
  using centroid_view_t = raft::device_vector_view<value_t, idx_t>;
  std::optional<centroid_view_t> no_centroid{global_centroid};
  return cluster_dispersion(handle, centroids, cluster_sizes, no_centroid, n_points);
}
} // end namespace stats
} // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/sum.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __SUM_H
#define __SUM_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/sum.cuh>
#include <raft/util/cudart_utils.hpp>
namespace cuvs {
namespace stats {
/**
* @brief Compute sum of the input matrix
*
* Sum operation is assumed to be performed on a given column.
*
* @tparam Type the data type
* @tparam IdxType Integer type used to for addressing
 * @param output the output sum vector (one entry per column)
* @param input the input matrix
* @param D number of columns of data
* @param N number of rows of data
* @param rowMajor whether the input data is row or col major
* @param stream cuda stream where to launch work
*/
template <typename Type, typename IdxType = int>
void sum(Type* output, const Type* input, IdxType D, IdxType N, bool rowMajor, cudaStream_t stream)
{
  // Fully qualify the call: this wrapper lives in cuvs::stats, while the
  // implementation pulled in via <raft/stats/detail/sum.cuh> lives in
  // raft::stats::detail, so an unqualified `detail::` would not resolve here.
  raft::stats::detail::sum(output, input, D, N, rowMajor, stream);
}
/**
* @defgroup stats_sum Sum
* @{
*/
/**
* @brief Compute sum of the input matrix
*
* Sum operation is assumed to be performed on a given column.
*
* @tparam value_t the data type
* @tparam idx_t Integer type used to for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] input the input matrix
* @param[out] output the output mean vector
*/
template <typename value_t, typename idx_t, typename layout_t>
void sum(raft::resources const& handle,
         raft::device_matrix_view<const value_t, idx_t, layout_t> input,
         raft::device_vector_view<value_t, idx_t> output)
{
  constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>;
  static_assert(is_row_major || is_col_major,
                "sum: Layout must be either "
                "raft::row_major or raft::col_major (or one of their aliases)");
  // The output holds one sum per column, so it must match the column count.
  RAFT_EXPECTS(input.extent(1) == output.extent(0), "Size mismatch between input and output");
  // Fully qualified calls: the implementation and the stream accessor live in
  // the raft:: namespaces, not in cuvs::stats, so unqualified `detail::` and
  // `resource::` would not resolve from this namespace.
  raft::stats::detail::sum(output.data_handle(),
                           input.data_handle(),
                           input.extent(1),
                           input.extent(0),
                           is_row_major,
                           raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_sum
}; // end namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/mutual_info_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MUTUAL_INFO_SCORE_H
#define __MUTUAL_INFO_SCORE_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/mutual_info_score.cuh>
namespace cuvs {
namespace stats {
/**
* @brief Function to calculate the mutual information between two clusters
* <a href="https://en.wikipedia.org/wiki/Mutual_information">more info on mutual information</a>
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
*/
template <typename T>
double mutual_info_score(const T* firstClusterArray,
                         const T* secondClusterArray,
                         int size,
                         T lowerLabelRange,
                         T upperLabelRange,
                         cudaStream_t stream)
{
  // Fully qualify the call: the implementation pulled in via
  // <raft/stats/detail/mutual_info_score.cuh> lives in raft::stats::detail,
  // so an unqualified `detail::` would not resolve from cuvs::stats.
  return raft::stats::detail::mutual_info_score(
    firstClusterArray, secondClusterArray, size, lowerLabelRange, upperLabelRange, stream);
}
/**
* @defgroup stats_mutual_info Mutual Information
* @{
*/
/**
* @brief Function to calculate the mutual information between two clusters
* <a href="https://en.wikipedia.org/wiki/Mutual_information">more info on mutual information</a>
* @tparam value_t the data type
* @tparam idx_t index type
* @param[in] handle the raft handle
* @param[in] first_cluster_array: the array of classes of type value_t
* @param[in] second_cluster_array: the array of classes of type value_t
* @param[in] lower_label_range: the lower bound of the range of labels
* @param[in] upper_label_range: the upper bound of the range of labels
* @return the mutual information score
*/
template <typename value_t, typename idx_t>
double mutual_info_score(raft::resources const& handle,
                         raft::device_vector_view<const value_t, idx_t> first_cluster_array,
                         raft::device_vector_view<const value_t, idx_t> second_cluster_array,
                         value_t lower_label_range,
                         value_t upper_label_range)
{
  RAFT_EXPECTS(first_cluster_array.extent(0) == second_cluster_array.extent(0),
               "Size mismatch between first_cluster_array and second_cluster_array");
  RAFT_EXPECTS(first_cluster_array.is_exhaustive(), "first_cluster_array must be contiguous");
  RAFT_EXPECTS(second_cluster_array.is_exhaustive(), "second_cluster_array must be contiguous");
  // Fully qualified calls: the implementation and the stream accessor live in
  // the raft:: namespaces, not in cuvs::stats.
  return raft::stats::detail::mutual_info_score(first_cluster_array.data_handle(),
                                                second_cluster_array.data_handle(),
                                                first_cluster_array.extent(0),
                                                lower_label_range,
                                                upper_label_range,
                                                raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_mutual_info
}; // end namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/minmax.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MINMAX_H
#define __MINMAX_H
#pragma once
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/minmax.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <limits>
namespace cuvs {
namespace stats {
/**
* @brief Computes min/max across every column of the input matrix, as well as
* optionally allow to subsample based on the given row/col ID mapping vectors
*
* @tparam T the data type
* @tparam TPB number of threads per block
* @param data input data
* @param rowids actual row ID mappings. It is of length nrows. If you want to
* skip this index lookup entirely, pass nullptr
* @param colids actual col ID mappings. It is of length ncols. If you want to
* skip this index lookup entirely, pass nullptr
* @param nrows number of rows of data to be worked upon. The actual rows of the
* input "data" can be bigger than this!
* @param ncols number of cols of data to be worked upon. The actual cols of the
* input "data" can be bigger than this!
* @param row_stride stride (in number of elements) between 2 adjacent columns
* @param globalmin final col-wise global minimum (size = ncols)
* @param globalmax final col-wise global maximum (size = ncols)
* @param sampledcols output sampled data. Pass nullptr if you don't need this
* @param stream cuda stream
* @note This method makes the following assumptions:
* 1. input and output matrices are assumed to be col-major
* 2. ncols is small enough to fit the whole of min/max values across all cols
* in shared memory
*/
template <typename T, int TPB = 512>
void minmax(const T* data,
            const unsigned* rowids,
            const unsigned* colids,
            int nrows,
            int ncols,
            int row_stride,
            T* globalmin,
            T* globalmax,
            T* sampledcols,
            cudaStream_t stream)
{
  // Fully qualify the call: the implementation pulled in via
  // <raft/stats/detail/minmax.cuh> lives in raft::stats::detail, so an
  // unqualified `detail::` would not resolve from cuvs::stats.
  raft::stats::detail::minmax<T, TPB>(
    data, rowids, colids, nrows, ncols, row_stride, globalmin, globalmax, sampledcols, stream);
}
/**
* @defgroup stats_minmax Min/Max
* @{
*/
/**
* @brief Computes min/max across every column of the input matrix, as well as
* optionally allow to subsample based on the given row/col ID mapping vectors
*
* @tparam value_t Data type of input matrix element.
* @tparam idx_t Index type of matrix extent.
* @param[in] handle the raft handle
* @param[in] data input data col-major of size [nrows, ncols], unless rowids or
* colids length is smaller
* @param[in] rowids optional row ID mappings of length nrows. If you want to
* skip this index lookup entirely, pass std::nullopt
* @param[in] colids optional col ID mappings of length ncols. If you want to
* skip this index lookup entirely, pass std::nullopt
* @param[out] globalmin final col-wise global minimum (size = ncols)
* @param[out] globalmax final col-wise global maximum (size = ncols)
* @param[out] sampledcols output sampled data. Pass std::nullopt if you don't need this
* @note This method makes the following assumptions:
* 1. input and output matrices are assumed to be col-major
* 2. ncols is small enough to fit the whole of min/max values across all cols
* in shared memory
*/
template <typename value_t, typename idx_t>
void minmax(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t, raft::col_major> data,
            std::optional<raft::device_vector_view<const unsigned, idx_t>> rowids,
            std::optional<raft::device_vector_view<const unsigned, idx_t>> colids,
            raft::device_vector_view<value_t, idx_t> globalmin,
            raft::device_vector_view<value_t, idx_t> globalmax,
            std::optional<raft::device_vector_view<value_t, idx_t>> sampledcols)
{
  const unsigned* rowids_ptr = nullptr;
  const unsigned* colids_ptr = nullptr;
  value_t* sampledcols_ptr   = nullptr;
  auto nrows                 = data.extent(0);
  auto ncols                 = data.extent(1);
  auto row_stride            = data.stride(1);
  // When subsampling vectors are supplied, the effective matrix extents shrink
  // to the number of sampled rows/cols; they may never exceed the data extents.
  if (rowids.has_value()) {
    rowids_ptr = rowids.value().data_handle();
    RAFT_EXPECTS(rowids.value().extent(0) <= nrows, "Rowids size is greater than nrows");
    nrows = rowids.value().extent(0);
  }
  if (colids.has_value()) {
    colids_ptr = colids.value().data_handle();
    RAFT_EXPECTS(colids.value().extent(0) <= ncols, "Colids size is greater than ncols");
    ncols = colids.value().extent(0);
  }
  if (sampledcols.has_value()) { sampledcols_ptr = sampledcols.value().data_handle(); }
  RAFT_EXPECTS(globalmin.extent(0) == ncols, "Size mismatch between globalmin and ncols");
  RAFT_EXPECTS(globalmax.extent(0) == ncols, "Size mismatch between globalmax and ncols");
  // Fully qualified calls: the implementation and the stream accessor live in
  // the raft:: namespaces, not in cuvs::stats, so unqualified `detail::` and
  // `resource::` would not resolve from this namespace.
  raft::stats::detail::minmax<value_t>(data.data_handle(),
                                       rowids_ptr,
                                       colids_ptr,
                                       nrows,
                                       ncols,
                                       row_stride,
                                       globalmin.data_handle(),
                                       globalmax.data_handle(),
                                       sampledcols_ptr,
                                       raft::resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_minmax
}; // namespace stats
}; // namespace cuvs
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/meanvar.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MEANVAR_H
#define __MEANVAR_H
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/detail/meanvar.cuh>
namespace raft::stats {
/**
* @brief Compute mean and variance for each column of a given matrix.
*
* The operation is performed in a single sweep. Consider using it when you need to compute
* both mean and variance, or when you need to compute variance but don't have the mean.
* It's almost twice faster than running `mean` and `vars` sequentially, because all three
* kernels are memory-bound.
*
* @tparam Type the data type
* @tparam IdxType Integer type used for addressing
* @param [out] mean the output mean vector of size D
* @param [out] var the output variance vector of size D
* @param [in] data the input matrix of size [N, D]
* @param [in] D number of columns of data
* @param [in] N number of rows of data
* @param [in] sample whether to evaluate sample variance or not. In other words, whether to
* normalize the variance using N-1 or N, for true or false respectively.
* @param [in] rowMajor whether the input data is row- or col-major, for true or false respectively.
* @param [in] stream
*/
template <typename Type, typename IdxType = int>
void meanvar(Type* mean,
             Type* var,
             const Type* data,
             IdxType D,
             IdxType N,
             bool sample,
             bool rowMajor,
             cudaStream_t stream)
{
  // Forward straight to the single-sweep detail implementation. `sample`
  // selects the N-1 (true) vs N (false) normalization of the variance.
  detail::meanvar(mean, var, data, D, N, sample, rowMajor, stream);
}
/**
* @defgroup stats_mean_var Mean and Variance
* @{
*/
/**
* @brief Compute mean and variance for each column of a given matrix.
*
* The operation is performed in a single sweep. Consider using it when you need to compute
* both mean and variance, or when you need to compute variance but don't have the mean.
* It's almost twice faster than running `mean` and `vars` sequentially, because all three
* kernels are memory-bound.
*
* @tparam value_t the data type
* @tparam idx_t Integer type used for addressing
* @tparam layout_t Layout type of the input matrix.
* @param[in] handle the raft handle
* @param[in] data the input matrix of size [N, D]
* @param[out] mean the output mean vector of size D
* @param[out] var the output variance vector of size D
* @param[in] sample whether to evaluate sample variance or not. In other words, whether to
* normalize the variance using N-1 or N, for true or false respectively.
*/
template <typename value_t, typename idx_t, typename layout_t>
void meanvar(raft::resources const& handle,
             raft::device_matrix_view<const value_t, idx_t, layout_t> data,
             raft::device_vector_view<value_t, idx_t> mean,
             raft::device_vector_view<value_t, idx_t> var,
             bool sample)
{
  // Only the two canonical dense layouts are supported; the boolean flag
  // handed to the detail implementation encodes which one was chosen.
  constexpr bool is_row_major = std::is_same_v<layout_t, raft::row_major>;
  constexpr bool is_col_major = std::is_same_v<layout_t, raft::col_major>;
  static_assert(is_row_major || is_col_major, "Data layout not supported");
  // Outputs are per-column vectors of length D = data.extent(1).
  RAFT_EXPECTS(data.extent(1) == var.extent(0), "Size mismatch between data and var");
  RAFT_EXPECTS(mean.size() == var.size(), "Size mismatch between mean and var");
  RAFT_EXPECTS(mean.is_exhaustive(), "mean must be contiguous");
  RAFT_EXPECTS(var.is_exhaustive(), "var must be contiguous");
  RAFT_EXPECTS(data.is_exhaustive(), "data must be contiguous");
  detail::meanvar(mean.data_handle(),
                  var.data_handle(),
                  data.data_handle(),
                  data.extent(1),
                  data.extent(0),
                  sample,
                  is_row_major,
                  resource::get_cuda_stream(handle));
}
/** @} */ // end group stats_mean_var
}; // namespace raft::stats
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/stats_types.hpp | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cudart_utils.hpp>
namespace raft::stats {
/**
* @ingroup stats_histogram
* @{
*/
/**
 * @brief Types of supported histogram implementations
 */
enum HistType {
  /** shared mem atomics but with bins to be 1b int's */
  HistTypeSmemBits1 = 1,
  /** shared mem atomics but with bins to be 2b int's */
  HistTypeSmemBits2 = 2,
  /** shared mem atomics but with bins to be 4b int's */
  HistTypeSmemBits4 = 4,
  /** shared mem atomics but with bins to be 1B int's */
  HistTypeSmemBits8 = 8,
  /** shared mem atomics but with bins to be 2B int's */
  HistTypeSmemBits16 = 16,
  /** use only global atomics */
  HistTypeGmem,
  /** uses shared mem atomics to reduce global traffic */
  HistTypeSmem,
  /**
   * uses shared mem atomics with match_any intrinsic to further reduce shared
   * memory traffic. This can only be enabled on Volta and later architectures.
   * If one tries to enable this for older arch's, it will fall back to
   * `HistTypeSmem`.
   * @note This is to be used only when the input dataset leads to a lot of
   *       repetitions in a given warp, else, this algo can be much slower than
   *       `HistTypeSmem`!
   */
  HistTypeSmemMatchAny,
  /** builds a hashmap of active bins in shared mem */
  HistTypeSmemHash,
  /** decide at runtime the best algo for the given inputs */
  HistTypeAuto
};
/** @} */
/**
* @ingroup stats_information_criterion
* @{
*/
/**
 * @brief Supported types of information criteria
 *
 * AIC  - Akaike information criterion
 * AICc - AIC with a correction for small sample sizes
 * BIC  - Bayesian information criterion
 */
enum IC_Type { AIC, AICc, BIC };
/** @} */
}; // end namespace raft::stats
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/mean.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
namespace cuvs {
namespace stats {
namespace detail {
///@todo: ColsPerBlk has been tested only for 32!
/**
 * @brief Accumulate per-column sums of a row-major [N, D] matrix into `mu`.
 *
 * Launch layout: gridDim.x tiles the rows (TPB / ColsPerBlk rows per block per
 * iteration), gridDim.y tiles the columns in groups of ColsPerBlk. The caller
 * must zero `mu` beforehand (the host wrapper does a cudaMemsetAsync) and is
 * responsible for dividing the sums by N (or N-1) afterwards.
 */
template <typename Type, typename IdxType, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL meanKernelRowMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  const int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxType thisColId           = threadIdx.x % ColsPerBlk;
  IdxType thisRowId           = threadIdx.x / ColsPerBlk;
  IdxType colId               = thisColId + ((IdxType)blockIdx.y * ColsPerBlk);
  IdxType rowId               = thisRowId + ((IdxType)blockIdx.x * RowsPerBlkPerIter);
  Type thread_data            = Type(0);
  const IdxType stride        = RowsPerBlkPerIter * gridDim.x;
  // Grid-stride walk down the rows; lanes whose column lies past D contribute 0.
  for (IdxType i = rowId; i < N; i += stride)
    thread_data += (colId < D) ? data[i * D + colId] : Type(0);
  // Reduce the block's partials per column in shared memory before the single
  // global atomic per (block, column).
  __shared__ Type smu[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) smu[threadIdx.x] = Type(0);
  __syncthreads();
  raft::myAtomicAdd(smu + thisColId, thread_data);
  __syncthreads();
  // Guard colId < D: the last blockIdx.y tile is partial when D is not a
  // multiple of ColsPerBlk, and an unguarded atomicAdd would write past mu[D-1].
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(mu + colId, smu[thisColId]);
}
/**
 * @brief Compute per-column means of a col-major [N, D] matrix.
 * Launch layout: one block per column (gridDim.x == D), TPB threads per block.
 * Each result is the column sum divided by N.
 */
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL meanKernelColMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  using BlockReduce = cub::BlockReduce<Type, TPB>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  // Threads stride down this block's column, accumulating private partials.
  const IdxType colStart = N * blockIdx.x;
  Type partial           = Type(0);
  for (IdxType i = threadIdx.x; i < N; i += TPB) {
    partial += data[colStart + i];
  }
  const Type total = BlockReduce(temp_storage).Sum(partial);
  if (threadIdx.x == 0) { mu[blockIdx.x] = total / N; }
}
/**
 * @brief Compute per-column means of an [N, D] matrix.
 *
 * @param mu output vector of D means
 * @param data input matrix (row- or col-major per `rowMajor`)
 * @param D number of columns
 * @param N number of rows
 * @param sample normalize by N-1 (true) instead of N (false)
 * @param rowMajor layout of `data`
 * @param stream stream all work is enqueued on
 */
template <typename Type, typename IdxType = int>
void mean(
  Type* mu, const Type* data, IdxType D, IdxType N, bool sample, bool rowMajor, cudaStream_t stream)
{
  static const int TPB = 256;
  if (rowMajor) {
    static const int RowsPerThread = 4;
    static const int ColsPerBlk    = 32;
    static const int RowsPerBlk    = (TPB / ColsPerBlk) * RowsPerThread;
    dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
    // The row-major kernel only accumulates sums into mu, so it must start zeroed.
    RAFT_CUDA_TRY(cudaMemsetAsync(mu, 0, sizeof(Type) * D, stream));
    meanKernelRowMajor<Type, IdxType, TPB, ColsPerBlk><<<grid, TPB, 0, stream>>>(mu, data, D, N);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    Type ratio = Type(1) / (sample ? Type(N - 1) : Type(N));
    raft::linalg::scalarMultiply(mu, mu, ratio, D, stream);
  } else {
    meanKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(mu, data, D, N);
    // Bug fix: the col-major kernel always normalizes by N, so `sample` used to
    // be silently ignored on this path. Rescale to the N-1 normalization so
    // both layouts honor the flag consistently.
    if (sample && N > IdxType(1)) {
      raft::linalg::scalarMultiply(mu, mu, Type(N) / Type(N - 1), D, stream);
    }
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace stats
} // namespace cuvs | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/cov.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/linalg/gemm.cuh>
#include <raft/stats/mean_center.cuh>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief Compute covariance of the input matrix
*
* Mean operation is assumed to be performed on a given column.
*
* @tparam Type the data type
* @param covar the output covariance matrix
* @param data the input matrix (this will get mean-centered at the end!)
* @param mu mean vector of the input matrix
* @param D number of columns of data
* @param N number of rows of data
* @param sample whether to evaluate sample covariance or not. In other words,
* whether to normalize the output using N-1 or N, for true or false,
* respectively
* @param rowMajor whether the input data is row or col major
* @param stable whether to run the slower-but-numerically-stable version or not
 * @param handle raft resources handle (used to obtain the cuBLAS handle)
* @param stream cuda stream
* @note if stable=true, then the input data will be mean centered after this
* function returns!
*/
template <typename Type>
void cov(raft::resources const& handle,
         Type* covar,
         Type* data,
         const Type* mu,
         std::size_t D,
         std::size_t N,
         bool sample,
         bool rowMajor,
         bool stable,
         cudaStream_t stream)
{
  if (stable) {
    // Fully qualified accessor: this namespace is cuvs::stats::detail, so an
    // unqualified `resource::` would not resolve to raft::resource.
    cublasHandle_t cublas_h = raft::resource::get_cublas_handle(handle);
    // since mean operation is assumed to be along a given column, broadcast
    // must be along rows!
    raft::stats::meanCenter(data, data, mu, D, N, rowMajor, true, stream);
    // Normalize by N-1 (sample covariance) or N (population covariance).
    Type alpha = Type(1) / (sample ? Type(N - 1) : Type(N));
    Type beta  = Type(0);
    if (rowMajor) {
      // #TODO: Call from public API when ready
      RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgemm(cublas_h,
                                                       CUBLAS_OP_N,
                                                       CUBLAS_OP_T,
                                                       D,
                                                       D,
                                                       N,
                                                       &alpha,
                                                       data,
                                                       D,
                                                       data,
                                                       D,
                                                       &beta,
                                                       covar,
                                                       D,
                                                       stream));
    } else {
      raft::linalg::gemm(
        handle, data, N, D, data, covar, D, D, CUBLAS_OP_T, CUBLAS_OP_N, alpha, beta, stream);
    }
  } else {
    ///@todo: implement this using cutlass + customized epilogue!
    ASSERT(false, "cov: Implement stable=false case!");
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/contingencyMatrix.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <cub/cub.cuh>
#include <math.h>
namespace cuvs {
namespace stats {
namespace detail {
typedef enum {
IMPL_NONE,
SMEM_ATOMICS,
GLOBAL_ATOMICS,
SORT_AND_GATOMICS
} ContingencyMatrixImplType;
/**
 * @brief One thread per sample: bump the (truth, prediction) cell of the
 * contingency matrix with a global-memory atomic. Labels are assumed to lie
 * in [outIdxOffset, outIdxOffset + outMatWidth).
 */
template <typename T, typename OutT = int>
RAFT_KERNEL devConstructContingencyMatrix(const T* groundTruth,
                                          const T* predicted,
                                          int nSamples,
                                          OutT* outMat,
                                          int outIdxOffset,
                                          int outMatWidth)
{
  const int sampleIdx = threadIdx.x + blockDim.x * blockIdx.x;
  if (sampleIdx >= nSamples) return;  // grid tail guard
  const T truth  = groundTruth[sampleIdx];
  const T pred   = predicted[sampleIdx];
  const auto idx = (truth - outIdxOffset) * outMatWidth + pred - outIdxOffset;
  raft::myAtomicAdd(outMat + idx, OutT(1));
}
/**
 * @brief Host launcher for the global-atomics contingency-matrix kernel.
 * Assumes `outMat` is pre-zeroed by the caller.
 */
template <typename T, typename OutT = int>
void computeCMatWAtomics(const T* groundTruth,
                         const T* predictedLabel,
                         int nSamples,
                         OutT* outMat,
                         int outIdxOffset,
                         int outDimN,
                         cudaStream_t stream)
{
  // The kernel uses no shared memory, so prefer L1 cache.
  RAFT_CUDA_TRY(
    cudaFuncSetCacheConfig(devConstructContingencyMatrix<T, OutT>, cudaFuncCachePreferL1));
  constexpr int kBlockSize = 128;
  const auto nBlocks       = raft::ceildiv(nSamples, kBlockSize);
  devConstructContingencyMatrix<T, OutT><<<nBlocks, kBlockSize, 0, stream>>>(
    groundTruth, predictedLabel, nSamples, outMat, outIdxOffset, outDimN);
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Build the contingency matrix via a per-block shared-memory copy.
 *
 * Each block keeps a private outMatWidth^2 counter matrix in dynamic shared
 * memory (the launcher passes outMatWidth^2 * sizeof(OutT) bytes), accumulates
 * its samples there, then flushes the whole tile to global memory — one global
 * atomic per cell per block instead of one per sample.
 * Labels are assumed to lie in [outIdxOffset, outIdxOffset + outMatWidth).
 */
template <typename T, typename OutT = int>
RAFT_KERNEL devConstructContingencyMatrixSmem(const T* groundTruth,
                                              const T* predicted,
                                              int nSamples,
                                              OutT* outMat,
                                              int outIdxOffset,
                                              int outMatWidth)
{
  extern __shared__ char smem[];
  auto* sMemMatrix = reinterpret_cast<OutT*>(smem);
  // Cooperatively zero the shared-memory matrix before any thread updates it.
  for (int smemIdx = threadIdx.x; smemIdx < outMatWidth * outMatWidth; smemIdx += blockDim.x) {
    sMemMatrix[smemIdx] = 0;
  }
  __syncthreads();
  int elementId = threadIdx.x + blockDim.x * blockIdx.x;
  if (elementId < nSamples) {
    T gt = groundTruth[elementId];
    T pd = predicted[elementId];
    auto outputIdx = (gt - outIdxOffset) * outMatWidth + pd - outIdxOffset;
    raft::myAtomicAdd(sMemMatrix + outputIdx, OutT(1));
  }
  // Barrier outside the divergent branch: all threads must reach it before the
  // block-wide flush of the shared tile to global memory.
  __syncthreads();
  for (int smemIdx = threadIdx.x; smemIdx < outMatWidth * outMatWidth; smemIdx += blockDim.x) {
    raft::myAtomicAdd(outMat + smemIdx, sMemMatrix[smemIdx]);
  }
}
/**
 * @brief Host launcher for the shared-memory contingency-matrix kernel.
 * The caller (getImplVersion) guarantees the outDimN^2 tile fits in the
 * per-block shared-memory budget. Assumes `outMat` is pre-zeroed.
 */
template <typename T, typename OutT = int>
void computeCMatWSmemAtomics(const T* groundTruth,
                             const T* predictedLabel,
                             int nSamples,
                             OutT* outMat,
                             int outIdxOffset,
                             int outDimN,
                             cudaStream_t stream)
{
  constexpr int kBlockSize = 128;
  const auto nBlocks       = raft::ceildiv(nSamples, kBlockSize);
  // Each block needs a full private copy of the matrix in dynamic shared memory.
  const size_t smemBytes = static_cast<size_t>(outDimN) * outDimN * sizeof(OutT);
  devConstructContingencyMatrixSmem<T, OutT><<<nBlocks, kBlockSize, smemBytes, stream>>>(
    groundTruth, predictedLabel, nSamples, outMat, outIdxOffset, outDimN);
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Sort (groundTruth, predicted) pairs by label, then build the matrix
 * with global atomics so that updates land in nearby memory segments.
 *
 * Workspace layout: [sorted keys | sorted values | cub radix-sort scratch],
 * where the first two buffers are 256-byte aligned staging areas sized by
 * getContingencyMatrixWorkspaceSize.
 */
template <typename T, typename OutT = int>
void contingencyMatrixWSort(const T* groundTruth,
                            const T* predictedLabel,
                            int nSamples,
                            OutT* outMat,
                            T minLabel,
                            T maxLabel,
                            void* workspace,
                            size_t workspaceSize,
                            cudaStream_t stream)
{
  T* outKeys           = reinterpret_cast<T*>(workspace);
  auto alignedBufferSz = raft::alignTo<size_t>(nSamples * sizeof(T), 256);
  T* outValue          = reinterpret_cast<T*>((size_t)workspace + alignedBufferSz);
  void* pWorkspaceCub  = reinterpret_cast<void*>((size_t)workspace + 2 * alignedBufferSz);
  // Fully qualify raft::log2: this namespace is cuvs::stats::detail, so the
  // unqualified name would bind to ::log2 from <math.h>, which is not a
  // template and cannot take the explicit <int> argument.
  auto bitsToSort = raft::log2<int>(maxLabel);
  if (!raft::isPo2(maxLabel)) ++bitsToSort;
  // we dont really need perfect sorting, should get by with some sort of
  // binning-reordering operation
  ///@todo: future work - explore "efficient" custom binning kernels vs cub sort
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortPairs(pWorkspaceCub,
                                                workspaceSize,
                                                groundTruth,
                                                outKeys,
                                                predictedLabel,
                                                outValue,
                                                nSamples,
                                                0,
                                                bitsToSort,
                                                stream));
  auto outDimM_N = int(maxLabel - minLabel + 1);
  computeCMatWAtomics<T, OutT>(outKeys, outValue, nSamples, outMat, minLabel, outDimM_N, stream);
}
/**
 * @brief Pick the contingency-matrix strategy for a given matrix width:
 * shared-memory atomics when the tile fits a block's shared memory, plain
 * global atomics while the matrix fits in L2, sort-then-atomics otherwise.
 */
template <typename OutT = int>
ContingencyMatrixImplType getImplVersion(OutT outDimN)
{
  // Max resident blocks per SM is not queryable via CUDA APIs; value for CC 7.0/3.0.
  constexpr int maxBlocksResidentPerSM = 16;
  int currDevice  = 0;
  int l2CacheSize = 0;
  RAFT_CUDA_TRY(cudaGetDevice(&currDevice));
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&l2CacheSize, cudaDevAttrL2CacheSize, currDevice));
  const auto maxSmemPerBlock = raft::getSharedMemPerBlock();
  // Largest square matrix whose per-block shared copy still allows ~8 resident
  // blocks per SM (half the cap) — higher occupancy hurts via reduced L1.
  const OutT upperLimitSmemAtomics =
    std::floor(std::sqrt(maxSmemPerBlock / (sizeof(OutT) * (maxBlocksResidentPerSM / 2))));
  const OutT upperLimitL2Atomics = std::floor(std::sqrt(l2CacheSize / sizeof(OutT)));
  if (outDimN <= upperLimitSmemAtomics) { return SMEM_ATOMICS; }
  if (outDimN <= upperLimitL2Atomics) { return GLOBAL_ATOMICS; }
  return SORT_AND_GATOMICS;
}
/**
* @brief use this to allocate output matrix size
* size of matrix = (maxLabel - minLabel + 1)^2 * sizeof(int)
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param nSamples: number of elements in input array
* @param stream: cuda stream for execution
* @param minLabel: [out] calculated min value in input array
* @param maxLabel: [out] calculated max value in input array
*/
template <typename T>
void getInputClassCardinality(
  const T* groundTruth, const int nSamples, cudaStream_t stream, T& minLabel, T& maxLabel)
{
  // Single fused min/max pass over the labels, enqueued on the given stream.
  auto labels = thrust::device_pointer_cast(groundTruth);
  const auto extrema =
    thrust::minmax_element(thrust::cuda::par.on(stream), labels, labels + nSamples);
  // Dereferencing the returned device iterators copies one element each to host.
  minLabel = *extrema.first;
  maxLabel = *extrema.second;
}
/**
* @brief Calculate workspace size for running contingency matrix calculations
* @tparam T label type
* @tparam OutT output matrix type
* @param nSamples: number of elements in input array
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param stream: cuda stream for execution
* @param minLabel: Optional, min value in input array
* @param maxLabel: Optional, max value in input array
*/
template <typename T, typename OutT = int>
size_t getContingencyMatrixWorkspaceSize(int nSamples,
                                         const T* groundTruth,
                                         cudaStream_t stream,
                                         T minLabel = std::numeric_limits<T>::max(),
                                         T maxLabel = std::numeric_limits<T>::max())
{
  // Sentinel max() means "not provided": derive the label range here.
  // (Redundant with the later call in contingencyMatrix, but kept for safety.)
  if (minLabel == std::numeric_limits<T>::max() || maxLabel == std::numeric_limits<T>::max()) {
    getInputClassCardinality<T>(groundTruth, nSamples, stream, minLabel, maxLabel);
  }
  const auto outDimN = OutT(maxLabel - minLabel + 1);
  // Only the sort-based strategy needs scratch memory.
  if (getImplVersion<OutT>(outDimN) != SORT_AND_GATOMICS) { return 0; }
  // Query cub's temp-storage requirement with null pointers (size-only call).
  void* pWorkspaceCub{};
  size_t cubWorkspaceBytes = 0;
  T* pTmpUnused{};
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortPairs(
    pWorkspaceCub, cubWorkspaceBytes, pTmpUnused, pTmpUnused, pTmpUnused, pTmpUnused, nSamples));
  // Two 256-byte-aligned staging buffers (sorted keys + sorted values) plus
  // the cub scratch area.
  const auto stagingBytes = raft::alignTo<size_t>(nSamples * sizeof(T), 256);
  return 2 * stagingBytes + cubWorkspaceBytes;
}
/**
* @brief construct contingency matrix given input ground truth and prediction
* labels. Users should call function getInputClassCardinality to find
* and allocate memory for output. Similarly workspace requirements
* should be checked using function getContingencyMatrixWorkspaceSize
* @tparam T label type
* @tparam OutT output matrix type
* @param groundTruth: device 1-d array for ground truth (num of rows)
* @param predictedLabel: device 1-d array for prediction (num of columns)
* @param nSamples: number of elements in input array
* @param outMat: output buffer for contingecy matrix
* @param stream: cuda stream for execution
* @param workspace: Optional, workspace memory allocation
* @param workspaceSize: Optional, size of workspace memory
* @param minLabel: Optional, min value in input ground truth array
* @param maxLabel: Optional, max value in input ground truth array
*/
template <typename T, typename OutT = int>
void contingencyMatrix(const T* groundTruth,
                       const T* predictedLabel,
                       int nSamples,
                       OutT* outMat,
                       cudaStream_t stream,
                       void* workspace = nullptr,
                       size_t workspaceSize = 0,
                       T minLabel = std::numeric_limits<T>::max(),
                       T maxLabel = std::numeric_limits<T>::max())
{
  // Output is always a square matrix with numRows = numColumns =
  // (maxLabel - minLabel + 1), unlike scikit-learn which compacts labels.
  // True labels are assumed monotonically increasing; labels skipped by the
  // ground truth (e.g. {0,1,2,5} instead of {0,1,2,3}) simply leave empty rows
  // ({3,4} here). Use "make_monotonic" to densify a discontinuous label range.
  // The counts also serve as co-occurrence/joint counts for NLP tasks, which
  // can then feed pointwise mutual information / mutual information.
  if (minLabel == std::numeric_limits<T>::max() || maxLabel == std::numeric_limits<T>::max()) {
    getInputClassCardinality<T>(groundTruth, nSamples, stream, minLabel, maxLabel);
  }
  const auto outDimM_N = OutT(maxLabel - minLabel + 1);
  // All strategies accumulate, so the output must start zeroed.
  RAFT_CUDA_TRY(cudaMemsetAsync(outMat, 0, sizeof(OutT) * outDimM_N * outDimM_N, stream));
  switch (getImplVersion<OutT>(outDimM_N)) {
    case SMEM_ATOMICS:
      // Few labels (~10): per-block shared-memory histogram avoids the atomic
      // serialization that throttles the pure global-atomics path.
      computeCMatWSmemAtomics<T, OutT>(
        groundTruth, predictedLabel, nSamples, outMat, minLabel, outDimM_N, stream);
      break;
    case GLOBAL_ATOMICS:
      // Matrix fits in L2: one global atomic per (truth, prediction) pair.
      computeCMatWAtomics<T, OutT>(
        groundTruth, predictedLabel, nSamples, outMat, minLabel, outDimM_N, stream);
      break;
    case SORT_AND_GATOMICS:
      // Many labels cause L2 thrashing; sorting first makes atomics land in
      // nearby memory segments.
      contingencyMatrixWSort<T, OutT>(groundTruth,
                                      predictedLabel,
                                      nSamples,
                                      outMat,
                                      minLabel,
                                      maxLabel,
                                      workspace,
                                      workspaceSize,
                                      stream);
      break;
    case IMPL_NONE: break;
  }
}
}; // namespace detail
}; // namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/trustworthiness_score.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/distance/distance.cuh>
#include <cuvs/spatial/knn/knn.cuh>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/col_wise_sort.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#define N_THREADS 512
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief Build the lookup table
* @param[out] lookup_table: Lookup table giving nearest neighbor order
* of pairwise distance calculations given sample index
* @param[in] X_ind: Sorted indexes of pairwise distance calculations of X
* @param n: Number of samples
* @param work: Number of elements to consider
*/
RAFT_KERNEL build_lookup_table(int* lookup_table, const int* X_ind, int n, int work)
{
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < work) {
    // X_ind holds, per sample row, the point indices sorted by distance;
    // invert that permutation so lookup_table[row][point] = rank of `point`
    const int row      = tid / n;
    const int rank     = tid % n;
    const int neighbor = X_ind[tid];
    lookup_table[row * n + neighbor] = rank;
  }
}
/**
 * @brief Compute the rank contribution to the trustworthiness score
* @param[out] rank: Resulting rank
* @param[out] lookup_table: Lookup table giving nearest neighbor order
* of pairwise distance calculations given sample index
* @param[in] emb_ind: Indexes of KNN on embeddings
* @param n: Number of samples
* @param n_neighbors: Number of neighbors considered by trustworthiness score
* @param work: Batch to consider (to do it at once use n * n_neighbors)
*/
template <typename knn_index_t>
RAFT_KERNEL compute_rank(double* rank,
                         const int* lookup_table,
                         const knn_index_t* emb_ind,
                         int n,
                         int n_neighbors,
                         int work)
{
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < work) {
    // rank of this embedded-space neighbor in the original-space ordering
    const int sample       = tid / n_neighbors;
    const knn_index_t nn   = emb_ind[tid];
    const int nn_rank      = lookup_table[sample * n + nn];
    // only ranks falling outside the first n_neighbors positions penalize
    const int excess = nn_rank - n_neighbors + 1;
    if (excess > 0) raft::myAtomicAdd<double>(rank, excess);
  }
}
/**
* @brief Compute a kNN and returns the indices of the nearest neighbors
* @param h Raft handle
* @param[in] input Input matrix containing the dataset
* @param n Number of samples
* @param d Number of features
* @param n_neighbors number of neighbors
* @param[out] indices KNN indexes
* @param[out] distances KNN distances
*/
template <cuvs::distance::DistanceType distance_type, typename math_t>
void run_knn(const raft::resources& h,
             math_t* input,
             int n,
             int d,
             int n_neighbors,
             int64_t* indices,
             math_t* distances)
{
  // Brute-force kNN over a single index "part" holding all n rows, queried
  // with the same rows (so each point's nearest neighbor is itself).
  std::vector<math_t*> ptrs(1);
  std::vector<int> sizes(1);
  ptrs[0]  = input;
  sizes[0] = n;
  // Fix: the value type was hard-coded to `float`, which prevented
  // instantiating this template for any other math_t; use math_t so the
  // template arguments agree with the pointer arguments.
  cuvs::spatial::knn::brute_force_knn<int64_t, math_t, int>(h,
                                                            ptrs,
                                                            sizes,
                                                            d,
                                                            input,
                                                            n,
                                                            indices,
                                                            distances,
                                                            n_neighbors,
                                                            true,
                                                            true,
                                                            nullptr,
                                                            distance_type);
}
/**
* @brief Compute the trustworthiness score
* @param h Raft handle
* @param X[in]: Data in original dimension
* @param X_embedded[in]: Data in target dimension (embedding)
* @param n: Number of samples
* @param m: Number of features in high/original dimension
* @param d: Number of features in low/embedded dimension
* @param n_neighbors Number of neighbors considered by trustworthiness score
* @param batchSize Batch size
* @return Trustworthiness score
*/
template <typename math_t, cuvs::distance::DistanceType distance_type>
double trustworthiness_score(const raft::resources& h,
                             const math_t* X,
                             math_t* X_embedded,
                             int n,
                             int m,
                             int d,
                             int n_neighbors,
                             int batchSize = 512)
{
  // NOTE(review): unqualified `resource::` relies on namespace lookup from the
  // raft headers included above — confirm it resolves to raft::resource.
  cudaStream_t stream = resource::get_cuda_stream(h);
  // n_neighbors + 1 because each query point is returned as its own nearest
  // neighbor and must be skipped when ranking
  const int KNN_ALLOC = n * (n_neighbors + 1);
  rmm::device_uvector<int64_t> emb_ind(KNN_ALLOC, stream);
  rmm::device_uvector<math_t> emb_dist(KNN_ALLOC, stream);
  // kNN in the embedded (low-dimensional) space, computed once for all batches
  run_knn<distance_type>(h, X_embedded, n, d, n_neighbors + 1, emb_ind.data(), emb_dist.data());
  // per-batch scratch: batchSize rows of distances/ranks against all n points
  const int PAIRWISE_ALLOC = batchSize * n;
  rmm::device_uvector<int> X_ind(PAIRWISE_ALLOC, stream);
  rmm::device_uvector<math_t> X_dist(PAIRWISE_ALLOC, stream);
  rmm::device_uvector<int> lookup_table(PAIRWISE_ALLOC, stream);
  // t accumulates the total rank penalty over all batches
  double t = 0.0;
  rmm::device_scalar<double> t_dbuf(stream);
  int toDo = n;
  while (toDo > 0) {
    int curBatchSize = min(toDo, batchSize);
    // Takes at most batchSize vectors at a time
    // pairwise distances in the ORIGINAL space: current batch rows vs all rows
    cuvs::distance::pairwise_distance(
      h, &X[(n - toDo) * m], X, X_dist.data(), curBatchSize, n, m, distance_type);
    // two-phase sort: first call queries the workspace size, second sorts
    size_t colSortWorkspaceSize = 0;
    bool bAllocWorkspace = false;
    raft::matrix::sort_cols_per_row(X_dist.data(),
                                    X_ind.data(),
                                    curBatchSize,
                                    n,
                                    bAllocWorkspace,
                                    nullptr,
                                    colSortWorkspaceSize,
                                    stream);
    if (bAllocWorkspace) {
      rmm::device_uvector<char> sortColsWorkspace(colSortWorkspaceSize, stream);
      raft::matrix::sort_cols_per_row(X_dist.data(),
                                      X_ind.data(),
                                      curBatchSize,
                                      n,
                                      bAllocWorkspace,
                                      sortColsWorkspace.data(),
                                      colSortWorkspaceSize,
                                      stream);
    }
    // invert the sorted index so we can look up a point's rank directly
    int work = curBatchSize * n;
    int n_blocks = raft::ceildiv(work, N_THREADS);
    build_lookup_table<<<n_blocks, N_THREADS, 0, stream>>>(
      lookup_table.data(), X_ind.data(), n, work);
    // reset the per-batch rank accumulator before summing this batch
    RAFT_CUDA_TRY(cudaMemsetAsync(t_dbuf.data(), 0, sizeof(double), stream));
    work = curBatchSize * (n_neighbors + 1);
    n_blocks = raft::ceildiv(work, N_THREADS);
    compute_rank<<<n_blocks, N_THREADS, 0, stream>>>(
      t_dbuf.data(),
      lookup_table.data(),
      &emb_ind.data()[(n - toDo) * (n_neighbors + 1)],
      n,
      n_neighbors + 1,
      work);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // value(stream) performs a stream-synchronized copy back to the host
    t += t_dbuf.value(stream);
    toDo -= curBatchSize;
  }
  // standard trustworthiness normalization of the accumulated penalty
  t = 1.0 - ((2.0 / ((n * n_neighbors) * ((2.0 * n) - (3.0 * n_neighbors) - 1.0))) * t);
  return t;
}
} // namespace detail
} // namespace stats
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/v_measure.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file v_measure.cuh
*/
#include <raft/stats/homogeneity_score.cuh>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief Function to calculate the v-measure between two clusters
*
* @param truthClusterArray: the array of truth classes of type T
* @param predClusterArray: the array of predicted classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
* @param beta: v_measure parameter
*/
template <typename T>
double v_measure(const T* truthClusterArray,
                 const T* predClusterArray,
                 int size,
                 T lowerLabelRange,
                 T upperLabelRange,
                 cudaStream_t stream,
                 double beta = 1.0)
{
  // completeness(truth, pred) == homogeneity(pred, truth), so a single
  // primitive computes both terms with swapped arguments
  const double homogeneity = raft::stats::homogeneity_score(
    truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream);
  const double completeness = raft::stats::homogeneity_score(
    predClusterArray, truthClusterArray, size, lowerLabelRange, upperLabelRange, stream);
  // weighted harmonic mean; defined as 0 when both terms vanish
  double vmeasure = 0.0;
  if (homogeneity + completeness != 0.0) {
    vmeasure =
      (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness);
  }
  return vmeasure;
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/rand_index.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file rand_index.cuh
* @todo TODO(Ganesh Venkataramana):
* <pre>
* The below rand_index calculation implementation is a Brute force one that uses
(nElements*nElements) threads (2 dimensional grids and blocks)
* For small datasets, this will suffice; but for larger ones, work done by the threads increase
dramatically.
* A more mathematically intensive implementation that uses half the above threads can be done,
which will prove to be more efficient for larger datasets
* the idea is as follows:
* instead of 2D block and grid configuration with a total of (nElements*nElements) threads (where
each (i,j) through these threads represent an ordered pair selection of 2 data points), a 1D block
and grid configuration with a total of (nElements*(nElements))/2 threads (each thread index
represents an element part of the set of unordered pairwise selections from the dataset (nChoose2))
* In this setup, one has to generate a one-to-one mapping between this 1D thread index (for each
kernel) and the unordered pair of chosen datapoints.
* More specifically, thread0-> {dataPoint1, dataPoint0}, thread1-> {dataPoint2, dataPoint0},
thread2-> {dataPoint2, dataPoint1} ... thread((nElements*(nElements))/2 - 1)->
{dataPoint(nElements-1),dataPoint(nElements-2)}
* say ,
* threadNum: thread index | threadNum = threadIdx.x + BlockIdx.x*BlockDim.x,
* i : index of dataPoint i
* j : index of dataPoint j
* then the mapping is as follows:
* i = ceil((-1 + sqrt(1 + 8*(1 + threadNum)))/2) = floor((1 + sqrt(1 + 8*threadNum))/2)
* j = threadNum - i(i-1)/2
 * after obtaining the pair of datapoints, calculation of rand index is the same as done in
this implementation
* Caveat: since the kernel implementation involves use of emulated sqrt() operations:
* the number of instructions executed per kernel is ~40-50 times
* as the O(nElements*nElements) increase beyond the floating point limit, floating point
inaccuracies occur, and hence the above floor(...) != ceil(...)
* </pre>
*/
#pragma once
#include <cub/cub.cuh>
#include <math.h>
#include <raft/core/interruptible.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief kernel to calculate the values of a and b
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points
* @param a: number of pairs of points that both the clusters have classified the same
* @param b: number of pairs of points that both the clusters have classified differently
*/
template <typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y>
RAFT_KERNEL computeTheNumerator(
  const T* firstClusterArray, const T* secondClusterArray, uint64_t size, uint64_t* a, uint64_t* b)
{
  // calculating the indices of pairs of datapoints compared by the current thread
  uint64_t j = threadIdx.x + blockIdx.x * blockDim.x;
  uint64_t i = threadIdx.y + blockIdx.y * blockDim.y;
  // thread-local variables to count a and b
  uint64_t myA = 0, myB = 0;
  // j < i keeps only the strict lower triangle, so each unordered pair is
  // examined by exactly one thread
  if (i < size && j < size && j < i) {
    // checking if the pair have been classified the same by both the clusters
    if (firstClusterArray[i] == firstClusterArray[j] &&
        secondClusterArray[i] == secondClusterArray[j]) {
      ++myA;
    }
    // checking if the pair have been classified differently by both the clusters
    else if (firstClusterArray[i] != firstClusterArray[j] &&
             secondClusterArray[i] != secondClusterArray[j]) {
      ++myB;
    }
  }
  // specialize blockReduce for a 2D block of 1024 threads of type uint64_t
  typedef cub::BlockReduce<uint64_t, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
    BlockReduce;
  // Allocate shared memory for blockReduce
  __shared__ typename BlockReduce::TempStorage temp_storage;
  // summing up thread-local counts specific to a block; temp_storage is
  // reused for the second reduction, hence the barrier between Sum() calls
  myA = BlockReduce(temp_storage).Sum(myA);
  __syncthreads();
  myB = BlockReduce(temp_storage).Sum(myB);
  __syncthreads();
  // executed once per block; only thread (0,0) holds the valid block totals
  if (threadIdx.x == 0 && threadIdx.y == 0) {
    raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)a, myA);
    raft::myAtomicAdd<unsigned long long int>((unsigned long long int*)b, myB);
  }
}
/**
* @brief Function to calculate RandIndex
* <a href="https://en.wikipedia.org/wiki/Rand_index">more info on rand index</a>
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points of type uint64_t
* @param stream: the cudaStream object
*/
template <typename T>
double compute_rand_index(const T* firstClusterArray,
                          const T* secondClusterArray,
                          uint64_t size,
                          cudaStream_t stream)
{
  // the index is undefined for fewer than two samples
  ASSERT(size >= 2, "Rand Index for size less than 2 not defined!");
  // device counters: [0] = pairs classified alike by both clusterings (a),
  // [1] = pairs classified differently by both (b); zero-initialized
  rmm::device_uvector<uint64_t> ab_dev(2, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(ab_dev.data(), 0, 2 * sizeof(uint64_t), stream));
  // one thread per ordered (i, j) pair; the kernel keeps only j < i
  constexpr int TPB_X = 16, TPB_Y = 16;
  const dim3 block(TPB_X, TPB_Y);
  const dim3 grid(raft::ceildiv<int>(size, block.x), raft::ceildiv<int>(size, block.y));
  computeTheNumerator<T, TPB_X, TPB_Y><<<grid, block, 0, stream>>>(
    firstClusterArray, secondClusterArray, size, ab_dev.data(), ab_dev.data() + 1);
  // bring the two counters back and wait for the kernel + copy to finish
  uint64_t ab_host[2] = {0};
  raft::update_host(ab_host, ab_dev.data(), 2, stream);
  raft::interruptible::synchronize(stream);
  RAFT_CUDA_TRY(cudaGetLastError());
  // rand index = (a + b) / nC2
  const uint64_t n_choose_2 = size * (size - 1) / 2;
  return static_cast<double>(ab_host[0] + ab_host[1]) / static_cast<double>(n_choose_2);
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/adjusted_rand_index.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file adjusted_rand_index.cuh
* @brief The adjusted Rand index is the corrected-for-chance version of the Rand index.
* Such a correction for chance establishes a baseline by using the expected similarity
* of all pair-wise comparisons between clusterings specified by a random model.
*/
#pragma once
#include "contingencyMatrix.cuh"
#include <cub/cub.cuh>
#include <math.h>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/stats/histogram.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief Lambda to calculate the number of unordered pairs in a given input
*
* @tparam Type: Data type of the input
* @param in: the input to the functional mapping
* @param i: the indexing(not used in this case)
*/
template <typename Type>
struct nCTwo {
  // n-choose-2 = in * (in - 1) / 2; the even factor is halved (via shift)
  // before multiplying so the intermediate product stays small
  HDI Type operator()(Type in, int i = 0)
  {
    if (in % 2) { return in * ((in - 1) >> 1); }
    return (in - 1) * (in >> 1);
  }
};
/** Maps a label value to a zero-based bin id by shifting it by the smallest label. */
template <typename DataT, typename IdxT>
struct Binner {
  Binner(DataT minL) : origin_(minL) {}
  // row/col are unused; the bin depends only on the value itself
  DI int operator()(DataT val, IdxT row, IdxT col) { return static_cast<int>(val - origin_); }

 private:
  DataT origin_;  // smallest label; bin 0 corresponds to this value
};  // struct Binner
/**
* @brief Function to count the number of unique elements in the input array
*
* @tparam T data-type for input arrays
*
* @param[in] arr input array [on device] [len = size]
* @param[in] size the size of the input array
* @param[out] minLabel the lower bound of the range of labels
* @param[out] maxLabel the upper bound of the range of labels
* @param[in] stream cuda stream
*
* @return the number of unique elements in the array
*/
template <typename T>
int countUnique(const T* arr, int size, T& minLabel, T& maxLabel, cudaStream_t stream)
{
  // locate the extreme labels on the device; dereferencing the returned
  // device iterators copies the values to the host
  auto ptr = thrust::device_pointer_cast(arr);
  auto minmax = thrust::minmax_element(thrust::cuda::par.on(stream), ptr, ptr + size);
  minLabel = *minmax.first;
  maxLabel = *minmax.second;
  // one histogram bin per label value in [minLabel, maxLabel]
  auto totalLabels = int(maxLabel - minLabel + 1);
  rmm::device_uvector<int> labelCounts(totalLabels, stream);
  rmm::device_scalar<int> nUniq(stream);
  raft::stats::histogram<T, int>(
    raft::stats::HistTypeAuto,
    labelCounts.data(),
    totalLabels,
    arr,
    size,
    1,
    stream,
    [minLabel] __device__(T val, int row, int col) { return int(val - minLabel); });
  // number of unique labels == number of non-empty bins
  raft::linalg::mapThenSumReduce<int>(
    nUniq.data(),
    totalLabels,
    [] __device__(const T& val) { return val != 0; },
    stream,
    labelCounts.data());
  // value(stream) performs a stream-synchronized copy back to the host
  auto numUniques = nUniq.value(stream);
  return numUniques;
}
/**
* @brief Function to calculate Adjusted RandIndex as described
* <a href="https://en.wikipedia.org/wiki/Rand_index">here</a>
* @tparam T data-type for input label arrays
* @tparam MathT integral data-type used for computing n-choose-r
* @param firstClusterArray: the array of classes
* @param secondClusterArray: the array of classes
* @param size: the size of the data points of type int
* @param stream: the cudaStream object
*/
template <typename T, typename MathT = int>
double compute_adjusted_rand_index(const T* firstClusterArray,
                                   const T* secondClusterArray,
                                   int size,
                                   cudaStream_t stream)
{
  ASSERT(size >= 2, "Rand Index for size less than 2 not defined!");
  // label ranges of both clusterings; the contingency matrix spans their union
  T minFirst, maxFirst, minSecond, maxSecond;
  auto nUniqFirst = countUnique(firstClusterArray, size, minFirst, maxFirst, stream);
  auto nUniqSecond = countUnique(secondClusterArray, size, minSecond, maxSecond, stream);
  auto lowerLabelRange = std::min(minFirst, minSecond);
  auto upperLabelRange = std::max(maxFirst, maxSecond);
  auto nClasses = upperLabelRange - lowerLabelRange + 1;
  // degenerate case of single cluster or clusters each with just one element
  if (nUniqFirst == nUniqSecond) {
    if (nUniqFirst == 1 || nUniqFirst == size) return 1.0;
  }
  auto nUniqClasses = MathT(nClasses);
  // contingency matrix N, with N[i][j] = |cluster_i(first) ∩ cluster_j(second)|
  rmm::device_uvector<MathT> dContingencyMatrix(nUniqClasses * nUniqClasses, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    dContingencyMatrix.data(), 0, nUniqClasses * nUniqClasses * sizeof(MathT), stream));
  auto workspaceSz = getContingencyMatrixWorkspaceSize<T, MathT>(
    size, firstClusterArray, stream, lowerLabelRange, upperLabelRange);
  rmm::device_uvector<char> workspaceBuff(workspaceSz, stream);
  contingencyMatrix<T, MathT>(firstClusterArray,
                              secondClusterArray,
                              size,
                              dContingencyMatrix.data(),
                              stream,
                              workspaceBuff.data(),
                              workspaceSz,
                              lowerLabelRange,
                              upperLabelRange);
  // a = row sums, b = column sums of the contingency matrix
  rmm::device_uvector<MathT> a(nUniqClasses, stream);
  rmm::device_uvector<MathT> b(nUniqClasses, stream);
  rmm::device_scalar<MathT> d_aCTwoSum(stream);
  rmm::device_scalar<MathT> d_bCTwoSum(stream);
  rmm::device_scalar<MathT> d_nChooseTwoSum(stream);
  MathT h_aCTwoSum, h_bCTwoSum, h_nChooseTwoSum;
  RAFT_CUDA_TRY(cudaMemsetAsync(a.data(), 0, nUniqClasses * sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(b.data(), 0, nUniqClasses * sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_aCTwoSum.data(), 0, sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_bCTwoSum.data(), 0, sizeof(MathT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_nChooseTwoSum.data(), 0, sizeof(MathT), stream));
  // calculating the sum of NijC2
  raft::linalg::mapThenSumReduce<MathT, nCTwo<MathT>>(d_nChooseTwoSum.data(),
                                                      nUniqClasses * nUniqClasses,
                                                      nCTwo<MathT>(),
                                                      stream,
                                                      dContingencyMatrix.data(),
                                                      dContingencyMatrix.data());
  // calculating the row-wise sums
  raft::linalg::reduce<MathT, MathT>(
    a.data(), dContingencyMatrix.data(), nUniqClasses, nUniqClasses, 0, true, true, stream);
  // calculating the column-wise sums
  raft::linalg::reduce<MathT, MathT>(
    b.data(), dContingencyMatrix.data(), nUniqClasses, nUniqClasses, 0, true, false, stream);
  // calculating the sum of number of unordered pairs for every element in a
  raft::linalg::mapThenSumReduce<MathT, nCTwo<MathT>>(
    d_aCTwoSum.data(), nUniqClasses, nCTwo<MathT>(), stream, a.data(), a.data());
  // calculating the sum of number of unordered pairs for every element of b
  raft::linalg::mapThenSumReduce<MathT, nCTwo<MathT>>(
    d_bCTwoSum.data(), nUniqClasses, nCTwo<MathT>(), stream, b.data(), b.data());
  // updating in the host memory
  raft::update_host(&h_nChooseTwoSum, d_nChooseTwoSum.data(), 1, stream);
  raft::update_host(&h_aCTwoSum, d_aCTwoSum.data(), 1, stream);
  raft::update_host(&h_bCTwoSum, d_bCTwoSum.data(), 1, stream);
  // FIX: update_host enqueues asynchronous copies; without waiting for the
  // stream the host-side sums below could be read before they are written.
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  // calculating the ARI = (index - expected) / (max - expected)
  auto nChooseTwo = double(size) * double(size - 1) / 2.0;
  auto expectedIndex = double(h_aCTwoSum) * double(h_bCTwoSum) / double(nChooseTwo);
  auto maxIndex = (double(h_bCTwoSum) + double(h_aCTwoSum)) / 2.0;
  auto index = double(h_nChooseTwoSum);
  // a zero denominator means chance-level agreement; define ARI as 0 there
  if (maxIndex - expectedIndex != 0)
    return (index - expectedIndex) / (maxIndex - expectedIndex);
  else
    return 0;
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/kl_divergence.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file kl_divergence.cuh
* @brief The KL divergence tells us how well the probability distribution Q AKA candidatePDF
* approximates the probability distribution P AKA modelPDF.
*/
#pragma once
#include <math.h>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief the KL Diverence mapping function
*
* @tparam Type: Data type of the input
* @param modelPDF: the model probability density function of type DataT
* @param candidatePDF: the candidate probability density function of type DataT
*/
template <typename Type>
struct KLDOp {
  // Per-element KL contribution p * log(p / q), with the 0 * log(0) := 0
  // convention when the model probability is zero.
  HDI Type operator()(Type modelPDF, Type candidatePDF)
  {
    return (modelPDF == 0.0) ? Type(0) : modelPDF * (log(modelPDF) - log(candidatePDF));
  }
};
/**
* @brief Function to calculate KL Divergence
* <a href="https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence">more info on KL
* Divergence</a>
*
* @tparam DataT: Data type of the input array
* @param modelPDF: the model array of probability density functions of type DataT
* @param candidatePDF: the candidate array of probability density functions of type DataT
* @param size: the size of the data points of type int
* @param stream: the cudaStream object
*/
template <typename DataT>
DataT kl_divergence(const DataT* modelPDF, const DataT* candidatePDF, int size, cudaStream_t stream)
{
  // accumulate sum_i p_i * (log p_i - log q_i) into a single device scalar
  rmm::device_scalar<DataT> divergence_d(stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(divergence_d.data(), 0, sizeof(DataT), stream));
  raft::linalg::mapThenSumReduce<DataT, KLDOp<DataT>, size_t, 256, const DataT*>(
    divergence_d.data(),
    static_cast<size_t>(size),
    KLDOp<DataT>(),
    stream,
    modelPDF,
    candidatePDF);
  // copy the result back and wait for the stream before returning it
  DataT divergence_h;
  raft::update_host(&divergence_h, divergence_d.data(), 1, stream);
  raft::interruptible::synchronize(stream);
  return divergence_h;
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/histogram.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/stats/stats_types.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/seive.hpp>
#include <raft/util/vectorized.cuh>
#include <stdint.h>
// This file is a shameless amalgamation of independent works done by
// Lars Nyland and Andy Adinets
///@todo: add cub's histogram as another option
namespace cuvs {
namespace stats {
namespace detail {
/** Default mapper which just returns the value of the data itself */
template <typename DataT, typename IdxT>
struct IdentityBinner {
  // bin id is the sample value itself; row/col are ignored
  DI int operator()(DataT val, IdxT row, IdxT col) { return int(val); }
};
static const int ThreadsPerBlock = 256;
template <typename IdxT, int VecLen>
dim3 computeGridDim(IdxT nrows, IdxT ncols, const void* kernel)
{
  // cap the x-dimension at the kernel's full-device occupancy
  int blocksPerSm;
  RAFT_CUDA_TRY(
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSm, kernel, ThreadsPerBlock, 0));
  const int maxBlks = blocksPerSm * raft::getMultiProcessorCount();
  // each thread consumes VecLen rows per iteration, shrinking the work count
  const auto workItems = VecLen ? nrows / VecLen : nrows;
  const int nblksx = std::min(raft::ceildiv<int>(workItems, ThreadsPerBlock), maxBlks);
  // one y-block per histogram (column)
  return dim3(nblksx, ncols);
}
/*
 * Per-column histogram walk shared by the gmem and smem kernels: each thread
 * strides over column `col` with VecLen-wide vectorized loads and passes every
 * (binId, row) to the atomic-update functor `op`.
 */
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen, typename CoreOp>
DI void histCoreOp(const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner, CoreOp op, IdxT col)
{
  // columns are contiguous: column `col` starts at col * nrows
  IdxT offset = col * nrows;
  auto bdim = IdxT(blockDim.x);
  IdxT tid = threadIdx.x + bdim * blockIdx.x;
  tid *= VecLen;
  IdxT stride = bdim * gridDim.x * VecLen;
  // round the trip count up to a stride multiple so all threads iterate the
  // same number of times; out-of-range rows must be rejected by `op`
  int nCeil = raft::alignTo<int>(nrows, stride);
  typedef raft::TxN_t<DataT, VecLen> VecType;
  VecType a;
  for (auto i = tid; i < nCeil; i += stride) {
    // tail iterations skip the load, leaving `a` stale; `op`'s row-bound
    // check makes those lanes no-ops
    if (i < nrows) { a.load(data, offset + i); }
#pragma unroll
    for (int j = 0; j < VecLen; ++j) {
      int binId = binner(a.val.data[j], i + j, col);
      op(binId, i + j, col);
    }
  }
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
RAFT_KERNEL gmemHistKernel(int* bins, const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner)
{
  // histogram update straight into global memory, one histogram per column
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    // rows past the end come from the padded tail loop in histCoreOp
    if (row >= nrows) return;
    auto binOffset = col * nbins;
#if __CUDA_ARCH__ < 700
    raft::myAtomicAdd(bins + binOffset + binId, 1);
#else
    // Volta+: warp-aggregated atomic — lanes targeting the same bin are
    // grouped via __match_any_sync and only the leader issues one atomic
    // carrying popc(mask) counts
    auto amask = __activemask();
    auto mask = __match_any_sync(amask, binId);
    auto leader = __ffs(mask) - 1;
    if (raft::laneId() == leader) { raft::myAtomicAdd(bins + binOffset + binId, __popc(mask)); }
#endif // __CUDA_ARCH__
  };
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, blockIdx.y);
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
void gmemHist(int* bins,
              IdxT nbins,
              const DataT* data,
              IdxT nrows,
              IdxT ncols,
              BinnerOp binner,
              cudaStream_t stream)
{
  // one y-block per column; x-dimension sized by kernel occupancy
  auto kernel = gmemHistKernel<DataT, BinnerOp, IdxT, VecLen>;
  const dim3 grid = computeGridDim<IdxT, VecLen>(nrows, ncols, (const void*)kernel);
  kernel<<<grid, ThreadsPerBlock, 0, stream>>>(bins, data, nrows, nbins, binner);
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen, bool UseMatchAny>
RAFT_KERNEL smemHistKernel(int* bins, const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner)
{
  // per-block histogram staged in (dynamic) shared memory: nbins counters
  extern __shared__ unsigned sbins[];
  // shared memory is uninitialized — zero all counters before counting
  for (auto i = threadIdx.x; i < nbins; i += blockDim.x) {
    sbins[i] = 0;
  }
  __syncthreads();
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    // rows past the end come from the padded tail loop in histCoreOp
    if (row >= nrows) return;
#if __CUDA_ARCH__ < 700
    raft::myAtomicAdd<unsigned int>(sbins + binId, 1);
#else
    if (UseMatchAny) {
      // warp-aggregated update: one atomic per distinct bin per warp
      auto amask = __activemask();
      auto mask = __match_any_sync(amask, binId);
      auto leader = __ffs(mask) - 1;
      if (raft::laneId() == leader) {
        raft::myAtomicAdd<unsigned int>(sbins + binId, __popc(mask));
      }
    } else {
      raft::myAtomicAdd<unsigned int>(sbins + binId, 1);
    }
#endif // __CUDA_ARCH__
  };
  IdxT col = blockIdx.y;
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, col);
  // wait for all shared-memory updates before flushing to global memory
  __syncthreads();
  auto binOffset = col * nbins;
  // flush only non-empty bins; one global atomic per bin per block
  for (auto i = threadIdx.x; i < nbins; i += blockDim.x) {
    auto val = sbins[i];
    if (val > 0) { raft::myAtomicAdd<unsigned int>((unsigned int*)bins + binOffset + i, val); }
  }
}
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen, bool UseMatchAny>
void smemHist(int* bins,
              IdxT nbins,
              const DataT* data,
              IdxT nrows,
              IdxT ncols,
              BinnerOp binner,
              cudaStream_t stream)
{
  // one shared-memory counter (unsigned) per bin
  const size_t smemSize = nbins * sizeof(unsigned);
  auto kernel = smemHistKernel<DataT, BinnerOp, IdxT, VecLen, UseMatchAny>;
  const dim3 grid = computeGridDim<IdxT, VecLen>(nrows, ncols, (const void*)kernel);
  kernel<<<grid, ThreadsPerBlock, smemSize, stream>>>(bins, data, nrows, nbins, binner);
}
/** Compile-time geometry of BIN_BITS-wide counters packed into 32-bit words */
template <unsigned _BIN_BITS>
struct BitsInfo {
  static unsigned const BIN_BITS = _BIN_BITS;  // bits per packed counter field
  static unsigned const WORD_BITS = sizeof(unsigned) * 8;  // bits per word (32)
  static unsigned const WORD_BINS = WORD_BITS / BIN_BITS;  // counter fields per word
  static unsigned const BIN_MASK = (1 << BIN_BITS) - 1;  // max value a field can hold
};
// Increment one packed BIN_BITS-wide counter inside the shared-memory word
// array `sbins`; on sub-field overflow, spill the accumulated count to the
// global `bins` and repair the carry that leaked into neighbouring sub-fields.
template <unsigned BIN_BITS>
DI void incrementBin(unsigned* sbins, int* bins, int nbins, int binId)
{
  typedef BitsInfo<BIN_BITS> Bits;
  auto iword = binId / Bits::WORD_BINS;  // which 32-bit word holds this bin
  auto ibin = binId % Bits::WORD_BINS;   // which sub-field within that word
  auto sh = ibin * Bits::BIN_BITS;
  auto old_word = atomicAdd(sbins + iword, unsigned(1 << sh));
  // NOTE(review): new_word is this thread's local view (old value + own
  // increment); concurrent adds by other threads are not reflected — the
  // overflow accounting below assumes that view is sufficient. Confirm.
  auto new_word = old_word + unsigned(1 << sh);
  // sub-field did not wrap to zero -> no overflow, done
  if ((new_word >> sh & Bits::BIN_MASK) != 0) return;
  // overflow: this sub-field wrapped; credit the full 2^BIN_BITS counts it
  // represented to the global bin
  raft::myAtomicAdd<unsigned int>((unsigned int*)bins + binId, Bits::BIN_MASK + 1);
  // The wrap carried +1 into the next sub-field(s); walk forward undoing it.
  for (int dbin = 1; ibin + dbin < Bits::WORD_BINS && binId + dbin < nbins; ++dbin) {
    auto sh1 = (ibin + dbin) * Bits::BIN_BITS;
    if ((new_word >> sh1 & Bits::BIN_MASK) == 0) {
      // overflow: the carry wrapped this sub-field too (it held BIN_MASK);
      // flush those counts and let the carry continue to the next sub-field
      raft::myAtomicAdd<unsigned int>((unsigned int*)bins + binId + dbin, Bits::BIN_MASK);
    } else {
      // correction: remove the spurious +1 the carry deposited here, then stop
      raft::myAtomicAdd(bins + binId + dbin, -1);
      break;
    }
  }
}
// 1-bit specialization: each bin is a single bit toggled with atomicXor.
// When the bit falls from 1 to 0, two increments have accumulated (the one
// that set it plus this one), so 2 is flushed to the global bin.
template <>
DI void incrementBin<1>(unsigned* sbins, int* bins, int nbins, int binId)
{
  typedef BitsInfo<1> Bits;
  auto iword = binId / Bits::WORD_BITS;
  auto sh = binId % Bits::WORD_BITS;
  auto old_word = atomicXor(sbins + iword, unsigned(1 << sh));
  if ((old_word >> sh & 1) != 0) raft::myAtomicAdd(bins + binId, 2);
}
// Packed shared-memory histogram kernel: counters of BIN_BITS each are packed
// into 32-bit words (see BitsInfo / incrementBin), trading counter range for
// the ability to fit more bins in shared memory.
// Launch contract: dynamic shared mem = ceil(nbins / WORD_BINS) words;
// blockIdx.y selects the column.
template <typename DataT, typename BinnerOp, typename IdxT, int BIN_BITS, int VecLen>
RAFT_KERNEL smemBitsHistKernel(
  int* bins, const DataT* data, IdxT nrows, IdxT nbins, BinnerOp binner)
{
  extern __shared__ unsigned sbins[];
  typedef BitsInfo<BIN_BITS> Bits;
  auto nwords = raft::ceildiv<int>(nbins, Bits::WORD_BINS);
  // zero the packed words (shared memory is uninitialized)
  for (auto j = threadIdx.x; j < nwords; j += blockDim.x) {
    sbins[j] = 0;
  }
  __syncthreads();
  IdxT col = blockIdx.y;
  IdxT binOffset = col * nbins;
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    if (row >= nrows) return;
    // incrementBin itself spills overflow directly to the global bins
    incrementBin<Bits::BIN_BITS>(sbins, bins + binOffset, (int)nbins, binId);
  };
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, col);
  __syncthreads();
  // flush the residual packed counts into the global output
  for (auto j = threadIdx.x; j < (int)nbins; j += blockDim.x) {
    auto shift = j % Bits::WORD_BINS * Bits::BIN_BITS;
    int count = sbins[j / Bits::WORD_BINS] >> shift & Bits::BIN_MASK;
    if (count > 0) raft::myAtomicAdd(bins + binOffset + j, count);
  }
}
/**
 * @brief Host launcher for the packed ("bits") shared-memory histogram kernel.
 *
 * Shared memory holds nbins counters of BIN_BITS bits each, packed into
 * 32-bit words.
 */
template <typename DataT, typename BinnerOp, typename IdxT, int BIN_BITS, int VecLen>
void smemBitsHist(int* bins,
                  IdxT nbins,
                  const DataT* data,
                  IdxT nrows,
                  IdxT ncols,
                  BinnerOp binner,
                  cudaStream_t stream)
{
  using Bits = BitsInfo<BIN_BITS>;
  auto kernel = smemBitsHistKernel<DataT, BinnerOp, IdxT, Bits::BIN_BITS, VecLen>;
  auto grid = computeGridDim<IdxT, VecLen>(nrows, ncols, (const void*)kernel);
  // one 32-bit word per WORD_BINS counters, rounded up
  const size_t shmem = raft::ceildiv<size_t>(nbins, Bits::WORD_BINS) * sizeof(int);
  kernel<<<grid, ThreadsPerBlock, shmem, stream>>>(bins, data, nrows, nbins, binner);
}
#define INVALID_KEY -1
// Reset every hash-table slot to an empty (key, count) pair, strided across
// the threads of the block. No barrier here: callers synchronize.
DI void clearHashTable(int2* ht, int hashSize)
{
  for (int slot = static_cast<int>(threadIdx.x); slot < hashSize; slot += blockDim.x) {
    ht[slot] = {INVALID_KEY, 0};
  }
}
// Locate (or claim) the hash-table slot for `binId` using linear probing with
// atomicCAS on the key field. Returns the slot index, or INVALID_KEY (-1) if
// `threshold` consecutive occupied slots were probed without success (table
// effectively full for this key).
DI int findEntry(int2* ht, int hashSize, int binId, int threshold)
{
  int idx = binId % hashSize;
  int t;
  int count = 0;
  // CAS either claims an empty slot (publishes binId) or observes the slot's
  // current key; loop until we own/find binId or give up after `threshold`.
  while ((t = atomicCAS(&(ht[idx].x), INVALID_KEY, binId)) != INVALID_KEY && t != binId) {
    ++count;
    if (count >= threshold) {
      idx = INVALID_KEY;
      break;
    }
    ++idx;
    if (idx >= hashSize) { idx = 0; }  // wrap-around linear probe
  }
  return idx;
}
// Drain all occupied hash-table slots into column `col` of the global bins,
// then reset each slot to empty. Strided across block threads; callers place
// the necessary barriers around this.
DI void flushHashTable(int2* ht, int hashSize, int* bins, int nbins, int col)
{
  const int base = col * nbins;
  for (int slot = static_cast<int>(threadIdx.x); slot < hashSize; slot += blockDim.x) {
    const int key = ht[slot].x;
    const int count = ht[slot].y;
    if (key != INVALID_KEY && count > 0) { raft::myAtomicAdd(bins + base + key, count); }
    ht[slot] = {INVALID_KEY, 0};
  }
}
#undef INVALID_KEY
///@todo: honor VecLen template param
// Hash-table-based shared-memory histogram for bin counts too large for
// per-bin counters. Shared memory layout: hashSize int2 entries (key, count)
// followed by one int used as a block-wide "needFlush" flag.
//
// IMPORTANT: the per-element lambda contains __syncthreads(), so histCoreOp
// must invoke it uniformly for every thread in the block (threads past nrows
// included — they skip only the insert, not the barriers).
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
RAFT_KERNEL smemHashHistKernel(int* bins,
                               const DataT* data,
                               IdxT nrows,
                               IdxT nbins,
                               BinnerOp binner,
                               int hashSize,
                               int threshold)
{
  extern __shared__ int2 ht[];
  int* needFlush = (int*)&(ht[hashSize]);  // flag lives just past the table
  if (threadIdx.x == 0) { needFlush[0] = 0; }
  clearHashTable(ht, hashSize);
  __syncthreads();
  auto op = [=] __device__(int binId, IdxT row, IdxT col) {
    bool iNeedFlush = false;
    if (row < nrows) {
      int hidx = findEntry(ht, hashSize, binId, threshold);
      if (hidx >= 0) {
        raft::myAtomicAdd(&(ht[hidx].y), 1);
      } else {
        // probing failed: request a block-wide flush and retry afterwards
        needFlush[0] = 1;
        iNeedFlush = true;
      }
    }
    __syncthreads();
    if (needFlush[0]) {
      flushHashTable(ht, hashSize, bins, nbins, col);
      __syncthreads();
      if (threadIdx.x == 0) { needFlush[0] = 0; }
      __syncthreads();
    }
    if (iNeedFlush) {
      // retry into the freshly emptied table
      int hidx = findEntry(ht, hashSize, binId, threshold);
      // all threads are bound to get one valid entry as all threads in this
      // block will make forward progress due to the __syncthreads call in the
      // subsequent iteration
      raft::myAtomicAdd(&(ht[hidx].y), 1);
    }
  };
  IdxT col = blockIdx.y;
  histCoreOp<DataT, BinnerOp, IdxT, VecLen>(data, nrows, nbins, binner, op, col);
  __syncthreads();
  // final drain of whatever is still resident in the table
  flushHashTable(ht, hashSize, bins, nbins, col);
}
/**
 * @brief Pick the largest prime number of hash-table entries that fits in the
 * device's per-block shared memory.
 *
 * A prime table size spreads `binId % hashSize` probes evenly. The kernel
 * (smemHashHistKernel) places one extra int (the "needFlush" flag) after the
 * table, so that int is reserved here before sizing the table — otherwise a
 * prime exactly at the shared-memory limit would make the launch request
 * sizeof(int) more dynamic shared memory than is available.
 *
 * @return number of int2 entries for the shared-memory hash table (>= 1)
 */
inline int computeHashTableSize()
{
  // we shouldn't have this much of shared memory available anytime soon!
  static const unsigned maxBinsEverPossible = 256 * 1024;
  static raft::common::Seive primes(maxBinsEverPossible);
  size_t smem = raft::getSharedMemPerBlock();
  // reserve the trailing "needFlush" int the kernel appends after the table;
  // divide-by-2 because each hash table entry stores 2 elements: idx and count
  auto binsPossible = (smem - sizeof(int)) / sizeof(unsigned) / 2;
  for (; binsPossible > 1; --binsPossible) {
    if (primes.isPrime(binsPossible)) return (int)binsPossible;
  }
  return 1;  // should not happen!
}
/**
 * @brief Host launcher for the hash-table shared-memory histogram, used when
 * nbins is too large even for packed per-bin counters.
 *
 * @note the kernel is currently instantiated with VecLen = 1 regardless of
 * the VecLen template parameter (see the @todo on the kernel).
 */
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
void smemHashHist(int* bins,
                  IdxT nbins,
                  const DataT* data,
                  IdxT nrows,
                  IdxT ncols,
                  BinnerOp binner,
                  cudaStream_t stream)
{
  // number of failed probes after which a thread requests a table flush
  constexpr int kFlushThreshold = 10;
  auto kernel = smemHashHistKernel<DataT, BinnerOp, IdxT, 1>;
  auto grid = computeGridDim<IdxT, 1>(nrows, ncols, (const void*)kernel);
  const int tableSize = computeHashTableSize();
  // table entries plus one trailing int used as the "needFlush" flag
  const size_t shmem = tableSize * sizeof(int2) + sizeof(int);
  kernel<<<grid, ThreadsPerBlock, shmem, stream>>>(
    bins, data, nrows, nbins, binner, tableSize, kFlushThreshold);
}
/**
 * @brief Zero the output bins and dispatch to the histogram implementation
 * selected by `type`, with a fixed vectorized load width `VecLen`.
 *
 * @param type  histogram implementation variant
 * @param bins  output counts, length ncols * nbins (zeroed here)
 * @param nbins number of bins per column
 * @param data  input values, length ncols * nrows
 * @param nrows rows per column (batch)
 * @param ncols number of columns (batches)
 * @param stream CUDA stream all work is enqueued on
 * @param binner maps a data value to its bin index
 */
template <typename DataT, typename BinnerOp, typename IdxT, int VecLen>
void histogramVecLen(HistType type,
                     int* bins,
                     IdxT nbins,
                     const DataT* data,
                     IdxT nrows,
                     IdxT ncols,
                     cudaStream_t stream,
                     BinnerOp binner)
{
  // Promote to size_t before multiplying: ncols * nbins evaluated in a narrow
  // IdxT (e.g. int) could overflow before the implicit size_t conversion.
  RAFT_CUDA_TRY(cudaMemsetAsync(
    bins, 0, sizeof(int) * static_cast<size_t>(ncols) * static_cast<size_t>(nbins), stream));
  switch (type) {
    case HistTypeGmem:
      gmemHist<DataT, BinnerOp, IdxT, VecLen>(bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmem:
      smemHist<DataT, BinnerOp, IdxT, VecLen, false>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemMatchAny:
      smemHist<DataT, BinnerOp, IdxT, VecLen, true>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits16:
      smemBitsHist<DataT, BinnerOp, IdxT, 16, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits8:
      smemBitsHist<DataT, BinnerOp, IdxT, 8, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits4:
      smemBitsHist<DataT, BinnerOp, IdxT, 4, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits2:
      smemBitsHist<DataT, BinnerOp, IdxT, 2, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemBits1:
      smemBitsHist<DataT, BinnerOp, IdxT, 1, VecLen>(
        bins, nbins, data, nrows, ncols, binner, stream);
      break;
    case HistTypeSmemHash:
      smemHashHist<DataT, BinnerOp, IdxT, VecLen>(bins, nbins, data, nrows, ncols, binner, stream);
      break;
    default: ASSERT(false, "histogram: Invalid type passed '%d'!", type);
  };
  // surface launch-configuration errors from the selected kernel
  RAFT_CUDA_TRY(cudaGetLastError());
}
/**
 * @brief Select the widest vectorized load (16/8/4/2/1 bytes) that divides
 * both sizeof(DataT) and a column's byte length, then forward to
 * histogramVecLen with that width.
 */
template <typename DataT, typename BinnerOp, typename IdxT>
void histogramImpl(HistType type,
                   int* bins,
                   IdxT nbins,
                   const DataT* data,
                   IdxT nrows,
                   IdxT ncols,
                   cudaStream_t stream,
                   BinnerOp binner)
{
  if (nrows <= 0) return;  // nothing to histogram
  const size_t colBytes = nrows * sizeof(DataT);
  if (16 % sizeof(DataT) == 0 && colBytes % 16 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 16 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else if (8 % sizeof(DataT) == 0 && colBytes % 8 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 8 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else if (4 % sizeof(DataT) == 0 && colBytes % 4 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 4 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else if (2 % sizeof(DataT) == 0 && colBytes % 2 == 0) {
    histogramVecLen<DataT, BinnerOp, IdxT, 2 / sizeof(DataT)>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  } else {
    // scalar fallback
    histogramVecLen<DataT, BinnerOp, IdxT, 1>(
      type, bins, nbins, data, nrows, ncols, stream, binner);
  }
}
// Pick the cheapest histogram variant that fits `nbins` into per-block shared
// memory: full unsigned counters first, then progressively narrower packed
// counters (16/8/4/2/1 bits), finally falling back to global-memory atomics.
template <typename IdxT>
HistType selectBestHistAlgo(IdxT nbins)
{
  size_t smem = raft::getSharedMemPerBlock();
  // one full unsigned counter per bin
  size_t requiredSize = nbins * sizeof(unsigned);
  if (requiredSize <= smem) { return HistTypeSmem; }
  for (int bits = 16; bits >= 1; bits >>= 1) {
    auto nBytesForBins = raft::ceildiv<size_t>(bits * nbins, 8);
    requiredSize = raft::alignTo<size_t>(nBytesForBins, sizeof(unsigned));
    // NOTE(review): the cast assumes HistTypeSmemBits{16,8,4,2,1} enumerators
    // are numerically 16/8/4/2/1 — confirm against the HistType definition.
    if (requiredSize <= smem) { return static_cast<HistType>(bits); }
  }
  return HistTypeGmem;
}
/**
 * @brief Compute per-column histograms of the input data.
 *
 * Chooses the vectorized load width from the data length, and — when `type`
 * is HistTypeAuto — selects the best implementation for `nbins` given the
 * device's shared-memory capacity (including a hash-based scheme for very
 * large bin counts).
 *
 * @tparam DataT input data type
 * @tparam IdxT data type used to compute indices
 * @tparam BinnerOp takes the input data and computes its bin index
 * @param type histogram implementation type to choose
 * @param bins the output bins (length = ncols * nbins)
 * @param nbins number of bins
 * @param data input data (length = ncols * nrows)
 * @param nrows data array length in each column (or batch)
 * @param ncols number of columns (or batch size)
 * @param stream cuda stream
 * @param binner the operation that computes the bin index of the input data
 *
 * @note signature of BinnerOp is `int func(DataT, IdxT);`
 */
template <typename DataT, typename IdxT = int, typename BinnerOp = IdentityBinner<DataT, IdxT>>
void histogram(HistType type,
               int* bins,
               IdxT nbins,
               const DataT* data,
               IdxT nrows,
               IdxT ncols,
               cudaStream_t stream,
               BinnerOp binner = IdentityBinner<DataT, IdxT>())
{
  const HistType chosen = (type == HistTypeAuto) ? selectBestHistAlgo(nbins) : type;
  histogramImpl<DataT, BinnerOp, IdxT>(chosen, bins, nbins, data, nrows, ncols, stream, binner);
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/weighted_mean.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/reduce.cuh>
#include <raft/stats/sum.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace cuvs {
namespace stats {
namespace detail {
/**
 * @brief Compute the weighted mean of the input matrix with a vector of
 * weights.
 *
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @param mu the output mean vector (also used as device scratch for the
 * weight sum before the final reduction)
 * @param data the input matrix
 * @param weights weight of size D if along_row is true, else of size N
 * @param D number of columns of data
 * @param N number of rows of data
 * @param row_major data input matrix is row-major or not
 * @param along_rows whether to reduce along rows or columns
 * @param stream cuda stream to launch work on
 */
template <typename Type, typename IdxType = int>
void weightedMean(Type* mu,
                  const Type* data,
                  const Type* weights,
                  IdxType D,
                  IdxType N,
                  bool row_major,
                  bool along_rows,
                  cudaStream_t stream)
{
  // sum the weights & copy back to CPU (mu doubles as device scratch here)
  auto weight_size = along_rows ? D : N;
  Type WS = 0;
  raft::stats::sum(mu, weights, (IdxType)1, weight_size, false, stream);
  raft::update_host(&WS, mu, 1, stream);
  // update_host enqueues an async copy on `stream`; synchronize before WS is
  // read on the host below, otherwise div_const_op may capture a stale (zero)
  // weight sum.
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  raft::linalg::reduce(
    mu,
    data,
    D,
    N,
    (Type)0,
    row_major,
    along_rows,
    stream,
    false,
    [weights] __device__(Type v, IdxType i) { return v * weights[i]; },
    raft::add_op{},
    raft::div_const_op<Type>(WS));
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/homogeneity_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file homogeneity_score.cuh
*
* @brief A clustering result satisfies homogeneity if all of its clusters
* contain only data points which are members of a single class.
*/
#pragma once
#include <raft/stats/entropy.cuh>
#include <raft/stats/mutual_info_score.cuh>
namespace cuvs {
namespace stats {
namespace detail {
/**
 * @brief Compute the homogeneity score between two clusterings as
 * MI(truth, pred) / H(truth).
 * <a href="https://en.wikipedia.org/wiki/Homogeneity_(statistics)">more info on mutual
 * information</a>
 *
 * @param truthClusterArray: the array of truth classes of type T
 * @param predClusterArray: the array of predicted classes of type T
 * @param size: the size of the data points of type int
 * @param lowerLabelRange: the lower bound of the range of labels
 * @param upperLabelRange: the upper bound of the range of labels
 * @param stream: the cudaStream object
 * @return the homogeneity score; 1.0 for empty input or a zero-entropy truth
 * labeling (both conventionally perfectly homogeneous)
 */
template <typename T>
double homogeneity_score(const T* truthClusterArray,
                         const T* predClusterArray,
                         int size,
                         T lowerLabelRange,
                         T upperLabelRange,
                         cudaStream_t stream)
{
  if (size == 0) return 1.0;
  const double mi = raft::stats::mutual_info_score(
    truthClusterArray, predClusterArray, size, lowerLabelRange, upperLabelRange, stream);
  const double truthEntropy =
    raft::stats::entropy(truthClusterArray, size, lowerLabelRange, upperLabelRange, stream);
  // a zero-entropy truth labeling is perfectly homogeneous by convention
  return truthEntropy != 0.0 ? mi / truthEntropy : 1.0;
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/stddev.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/binary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
namespace cuvs {
namespace stats {
namespace detail {
///@todo: ColPerBlk has been tested only for 32!
/**
 * @brief Row-major partial kernel: atomically accumulates per-column sums of
 * squares into `std`; the host wrapper later combines these with the mean to
 * finish the stddev/variance.
 *
 * Expects `std` to be zero-initialized before launch. Grid layout:
 * (ceil(N / RowsPerBlkPerIter), ceil(D / ColsPerBlk)).
 */
template <typename Type, typename IdxType, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL stddevKernelRowMajor(Type* std, const Type* data, IdxType D, IdxType N)
{
  const int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxType thisColId = threadIdx.x % ColsPerBlk;
  IdxType thisRowId = threadIdx.x / ColsPerBlk;
  IdxType colId = thisColId + ((IdxType)blockIdx.y * ColsPerBlk);
  IdxType rowId = thisRowId + ((IdxType)blockIdx.x * RowsPerBlkPerIter);
  Type thread_data = Type(0);
  const IdxType stride = RowsPerBlkPerIter * gridDim.x;
  for (IdxType i = rowId; i < N; i += stride) {
    // out-of-range columns contribute zero but keep the warp converged
    Type val = (colId < D) ? data[i * D + colId] : Type(0);
    thread_data += val * val;
  }
  __shared__ Type sstd[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) sstd[threadIdx.x] = Type(0);
  __syncthreads();
  raft::myAtomicAdd(sstd + thisColId, thread_data);
  __syncthreads();
  // The last grid-y block can extend past D; guard colId < D so the final
  // accumulate never writes out of bounds (the loads above were already
  // zeroed, but the store itself must be suppressed too).
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(std + colId, sstd[thisColId]);
}
// Column-major stddev kernel: one block per column; block-reduces the sum of
// squared deviations from the precomputed mean, lane 0 writes sqrt(acc / N).
// NOTE(review): normalizes by N only (population stddev) — the host wrapper's
// `sample` flag is not honored on this path; confirm this asymmetry with the
// row-major path is intentional. `D` is unused here.
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL stddevKernelColMajor(Type* std, const Type* data, const Type* mu, IdxType D, IdxType N)
{
  typedef cub::BlockReduce<Type, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  Type thread_data = Type(0);
  IdxType colStart = N * blockIdx.x;  // column is contiguous in col-major
  Type m = mu[blockIdx.x];
  for (IdxType i = threadIdx.x; i < N; i += TPB) {
    IdxType idx = colStart + i;
    Type diff = data[idx] - m;
    thread_data += diff * diff;
  }
  Type acc = BlockReduce(temp_storage).Sum(thread_data);
  if (threadIdx.x == 0) { std[blockIdx.x] = raft::sqrt(acc / N); }
}
// Column-major variance kernel: identical to stddevKernelColMajor except the
// final sqrt is omitted. NOTE(review): also normalizes by N only, ignoring
// the host-side `sample` flag; `D` is unused.
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL varsKernelColMajor(Type* var, const Type* data, const Type* mu, IdxType D, IdxType N)
{
  typedef cub::BlockReduce<Type, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  Type thread_data = Type(0);
  IdxType colStart = N * blockIdx.x;
  Type m = mu[blockIdx.x];
  for (IdxType i = threadIdx.x; i < N; i += TPB) {
    IdxType idx = colStart + i;
    Type diff = data[idx] - m;
    thread_data += diff * diff;
  }
  Type acc = BlockReduce(temp_storage).Sum(thread_data);
  if (threadIdx.x == 0) { var[blockIdx.x] = acc / N; }
}
/**
 * @brief Compute stddev of the input matrix
 *
 * Stddev operation is assumed to be performed on a given column.
 *
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @param std the output stddev vector
 * @param data the input matrix
 * @param mu the mean vector
 * @param D number of columns of data
 * @param N number of rows of data
 * @param sample whether to evaluate sample stddev or not. In other words,
 * whether
 * to normalize the output using N-1 or N, for true or false, respectively
 * @param rowMajor whether the input data is row or col major
 * @param stream cuda stream where to launch work
 *
 * @note NOTE(review): the column-major path normalizes by N regardless of
 * `sample` (see stddevKernelColMajor) — confirm intended.
 */
template <typename Type, typename IdxType = int>
void stddev(Type* std,
            const Type* data,
            const Type* mu,
            IdxType D,
            IdxType N,
            bool sample,
            bool rowMajor,
            cudaStream_t stream)
{
  static const int TPB = 256;
  if (rowMajor) {
    static const int RowsPerThread = 4;
    static const int ColsPerBlk = 32;
    static const int RowsPerBlk = (TPB / ColsPerBlk) * RowsPerThread;
    dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
    // Stream-ordered zeroing: a synchronous cudaMemset runs on the default
    // stream and could race with the kernel enqueued on `stream` below.
    RAFT_CUDA_TRY(cudaMemsetAsync(std, 0, sizeof(Type) * D, stream));
    stddevKernelRowMajor<Type, IdxType, TPB, ColsPerBlk><<<grid, TPB, 0, stream>>>(std, data, D, N);
    Type ratio = Type(1) / (sample ? Type(N - 1) : Type(N));
    // finalize: std[j] = sqrt(sumsq[j] * ratio - mean[j]^2)
    raft::linalg::binaryOp(
      std,
      std,
      mu,
      D,
      [ratio] __device__(Type a, Type b) { return raft::sqrt(a * ratio - b * b); },
      stream);
  } else {
    stddevKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(std, data, mu, D, N);
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
 * @brief Compute variance of the input matrix
 *
 * Variance operation is assumed to be performed on a given column.
 *
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @param var the output variance vector
 * @param data the input matrix
 * @param mu the mean vector
 * @param D number of columns of data
 * @param N number of rows of data
 * @param sample whether to evaluate sample variance or not. In other words,
 * whether
 * to normalize the output using N-1 or N, for true or false, respectively
 * @param rowMajor whether the input data is row or col major
 * @param stream cuda stream where to launch work
 *
 * @note NOTE(review): the column-major path normalizes by N regardless of
 * `sample` (see varsKernelColMajor) — confirm intended.
 */
template <typename Type, typename IdxType = int>
void vars(Type* var,
          const Type* data,
          const Type* mu,
          IdxType D,
          IdxType N,
          bool sample,
          bool rowMajor,
          cudaStream_t stream)
{
  static const int TPB = 256;
  if (rowMajor) {
    static const int RowsPerThread = 4;
    static const int ColsPerBlk = 32;
    static const int RowsPerBlk = (TPB / ColsPerBlk) * RowsPerThread;
    dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
    // Stream-ordered zeroing: a synchronous cudaMemset runs on the default
    // stream and could race with the kernel enqueued on `stream` below.
    RAFT_CUDA_TRY(cudaMemsetAsync(var, 0, sizeof(Type) * D, stream));
    stddevKernelRowMajor<Type, IdxType, TPB, ColsPerBlk><<<grid, TPB, 0, stream>>>(var, data, D, N);
    Type ratio = Type(1) / (sample ? Type(N - 1) : Type(N));
    // finalize: var[j] = sumsq[j] * ratio - mean[j]^2
    raft::linalg::binaryOp(
      var, var, mu, D, [ratio] __device__(Type a, Type b) { return a * ratio - b * b; }, stream);
  } else {
    varsKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(var, data, mu, D, N);
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace stats
} // namespace cuvs | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/mean_center.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/vectorized.cuh>
namespace cuvs {
namespace stats {
namespace detail {
/**
 * @brief Center the input matrix wrt its mean (out = data - mu, broadcast)
 *
 * Thin wrapper delegating to raft::linalg::matrixVectorOp with a subtract op.
 *
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block of the cuda kernel launched (currently unused
 * here; the launch configuration is chosen by matrixVectorOp)
 * @param out the output mean-centered matrix
 * @param data input matrix
 * @param mu the mean vector
 * @param D number of columns of data
 * @param N number of rows of data
 * @param rowMajor whether input is row or col major
 * @param bcastAlongRows whether to broadcast vector along rows or columns
 * @param stream cuda stream where to launch work
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void meanCenter(Type* out,
                const Type* data,
                const Type* mu,
                IdxType D,
                IdxType N,
                bool rowMajor,
                bool bcastAlongRows,
                cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    out, data, mu, D, N, rowMajor, bcastAlongRows, raft::sub_op{}, stream);
}
/**
 * @brief Add the mean back to the input matrix (out = data + mu, broadcast)
 *
 * Thin wrapper delegating to raft::linalg::matrixVectorOp with an add op;
 * inverse of meanCenter.
 *
 * @tparam Type the data type
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block of the cuda kernel launched (currently unused
 * here; the launch configuration is chosen by matrixVectorOp)
 * @param out the output mean-added matrix
 * @param data input matrix
 * @param mu the mean vector
 * @param D number of columns of data
 * @param N number of rows of data
 * @param rowMajor whether input is row or col major
 * @param bcastAlongRows whether to broadcast vector along rows or columns
 * @param stream cuda stream where to launch work
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void meanAdd(Type* out,
             const Type* data,
             const Type* mu,
             IdxType D,
             IdxType N,
             bool rowMajor,
             bool bcastAlongRows,
             cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    out, data, mu, D, N, rowMajor, bcastAlongRows, raft::add_op{}, stream);
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/entropy.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file entropy.cuh
* @brief Calculates the entropy for a labeling in nats.(ie, uses natural logarithm for the
* calculations)
*/
#pragma once
#include <cub/cub.cuh>
#include <math.h>
#include <raft/linalg/divide.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace stats {
namespace detail {
/**
 * @brief Functor mapping a probability to its entropy contribution, -p ln(p),
 * with the convention 0 * ln(0) = 0.
 *
 * @param p: probability of one label
 * @param q: unused; present only to satisfy the binary map-reduce interface
 */
struct entropyOp {
  HDI double operator()(double p, double q)
  {
    return p ? -p * log(p) : 0.0;
  }
};
/**
 * @brief function to calculate the bincounts of number of samples in every label
 *
 * Uses cub::DeviceHistogram::HistogramEven with unit-width integer bins, so
 * binCountArray[k] receives the count of samples with label lowerLabelRange+k.
 *
 * @tparam LabelT: type of the labels
 * @param labels: the pointer to the array containing labels for every data sample
 * @param binCountArray: pointer to the 1D array that contains the count of samples per cluster
 * (length upperLabelRange - lowerLabelRange + 1)
 * @param nRows: number of data samples
 * @param lowerLabelRange: smallest label value expected
 * @param upperLabelRange: largest label value expected
 * @param workspace: device buffer containing workspace memory (resized here as needed)
 * @param stream: the cuda stream where to launch this kernel
 */
template <typename LabelT>
void countLabels(const LabelT* labels,
                 double* binCountArray,
                 int nRows,
                 LabelT lowerLabelRange,
                 LabelT upperLabelRange,
                 rmm::device_uvector<char>& workspace,
                 cudaStream_t stream)
{
  // num_levels is the number of bin BOUNDARIES: (#bins = range + 1) + 1
  int num_levels = upperLabelRange - lowerLabelRange + 2;
  LabelT lower_level = lowerLabelRange;
  LabelT upper_level = upperLabelRange + 1;
  size_t temp_storage_bytes = 0;
  // first call only queries the temp-storage requirement
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
                                                    temp_storage_bytes,
                                                    labels,
                                                    binCountArray,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    nRows,
                                                    stream));
  workspace.resize(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
                                                    temp_storage_bytes,
                                                    labels,
                                                    binCountArray,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    nRows,
                                                    stream));
}
/**
 * @brief Function to calculate entropy
 * <a href="https://en.wikipedia.org/wiki/Entropy_(information_theory)">more info on entropy</a>
 *
 * Pipeline: bincount labels -> divide by size to get probabilities ->
 * map-reduce with entropyOp to sum -p ln(p). Synchronizes `stream` before
 * returning the host-side result.
 *
 * @param clusterArray: the array of classes of type T
 * @param size: the size of the data points of type int
 * @param lowerLabelRange: the lower bound of the range of labels
 * @param upperLabelRange: the upper bound of the range of labels
 * @param stream: the cudaStream object
 * @return the entropy score (in nats)
 */
template <typename T>
double entropy(const T* clusterArray,
               const int size,
               const T lowerLabelRange,
               const T upperLabelRange,
               cudaStream_t stream)
{
  // NOTE(review): empty input returns 1.0 (not 0); kept for API compatibility
  // with existing callers (e.g. homogeneity/completeness) — confirm intended.
  if (!size) return 1.0;
  T numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
  // declaring, allocating and initializing memory for bincount array and entropy values
  rmm::device_uvector<double> prob(numUniqueClasses, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(prob.data(), 0, numUniqueClasses * sizeof(double), stream));
  rmm::device_scalar<double> d_entropy(stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_entropy.data(), 0, sizeof(double), stream));
  // workspace allocation (resized inside countLabels to cub's requirement)
  rmm::device_uvector<char> workspace(1, stream);
  // calculating the bincounts and populating the prob array
  countLabels(clusterArray, prob.data(), size, lowerLabelRange, upperLabelRange, workspace, stream);
  // scalar dividing by size
  raft::linalg::divideScalar<double>(
    prob.data(), prob.data(), (double)size, numUniqueClasses, stream);
  // calculating the aggregate entropy; the second prob.data() is a dummy
  // argument — entropyOp ignores its second parameter
  raft::linalg::mapThenSumReduce<double, entropyOp>(
    d_entropy.data(), numUniqueClasses, entropyOp(), stream, prob.data(), prob.data());
  // updating in the host memory
  double h_entropy;
  raft::update_host(&h_entropy, d_entropy.data(), 1, stream);
  raft::interruptible::synchronize(stream);
  return h_entropy;
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/neighborhood_recall.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/error.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/math.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <cub/cub.cuh>
#include <cuda/atomic>
#include <optional>
namespace raft::stats::detail {
// One-warp-per-row recall kernel: for each query row, counts how many computed
// neighbor indices appear in the reference set (or, when distances are given,
// are within relative tolerance `eps` of a reference distance), then
// atomically accumulates the normalized per-row score into `recall_score`.
// Launch contract: gridDim.x == indices.extent(0), blockDim.x == 32;
// *recall_score must be zero-initialized.
// NOTE(review): when `distances` is provided, `ref_distances` is dereferenced
// unconditionally — callers must supply both or neither; confirm.
template <typename IndicesValueType,
          typename DistanceValueType,
          typename IndexType,
          typename ScalarType>
RAFT_KERNEL neighborhood_recall(
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    distances,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    ref_distances,
  raft::device_scalar_view<ScalarType> recall_score,
  DistanceValueType const eps)
{
  auto constexpr kThreadsPerBlock = 32;
  IndexType const row_idx = blockIdx.x;
  auto const lane_idx = threadIdx.x % kThreadsPerBlock;
  // Each warp stores a recall score computed across the columns per row
  IndexType thread_recall_score = 0;
  for (IndexType col_idx = lane_idx; col_idx < indices.extent(1); col_idx += kThreadsPerBlock) {
    for (IndexType ref_col_idx = 0; ref_col_idx < ref_indices.extent(1); ref_col_idx++) {
      if (indices(row_idx, col_idx) == ref_indices(row_idx, ref_col_idx)) {
        thread_recall_score += 1;
        break;
      } else if (distances.has_value()) {
        // index mismatch but distances match within tolerance -> count as hit
        // (handles ties / equivalent neighbors at equal distance)
        auto dist = distances.value()(row_idx, col_idx);
        auto ref_dist = ref_distances.value()(row_idx, ref_col_idx);
        DistanceValueType diff = raft::abs(dist - ref_dist);
        DistanceValueType m = std::max(raft::abs(dist), raft::abs(ref_dist));
        // relative error when diff exceeds eps, absolute error otherwise
        DistanceValueType ratio = diff > eps ? diff / m : diff;
        if (ratio <= eps) {
          thread_recall_score += 1;
          break;
        }
      }
    }
  }
  // Reduce across a warp for row score
  typedef cub::BlockReduce<IndexType, kThreadsPerBlock> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  ScalarType row_recall_score = BlockReduce(temp_storage).Sum(thread_recall_score);
  // Reduce across all rows for global score
  if (lane_idx == 0) {
    cuda::atomic_ref<ScalarType, cuda::thread_scope_device> device_recall_score{
      *recall_score.data_handle()};
    std::size_t const total_count = indices.extent(0) * indices.extent(1);
    device_recall_score.fetch_add(row_recall_score / total_count);
  }
}
// Host wrapper: launches one 32-thread block per query row.
// NOTE(review): the kernel is launched on the default stream and `res` is
// unused — should presumably run on raft::resource::get_cuda_stream(res);
// there is also no cudaGetLastError() check after the launch. Confirm.
template <typename IndicesValueType,
          typename DistanceValueType,
          typename IndexType,
          typename ScalarType>
void neighborhood_recall(
  raft::resources const& res,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> indices,
  raft::device_matrix_view<const IndicesValueType, IndexType, raft::row_major> ref_indices,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    distances,
  std::optional<raft::device_matrix_view<const DistanceValueType, IndexType, raft::row_major>>
    ref_distances,
  raft::device_scalar_view<ScalarType> recall_score,
  DistanceValueType const eps)
{
  // One warp per row, launch a warp-width block per-row kernel
  auto constexpr kThreadsPerBlock = 32;
  auto const num_blocks = indices.extent(0);
  neighborhood_recall<<<num_blocks, kThreadsPerBlock>>>(
    indices, ref_indices, distances, ref_distances, recall_score, eps);
}
} // end namespace raft::stats::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/silhouette_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cub/cub.cuh>
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <iostream>
#include <math.h>
#include <numeric>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/reduce.cuh>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief kernel that calculates the average intra-cluster distance for every sample data point and
* updates the cluster distance to max value
* @tparam DataT: type of the data samples
* @tparam LabelT: type of the labels
* @param sampleToClusterSumOfDistances: the pointer to the 2D array that contains the sum of
* distances from every sample to every cluster (nRows x nLabels)
* @param binCountArray: pointer to the 1D array that contains the count of samples per cluster (1 x
* nLabels)
* @param d_aArray: the pointer to the array of average intra-cluster distances for every sample in
* device memory (1 x nRows)
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param nRows: number of data samples
* @param nLabels: number of Labels
* @param MAX_VAL: DataT specific upper limit
*/
template <typename DataT, typename LabelT>
RAFT_KERNEL populateAKernel(DataT* sampleToClusterSumOfDistances,
                            DataT* binCountArray,
                            DataT* d_aArray,
                            const LabelT* labels,
                            int nRows,
                            int nLabels,
                            const DataT MAX_VAL)
{
  // One thread per sample; the global thread id is the sample (row) index.
  const int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= nRows) return;

  // Row of the sample-to-cluster distance-sum matrix owned by this sample.
  DataT* rowSums = sampleToClusterSumOfDistances + row * nLabels;

  // Cluster this sample belongs to.
  const int ownCluster = (int)labels[row];
  const DataT denom    = binCountArray[ownCluster] - 1;

  // Singleton cluster: the mean intra-cluster distance is undefined, flag it.
  if (denom <= 0) {
    d_aArray[row] = -1;
    return;
  }

  // Average intra-cluster distance 'a' for this sample (self excluded via -1).
  d_aArray[row] = rowSums[ownCluster] / denom;
  // Mask out the own-cluster entry so the later row-wise min (for 'b')
  // ignores it.
  rowSums[ownCluster] = MAX_VAL;
}
/**
* @brief function to calculate the bincounts of number of samples in every label
* @tparam DataT: type of the data samples
* @tparam LabelT: type of the labels
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param binCountArray: pointer to the 1D array that contains the count of samples per cluster (1 x
* nLabels)
* @param nRows: number of data samples
* @param nUniqueLabels: number of Labels
* @param workspace: device buffer containing workspace memory
* @param stream: the cuda stream where to launch this kernel
*/
template <typename DataT, typename LabelT>
void countLabels(const LabelT* labels,
                 DataT* binCountArray,
                 int nRows,
                 int nUniqueLabels,
                 rmm::device_uvector<char>& workspace,
                 cudaStream_t stream)
{
  // Histogram over [0, nUniqueLabels) with unit-width bins gives the count of
  // samples per cluster label.
  int num_levels            = nUniqueLabels + 1;
  LabelT lower_level        = 0;
  LabelT upper_level        = nUniqueLabels;
  size_t temp_storage_bytes = 0;
  // FIX: removed an unused `rmm::device_uvector<int> countArray` that was
  // allocated on every call and never read or written.
  // First call with a null workspace only queries the required temp storage.
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
                                                    temp_storage_bytes,
                                                    labels,
                                                    binCountArray,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    nRows,
                                                    stream));
  workspace.resize(temp_storage_bytes, stream);
  // Second call performs the actual histogram into binCountArray.
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
                                                    temp_storage_bytes,
                                                    labels,
                                                    binCountArray,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    nRows,
                                                    stream));
}
/**
* @brief structure that defines the division Lambda for elementwise op
*/
template <typename DataT>
struct DivOp {
  // Divides the running sum by the bin count; a zero count yields the
  // ULLONG_MAX sentinel (third argument is required by the matrixVectorOp
  // signature and ignored).
  HDI DataT operator()(DataT a, int b, int c)
  {
    return (b == 0) ? static_cast<DataT>(ULLONG_MAX) : a / b;
  }
};
/**
* @brief structure that defines the elementwise operation to calculate silhouette score using
* params 'a' and 'b'
*/
template <typename DataT>
struct SilOp {
  // Silhouette of one sample from its intra-cluster distance `a` and
  // nearest-other-cluster distance `b`: (b - a) / max(a, b).
  HDI DataT operator()(DataT a, DataT b)
  {
    // Degenerate cases score zero: both terms zero, equal terms, or an
    // undefined intra-cluster distance (a == -1 marks a singleton cluster).
    if (((a == 0) && (b == 0)) || (a == b) || (a == -1)) return 0;
    return (b - a) / (a > b ? a : b);
  }
};
/**
* @brief main function that returns the average silhouette score for a given set of data and its
* clusterings
* @tparam DataT: type of the data samples
 * @tparam LabelT: type of the labels
 * @param handle: raft resources handle used for the device resources and the stream
 * @param X_in: pointer to the input Data samples array (nRows x nCols)
* @param nRows: number of data samples
* @param nCols: number of features
* @param labels: the pointer to the array containing labels for every data sample (1 x nRows)
* @param nLabels: number of Labels
* @param silhouette_scorePerSample: pointer to the array that is optionally taken in as input and
* is populated with the silhouette score for every sample (1 x nRows)
* @param stream: the cuda stream where to launch this kernel
* @param metric: the numerical value that maps to the type of distance metric to be used in the
* calculations
*/
template <typename DataT, typename LabelT>
DataT silhouette_score(
  raft::resources const& handle,
  const DataT* X_in,
  int nRows,
  int nCols,
  const LabelT* labels,
  int nLabels,
  DataT* silhouette_scorePerSample,
  cudaStream_t stream,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  ASSERT(nLabels >= 2 && nLabels <= (nRows - 1),
         "silhouette Score not defined for the given number of labels!");
  // compute the full nRows x nRows pairwise distance matrix
  rmm::device_uvector<DataT> distanceMatrix(nRows * nRows, stream);
  rmm::device_uvector<char> workspace(1, stream);
  cuvs::distance::pairwise_distance(
    handle, X_in, X_in, distanceMatrix.data(), nRows, nRows, nCols, metric);
  // deciding on the array of silhouette scores for each dataPoint: use the
  // caller's buffer when provided, otherwise a temporary one
  rmm::device_uvector<DataT> silhouette_scoreSamples(0, stream);
  DataT* perSampleSilScore = nullptr;
  if (silhouette_scorePerSample == nullptr) {
    silhouette_scoreSamples.resize(nRows, stream);
    perSampleSilScore = silhouette_scoreSamples.data();
  } else {
    perSampleSilScore = silhouette_scorePerSample;
  }
  RAFT_CUDA_TRY(cudaMemsetAsync(perSampleSilScore, 0, nRows * sizeof(DataT), stream));
  // getting the sample count per cluster
  rmm::device_uvector<DataT> binCountArray(nLabels, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(binCountArray.data(), 0, nLabels * sizeof(DataT), stream));
  countLabels(labels, binCountArray.data(), nRows, nLabels, workspace, stream);
  // calculating the sample-cluster-distance-sum-array (nRows x nLabels)
  rmm::device_uvector<DataT> sampleToClusterSumOfDistances(nRows * nLabels, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    sampleToClusterSumOfDistances.data(), 0, nRows * nLabels * sizeof(DataT), stream));
  raft::linalg::reduce_cols_by_key(distanceMatrix.data(),
                                   labels,
                                   sampleToClusterSumOfDistances.data(),
                                   nRows,
                                   nRows,
                                   nLabels,
                                   stream);
  // creating the a array (intra-cluster) and b array (nearest-other-cluster)
  rmm::device_uvector<DataT> d_aArray(nRows, stream);
  rmm::device_uvector<DataT> d_bArray(nRows, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_aArray.data(), 0, nRows * sizeof(DataT), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_bArray.data(), 0, nRows * sizeof(DataT), stream));
  // kernel that populates the d_aArray and masks own-cluster sums with MAX
  // kernel configuration
  dim3 numThreadsPerBlock(32, 1, 1);
  dim3 numBlocks(raft::ceildiv<int>(nRows, numThreadsPerBlock.x), 1, 1);
  // calling the kernel
  populateAKernel<<<numBlocks, numThreadsPerBlock, 0, stream>>>(
    sampleToClusterSumOfDistances.data(),
    binCountArray.data(),
    d_aArray.data(),
    labels,
    nRows,
    nLabels,
    std::numeric_limits<DataT>::max());
  // elementwise dividing by bincounts: sum -> mean distance per cluster
  rmm::device_uvector<DataT> averageDistanceBetweenSampleAndCluster(nRows * nLabels, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    averageDistanceBetweenSampleAndCluster.data(), 0, nRows * nLabels * sizeof(DataT), stream));
  raft::linalg::matrixVectorOp(averageDistanceBetweenSampleAndCluster.data(),
                               sampleToClusterSumOfDistances.data(),
                               binCountArray.data(),
                               binCountArray.data(),
                               nLabels,
                               nRows,
                               true,
                               true,
                               DivOp<DataT>(),
                               stream);
  // calculating row-wise minimum to obtain b for each sample
  raft::linalg::reduce<DataT, DataT, int, raft::identity_op, raft::min_op>(
    d_bArray.data(),
    averageDistanceBetweenSampleAndCluster.data(),
    nLabels,
    nRows,
    std::numeric_limits<DataT>::max(),
    true,
    true,
    stream,
    false,
    raft::identity_op{},
    raft::min_op{});
  // calculating the silhouette score per sample using the d_aArray and d_bArray
  raft::linalg::binaryOp<DataT, SilOp<DataT>>(
    perSampleSilScore, d_aArray.data(), d_bArray.data(), nRows, SilOp<DataT>(), stream);
  // calculating the sum of all the silhouette score
  rmm::device_scalar<DataT> d_avgSilhouetteScore(stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(d_avgSilhouetteScore.data(), 0, sizeof(DataT), stream));
  // FIX: instantiate for DataT — the previous hard-coded <double, ...> does
  // not match the DataT* buffers and breaks the DataT == float instantiation.
  raft::linalg::mapThenSumReduce<DataT, raft::identity_op>(d_avgSilhouetteScore.data(),
                                                           nRows,
                                                           raft::identity_op(),
                                                           stream,
                                                           perSampleSilScore,
                                                           perSampleSilScore);
  DataT avgSilhouetteScore = d_avgSilhouetteScore.value(stream);
  // FIX: fully qualify — unqualified `resource::sync_stream` does not resolve
  // inside namespace cuvs::stats::detail.
  raft::resource::sync_stream(handle, stream);
  avgSilhouetteScore /= nRows;
  return avgSilhouetteScore;
}
}; // namespace detail
}; // namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/dispersion.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <memory>
#include <raft/core/interruptible.hpp>
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace stats {
namespace detail {
///@todo: ColsPerBlk has been tested only for 32!
template <typename DataT, typename IdxT, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL weightedMeanKernel(DataT* mu, const DataT* data, const IdxT* counts, IdxT D, IdxT N)
{
  // Accumulates, per column, sum_i data[i][col] * counts[i] into mu[col].
  // The TPB threads of a block are viewed as a (TPB / ColsPerBlk) x ColsPerBlk
  // tile: blockIdx.y selects the column tile, blockIdx.x strides over rows.
  // mu must be zero-initialized by the caller (final step is a global atomic).
  constexpr int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxT thisColId = threadIdx.x % ColsPerBlk;
  IdxT thisRowId = threadIdx.x / ColsPerBlk;
  IdxT colId = thisColId + ((IdxT)blockIdx.y * ColsPerBlk);
  IdxT rowId = thisRowId + ((IdxT)blockIdx.x * RowsPerBlkPerIter);
  DataT thread_data = DataT(0);
  const IdxT stride = RowsPerBlkPerIter * gridDim.x;
  // One shared accumulator per column of this block's tile.
  __shared__ DataT smu[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) smu[threadIdx.x] = DataT(0);
  // Grid-stride over rows; out-of-range columns contribute zero.
  for (IdxT i = rowId; i < N; i += stride) {
    thread_data += (colId < D) ? data[i * D + colId] * (DataT)counts[i] : DataT(0);
  }
  // Barrier ensures smu is fully zeroed before any thread atomically adds.
  __syncthreads();
  raft::myAtomicAdd(smu + thisColId, thread_data);
  // Barrier ensures all shared-memory adds landed before the global flush.
  __syncthreads();
  // colId < D guard protects against writing past mu on the last column tile.
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(mu + colId, smu[thisColId]);
}
template <typename DataT, typename IdxT, int TPB>
RAFT_KERNEL dispersionKernel(DataT* result,
                             const DataT* clusters,
                             const IdxT* clusterSizes,
                             const DataT* mu,
                             IdxT dim,
                             IdxT nClusters)
{
  // Accumulates into *result the size-weighted squared deviation of every
  // centroid coordinate from the global centroid mu:
  //   sum_{row,col} clusterSizes[row] * (clusters[row][col] - mu[col])^2
  // *result must be zero-initialized by the caller (final step is an atomic).
  IdxT tid = threadIdx.x + blockIdx.x * blockDim.x;
  IdxT len = dim * nClusters;
  IdxT stride = blockDim.x * gridDim.x;
  DataT sum = DataT(0);
  // Grid-stride loop over the flattened (nClusters x dim) centroid matrix.
  for (; tid < len; tid += stride) {
    IdxT col = tid % dim;
    IdxT row = tid / dim;
    DataT diff = clusters[tid] - mu[col];
    sum += diff * diff * DataT(clusterSizes[row]);
  }
  typedef cub::BlockReduce<DataT, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  __syncthreads();
  // Block-wide sum of the per-thread partials; only thread 0 holds the total.
  auto acc = BlockReduce(temp_storage).Sum(sum);
  __syncthreads();
  if (threadIdx.x == 0) raft::myAtomicAdd(result, acc);
}
/**
* @brief Compute cluster dispersion metric. This is very useful for
* automatically finding the 'k' (in kmeans) that improves this metric.
* @tparam DataT data type
* @tparam IdxT index type
* @tparam TPB threads block for kernels launched
* @param centroids the cluster centroids. This is assumed to be row-major
* and of dimension (nClusters x dim)
* @param clusterSizes number of points in the dataset which belong to each
* cluster. This is of length nClusters
* @param globalCentroid compute the global weighted centroid of all cluster
* centroids. This is of length dim. Pass a nullptr if this is not needed
* @param nClusters number of clusters
* @param nPoints number of points in the dataset
* @param dim dataset dimensionality
* @param stream cuda stream
* @return the cluster dispersion value
*/
template <typename DataT, typename IdxT = int, int TPB = 256>
DataT dispersion(const DataT* centroids,
                 const IdxT* clusterSizes,
                 DataT* globalCentroid,
                 IdxT nClusters,
                 IdxT nPoints,
                 IdxT dim,
                 cudaStream_t stream)
{
  // Launch geometry for the weighted-mean kernel: each block covers
  // ColsPerBlk columns and (TPB / ColsPerBlk) * RowsPerThread rows.
  constexpr int RowsPerThread = 4;
  constexpr int ColsPerBlk    = 32;
  constexpr int RowsPerBlk    = (TPB / ColsPerBlk) * RowsPerThread;
  dim3 grid(raft::ceildiv(nPoints, (IdxT)RowsPerBlk), raft::ceildiv(dim, (IdxT)ColsPerBlk));
  rmm::device_uvector<DataT> mean(0, stream);
  rmm::device_uvector<DataT> result(1, stream);
  // Use the caller-provided centroid buffer when given; otherwise allocate a
  // scratch one.
  DataT* mu = globalCentroid;
  if (mu == nullptr) {
    mean.resize(dim, stream);
    mu = mean.data();
  }
  // Both accumulators must start at zero (the kernels finish with atomics).
  RAFT_CUDA_TRY(cudaMemsetAsync(mu, 0, sizeof(DataT) * dim, stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(result.data(), 0, sizeof(DataT), stream));
  weightedMeanKernel<DataT, IdxT, TPB, ColsPerBlk>
    <<<grid, TPB, 0, stream>>>(mu, centroids, clusterSizes, dim, nClusters);
  RAFT_CUDA_TRY(cudaGetLastError());
  // Turn the weighted coordinate sums into the global weighted mean.
  raft::linalg::scalarMultiply(mu, mu, DataT(1) / DataT(nPoints), dim, stream);
  // finally, compute the dispersion
  constexpr int ItemsPerThread = 4;
  const int nblks              = raft::ceildiv<int>(dim * nClusters, TPB * ItemsPerThread);
  dispersionKernel<DataT, IdxT, TPB>
    <<<nblks, TPB, 0, stream>>>(result.data(), centroids, clusterSizes, mu, dim, nClusters);
  RAFT_CUDA_TRY(cudaGetLastError());
  DataT h_result;
  raft::update_host(&h_result, result.data(), 1, stream);
  raft::interruptible::synchronize(stream);
  return sqrt(h_result);
}
} // end namespace detail
} // end namespace stats
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/sum.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/eltwise.cuh>
#include <raft/util/cuda_utils.cuh>
#include <cub/cub.cuh>
namespace cuvs {
namespace stats {
namespace detail {
///@todo: ColsPerBlk has been tested only for 32!
template <typename Type, typename IdxType, int TPB, int ColsPerBlk = 32>
RAFT_KERNEL sumKernelRowMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  // Column-wise sum of a row-major (N x D) matrix into mu (length D).
  // Block layout: TPB threads viewed as (TPB / ColsPerBlk) rows x ColsPerBlk
  // cols; blockIdx.y selects the column tile, blockIdx.x strides over rows.
  // mu must be zero-initialized by the caller (final step is a global atomic).
  const int RowsPerBlkPerIter = TPB / ColsPerBlk;
  IdxType thisColId = threadIdx.x % ColsPerBlk;
  IdxType thisRowId = threadIdx.x / ColsPerBlk;
  IdxType colId = thisColId + ((IdxType)blockIdx.y * ColsPerBlk);
  IdxType rowId = thisRowId + ((IdxType)blockIdx.x * RowsPerBlkPerIter);
  Type thread_data = Type(0);
  const IdxType stride = RowsPerBlkPerIter * gridDim.x;
  for (IdxType i = rowId; i < N; i += stride)
    thread_data += (colId < D) ? data[i * D + colId] : Type(0);
  __shared__ Type smu[ColsPerBlk];
  if (threadIdx.x < ColsPerBlk) smu[threadIdx.x] = Type(0);
  __syncthreads();
  raft::myAtomicAdd(smu + thisColId, thread_data);
  __syncthreads();
  // FIX: guard colId < D — without it the last column tile writes past the
  // end of mu whenever D is not a multiple of ColsPerBlk (the sibling
  // weightedMeanKernel in dispersion.cuh carries the same guard).
  if (threadIdx.x < ColsPerBlk && colId < D) raft::myAtomicAdd(mu + colId, smu[thisColId]);
}
template <typename Type, typename IdxType, int TPB>
RAFT_KERNEL sumKernelColMajor(Type* mu, const Type* data, IdxType D, IdxType N)
{
  // One block per column of the column-major (N x D) matrix; the block
  // cooperatively sums that column's N entries into mu[blockIdx.x].
  using BlockReduce = cub::BlockReduce<Type, TPB>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  Type partial           = Type(0);
  const IdxType colStart = N * blockIdx.x;
  for (IdxType i = threadIdx.x; i < N; i += TPB) {
    partial += data[colStart + i];
  }
  const Type colSum = BlockReduce(temp_storage).Sum(partial);
  if (threadIdx.x == 0) { mu[blockIdx.x] = colSum; }
}
template <typename Type, typename IdxType = int>
void sum(Type* output, const Type* input, IdxType D, IdxType N, bool rowMajor, cudaStream_t stream)
{
  // Column-wise sum of an (N x D) matrix into output (length D).
  static const int TPB = 256;
  if (rowMajor) {
    static const int RowsPerThread = 4;
    static const int ColsPerBlk    = 32;
    static const int RowsPerBlk    = (TPB / ColsPerBlk) * RowsPerThread;
    dim3 grid(raft::ceildiv(N, (IdxType)RowsPerBlk), raft::ceildiv(D, (IdxType)ColsPerBlk));
    // FIX: zero the accumulator asynchronously on the same stream as the
    // kernel — the previous synchronous cudaMemset ran on the default stream,
    // which both blocks the host and is not ordered with work on `stream`.
    RAFT_CUDA_TRY(cudaMemsetAsync(output, 0, sizeof(Type) * D, stream));
    sumKernelRowMajor<Type, IdxType, TPB, ColsPerBlk>
      <<<grid, TPB, 0, stream>>>(output, input, D, N);
  } else {
    // Column-major path writes each column's sum directly; no zeroing needed.
    sumKernelColMajor<Type, IdxType, TPB><<<D, TPB, 0, stream>>>(output, input, D, N);
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
} // namespace stats
} // namespace cuvs | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/mutual_info_score.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file mutual_info_score.cuh
* @brief The Mutual Information is a measure of the similarity between two labels of
* the same data.This metric is independent of the absolute values of the labels:
* a permutation of the class or cluster label values won't change the
* score value in any way.
* This metric is furthermore symmetric.This can be useful to
* measure the agreement of two independent label assignments strategies
* on the same dataset when the real ground truth is not known.
*/
#pragma once
#include <cub/cub.cuh>
#include <math.h>
#include <raft/core/interruptible.hpp>
#include <raft/linalg/reduce.cuh>
#include <raft/stats/contingency_matrix.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace stats {
namespace detail {
/**
* @brief kernel to calculate the mutual info score
* @param dContingencyMatrix: the contingency matrix corresponding to the two clusters
* @param a: the row wise sum of the contingency matrix, which is also the bin counts of first
* cluster array
* @param b: the column wise sum of the contingency matrix, which is also the bin counts of second
* cluster array
* @param numUniqueClasses: number of unique classes
* @param size: the size of array a and b (size of the contingency matrix is (size x size))
* @param d_MI: pointer to the device memory that stores the aggregate mutual information
*/
template <typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y>
RAFT_KERNEL mutual_info_kernel(const int* dContingencyMatrix,
                               const int* a,
                               const int* b,
                               int numUniqueClasses,
                               int size,
                               double* d_MI)
{
  // calculating the indices of pairs of datapoints compared by the current thread
  int j = threadIdx.x + blockIdx.x * blockDim.x;
  int i = threadIdx.y + blockIdx.y * blockDim.y;
  // thread-local variable to count the mutual info
  double localMI = 0.0;
  // FIX: test a[i] and b[j] separately and take the product in double —
  // the previous `a[i] * b[j]` was computed in int and could overflow for
  // large cluster sizes, corrupting both the zero test and the log argument.
  if (i < numUniqueClasses && j < numUniqueClasses && a[i] != 0 && b[j] != 0 &&
      dContingencyMatrix[i * numUniqueClasses + j] != 0) {
    localMI += (double(dContingencyMatrix[i * numUniqueClasses + j])) *
               (log(double(size) * double(dContingencyMatrix[i * numUniqueClasses + j])) -
                log(double(a[i]) * double(b[j])));
  }
  // specialize blockReduce for a 2D block of 1024 threads of type uint64_t
  typedef cub::BlockReduce<double, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y>
    BlockReduce;
  // Allocate shared memory for blockReduce
  __shared__ typename BlockReduce::TempStorage temp_storage;
  // summing up thread-local counts specific to a block
  localMI = BlockReduce(temp_storage).Sum(localMI);
  __syncthreads();
  // executed once per block
  if (threadIdx.x == 0 && threadIdx.y == 0) { raft::myAtomicAdd(d_MI, localMI); }
}
/**
* @brief Function to calculate the mutual information between two clusters
* <a href="https://en.wikipedia.org/wiki/Mutual_information">more info on mutual information</a>
* @param firstClusterArray: the array of classes of type T
* @param secondClusterArray: the array of classes of type T
* @param size: the size of the data points of type int
* @param lowerLabelRange: the lower bound of the range of labels
* @param upperLabelRange: the upper bound of the range of labels
* @param stream: the cudaStream object
*/
template <typename T>
double mutual_info_score(const T* firstClusterArray,
                         const T* secondClusterArray,
                         int size,
                         T lowerLabelRange,
                         T upperLabelRange,
                         cudaStream_t stream)
{
  // Labels are assumed to lie in [lowerLabelRange, upperLabelRange].
  int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
  // declaring, allocating and initializing memory for the contingency matrix
  rmm::device_uvector<int> dContingencyMatrix(numUniqueClasses * numUniqueClasses, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(
    dContingencyMatrix.data(), 0, numUniqueClasses * numUniqueClasses * sizeof(int), stream));
  // workspace allocation
  size_t workspaceSz = raft::stats::getContingencyMatrixWorkspaceSize(
    size, firstClusterArray, stream, lowerLabelRange, upperLabelRange);
  rmm::device_uvector<char> pWorkspace(workspaceSz, stream);
  // calculating the contingency matrix
  raft::stats::contingencyMatrix(firstClusterArray,
                                 secondClusterArray,
                                 (int)size,
                                 (int*)dContingencyMatrix.data(),
                                 stream,
                                 (void*)pWorkspace.data(),
                                 workspaceSz,
                                 lowerLabelRange,
                                 upperLabelRange);
  // creating device buffers for all the parameters involved in MI calculation
  // device variables
  rmm::device_uvector<int> a(numUniqueClasses, stream);
  rmm::device_uvector<int> b(numUniqueClasses, stream);
  rmm::device_scalar<double> d_MI(stream);
  // host variables
  double h_MI;
  // initializing device memory
  RAFT_CUDA_TRY(cudaMemsetAsync(a.data(), 0, numUniqueClasses * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(b.data(), 0, numUniqueClasses * sizeof(int), stream));
  RAFT_CUDA_TRY(cudaMemsetAsync(d_MI.data(), 0, sizeof(double), stream));
  // calculating the row-wise sums (bin counts of the first cluster array)
  raft::linalg::reduce<int, int, int>(
    a.data(), dContingencyMatrix.data(), numUniqueClasses, numUniqueClasses, 0, true, true, stream);
  // calculating the column-wise sums (bin counts of the second cluster array)
  raft::linalg::reduce<int, int, int>(b.data(),
                                      dContingencyMatrix.data(),
                                      numUniqueClasses,
                                      numUniqueClasses,
                                      0,
                                      true,
                                      false,
                                      stream);
  // kernel configuration: one thread per (i, j) contingency-matrix cell
  static const int BLOCK_DIM_Y = 16, BLOCK_DIM_X = 16;
  dim3 numThreadsPerBlock(BLOCK_DIM_X, BLOCK_DIM_Y);
  dim3 numBlocks(raft::ceildiv<int>(numUniqueClasses, numThreadsPerBlock.x),
                 raft::ceildiv<int>(numUniqueClasses, numThreadsPerBlock.y));
  // calling the kernel
  mutual_info_kernel<T, BLOCK_DIM_X, BLOCK_DIM_Y><<<numBlocks, numThreadsPerBlock, 0, stream>>>(
    dContingencyMatrix.data(), a.data(), b.data(), numUniqueClasses, size, d_MI.data());
  // updating in the host memory (value() copies on `stream`; sync before use)
  h_MI = d_MI.value(stream);
  raft::interruptible::synchronize(stream);
  // The kernel accumulates n_ij * log(size * n_ij / (a_i * b_j)); dividing by
  // size yields the mutual information in nats.
  return h_MI / size;
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/minmax.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <limits>
namespace cuvs {
namespace stats {
namespace detail {
// TODO: replace with `std::bitcast` once we adopt C++20 or libcu++ adds it
// Reinterpret the object representation of `from` as a value of type `To`.
// Both types must have identical sizes; stand-in for std::bit_cast (C++20).
template <class To, class From>
constexpr To bit_cast(const From& from) noexcept
{
  static_assert(sizeof(To) == sizeof(From));
  To result{};
  memcpy(&result, &from, sizeof(To));
  return result;
}
// Maps a floating-point type to a same-width signed integer type whose
// encoded ordering (via encode/decode below) matches the float ordering, so
// integer atomicMin/atomicMax can implement float min/max.
template <typename T>
struct encode_traits {};
template <>
struct encode_traits<float> {
  using E = int;
};
template <>
struct encode_traits<double> {
  using E = long long;
};
// Order-preserving integer encoding of an IEEE-754 float: non-negative values
// keep their bit pattern; negative values are complemented so that the
// resulting integers compare in the same order as the original floats.
// NOTE(review): the (1 << 31) / sign-bit manipulation relies on two's
// complement int behavior — well-defined on NVCC targets, but flagged here.
HDI int encode(float val)
{
  int i = detail::bit_cast<int>(val);
  return i >= 0 ? i : (1 << 31) | ~i;
}
// 64-bit analogue of encode(float) for doubles.
HDI long long encode(double val)
{
  std::int64_t i = detail::bit_cast<std::int64_t>(val);
  return i >= 0 ? i : (1ULL << 63) | ~i;
}
// Inverse of encode(float): recovers the original float bit pattern.
HDI float decode(int val)
{
  if (val < 0) val = (1 << 31) | ~val;
  return detail::bit_cast<float>(val);
}
// Inverse of encode(double): recovers the original double bit pattern.
HDI double decode(long long val)
{
  if (val < 0) val = (1ULL << 63) | ~val;
  return detail::bit_cast<double>(val);
}
// Float atomic max implemented as an integer atomicMax on the encoded value;
// returns the decoded previous value at `address`.
template <typename T, typename E>
DI T atomicMaxBits(T* address, T val)
{
  E old = atomicMax((E*)address, encode(val));
  return decode(old);
}
// Float atomic min implemented as an integer atomicMin on the encoded value;
// returns the decoded previous value at `address`.
template <typename T, typename E>
DI T atomicMinBits(T* address, T val)
{
  E old = atomicMin((E*)address, encode(val));
  return decode(old);
}
template <typename T, typename E>
RAFT_KERNEL decodeKernel(T* globalmin, T* globalmax, int ncols)
{
  // One thread per column: turn the order-preserving integer encodings stored
  // in globalmin/globalmax back into their floating-point values, in place.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  if (col >= ncols) return;
  globalmin[col] = decode(*(E*)&globalmin[col]);
  globalmax[col] = decode(*(E*)&globalmax[col]);
}
///@todo: implement a proper "fill" kernel
template <typename T, typename E>
RAFT_KERNEL minmaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val)
{
  // One thread per column: seed the min slot with +init_val and the max slot
  // with -init_val, both stored in their order-preserving integer encodings
  // so the subsequent atomicMin/Max operate on integers.
  const int col = threadIdx.x + blockIdx.x * blockDim.x;
  if (col < ncols) {
    *(E*)&globalmin[col] = encode(init_val);
    *(E*)&globalmax[col] = encode(-init_val);
  }
}
template <typename T, typename E>
RAFT_KERNEL minmaxKernel(const T* data,
                         const unsigned int* rowids,
                         const unsigned int* colids,
                         int nrows,
                         int ncols,
                         int row_stride,
                         T* g_min,
                         T* g_max,
                         T* sampledcols,
                         T init_min_val,
                         int batch_ncols,
                         int num_batches)
{
  // Computes per-column min/max of a col-major matrix, processing
  // `batch_ncols` columns at a time so the two shared-memory arrays
  // (encoded min and max per column) fit in the available shared memory.
  // g_min/g_max must hold encoded init values (see minmaxInitKernel);
  // results remain encoded and are finalized by decodeKernel.
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  extern __shared__ char shmem[];
  T* s_min = (T*)shmem;
  T* s_max = (T*)(shmem + sizeof(T) * batch_ncols);
  // The final batch may cover fewer than batch_ncols columns.
  int last_batch_ncols = ncols % batch_ncols;
  if (last_batch_ncols == 0) { last_batch_ncols = batch_ncols; }
  int orig_batch_ncols = batch_ncols;
  for (int batch_id = 0; batch_id < num_batches; batch_id++) {
    if (batch_id == num_batches - 1) { batch_ncols = last_batch_ncols; }
    // Re-seed the shared accumulators with encoded +/- init_min_val.
    for (int i = threadIdx.x; i < batch_ncols; i += blockDim.x) {
      *(E*)&s_min[i] = encode(init_min_val);
      *(E*)&s_max[i] = encode(-init_min_val);
    }
    // Barrier: shared accumulators must be initialized before any atomics.
    __syncthreads();
    // Grid-stride over all (row, col) cells of this column batch.
    for (int i = tid; i < nrows * batch_ncols; i += blockDim.x * gridDim.x) {
      int col = (batch_id * orig_batch_ncols) + (i / nrows);
      int row = i % nrows;
      // Optional row/col index indirection for subsampling.
      if (colids != nullptr) { col = colids[col]; }
      if (rowids != nullptr) { row = rowids[row]; }
      int index = row + col * row_stride;
      T coldata = data[index];
      // NaNs are skipped so they never poison the min/max.
      if (!isnan(coldata)) {
        // Min max values are saved in shared memory and global memory as per the shuffled colids.
        atomicMinBits<T, E>(&s_min[(int)(i / nrows)], coldata);
        atomicMaxBits<T, E>(&s_max[(int)(i / nrows)], coldata);
      }
      // NOTE(review): for num_batches > 1 this index looks like it should
      // also scale by nrows; appears correct only single-batch — TODO confirm.
      if (sampledcols != nullptr) { sampledcols[batch_id * orig_batch_ncols + i] = coldata; }
    }
    // Barrier: all shared-memory atomics must land before the global flush.
    __syncthreads();
    // finally, perform global mem atomics
    for (int j = threadIdx.x; j < batch_ncols; j += blockDim.x) {
      atomicMinBits<T, E>(&g_min[batch_id * orig_batch_ncols + j], decode(*(E*)&s_min[j]));
      atomicMaxBits<T, E>(&g_max[batch_id * orig_batch_ncols + j], decode(*(E*)&s_max[j]));
    }
    // Barrier: shared memory is reused by the next batch's re-initialization.
    __syncthreads();
  }
}
/**
* @brief Computes min/max across every column of the input matrix, as well as
* optionally allow to subsample based on the given row/col ID mapping vectors
*
* @tparam T the data type
* @tparam TPB number of threads per block
* @param data input data
* @param rowids actual row ID mappings. It is of length nrows. If you want to
* skip this index lookup entirely, pass nullptr
* @param colids actual col ID mappings. It is of length ncols. If you want to
* skip this index lookup entirely, pass nullptr
* @param nrows number of rows of data to be worked upon. The actual rows of the
* input "data" can be bigger than this!
* @param ncols number of cols of data to be worked upon. The actual cols of the
* input "data" can be bigger than this!
* @param row_stride stride (in number of elements) between 2 adjacent columns
* @param globalmin final col-wise global minimum (size = ncols)
* @param globalmax final col-wise global maximum (size = ncols)
* @param sampledcols output sampled data. Pass nullptr if you don't need this
* @param stream cuda stream
* @note This method makes the following assumptions:
* 1. input and output matrices are assumed to be col-major
* 2. ncols is small enough to fit the whole of min/max values across all cols
* in shared memory
*/
template <typename T, int TPB = 512>
void minmax(const T* data,
            const unsigned* rowids,
            const unsigned* colids,
            int nrows,
            int ncols,
            int row_stride,
            T* globalmin,
            T* globalmax,
            T* sampledcols,
            cudaStream_t stream)
{
  using E    = typename encode_traits<T>::E;
  int nblks  = raft::ceildiv(ncols, TPB);
  T init_val = std::numeric_limits<T>::max();
  // Seed the outputs with encoded +init_val / -init_val so the integer
  // atomicMin/Max in minmaxKernel work correctly.
  minmaxInitKernel<T, E><<<nblks, TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  nblks = raft::ceildiv(nrows * ncols, TPB);
  nblks = min(nblks, 65536);
  // Compute the batch_ncols, in [1, ncols] range, that meet the available
  // shared memory constraints (two encoded arrays of T per column).
  auto smemPerBlk = raft::getSharedMemPerBlock();
  int batch_ncols = min(ncols, (int)(smemPerBlk / (sizeof(T) * 2)));
  int num_batches = raft::ceildiv(ncols, batch_ncols);
  // FIX: removed a dead store that first sized smemSize from `ncols` and then
  // overwrote it; only the batch-sized value is ever used.
  size_t smemSize = sizeof(T) * 2 * batch_ncols;
  minmaxKernel<T, E><<<nblks, TPB, smemSize, stream>>>(data,
                                                       rowids,
                                                       colids,
                                                       nrows,
                                                       ncols,
                                                       row_stride,
                                                       globalmin,
                                                       globalmax,
                                                       sampledcols,
                                                       init_val,
                                                       batch_ncols,
                                                       num_batches);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  // Decode the integer representations back into T. The grid is larger than
  // the ceildiv(ncols, TPB) needed here; the kernel bounds-checks per thread.
  decodeKernel<T, E><<<nblks, TPB, 0, stream>>>(globalmin, globalmax, ncols);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
}; // end namespace detail
}; // end namespace stats
}; // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/scores.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance.cuh>
#include <cuvs/spatial/knn/knn.cuh>
#include <memory>
#include <raft/linalg/eltwise.cuh>
#include <raft/linalg/power.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/stats/mean.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/count.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/reduce.h>
#define N_THREADS 512
namespace cuvs {
namespace stats {
namespace detail {
/**
* Calculates the "Coefficient of Determination" (R-Squared) score
* normalizing the sum of squared errors by the total sum of squares.
*
* This score indicates the proportionate amount of variation in an
* expected response variable is explained by the independent variables
* in a linear regression model. The larger the R-squared value, the
* more variability is explained by the linear regression model.
*
* @param y: Array of ground-truth response variables
* @param y_hat: Array of predicted response variables
* @param n: Number of elements in y and y_hat
* @param stream: cuda stream
* @return: The R-squared value.
*/
template <typename math_t>
math_t r2_score(math_t* y, math_t* y_hat, int n, cudaStream_t stream)
{
  // Mean of the ground-truth responses (a single scalar on device).
  rmm::device_scalar<math_t> y_bar(stream);

  raft::stats::mean(y_bar.data(), y, 1, n, false, false, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // Sum of squared errors: (y[i] - y_hat[i])^2, element-wise.
  rmm::device_uvector<math_t> sse_arr(n, stream);

  raft::linalg::eltwiseSub(sse_arr.data(), y, y_hat, n, stream);
  raft::linalg::powerScalar(sse_arr.data(), sse_arr.data(), math_t(2.0), n, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  // Total sum of squares: (y[i] - mean(y))^2, element-wise.
  rmm::device_uvector<math_t> ssto_arr(n, stream);

  raft::linalg::subtractDevScalar(ssto_arr.data(), y, y_bar.data(), n, stream);
  raft::linalg::powerScalar(ssto_arr.data(), ssto_arr.data(), math_t(2.0), n, stream);
  RAFT_CUDA_TRY(cudaPeekAtLastError());

  thrust::device_ptr<math_t> d_sse  = thrust::device_pointer_cast(sse_arr.data());
  thrust::device_ptr<math_t> d_ssto = thrust::device_pointer_cast(ssto_arr.data());

  // Reduce both arrays on the given stream; these calls synchronize implicitly
  // since the results are returned to the host.
  math_t sse  = thrust::reduce(thrust::cuda::par.on(stream), d_sse, d_sse + n);
  math_t ssto = thrust::reduce(thrust::cuda::par.on(stream), d_ssto, d_ssto + n);

  // R^2 = 1 - SSE / SSTO.
  // NOTE(review): if all y are identical, ssto == 0 and this divides by zero.
  return 1.0 - sse / ssto;
}
/**
* @brief Compute accuracy of predictions. Useful for classification.
* @tparam math_t: data type for predictions (e.g., int for classification)
* @param[in] predictions: array of predictions (GPU pointer).
* @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer).
* @param[in] n: number of elements in each of predictions, ref_predictions.
* @param[in] stream: cuda stream.
* @return: Accuracy score in [0, 1]; higher is better.
*/
template <typename math_t>
float accuracy_score(const math_t* predictions,
                     const math_t* ref_predictions,
                     int n,
                     cudaStream_t stream)
{
  // Element-wise difference; a prediction is counted as correct iff the
  // difference from the reference is exactly zero.
  rmm::device_uvector<math_t> diffs(n, stream);

  // TODO could write a kernel instead
  raft::linalg::eltwiseSub(diffs.data(), predictions, ref_predictions, n, stream);
  RAFT_CUDA_TRY(cudaGetLastError());

  unsigned long long n_correct =
    thrust::count(thrust::cuda::par.on(stream), diffs.data(), diffs.data() + n, 0);

  // Fraction of matching predictions in [0, 1].
  return n_correct * 1.0f / n;
}
/**
 * @brief Accumulates sum(|diff|) and sum(diff^2) into tmp_sums and writes
 * per-element absolute differences to abs_diffs (used later for the median).
 * Each block first reduces into shared memory, then lane-0-style loops push
 * the two partial sums into global memory with atomics.
 */
template <typename T>
RAFT_KERNEL reg_metrics_kernel(
  const T* predictions, const T* ref_predictions, int n, double* abs_diffs, double* tmp_sums)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  // shmem[0]: block-local sum of |diff|; shmem[1]: block-local sum of diff^2.
  __shared__ double shmem[2];  // {abs_difference_sum, squared difference sum}
  for (int i = threadIdx.x; i < 2; i += blockDim.x) {
    shmem[i] = 0;
  }
  __syncthreads();

  // Grid-stride loop over all n elements.
  for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
    double diff     = predictions[i] - ref_predictions[i];
    double abs_diff = abs(diff);
    raft::myAtomicAdd(&shmem[0], abs_diff);
    raft::myAtomicAdd(&shmem[1], diff * diff);
    // update absolute difference in global memory for subsequent abs. median computation
    abs_diffs[i] = abs_diff;
  }
  __syncthreads();

  // Update tmp_sum w/ total abs_difference_sum and squared difference sum.
  for (int i = threadIdx.x; i < 2; i += blockDim.x) {
    raft::myAtomicAdd(&tmp_sums[i], shmem[i]);
  }
}
/**
* @brief Compute regression metrics mean absolute error, mean squared error, median absolute error
* @tparam T: data type for predictions (e.g., float or double for regression).
* @param[in] predictions: array of predictions (GPU pointer).
* @param[in] ref_predictions: array of reference (ground-truth) predictions (GPU pointer).
* @param[in] n: number of elements in each of predictions, ref_predictions. Should be > 0.
* @param[in] stream: cuda stream.
* @param[out] mean_abs_error: Mean Absolute Error. Sum over n of (|predictions[i] -
* ref_predictions[i]|) / n.
* @param[out] mean_squared_error: Mean Squared Error. Sum over n of ((predictions[i] -
* ref_predictions[i])^2) / n.
* @param[out] median_abs_error: Median Absolute Error. Median of |predictions[i] -
* ref_predictions[i]| for i in [0, n).
*/
template <typename T>
void regression_metrics(const T* predictions,
                        const T* ref_predictions,
                        int n,
                        cudaStream_t stream,
                        double& mean_abs_error,
                        double& mean_squared_error,
                        double& median_abs_error)
{
  std::vector<double> mean_errors(2);
  std::vector<double> h_sorted_abs_diffs(n);
  int thread_cnt = 256;
  int block_cnt  = raft::ceildiv(n, thread_cnt);

  // Fix: rmm::device_uvector's constructor takes a number of *elements*, not
  // bytes. The previous code passed n * sizeof(double) (resp. 2 *
  // sizeof(double)) and over-allocated every buffer by 8x.
  rmm::device_uvector<double> abs_diffs_array(n, stream);
  rmm::device_uvector<double> sorted_abs_diffs(n, stream);
  rmm::device_uvector<double> tmp_sums(2, stream);
  RAFT_CUDA_TRY(cudaMemsetAsync(tmp_sums.data(), 0, 2 * sizeof(double), stream));

  // Accumulate sum(|diff|) and sum(diff^2) into tmp_sums and record |diff|
  // per element for the median computation below.
  reg_metrics_kernel<T><<<block_cnt, thread_cnt, 0, stream>>>(
    predictions, ref_predictions, n, abs_diffs_array.data(), tmp_sums.data());
  RAFT_CUDA_TRY(cudaGetLastError());
  raft::update_host(&mean_errors[0], tmp_sums.data(), 2, stream);
  raft::interruptible::synchronize(stream);

  mean_abs_error     = mean_errors[0] / n;
  mean_squared_error = mean_errors[1] / n;

  // Compute median error: sort abs_diffs_array and pick the middle value(s).
  char* temp_storage        = nullptr;
  size_t temp_storage_bytes = 0;  // set by the size query below
  // First call with a null storage pointer only queries the required
  // temporary-storage size.
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortKeys((void*)temp_storage,
                                               temp_storage_bytes,
                                               abs_diffs_array.data(),
                                               sorted_abs_diffs.data(),
                                               n,
                                               0,
                                               8 * sizeof(double),
                                               stream));
  rmm::device_uvector<char> temp_storage_v(temp_storage_bytes, stream);
  temp_storage = temp_storage_v.data();

  // Second call performs the actual sort.
  RAFT_CUDA_TRY(cub::DeviceRadixSort::SortKeys((void*)temp_storage,
                                               temp_storage_bytes,
                                               abs_diffs_array.data(),
                                               sorted_abs_diffs.data(),
                                               n,
                                               0,
                                               8 * sizeof(double),
                                               stream));
  raft::update_host(h_sorted_abs_diffs.data(), sorted_abs_diffs.data(), n, stream);
  raft::interruptible::synchronize(stream);

  int middle = n / 2;
  if (n % 2 == 1) {
    median_abs_error = h_sorted_abs_diffs[middle];
  } else {
    // Even count: average the two central values.
    median_abs_error = (h_sorted_abs_diffs[middle] + h_sorted_abs_diffs[middle - 1]) / 2;
  }
}
} // namespace detail
} // namespace stats
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/meanvar.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/reduce.cuh>
#include <raft/util/cuda_utils.cuh>
namespace raft::stats::detail {
/**
 * Streaming mean/variance aggregate (weight, mean, sum of squared deviations),
 * combinable in parallel via the monoid operations below.
 */
template <typename T>
class mean_var {
 private:
  T w;  // total weight (count of aggregated values)
  T m;  // running weighted mean
  T s;  // sum of squared deviations from the mean (a.k.a. "M2")

 public:
  /** Monoidal neutral. */
  HDI mean_var() : w(0.0), m(0.0), s(0.0) {}

  /** Lift a single value. */
  HDI explicit mean_var(T x) : w(1.0), m(x), s(0.0) {}

  /**
   * Monoidal binary op: combine means and vars of two sets.
   * (associative and commutative)
   */
  friend HDI auto operator+(mean_var<T> a, mean_var<T> const& b) -> mean_var<T>
  {
    a += b;
    return a;
  }

  /**
   * Combine means and vars of two sets.
   *
   * Similar to:
   * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
   */
  HDI auto operator+=(mean_var<T> const& b) & -> mean_var<T>&
  {
    mean_var<T>& a(*this);
    T cw = a.w + b.w;
    // Both aggregates are empty: nothing to combine (also avoids div by 0).
    if (cw == 0) return a;
    T aw_frac = a.w / cw;
    T bw_frac = b.w / cw;
    a.w       = cw;
    T d       = a.m - b.m;
    // cw * (d*aw_frac) * (d*bw_frac) == d^2 * a.w * b.w / cw -- the standard
    // cross term of the parallel variance combination.
    a.s += b.s + cw * (d * aw_frac) * (d * bw_frac);
    a.m = a.m * aw_frac + b.m * bw_frac;
    return a;
  }

  /** Get the computed mean. */
  HDI auto mean() const -> T { return m; }

  /**
   * @brief Get the computed variance.
   *
   * @param [in] sample whether to produce sample variance (divide by `N - 1` instead of `N`).
   * @return variance
   */
  HDI auto var(bool sample) const -> T { return s / max(T(1.0), sample ? w - T(1.0) : w); }

  // Field-wise copy from a volatile instance; used when reading partial
  // aggregates published in global memory by other blocks.
  HDI void load(volatile mean_var<T>* address)
  {
    this->m = address->m;
    this->s = address->s;
    this->w = address->w;
  }

  // Field-wise copy to a volatile instance; used to publish this aggregate
  // in global memory for other blocks.
  HDI void store(volatile mean_var<T>* address)
  {
    address->m = this->m;
    address->s = this->s;
    address->w = this->w;
  }
};
/*
NB: current implementation here is not optimal, especially the rowmajor version;
leaving this for further work (perhaps, as a more generic "linewiseReduce").
Vectorized loads/stores could speed things up a lot.
*/
/**
* meanvar kernel - row-major version
*
* Assumptions:
*
* 1. blockDim.x == raft::WarpSize
* 2. Dimension X goes along columns (D)
* 3. Dimension Y goes along rows (N)
*
*
* @tparam T element type
* @tparam I indexing type
* @tparam BlockSize must be equal to blockDim.x * blockDim.y * blockDim.z
* @param data input data
* @param mvs meanvars -- output
* @param locks guards for updating meanvars
* @param len total length of input data (N * D)
* @param D number of columns in the input data.
*/
template <typename T, typename I, int BlockSize>
RAFT_KERNEL __launch_bounds__(BlockSize)
  meanvar_kernel_rowmajor(const T* data, volatile mean_var<T>* mvs, I len, I D) /* see note */
  // NOTE: signature kept as below; comment block retained for orientation only.
template <typename T, typename I, int BlockSize>
RAFT_KERNEL __launch_bounds__(BlockSize)
  meanvar_kernel_colmajor(T* mean, T* var, const T* data, I D, I N, bool sample)
{
  using BlockReduce = cub::BlockReduce<mean_var<T>, BlockSize>;
  __shared__ typename BlockReduce::TempStorage shm;

  // One block per column; the column's data starts at data + N * blockIdx.x.
  const T* col = data + N * blockIdx.x;

  // Each thread folds a strided subset of the column into a local aggregate.
  mean_var<T> local;
  for (I row = threadIdx.x; row < N; row += BlockSize) {
    local += mean_var<T>(col[row]);
  }

  // Block-wide combination of the per-thread aggregates.
  mean_var<T> total = BlockReduce(shm).Sum(local);
  if (threadIdx.x == 0) {
    mean[blockIdx.x] = total.mean();
    var[blockIdx.x]  = total.var(sample);
  }
}
template <typename T, typename I>
RAFT_KERNEL meanvar_kernel_fill(T* mean, T* var, const mean_var<T>* aggr, I D, bool sample)
{
  // One thread per column: unpack the final aggregate into mean/var outputs.
  const I col = threadIdx.x + blockDim.x * blockIdx.x;
  if (col < D) {
    const mean_var<T> mv = aggr[col];
    mean[col] = mv.mean();
    var[col]  = mv.var(sample);
  }
}
/**
 * @brief Compute per-column mean and variance of an N x D matrix.
 *
 * Row-major path: a grid of blocks accumulates partial aggregates per column
 * and merges them in global memory (guarded by per-column-group locks), then a
 * fill kernel extracts mean/var. Col-major path: one block per column does the
 * whole reduction via CUB.
 */
template <typename T, typename I = int, int BlockSize = 256>
void meanvar(
  T* mean, T* var, const T* data, I D, I N, bool sample, bool rowMajor, cudaStream_t stream)
{
  if (rowMajor) {
    static_assert(BlockSize >= raft::WarpSize,
                  "Block size must be not smaller than the warp size.");
    // x spans columns (one warp wide), y spans rows.
    const dim3 bs(WarpSize, BlockSize / raft::WarpSize, 1);
    dim3 gs(raft::ceildiv<decltype(bs.x)>(D, bs.x), raft::ceildiv<decltype(bs.y)>(N, bs.y), 1);

    // Don't create more blocks than necessary to occupy the GPU
    int occupancy;
    RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &occupancy, meanvar_kernel_rowmajor<T, I, BlockSize>, BlockSize, 0));
    gs.y =
      std::min(gs.y, raft::ceildiv<decltype(gs.y)>(occupancy * getMultiProcessorCount(), gs.x));

    // Global memory: one mean_var<T> for each column
    // one lock per all blocks working on the same set of columns
    rmm::device_buffer buf(sizeof(mean_var<T>) * D + sizeof(int) * gs.x, stream);
    RAFT_CUDA_TRY(cudaMemsetAsync(buf.data(), 0, buf.size(), stream));
    mean_var<T>* mvs = static_cast<mean_var<T>*>(buf.data());
    // Locks live directly after the D aggregates in the same buffer.
    int* locks = static_cast<int*>(static_cast<void*>(mvs + D));

    // Guard against index overflow: the kernel iterates over all N*D elements
    // with the (possibly 32-bit) index type I.
    const uint64_t len = uint64_t(D) * uint64_t(N);
    ASSERT(len <= uint64_t(std::numeric_limits<I>::max()), "N * D does not fit the indexing type");
    meanvar_kernel_rowmajor<T, I, BlockSize><<<gs, bs, 0, stream>>>(data, mvs, locks, len, D);
    meanvar_kernel_fill<T, I>
      <<<raft::ceildiv<I>(D, BlockSize), BlockSize, 0, stream>>>(mean, var, mvs, D, sample);
  } else {
    meanvar_kernel_colmajor<T, I, BlockSize>
      <<<D, BlockSize, 0, stream>>>(mean, var, data, D, N, sample);
  }
  RAFT_CHECK_CUDA(stream);
}
}; // namespace raft::stats::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/batched/information_criterion.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/linalg/unary_op.cuh>
#include <raft/stats/stats_types.hpp>
#include <cmath>
namespace cuvs {
namespace stats {
namespace batched {
namespace detail {
/**
* Compute the given type of information criterion
*
* @note: it is safe to do the computation in-place (i.e give same pointer
* as input and output)
*
* @param[out] d_ic Information criterion to be returned for each
* series (device)
* @param[in] d_loglikelihood Log-likelihood for each series (device)
* @param[in] ic_type Type of criterion to compute. See IC_Type
* @param[in] n_params Number of parameters in the model
* @param[in] batch_size Number of series in the batch
* @param[in] n_samples Number of samples in each series
* @param[in] stream CUDA stream
*/
template <typename ScalarT, typename IdxT>
void information_criterion(ScalarT* d_ic,
                           const ScalarT* d_loglikelihood,
                           IC_Type ic_type,
                           IdxT n_params,
                           IdxT batch_size,
                           IdxT n_samples,
                           cudaStream_t stream)
{
  // Penalty ("base") term of the criterion; depends only on the number of
  // parameters N and number of samples T, not on the likelihood.
  ScalarT ic_base{};
  ScalarT N = static_cast<ScalarT>(n_params);
  ScalarT T = static_cast<ScalarT>(n_samples);
  switch (ic_type) {
    case AIC: ic_base = (ScalarT)2.0 * N; break;
    case AICc:
      // AICc = AIC plus a small-sample correction.
      // NOTE(review): divides by (T - N - 1); assumes T > N + 1 -- confirm callers.
      ic_base = (ScalarT)2.0 * (N + (N * (N + (ScalarT)1.0)) / (T - N - (ScalarT)1.0));
      break;
    case BIC: ic_base = std::log(T) * N; break;
      // NOTE(review): no default case -- an unrecognized ic_type leaves
      // ic_base value-initialized to 0, silently producing -2*loglikelihood.
  }

  /* Compute information criterion from log-likelihood and base term */
  raft::linalg::unaryOp(
    d_ic,
    d_loglikelihood,
    batch_size,
    [=] __device__(ScalarT loglike) { return ic_base - (ScalarT)2.0 * loglike; },
    stream);
}
} // namespace detail
} // namespace batched
} // namespace stats
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail | rapidsai_public_repos/cuvs/cpp/include/cuvs/stats/detail/batched/silhouette_score.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../silhouette_score.cuh"
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/cuda_stream_pool.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/device_atomics.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/reduce.h>
namespace cuvs {
namespace stats {
namespace batched {
namespace detail {
/**
* This kernel initializes matrix b (n_rows * n_labels)
* For each label that the corresponding row is not a part of is initialized as 0
* If the corresponding row is the only sample in its label, again 0
* Only if the there are > 1 samples in the label, row is initialized to max
*/
template <typename value_t, typename value_idx, typename label_idx>
RAFT_KERNEL fill_b_kernel(value_t* b,
                          const label_idx* y,
                          value_idx n_rows,
                          label_idx n_labels,
                          const value_idx* cluster_counts)
{
  // 2D launch: idx indexes samples (rows of b), idy indexes labels (columns).
  value_idx idx = threadIdx.x + blockIdx.x * blockDim.x;
  label_idx idy = threadIdx.y + blockIdx.y * blockDim.y;

  if (idx >= n_rows || idy >= n_labels) { return; }

  auto row_cluster       = y[idx];
  auto col_cluster_count = cluster_counts[idy];

  // b for own cluster should be max value
  // so that it does not interfere with min operator
  // b is also max if col cluster count is 0
  // however, b is 0 if self cluster count is 1
  if (row_cluster == idy || col_cluster_count == 0) {
    if (cluster_counts[row_cluster] == 1) {
      // Singleton cluster: the sample's silhouette is defined as 0.
      b[idx * n_labels + idy] = 0;
    } else {
      b[idx * n_labels + idy] = std::numeric_limits<value_t>::max();
    }
  } else {
    // Other non-empty clusters start at 0 and are accumulated into later.
    b[idx * n_labels + idy] = 0;
  }
}
/**
* This kernel does an elementwise sweep of chunked pairwise distance matrix
* By knowing the offsets of the chunked pairwise distance matrix in the
* global pairwise distance matrix, we are able to calculate
* intermediate values of a and b for the rows and columns present in the
* current chunked pairwise distance matrix.
*/
template <typename value_t, typename value_idx, typename label_idx>
RAFT_KERNEL compute_chunked_a_b_kernel(value_t* a,
                                       value_t* b,
                                       value_idx row_offset,
                                       value_idx col_offset,
                                       const label_idx* y,
                                       label_idx n_labels,
                                       const value_idx* cluster_counts,
                                       const value_t* distances,
                                       value_idx dist_rows,
                                       value_idx dist_cols)
{
  // Local coordinates within the chunked (tile) distance matrix.
  value_idx row_id = threadIdx.x + blockIdx.x * blockDim.x;
  value_idx col_id = threadIdx.y + blockIdx.y * blockDim.y;

  // these are global offsets of current element
  // in the full pairwise distance matrix
  value_idx pw_row_id = row_id + row_offset;
  value_idx pw_col_id = col_id + col_offset;

  // Skip out-of-tile threads and the self-distance diagonal.
  if (row_id >= dist_rows || col_id >= dist_cols || pw_row_id == pw_col_id) { return; }

  auto row_cluster = y[pw_row_id];
  // Singleton clusters are excluded (their silhouette is fixed at 0).
  if (cluster_counts[row_cluster] == 1) { return; }

  auto col_cluster        = y[pw_col_id];
  auto col_cluster_counts = cluster_counts[col_cluster];

  if (col_cluster == row_cluster) {
    // Intra-cluster: accumulate mean distance to own cluster (excluding self,
    // hence the count - 1 divisor) into a.
    atomicAdd(&a[pw_row_id], distances[row_id * dist_cols + col_id] / (col_cluster_counts - 1));
  } else {
    // Inter-cluster: accumulate mean distance to each other cluster into b.
    atomicAdd(&b[pw_row_id * n_labels + col_cluster],
              distances[row_id * dist_cols + col_id] / col_cluster_counts);
  }
}
template <typename value_idx, typename label_idx>
rmm::device_uvector<value_idx> get_cluster_counts(raft::resources const& handle,
                                                  const label_idx* y,
                                                  value_idx& n_rows,
                                                  label_idx& n_labels)
{
  auto stream = resource::get_cuda_stream(handle);

  // One counter per label, plus a scratch buffer required by countLabels.
  rmm::device_uvector<value_idx> counts(n_labels, stream);
  rmm::device_uvector<char> scratch(1, stream);

  raft::stats::detail::countLabels(y, counts.data(), n_rows, n_labels, scratch, stream);
  return counts;
}
template <typename value_t, typename value_idx>
rmm::device_uvector<value_t> get_pairwise_distance(raft::resources const& handle,
                                                   const value_t* left_begin,
                                                   const value_t* right_begin,
                                                   value_idx& n_left_rows,
                                                   value_idx& n_right_rows,
                                                   value_idx& n_cols,
                                                   cuvs::distance::DistanceType metric,
                                                   cudaStream_t stream)
{
  // Output tile (n_left_rows x n_right_rows) allocated on the per-chunk stream.
  rmm::device_uvector<value_t> distances(n_left_rows * n_right_rows, stream);
  // NOTE(review): pairwise_distance receives only the handle, not `stream`;
  // presumably it executes on the handle's stream -- confirm this is properly
  // ordered with the allocation stream above.
  cuvs::distance::pairwise_distance(
    handle, left_begin, right_begin, distances.data(), n_left_rows, n_right_rows, n_cols, metric);
  return distances;
}
template <typename value_t, typename value_idx, typename label_idx>
void compute_chunked_a_b(raft::resources const& handle,
                         value_t* a,
                         value_t* b,
                         value_idx& row_offset,
                         value_idx& col_offset,
                         const label_idx* y,
                         label_idx& n_labels,
                         const value_idx* cluster_counts,
                         const value_t* distances,
                         value_idx& dist_rows,
                         value_idx& dist_cols,
                         cudaStream_t stream)
{
  // 2D launch over the distance tile: x spans rows, y spans columns,
  // capped at 32 threads per dimension.
  dim3 threads(std::min(dist_rows, 32), std::min(dist_cols, 32));
  dim3 blocks(raft::ceildiv(dist_rows, (value_idx)threads.x),
              raft::ceildiv(dist_cols, (value_idx)threads.y));

  detail::compute_chunked_a_b_kernel<<<blocks, threads, 0, stream>>>(
    a, b, row_offset, col_offset, y, n_labels, cluster_counts, distances, dist_rows, dist_cols);
}
/**
 * @brief Batched (chunked) silhouette score: tiles the full n_rows x n_rows
 * pairwise-distance matrix into chunk x chunk pieces, each processed on a
 * stream from the handle's stream pool, accumulating the intra-cluster (a)
 * and inter-cluster (b) mean distances per sample.
 */
template <typename value_t, typename value_idx, typename label_idx>
value_t silhouette_score(
  raft::resources const& handle,
  const value_t* X,
  value_idx n_rows,
  value_idx n_cols,
  const label_idx* y,
  label_idx n_labels,
  value_t* scores,
  value_idx chunk,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Unexpanded)
{
  ASSERT(n_labels >= 2 && n_labels <= (n_rows - 1),
         "silhouette Score not defined for the given number of labels!");

  rmm::device_uvector<value_idx> cluster_counts = get_cluster_counts(handle, y, n_rows, n_labels);

  auto stream = resource::get_cuda_stream(handle);
  auto policy = resource::get_thrust_policy(handle);

  // b holds, per sample, the mean distance to each cluster (n_rows x n_labels).
  auto b_size = n_rows * n_labels;

  value_t *a_ptr, *b_ptr;
  rmm::device_uvector<value_t> a(0, stream);
  rmm::device_uvector<value_t> b(b_size, stream);

  b_ptr = b.data();

  // since a and silhouette score per sample are same size, reusing
  if (scores == nullptr || scores == NULL) {
    a.resize(n_rows, stream);
    a_ptr = a.data();
  } else {
    a_ptr = scores;
  }

  thrust::fill(policy, a_ptr, a_ptr + n_rows, 0);

  dim3 block_size(std::min(n_rows, 32), std::min(n_labels, 32));
  dim3 grid_size(raft::ceildiv(n_rows, (value_idx)block_size.x),
                 raft::ceildiv(n_labels, (label_idx)block_size.y));
  // Pre-seed b: own-cluster / empty-cluster entries get sentinel values so the
  // row-wise min below is not polluted.
  detail::fill_b_kernel<<<grid_size, block_size, 0, stream>>>(
    b_ptr, y, n_rows, n_labels, cluster_counts.data());

  // Make the pool streams wait for the initialization work on the main stream.
  resource::wait_stream_pool_on_stream(handle);

  auto n_iters = 0;

  // Tile the full pairwise-distance matrix; each (i, j) tile is dispatched to
  // a stream from the pool.
  for (value_idx i = 0; i < n_rows; i += chunk) {
    for (value_idx j = 0; j < n_rows; j += chunk) {
      ++n_iters;

      auto chunk_stream = resource::get_next_usable_stream(handle, i + chunk * j);

      const auto* left_begin  = X + (i * n_cols);
      const auto* right_begin = X + (j * n_cols);

      // Last tiles in each dimension may be smaller than `chunk`.
      auto n_left_rows  = (i + chunk) < n_rows ? chunk : (n_rows - i);
      auto n_right_rows = (j + chunk) < n_rows ? chunk : (n_rows - j);

      rmm::device_uvector<value_t> distances = get_pairwise_distance(
        handle, left_begin, right_begin, n_left_rows, n_right_rows, n_cols, metric, chunk_stream);

      compute_chunked_a_b(handle,
                          a_ptr,
                          b_ptr,
                          i,
                          j,
                          y,
                          n_labels,
                          cluster_counts.data(),
                          distances.data(),
                          n_left_rows,
                          n_right_rows,
                          chunk_stream);
    }
  }

  // Wait for all tiles to finish before the final reductions.
  resource::sync_stream_pool(handle);

  // calculating row-wise minimum in b
  // this prim only supports int indices for now
  raft::linalg::reduce<value_t, value_t, value_idx, raft::identity_op, raft::min_op>(
    b_ptr,
    b_ptr,
    n_labels,
    n_rows,
    std::numeric_limits<value_t>::max(),
    true,
    true,
    stream,
    false,
    raft::identity_op(),
    raft::min_op());

  // calculating the silhouette score per sample
  raft::linalg::binaryOp<value_t, raft::stats::detail::SilOp<value_t>, value_t, value_idx>(
    a_ptr, a_ptr, b_ptr, n_rows, raft::stats::detail::SilOp<value_t>(), stream);

  // Mean silhouette over all samples.
  return thrust::reduce(policy, a_ptr, a_ptr + n_rows, value_t(0)) / n_rows;
}
} // namespace detail
} // namespace batched
} // namespace stats
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/single_linkage_types.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
namespace cuvs::cluster::hierarchy {
/**
* Determines the method for computing the minimum spanning tree (MST)
*/
enum LinkageDistance {
/**
* Use a pairwise distance matrix as input to the mst. This
* is very fast and the best option for fairly small datasets (~50k data points)
*/
PAIRWISE = 0,
/**
* Construct a KNN graph as input to the mst and provide additional
* edges if the mst does not converge. This is slower but scales
* to very large datasets.
*/
KNN_GRAPH = 1
};
}; // namespace cuvs::cluster::hierarchy
// The code below is now considered legacy
namespace cuvs::cluster {
using hierarchy::LinkageDistance;
/**
 * Simple container object for consolidating linkage results. This closely
 * mirrors the trained instance variables populated in
 * Scikit-learn's AgglomerativeClustering estimator.
 * @tparam idx_t index type used for labels and the children matrix
 */
template <typename idx_t>
class linkage_output {
 public:
  idx_t m;            // number of data points; `labels` has length m
  idx_t n_clusters;   // number of clusters in the result
  idx_t n_leaves;     // number of leaves in the dendrogram
  idx_t n_connected_components;

  // TODO: These will be made private in a future release
  idx_t* labels;    // size: m
  idx_t* children;  // size: (m-1, 2)

  /** Device view over the per-sample cluster labels (length m). */
  raft::device_vector_view<idx_t> get_labels()
  {
    return raft::make_device_vector_view<idx_t>(labels, m);
  }

  /** Device view over the dendrogram children matrix, shape (m - 1, 2). */
  raft::device_matrix_view<idx_t> get_children()
  {
    return raft::make_device_matrix_view<idx_t>(children, m - 1, 2);
  }
};

// Concrete aliases for the two supported index widths.
class linkage_output_int : public linkage_output<int> {};
class linkage_output_int64 : public linkage_output<int64_t> {};
}; // namespace cuvs::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/kmeans_balanced_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/cluster/kmeans_types.hpp>
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/logger.hpp>
#include <raft/random/rng_state.hpp>
namespace cuvs::cluster::kmeans_balanced {
/**
* Simple object to specify hyper-parameters to the balanced k-means algorithm.
*
* The following metrics are currently supported in k-means balanced:
* - InnerProduct
* - L2Expanded
* - L2SqrtExpanded
*/
struct kmeans_balanced_params : kmeans_base_params {
  /**
   * Number of training iterations
   */
  uint32_t n_iters = 20;
};
} // namespace cuvs::cluster::kmeans_balanced
namespace cuvs::cluster {
using kmeans_balanced::kmeans_balanced_params;
} // namespace cuvs::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/kmeans_deprecated.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/cluster/detail/kmeans_deprecated.cuh>
namespace cuvs::cluster {
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param residual On exit, residual sum of squares (sum of squares
* of distances between observation vectors and centroids).
* @param iters on exit, number of k-means iterations.
* @param seed random seed to be used.
* @return error flag
*/
template <typename index_type_t, typename value_type_t>
int kmeans(raft::resources const& handle,
           index_type_t n,
           index_type_t d,
           index_type_t k,
           value_type_t tol,
           index_type_t maxiter,
           const value_type_t* __restrict__ obs,
           index_type_t* __restrict__ codes,
           value_type_t& residual,
           index_type_t& iters,
           unsigned long long seed = 123456)
{
  // Thin public wrapper: all work is delegated to the detail implementation.
  return detail::kmeans<index_type_t, value_type_t>(
    handle, n, d, k, tol, maxiter, obs, codes, residual, iters, seed);
}
} // namespace cuvs::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/kmeans_types.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/logger.hpp>
#include <raft/random/rng_state.hpp>
namespace cuvs::cluster {
/** Base structure for parameters that are common to all k-means algorithms */
// Extended by algorithm-specific parameter structs (e.g. kmeans::KMeansParams below).
struct kmeans_base_params {
  /**
   * Metric to use for distance computation. The supported metrics can vary per algorithm.
   */
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2Expanded;
};
} // namespace cuvs::cluster
namespace cuvs::cluster::kmeans {
/**
* Simple object to specify hyper-parameters to the kmeans algorithm.
*/
struct KMeansParams : kmeans_base_params {
  /** How the initial centroids are chosen. */
  enum InitMethod {
    /**
     * Sample the centroids using the kmeans++ strategy
     */
    KMeansPlusPlus,
    /**
     * Sample the centroids uniformly at random
     */
    Random,
    /**
     * User provides the array of initial centroids
     */
    Array
  };
  /**
   * The number of clusters to form as well as the number of centroids to generate (default:8).
   */
  int n_clusters = 8;
  /**
   * Method for initialization, defaults to k-means++:
   * - InitMethod::KMeansPlusPlus (k-means++): Use scalable k-means++ algorithm
   * to select the initial cluster centers.
   * - InitMethod::Random (random): Choose 'n_clusters' observations (rows) at
   * random from the input data for the initial centroids.
   * - InitMethod::Array (ndarray): Use 'centroids' as initial cluster centers.
   */
  InitMethod init = KMeansPlusPlus;
  /**
   * Maximum number of iterations of the k-means algorithm for a single run.
   */
  int max_iter = 300;
  /**
   * Relative tolerance with regards to inertia to declare convergence.
   */
  double tol = 1e-4;
  /**
   * Verbosity level (a raft logger level; defaults to RAFT_LEVEL_INFO).
   */
  int verbosity = RAFT_LEVEL_INFO;
  /**
   * Seed to the random number generator.
   */
  raft::random::RngState rng_state{0};
  /**
   * Number of instance k-means algorithm will be run with different seeds.
   */
  int n_init = 1;
  /**
   * Oversampling factor for use in the k-means|| algorithm
   */
  double oversampling_factor = 2.0;
  /**
   * Batch size of input samples used to tile the sample-to-centroid 1NN
   * computation, which is useful to optimize/control the memory footprint.
   * The default tile is [batch_samples x n_clusters], i.e. when batch_centroids
   * is 0 the centroids are not tiled.
   */
  int batch_samples = 1 << 15;
  /**
   * if 0 then batch_centroids = n_clusters
   */
  int batch_centroids = 0;
  /**
   * NOTE(review): presumably enables an extra inertia-based convergence check per
   * iteration -- confirm against the detail implementation before documenting further.
   */
  bool inertia_check = false;
};
} // namespace cuvs::cluster::kmeans
namespace cuvs::cluster {
using kmeans::KMeansParams;
} // namespace cuvs::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/kmeans.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/cluster/detail/kmeans.cuh>
#include <cuvs/cluster/detail/kmeans_auto_find_k.cuh>
#include <cuvs/cluster/kmeans_types.hpp>
#include <optional>
#include <raft/core/kvp.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
namespace cuvs::cluster::kmeans {
/**
* Functor used for sampling centroids
*/
template <typename DataT, typename IndexT>
using SamplingOp = detail::SamplingOp<DataT, IndexT>;
/**
* Functor used to extract the index from a KeyValue pair
* storing both index and a distance.
*/
template <typename IndexT, typename DataT>
using KeyValueIndexOp = detail::KeyValueIndexOp<IndexT, DataT>;
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <cuvs/cluster/kmeans.cuh>
* #include <cuvs/cluster/kmeans_types.hpp>
* using namespace cuvs::cluster;
* ...
 * raft::resources handle;
* cuvs::cluster::KMeansParams params;
* int n_features = 15, inertia, n_iter;
* auto centroids = raft::make_device_matrix<float, int>(handle, params.n_clusters, n_features);
*
* kmeans::fit(handle,
* params,
* X,
* std::nullopt,
* centroids,
* raft::make_scalar_view(&inertia),
* raft::make_scalar_view(&n_iter));
* @endcode
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
template <typename DataT, typename IndexT>
void fit(raft::resources const& handle,
         const KMeansParams& params,
         raft::device_matrix_view<const DataT, IndexT> X,
         std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
         raft::device_matrix_view<DataT, IndexT> centroids,
         raft::host_scalar_view<DataT> inertia,
         raft::host_scalar_view<IndexT> n_iter)
{
  // Public entry point; the clustering itself is implemented in detail::kmeans_fit.
  detail::kmeans_fit<DataT, IndexT>(handle, params, X, sample_weight, centroids, inertia, n_iter);
}
/**
* @brief Predict the closest cluster each sample in X belongs to.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <cuvs/cluster/kmeans.cuh>
* #include <cuvs/cluster/kmeans_types.hpp>
* using namespace cuvs::cluster;
* ...
 * raft::resources handle;
* cuvs::cluster::KMeansParams params;
* int n_features = 15, inertia, n_iter;
* auto centroids = raft::make_device_matrix<float, int>(handle, params.n_clusters, n_features);
*
* kmeans::fit(handle,
* params,
* X,
* std::nullopt,
* centroids.view(),
* raft::make_scalar_view(&inertia),
* raft::make_scalar_view(&n_iter));
* ...
* auto labels = raft::make_device_vector<int, int>(handle, X.extent(0));
*
* kmeans::predict(handle,
* params,
* X,
* std::nullopt,
* centroids.view(),
* false,
* labels.view(),
 *                 raft::make_scalar_view(&inertia));
* @endcode
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X New data to predict.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[in] centroids Cluster centroids. The data must be in
* row-major format.
* [dim = n_clusters x n_features]
* @param[in] normalize_weight True if the weights should be normalized
* @param[out] labels Index of the cluster each sample in X
* belongs to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to
* their closest cluster center.
*/
template <typename DataT, typename IndexT>
void predict(raft::resources const& handle,
             const KMeansParams& params,
             raft::device_matrix_view<const DataT, IndexT> X,
             std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
             raft::device_matrix_view<const DataT, IndexT> centroids,
             raft::device_vector_view<IndexT, IndexT> labels,
             bool normalize_weight,
             raft::host_scalar_view<DataT> inertia)
{
  // Delegates to the detail implementation (note: `labels` precedes
  // `normalize_weight` in this signature, unlike the doc-comment ordering above).
  detail::kmeans_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, normalize_weight, inertia);
}
/**
* @brief Compute k-means clustering and predicts cluster index for each sample
* in the input.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <cuvs/cluster/kmeans.cuh>
* #include <cuvs/cluster/kmeans_types.hpp>
* using namespace cuvs::cluster;
* ...
 * raft::resources handle;
* cuvs::cluster::KMeansParams params;
* int n_features = 15, inertia, n_iter;
* auto centroids = raft::make_device_matrix<float, int>(handle, params.n_clusters, n_features);
* auto labels = raft::make_device_vector<int, int>(handle, X.extent(0));
*
* kmeans::fit_predict(handle,
* params,
* X,
* std::nullopt,
* centroids.view(),
* labels.view(),
* raft::make_scalar_view(&inertia),
* raft::make_scalar_view(&n_iter));
* @endcode
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must be
* in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids Optional
* [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] labels Index of the cluster each sample in X belongs
* to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
template <typename DataT, typename IndexT>
void fit_predict(raft::resources const& handle,
                 const KMeansParams& params,
                 raft::device_matrix_view<const DataT, IndexT> X,
                 std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                 std::optional<raft::device_matrix_view<DataT, IndexT>> centroids,
                 raft::device_vector_view<IndexT, IndexT> labels,
                 raft::host_scalar_view<DataT> inertia,
                 raft::host_scalar_view<IndexT> n_iter)
{
  // Equivalent to fit() followed by predict(); implemented in one pass in detail.
  detail::kmeans_fit_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, inertia, n_iter);
}
/**
* @brief Transform X to a cluster-distance space.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Cluster centroids. The data must be in row-major format.
* [dim = n_clusters x n_features]
* @param[out] X_new X transformed in the new space.
* [dim = n_samples x n_features]
*/
template <typename DataT, typename IndexT>
void transform(raft::resources const& handle,
               const KMeansParams& params,
               raft::device_matrix_view<const DataT, IndexT> X,
               raft::device_matrix_view<const DataT, IndexT> centroids,
               raft::device_matrix_view<DataT, IndexT> X_new)
{
  // mdspan overload; forwards directly to the detail implementation.
  detail::kmeans_transform<DataT, IndexT>(handle, params, X, centroids, X_new);
}
/**
 * @brief Transform X to a cluster-distance space (raw-pointer overload).
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 * @param[in] handle The raft handle.
 * @param[in] params Parameters for KMeans model.
 * @param[in] X Training instances, row-major [dim = n_samples x n_features]
 * @param[in] centroids Cluster centroids, row-major [dim = n_clusters x n_features]
 * @param[in] n_samples Number of rows in X
 * @param[in] n_features Number of columns in X
 * @param[out] X_new X transformed in the new space [dim = n_samples x n_features]
 */
template <typename DataT, typename IndexT>
void transform(raft::resources const& handle,
               const KMeansParams& params,
               const DataT* X,
               const DataT* centroids,
               IndexT n_samples,
               IndexT n_features,
               DataT* X_new)
{
  detail::kmeans_transform<DataT, IndexT>(
    handle, params, X, centroids, n_samples, n_features, X_new);
}
/**
* Automatically find the optimal value of k using a binary search.
* This method maximizes the Calinski-Harabasz Index while minimizing the per-cluster inertia.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <cuvs/cluster/kmeans.cuh>
* #include <cuvs/cluster/kmeans_types.hpp>
*
* #include <raft/random/make_blobs.cuh>
*
* using namespace cuvs::cluster;
*
* raft::handle_t handle;
* int n_samples = 100, n_features = 15, n_clusters = 10;
* auto X = raft::make_device_matrix<float, int>(handle, n_samples, n_features);
* auto labels = raft::make_device_vector<float, int>(handle, n_samples);
*
* raft::random::make_blobs(handle, X, labels, n_clusters);
*
* auto best_k = raft::make_host_scalar<int>(0);
* auto n_iter = raft::make_host_scalar<int>(0);
* auto inertia = raft::make_host_scalar<int>(0);
*
* kmeans::find_k(handle, X, best_k.view(), inertia.view(), n_iter.view(), n_clusters+1);
*
* @endcode
*
* @tparam idx_t indexing type (should be integral)
* @tparam value_t value type (should be floating point)
* @param handle raft handle
* @param X input observations (shape n_samples, n_dims)
* @param best_k best k found from binary search
* @param inertia inertia of best k found
* @param n_iter number of iterations used to find best k
* @param kmax maximum k to try in search
* @param kmin minimum k to try in search (should be >= 1)
* @param maxiter maximum number of iterations to run
* @param tol tolerance for early stopping convergence
*/
template <typename idx_t, typename value_t>
void find_k(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t> X,
            raft::host_scalar_view<idx_t> best_k,
            raft::host_scalar_view<value_t> inertia,
            raft::host_scalar_view<idx_t> n_iter,
            idx_t kmax,
            idx_t kmin    = 1,
            idx_t maxiter = 100,
            value_t tol   = 1e-3)
{
  // Binary search over k in [kmin, kmax]; see the detail implementation for the
  // Calinski-Harabasz / inertia trade-off logic.
  detail::find_k(handle, X, best_k, inertia, n_iter, kmax, kmin, maxiter, tol);
}
/**
* @brief Select centroids according to a sampling operation
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] isSampleCentroid Flag the sample chosen as initial centroid
* [dim = n_samples]
* @param[in] select_op The sampling operation used to select the centroids
* @param[out] inRankCp The sampled centroids
* [dim = n_selected_centroids x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void sample_centroids(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT> X,
                      raft::device_vector_view<DataT, IndexT> minClusterDistance,
                      raft::device_vector_view<std::uint8_t, IndexT> isSampleCentroid,
                      SamplingOp<DataT, IndexT>& select_op,
                      rmm::device_uvector<DataT>& inRankCp,
                      rmm::device_uvector<char>& workspace)
{
  // Forwards to the (camelCase) detail helper; inRankCp/workspace may be resized there.
  detail::sampleCentroids<DataT, IndexT>(
    handle, X, minClusterDistance, isSampleCentroid, select_op, inRankCp, workspace);
}
/**
* @brief Compute cluster cost
*
* @tparam DataT the type of data used for weights, distances.
* @tparam ReductionOpT the type of data used for the reduction operation.
*
* @param[in] handle The raft handle
* @param[in] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] clusterCost Resulting cluster cost
* @param[in] reduction_op The reduction operation used for the cost
*
*/
template <typename DataT, typename IndexT, typename ReductionOpT>
void cluster_cost(raft::resources const& handle,
                  raft::device_vector_view<DataT, IndexT> minClusterDistance,
                  rmm::device_uvector<char>& workspace,
                  raft::device_scalar_view<DataT> clusterCost,
                  ReductionOpT reduction_op)
{
  // The identity op is used as the per-element map, so the raw distances are
  // fed unchanged into `reduction_op`.
  detail::computeClusterCost(
    handle, minClusterDistance, workspace, clusterCost, raft::identity_op{}, reduction_op);
}
/**
* @brief Update centroids given current centroids and number of points assigned to each centroid.
* This function also produces a vector of RAFT key/value pairs containing the cluster assignment
* for each point and its distance.
*
* @tparam DataT
* @tparam IndexT
* @param[in] handle: Raft handle to use for managing library resources
* @param[in] X: input matrix (size n_samples, n_features)
 * @param[in] sample_weights: weight of each observation in X (size n_samples)
* @param[in] centroids: matrix of current centroids (size n_clusters, n_features)
* @param[in] labels: Iterator of labels (can also be a raw pointer)
* @param[out] weight_per_cluster: sum of sample weights per cluster (size n_clusters)
* @param[out] new_centroids: output matrix of updated centroids (size n_clusters, n_features)
*/
/*
 * NOTE: raft names are fully qualified here (raft::row_major,
 * raft::resource::get_cuda_stream): this header lives in namespace
 * cuvs::cluster::kmeans, where the unqualified forms do not resolve.
 */
template <typename DataT, typename IndexT, typename LabelsIterator>
void update_centroids(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT, raft::row_major> X,
                      raft::device_vector_view<const DataT, IndexT> sample_weights,
                      raft::device_matrix_view<const DataT, IndexT, raft::row_major> centroids,
                      LabelsIterator labels,
                      raft::device_vector_view<DataT, IndexT> weight_per_cluster,
                      raft::device_matrix_view<DataT, IndexT, raft::row_major> new_centroids)
{
  // TODO: Passing these into the algorithm doesn't really present much of a benefit
  // because they are being resized anyways.
  // ref https://github.com/rapidsai/raft/issues/930
  rmm::device_uvector<char> workspace(0, raft::resource::get_cuda_stream(handle));
  detail::update_centroids<DataT, IndexT>(
    handle, X, sample_weights, centroids, labels, weight_per_cluster, new_centroids, workspace);
}
/**
* @brief Compute distance for every sample to it's nearest centroid
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] metric Distance metric to use
* @param[in] batch_samples batch size for input data samples
* @param[in] batch_centroids batch size for input centroids
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void min_cluster_distance(raft::resources const& handle,
                          raft::device_matrix_view<const DataT, IndexT> X,
                          raft::device_matrix_view<DataT, IndexT> centroids,
                          raft::device_vector_view<DataT, IndexT> minClusterDistance,
                          raft::device_vector_view<DataT, IndexT> L2NormX,
                          rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
                          cuvs::distance::DistanceType metric,
                          int batch_samples,
                          int batch_centroids,
                          rmm::device_uvector<char>& workspace)
{
  // Tiled 1-NN distance computation; batch_samples/batch_centroids bound the
  // tile (and thus the temporary distance-matrix) size.
  detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                   X,
                                                   centroids,
                                                   minClusterDistance,
                                                   L2NormX,
                                                   L2NormBuf_OR_DistBuf,
                                                   metric,
                                                   batch_samples,
                                                   batch_centroids,
                                                   workspace);
}
/**
* @brief Calculates a <key, value> pair for every sample in input 'X' where key is an
* index of one of the 'centroids' (index of the nearest centroid) and 'value'
* is the distance between the sample and the 'centroid[key]'
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterAndDistance Distance vector that contains for every sample, the nearest
* centroid and it's distance
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] metric distance metric
* @param[in] batch_samples batch size of data samples
* @param[in] batch_centroids batch size of centroids
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void min_cluster_and_distance(
  raft::resources const& handle,
  raft::device_matrix_view<const DataT, IndexT> X,
  raft::device_matrix_view<const DataT, IndexT> centroids,
  raft::device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT> minClusterAndDistance,
  raft::device_vector_view<DataT, IndexT> L2NormX,
  rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
  cuvs::distance::DistanceType metric,
  int batch_samples,
  int batch_centroids,
  rmm::device_uvector<char>& workspace)
{
  // Like min_cluster_distance, but additionally records the index of the
  // nearest centroid as the key of each KeyValuePair.
  detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                      X,
                                                      centroids,
                                                      minClusterAndDistance,
                                                      L2NormX,
                                                      L2NormBuf_OR_DistBuf,
                                                      metric,
                                                      batch_samples,
                                                      batch_centroids,
                                                      workspace);
}
/**
* @brief Shuffle and randomly select 'n_samples_to_gather' from input 'in' and stores
* in 'out' does not modify the input
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] in The data to shuffle and gather
* [dim = n_samples x n_features]
* @param[out] out The sampled data
* [dim = n_samples_to_gather x n_features]
* @param[in] n_samples_to_gather Number of sample to gather
* @param[in] seed Seed for the shuffle
*
*/
template <typename DataT, typename IndexT>
void shuffle_and_gather(raft::resources const& handle,
                        raft::device_matrix_view<const DataT, IndexT> in,
                        raft::device_matrix_view<DataT, IndexT> out,
                        uint32_t n_samples_to_gather,
                        uint64_t seed)
{
  // Non-destructive sampling: `in` is left unmodified.
  detail::shuffleAndGather<DataT, IndexT>(handle, in, out, n_samples_to_gather, seed);
}
/**
* @brief Count the number of samples in each cluster
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] sampleCountInCluster The count for each centroid
* [dim = n_cluster]
*
*/
template <typename DataT, typename IndexT>
void count_samples_in_cluster(raft::resources const& handle,
                              const KMeansParams& params,
                              raft::device_matrix_view<const DataT, IndexT> X,
                              raft::device_vector_view<DataT, IndexT> L2NormX,
                              raft::device_matrix_view<DataT, IndexT> centroids,
                              rmm::device_uvector<char>& workspace,
                              raft::device_vector_view<DataT, IndexT> sampleCountInCluster)
{
  // Per-centroid sample counts; delegates to the detail implementation.
  detail::countSamplesInCluster<DataT, IndexT>(
    handle, params, X, L2NormX, centroids, workspace, sampleCountInCluster);
}
/**
* @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm.
*
* @see "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S.
* ACM-SIAM symposium on Discrete algorithms.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[out] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*/
template <typename DataT, typename IndexT>
void init_plus_plus(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<DataT, IndexT> centroids,
                    rmm::device_uvector<char>& workspace)
{
  // k-means++ seeding (Arthur & Vassilvitskii, 2007); see detail::kmeansPlusPlus.
  detail::kmeansPlusPlus<DataT, IndexT>(handle, params, X, centroids, workspace);
}
/**
* @brief Main function used to fit KMeans (after cluster initialization)
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
 * @param[in] sample_weights Weights for each observation in X.
 * [len = n_samples]
* [len = n_samples]
* @param[inout] centroids [in] Initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
* @param[in] workspace Temporary workspace buffer which can get resized
*/
template <typename DataT, typename IndexT>
void fit_main(raft::resources const& handle,
              const KMeansParams& params,
              raft::device_matrix_view<const DataT, IndexT> X,
              raft::device_vector_view<const DataT, IndexT> sample_weights,
              raft::device_matrix_view<DataT, IndexT> centroids,
              raft::host_scalar_view<DataT> inertia,
              raft::host_scalar_view<IndexT> n_iter,
              rmm::device_uvector<char>& workspace)
{
  // Main fit loop run *after* centroid initialization (e.g. init_plus_plus);
  // `centroids` must already contain the initial cluster centers on entry.
  detail::kmeans_fit_main<DataT, IndexT>(
    handle, params, X, sample_weights, centroids, inertia, n_iter, workspace);
}
}; // namespace cuvs::cluster::kmeans
namespace cuvs::cluster {
/**
* Note: All of the functions below in cuvs::cluster are deprecated and will
* be removed in a future release. Please use cuvs::cluster::kmeans instead.
*/
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
template <typename DataT, typename IndexT = int>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                raft::device_matrix_view<const DataT, IndexT> X,
                std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                raft::device_matrix_view<DataT, IndexT> centroids,
                raft::host_scalar_view<DataT> inertia,
                raft::host_scalar_view<IndexT> n_iter)
{
  // Deprecated alias (see note at top of namespace): use cuvs::cluster::kmeans::fit.
  kmeans::fit<DataT, IndexT>(handle, params, X, sample_weight, centroids, inertia, n_iter);
}
/**
 * @brief Raw-pointer overload of kmeans_fit (deprecated; see namespace note above).
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 * @param[in] handle The raft handle.
 * @param[in] params Parameters for KMeans model.
 * @param[in] X Training instances, row-major [dim = n_samples x n_features]
 * @param[in] sample_weight Weights for each observation [len = n_samples]
 * @param[inout] centroids Initial (in) / fitted (out) centroids [dim = n_clusters x n_features]
 * @param[in] n_samples Number of rows in X
 * @param[in] n_features Number of columns in X
 * @param[out] inertia Sum of squared distances to closest centers
 * @param[out] n_iter Number of iterations run
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                const DataT* X,
                const DataT* sample_weight,
                DataT* centroids,
                IndexT n_samples,
                IndexT n_features,
                DataT& inertia,
                IndexT& n_iter)
{
  kmeans::fit<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, n_samples, n_features, inertia, n_iter);
}
/**
* @brief Predict the closest cluster each sample in X belongs to.
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X New data to predict.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[in] centroids Cluster centroids. The data must be in
* row-major format.
* [dim = n_clusters x n_features]
* @param[in] normalize_weight True if the weights should be normalized
* @param[out] labels Index of the cluster each sample in X
* belongs to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to
* their closest cluster center.
*/
template <typename DataT, typename IndexT = int>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                    raft::device_matrix_view<const DataT, IndexT> centroids,
                    raft::device_vector_view<IndexT, IndexT> labels,
                    bool normalize_weight,
                    raft::host_scalar_view<DataT> inertia)
{
  // Deprecated alias (see note at top of namespace): use cuvs::cluster::kmeans::predict.
  kmeans::predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, normalize_weight, inertia);
}
/**
 * @brief Raw-pointer overload of kmeans_predict (deprecated; see namespace note above).
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 * @param[in] handle The raft handle.
 * @param[in] params Parameters for KMeans model.
 * @param[in] X New data to predict, row-major [dim = n_samples x n_features]
 * @param[in] sample_weight Weights for each observation [len = n_samples]
 * @param[in] centroids Cluster centroids, row-major [dim = n_clusters x n_features]
 * @param[in] n_samples Number of rows in X
 * @param[in] n_features Number of columns in X
 * @param[out] labels Cluster index per sample [len = n_samples]
 * @param[in] normalize_weight True if the weights should be normalized
 * @param[out] inertia Sum of squared distances to closest centers
 */
template <typename DataT, typename IndexT = int>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    const DataT* X,
                    const DataT* sample_weight,
                    const DataT* centroids,
                    IndexT n_samples,
                    IndexT n_features,
                    IndexT* labels,
                    bool normalize_weight,
                    DataT& inertia)
{
  kmeans::predict<DataT, IndexT>(handle,
                                 params,
                                 X,
                                 sample_weight,
                                 centroids,
                                 n_samples,
                                 n_features,
                                 labels,
                                 normalize_weight,
                                 inertia);
}
/**
* @brief Compute k-means clustering and predicts cluster index for each sample
* in the input.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must be
* in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids Optional
* [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] labels Index of the cluster each sample in X belongs
* to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        raft::device_matrix_view<const DataT, IndexT> X,
                        std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                        std::optional<raft::device_matrix_view<DataT, IndexT>> centroids,
                        raft::device_vector_view<IndexT, IndexT> labels,
                        raft::host_scalar_view<DataT> inertia,
                        raft::host_scalar_view<IndexT> n_iter)
{
  // Deprecated alias (see note at top of namespace): use cuvs::cluster::kmeans::fit_predict.
  kmeans::fit_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, inertia, n_iter);
}
/**
 * @brief Raw-pointer overload of kmeans_fit_predict (deprecated; see namespace note above).
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 * @param[in] handle The raft handle.
 * @param[in] params Parameters for KMeans model.
 * @param[in] X Training instances, row-major [dim = n_samples x n_features]
 * @param[in] sample_weight Weights for each observation [len = n_samples]
 * @param[inout] centroids Initial (in) / fitted (out) centroids [dim = n_clusters x n_features]
 * @param[in] n_samples Number of rows in X
 * @param[in] n_features Number of columns in X
 * @param[out] labels Cluster index per sample [len = n_samples]
 * @param[out] inertia Sum of squared distances to closest centers
 * @param[out] n_iter Number of iterations run
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        const DataT* X,
                        const DataT* sample_weight,
                        DataT* centroids,
                        IndexT n_samples,
                        IndexT n_features,
                        IndexT* labels,
                        DataT& inertia,
                        IndexT& n_iter)
{
  kmeans::fit_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, n_samples, n_features, labels, inertia, n_iter);
}
/**
* @brief Transform X to a cluster-distance space.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Cluster centroids. The data must be in row-major format.
* [dim = n_clusters x n_features]
* @param[out] X_new X transformed in the new space.
* [dim = n_samples x n_features]
*/
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      raft::device_matrix_view<const DataT, IndexT> X,
                      raft::device_matrix_view<const DataT, IndexT> centroids,
                      raft::device_matrix_view<DataT, IndexT> X_new)
{
  // Deprecated alias (see note at top of namespace): use cuvs::cluster::kmeans::transform.
  kmeans::transform<DataT, IndexT>(handle, params, X, centroids, X_new);
}
/**
 * @brief Raw-pointer overload of kmeans_transform (deprecated; see namespace note above).
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 * @param[in] handle The raft handle.
 * @param[in] params Parameters for KMeans model.
 * @param[in] X Input data, row-major [dim = n_samples x n_features]
 * @param[in] centroids Cluster centroids, row-major [dim = n_clusters x n_features]
 * @param[in] n_samples Number of rows in X
 * @param[in] n_features Number of columns in X
 * @param[out] X_new X transformed in the new space [dim = n_samples x n_features]
 */
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      const DataT* X,
                      const DataT* centroids,
                      IndexT n_samples,
                      IndexT n_features,
                      DataT* X_new)
{
  kmeans::transform<DataT, IndexT>(handle, params, X, centroids, n_samples, n_features, X_new);
}
// Re-exported functor aliases so callers of this namespace can name the sampling
// operation and the <key, value> index operation without referencing the kmeans
// implementation namespace directly.
template <typename DataT, typename IndexT>
using SamplingOp = kmeans::SamplingOp<DataT, IndexT>;

template <typename IndexT, typename DataT>
using KeyValueIndexOp = kmeans::KeyValueIndexOp<IndexT, DataT>;
/**
* @brief Select centroids according to a sampling operation
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
 * @param[in] minClusterDistance Distance for every sample to its nearest centroid
* [dim = n_samples]
* @param[in] isSampleCentroid Flag the sample chosen as initial centroid
* [dim = n_samples]
* @param[in] select_op The sampling operation used to select the centroids
* @param[out] inRankCp The sampled centroids
* [dim = n_selected_centroids x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void sampleCentroids(raft::resources const& handle,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<DataT, IndexT> minClusterDistance,
                     raft::device_vector_view<std::uint8_t, IndexT> isSampleCentroid,
                     SamplingOp<DataT, IndexT>& select_op,
                     rmm::device_uvector<DataT>& inRankCp,
                     rmm::device_uvector<char>& workspace)
{
  // Thin forwarding wrapper; sampling is implemented in kmeans::sample_centroids.
  kmeans::sample_centroids<DataT, IndexT>(
    handle, X, minClusterDistance, isSampleCentroid, select_op, inRankCp, workspace);
}
/**
* @brief Compute cluster cost
*
* @tparam DataT the type of data used for weights, distances.
* @tparam ReductionOpT the type of data used for the reduction operation.
*
* @param[in] handle The raft handle
 * @param[in] minClusterDistance Distance for every sample to its nearest centroid
* [dim = n_samples]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] clusterCost Resulting cluster cost
* @param[in] reduction_op The reduction operation used for the cost
*
*/
// @tparam IndexT the type of data used for indexing (not listed in the Doxygen block above).
template <typename DataT, typename IndexT, typename ReductionOpT>
void computeClusterCost(raft::resources const& handle,
                        raft::device_vector_view<DataT, IndexT> minClusterDistance,
                        rmm::device_uvector<char>& workspace,
                        raft::device_scalar_view<DataT> clusterCost,
                        ReductionOpT reduction_op)
{
  // Thin forwarding wrapper; the reduction is implemented in kmeans::cluster_cost.
  kmeans::cluster_cost(handle, minClusterDistance, workspace, clusterCost, reduction_op);
}
/**
* @brief Compute distance for every sample to it's nearest centroid
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void minClusterDistanceCompute(raft::resources const& handle,
                               const KMeansParams& params,
                               raft::device_matrix_view<const DataT, IndexT> X,
                               raft::device_matrix_view<DataT, IndexT> centroids,
                               raft::device_vector_view<DataT, IndexT> minClusterDistance,
                               raft::device_vector_view<DataT, IndexT> L2NormX,
                               rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
                               rmm::device_uvector<char>& workspace)
{
  // Forwarding wrapper: unpacks the metric and batching knobs from `params` for the
  // kmeans implementation, which processes X in batches of
  // (params.batch_samples x params.batch_centroids).
  kmeans::min_cluster_distance<DataT, IndexT>(handle,
                                              X,
                                              centroids,
                                              minClusterDistance,
                                              L2NormX,
                                              L2NormBuf_OR_DistBuf,
                                              params.metric,
                                              params.batch_samples,
                                              params.batch_centroids,
                                              workspace);
}
/**
* @brief Calculates a <key, value> pair for every sample in input 'X' where key is an
* index of one of the 'centroids' (index of the nearest centroid) and 'value'
* is the distance between the sample and the 'centroid[key]'
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterAndDistance Distance vector that contains for every sample, the nearest
* centroid and it's distance
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void minClusterAndDistanceCompute(
  raft::resources const& handle,
  const KMeansParams& params,
  raft::device_matrix_view<const DataT, IndexT> X,
  raft::device_matrix_view<const DataT, IndexT> centroids,
  raft::device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT> minClusterAndDistance,
  raft::device_vector_view<DataT, IndexT> L2NormX,
  rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
  rmm::device_uvector<char>& workspace)
{
  // Forwarding wrapper: unpacks the metric and batching knobs from `params` for the
  // kmeans implementation producing the <nearest-centroid, distance> pairs.
  kmeans::min_cluster_and_distance<DataT, IndexT>(handle,
                                                  X,
                                                  centroids,
                                                  minClusterAndDistance,
                                                  L2NormX,
                                                  L2NormBuf_OR_DistBuf,
                                                  params.metric,
                                                  params.batch_samples,
                                                  params.batch_centroids,
                                                  workspace);
}
/**
 * @brief Shuffle and randomly select 'n_samples_to_gather' samples from the input 'in' and
 * store them in 'out'; the input is not modified
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] in The data to shuffle and gather
* [dim = n_samples x n_features]
* @param[out] out The sampled data
* [dim = n_samples_to_gather x n_features]
* @param[in] n_samples_to_gather Number of sample to gather
* @param[in] seed Seed for the shuffle
*
*/
template <typename DataT, typename IndexT>
void shuffleAndGather(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT> in,
                      raft::device_matrix_view<DataT, IndexT> out,
                      uint32_t n_samples_to_gather,
                      uint64_t seed)
{
  // Thin forwarding wrapper; implemented in kmeans::shuffle_and_gather.
  kmeans::shuffle_and_gather<DataT, IndexT>(handle, in, out, n_samples_to_gather, seed);
}
/**
* @brief Count the number of samples in each cluster
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] sampleCountInCluster The count for each centroid
* [dim = n_cluster]
*
*/
template <typename DataT, typename IndexT>
void countSamplesInCluster(raft::resources const& handle,
                           const KMeansParams& params,
                           raft::device_matrix_view<const DataT, IndexT> X,
                           raft::device_vector_view<DataT, IndexT> L2NormX,
                           raft::device_matrix_view<DataT, IndexT> centroids,
                           rmm::device_uvector<char>& workspace,
                           raft::device_vector_view<DataT, IndexT> sampleCountInCluster)
{
  // Thin forwarding wrapper; implemented in kmeans::count_samples_in_cluster.
  kmeans::count_samples_in_cluster<DataT, IndexT>(
    handle, params, X, L2NormX, centroids, workspace, sampleCountInCluster);
}
/*
* @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm.
* @note This is the algorithm described in
* "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S.
* ACM-SIAM symposium on Discrete algorithms.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[out] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*/
template <typename DataT, typename IndexT>
void kmeansPlusPlus(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                    rmm::device_uvector<char>& workspace)
{
  // Thin forwarding wrapper; `centroidsRawData` is the `centroids` output described in
  // the Doxygen block above. Implemented in kmeans::init_plus_plus.
  kmeans::init_plus_plus<DataT, IndexT>(handle, params, X, centroidsRawData, workspace);
}
/*
* @brief Main function used to fit KMeans (after cluster initialization)
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids [in] Initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
* @param[in] workspace Temporary workspace buffer which can get resized
*/
template <typename DataT, typename IndexT>
void kmeans_fit_main(raft::resources const& handle,
                     const KMeansParams& params,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<const DataT, IndexT> weight,
                     raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                     raft::host_scalar_view<DataT> inertia,
                     raft::host_scalar_view<IndexT> n_iter,
                     rmm::device_uvector<char>& workspace)
{
  // Thin forwarding wrapper; `weight` / `centroidsRawData` correspond to the
  // `sample_weight` / `centroids` parameters described in the Doxygen block above.
  kmeans::fit_main<DataT, IndexT>(
    handle, params, X, weight, centroidsRawData, inertia, n_iter, workspace);
}
}; // namespace cuvs::cluster
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/single_linkage.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/cluster/detail/single_linkage.cuh>
#include <cuvs/cluster/single_linkage_types.hpp>
#include <raft/core/device_mdspan.hpp>
namespace cuvs::cluster {
/**
* Note: All of the functions below in the cuvs::cluster namespace are deprecated
* and will be removed in a future release. Please use cuvs::cluster::hierarchy
* instead.
*/
/**
* Single-linkage clustering, capable of constructing a KNN graph to
* scale the algorithm beyond the n^2 memory consumption of implementations
* that use the fully-connected graph of pairwise distances by connecting
* a knn graph when k is not large enough to connect it.
* @tparam value_idx
* @tparam value_t
* @tparam dist_type method to use for constructing connectivities graph
* @param[in] handle raft handle
* @param[in] X dense input matrix in row-major layout
* @param[in] m number of rows in X
* @param[in] n number of columns in X
 * @param[in] metric distance metric to use when constructing the connectivities graph
* @param[out] out struct containing output dendrogram and cluster assignments
* @param[in] c a constant used when constructing connectivities from knn graph. Allows the indirect
control
* of k. The algorithm will set `k = log(n) + c`
* @param[in] n_clusters number of clusters to assign data samples
*/
// NOTE: deprecated entry point (see the note at the top of this namespace); prefer
// cuvs::cluster::hierarchy::single_linkage below.
template <typename value_idx,
          typename value_t,
          LinkageDistance dist_type = LinkageDistance::KNN_GRAPH>
void single_linkage(raft::resources const& handle,
                    const value_t* X,
                    size_t m,
                    size_t n,
                    cuvs::distance::DistanceType metric,
                    linkage_output<value_idx>* out,
                    int c,
                    size_t n_clusters)
{
  // Thin forwarding wrapper around the detail implementation.
  detail::single_linkage<value_idx, value_t, dist_type>(
    handle, X, m, n, metric, out, c, n_clusters);
}
}; // namespace cuvs::cluster
namespace cuvs::cluster::hierarchy {
constexpr int DEFAULT_CONST_C = 15;
/**
* Single-linkage clustering, capable of constructing a KNN graph to
* scale the algorithm beyond the n^2 memory consumption of implementations
* that use the fully-connected graph of pairwise distances by connecting
* a knn graph when k is not large enough to connect it.
* @tparam value_idx
* @tparam value_t
* @tparam dist_type method to use for constructing connectivities graph
* @param[in] handle raft handle
* @param[in] X dense input matrix in row-major layout
* @param[out] dendrogram output dendrogram (size [n_rows - 1] * 2)
* @param[out] labels output labels vector (size n_rows)
 * @param[in] metric distance metric to use when constructing the connectivities graph
* @param[in] n_clusters number of clusters to assign data samples
* @param[in] c a constant used when constructing connectivities from knn graph. Allows the indirect
control of k. The algorithm will set `k = log(n) + c`
*/
template <typename value_t, typename idx_t, LinkageDistance dist_type = LinkageDistance::KNN_GRAPH>
void single_linkage(raft::resources const& handle,
                    raft::device_matrix_view<const value_t, idx_t, raft::row_major> X,
                    raft::device_matrix_view<idx_t, idx_t, raft::row_major> dendrogram,
                    raft::device_vector_view<idx_t, idx_t> labels,
                    cuvs::distance::DistanceType metric,
                    size_t n_clusters,
                    std::optional<int> c = std::make_optional<int>(DEFAULT_CONST_C))
{
  // Adapt the mdspan-based API onto the legacy raw-pointer implementation: the
  // output views are exposed to it through a linkage_output descriptor.
  linkage_output<idx_t> out_arrs;
  out_arrs.children = dendrogram.data_handle();
  out_arrs.labels   = labels.data_handle();

  // `row_major` must be qualified with raft:: here: unlike the raft original, this
  // function lives in the cuvs namespace where the unqualified name does not resolve.
  cuvs::cluster::single_linkage<idx_t, value_t, dist_type>(
    handle,
    X.data_handle(),
    static_cast<std::size_t>(X.extent(0)),
    static_cast<std::size_t>(X.extent(1)),
    metric,
    &out_arrs,
    c.has_value() ? c.value() : DEFAULT_CONST_C,  // fall back to k = log(n) + DEFAULT_CONST_C
    n_clusters);
}
}; // namespace cuvs::cluster::hierarchy
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/kmeans_balanced.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/device_memory_resource.hpp>
#include <utility>
#include <cuvs/cluster/detail/kmeans_balanced.cuh>
#include <raft/core/mdarray.hpp>
#include <raft/util/cuda_utils.cuh>
namespace cuvs::cluster::kmeans_balanced {
/**
* @brief Find clusters of balanced sizes with a hierarchical k-means algorithm.
*
* This variant of the k-means algorithm first clusters the dataset in mesoclusters, then clusters
* the subsets associated to each mesocluster into fine clusters, and finally runs a few k-means
* iterations over the whole dataset and with all the centroids to obtain the final clusters.
*
* Each k-means iteration applies expectation-maximization-balancing:
* - Balancing: adjust centers for clusters that have a small number of entries. If the size of a
* cluster is below a threshold, the center is moved towards a bigger cluster.
* - Expectation: predict the labels (i.e find closest cluster centroid to each point)
* - Maximization: calculate optimal centroids (i.e find the center of gravity of each cluster)
*
* The number of mesoclusters is chosen by rounding the square root of the number of clusters. E.g
* for 512 clusters, we would have 23 mesoclusters. The number of fine clusters per mesocluster is
* chosen proportionally to the number of points in each mesocluster.
*
* This variant of k-means uses random initialization and a fixed number of iterations, though
* iterations can be repeated if the balancing step moved the centroids.
*
* Additionally, this algorithm supports quantized datasets in arbitrary types but the core part of
* the algorithm will work with a floating-point type, hence a conversion function can be provided
* to map the data type to the math type.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <cuvs/cluster/kmeans_balanced.cuh>
* #include <cuvs/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* cuvs::cluster::kmeans_balanced_params params;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* cuvs::cluster::kmeans_balanced::fit(handle, params, X, centroids.view());
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Training instances to cluster. The data must be in row-major format.
* [dim = n_samples x n_features]
* @param[out] centroids The generated centroids [dim = n_clusters x n_features]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the arithmetic
* datatype. If DataT == MathT, this must be the identity.
*/
template <typename DataT, typename MathT, typename IndexT, typename MappingOpT = raft::identity_op>
void fit(const raft::resources& handle,
         kmeans_balanced_params const& params,
         raft::device_matrix_view<const DataT, IndexT> X,
         raft::device_matrix_view<MathT, IndexT> centroids,
         MappingOpT mapping_op = raft::identity_op())
{
  // Validate shapes / type capacity before dispatching to the detail implementation.
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // Guard against IndexT overflow on the flattened n_samples * n_features index space.
  RAFT_EXPECTS(static_cast<uint64_t>(X.extent(0)) * static_cast<uint64_t>(X.extent(1)) <=
                 static_cast<uint64_t>(std::numeric_limits<IndexT>::max()),
               "The chosen index type cannot represent all indices for the given dataset");
  RAFT_EXPECTS(centroids.extent(0) > IndexT{0} && centroids.extent(0) <= X.extent(0),
               "The number of centroids must be strictly positive and cannot exceed the number of "
               "points in the training dataset.");

  // Hierarchical build: mesoclusters first, then fine clusters, then global iterations
  // (see the Doxygen block above for the algorithm description).
  detail::build_hierarchical(handle,
                             params,
                             X.extent(1),
                             X.data_handle(),
                             X.extent(0),
                             centroids.data_handle(),
                             centroids.extent(0),
                             mapping_op);
}
/**
* @brief Predict the closest cluster each sample in X belongs to.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <cuvs/cluster/kmeans_balanced.cuh>
* #include <cuvs/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* cuvs::cluster::kmeans_balanced_params params;
* auto labels = raft::make_device_vector<float, int>(handle, n_rows);
* cuvs::cluster::kmeans_balanced::predict(handle, params, X, centroids, labels);
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Dataset for which to infer the closest clusters.
* [dim = n_samples x n_features]
* @param[in] centroids The input centroids [dim = n_clusters x n_features]
* @param[out] labels The output labels [dim = n_samples]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the arithmetic
* datatype. If DataT == MathT, this must be the identity.
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename MappingOpT = raft::identity_op>
void predict(const raft::resources& handle,
             kmeans_balanced_params const& params,
             raft::device_matrix_view<const DataT, IndexT> X,
             raft::device_matrix_view<const MathT, IndexT> centroids,
             raft::device_vector_view<LabelT, IndexT> labels,
             MappingOpT mapping_op = raft::identity_op())
{
  // Validate shapes / type capacity before dispatching to the detail implementation.
  RAFT_EXPECTS(X.extent(0) == labels.extent(0),
               "Number of rows in dataset and labels are different");
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // Guard against IndexT overflow on the flattened n_samples * n_features index space.
  RAFT_EXPECTS(static_cast<uint64_t>(X.extent(0)) * static_cast<uint64_t>(X.extent(1)) <=
                 static_cast<uint64_t>(std::numeric_limits<IndexT>::max()),
               "The chosen index type cannot represent all indices for the given dataset");
  // Every cluster id must be representable in the label type.
  RAFT_EXPECTS(static_cast<uint64_t>(centroids.extent(0)) <=
                 static_cast<uint64_t>(std::numeric_limits<LabelT>::max()),
               "The chosen label type cannot represent all cluster labels");

  detail::predict(handle,
                  params,
                  centroids.data_handle(),
                  centroids.extent(0),
                  X.extent(1),
                  X.data_handle(),
                  X.extent(0),
                  labels.data_handle(),
                  mapping_op);
}
/**
* @brief Compute hierarchical balanced k-means clustering and predict cluster index for each sample
* in the input.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <cuvs/cluster/kmeans_balanced.cuh>
* #include <cuvs/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* cuvs::cluster::kmeans_balanced_params params;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* auto labels = raft::make_device_vector<float, int>(handle, n_rows);
* cuvs::cluster::kmeans_balanced::fit_predict(
* handle, params, X, centroids.view(), labels.view());
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Training instances to cluster. The data must be in row-major format.
* [dim = n_samples x n_features]
* @param[out] centroids The output centroids [dim = n_clusters x n_features]
* @param[out] labels The output labels [dim = n_samples]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the arithmetic
* datatype. If DataT and MathT are the same, this must be the identity.
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename MappingOpT = raft::identity_op>
void fit_predict(const raft::resources& handle,
                 kmeans_balanced_params const& params,
                 raft::device_matrix_view<const DataT, IndexT> X,
                 raft::device_matrix_view<MathT, IndexT> centroids,
                 raft::device_vector_view<LabelT, IndexT> labels,
                 MappingOpT mapping_op = raft::identity_op())
{
  // Step 1: train the centroids on X.
  cuvs::cluster::kmeans_balanced::fit(handle, params, X, centroids, mapping_op);

  // Step 2: label every sample against the trained centroids. predict() takes a
  // read-only view, so wrap the (now fitted) centroids in a const matrix view.
  auto fitted_view = raft::make_device_matrix_view<const MathT, IndexT>(
    centroids.data_handle(), centroids.extent(0), centroids.extent(1));
  cuvs::cluster::kmeans_balanced::predict(handle, params, X, fitted_view, labels, mapping_op);
}
namespace helpers {
/**
* @brief Randomly initialize centers and apply expectation-maximization-balancing iterations
*
* This is essentially the non-hierarchical balanced k-means algorithm which is used by the
* hierarchical algorithm once to build the mesoclusters and once per mesocluster to build the fine
* clusters.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <cuvs/cluster/kmeans_balanced.cuh>
* #include <cuvs/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* cuvs::cluster::kmeans_balanced_params params;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* auto labels = raft::make_device_vector<int, int>(handle, n_samples);
* auto sizes = raft::make_device_vector<int, int>(handle, n_clusters);
* cuvs::cluster::kmeans_balanced::build_clusters(
* handle, params, X, centroids.view(), labels.view(), sizes.view());
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam CounterT Counter type supported by CUDA's native atomicAdd.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Training instances to cluster. The data must be in row-major format.
* [dim = n_samples x n_features]
* @param[out] centroids The output centroids [dim = n_clusters x n_features]
* @param[out] labels The output labels [dim = n_samples]
* @param[out] cluster_sizes Size of each cluster [dim = n_clusters]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the
* arithmetic datatype. If DataT == MathT, this must be the identity.
* @param[in] X_norm (optional) Dataset's row norms [dim = n_samples]
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void build_clusters(const raft::resources& handle,
                    const kmeans_balanced_params& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<MathT, IndexT> centroids,
                    raft::device_vector_view<LabelT, IndexT> labels,
                    raft::device_vector_view<CounterT, IndexT> cluster_sizes,
                    MappingOpT mapping_op = raft::identity_op(),
                    std::optional<raft::device_vector_view<const MathT>> X_norm = std::nullopt)
{
  // Validate that all views agree on their shared extents before dispatching.
  RAFT_EXPECTS(X.extent(0) == labels.extent(0),
               "Number of rows in dataset and labels are different");
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // Fixed typo in the error message: "clusyer_sizes" -> "cluster_sizes".
  RAFT_EXPECTS(centroids.extent(0) == cluster_sizes.extent(0),
               "Number of rows in centroids and cluster_sizes are different");

  detail::build_clusters(handle,
                         params,
                         X.extent(1),
                         X.data_handle(),
                         X.extent(0),
                         centroids.extent(0),
                         centroids.data_handle(),
                         labels.data_handle(),
                         cluster_sizes.data_handle(),
                         mapping_op,
                         resource::get_workspace_resource(handle),
                         // Precomputed row norms are optional; pass nullptr to let the
                         // implementation compute them itself.
                         X_norm.has_value() ? X_norm.value().data_handle() : nullptr);
}
/**
* @brief Given the data and labels, calculate cluster centers and sizes in one sweep.
*
* Let `S_i = {x_k | x_k \in X & labels[k] == i}` be the vectors in the dataset with label i.
*
* On exit,
* `centers_i = (\sum_{x \in S_i} x + w_i * center_i) / (|S_i| + w_i)`,
* where `w_i = reset_counters ? 0 : cluster_size[i]`.
*
* In other words, the updated cluster centers are a weighted average of the existing cluster
* center, and the coordinates of the points labeled with i. _This allows calling this function
* multiple times with different datasets with the same effect as if calling this function once
* on the combined dataset_.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <cuvs/cluster/kmeans_balanced.cuh>
* ...
* raft::handle_t handle;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* auto sizes = raft::make_device_vector<int, int>(handle, n_clusters);
* cuvs::cluster::kmeans_balanced::calc_centers_and_sizes(
* handle, X, labels, centroids.view(), sizes.view(), true);
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam CounterT Counter type supported by CUDA's native atomicAdd.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] X Dataset for which to calculate cluster centers. The data must be in
* row-major format. [dim = n_samples x n_features]
* @param[in] labels The input labels [dim = n_samples]
* @param[out] centroids The output centroids [dim = n_clusters x n_features]
* @param[out] cluster_sizes Size of each cluster [dim = n_clusters]
* @param[in] reset_counters Whether to clear the output arrays before calculating.
* When set to `false`, this function may be used to update existing
* centers and sizes using the weighted average principle.
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the
* arithmetic datatype. If DataT == MathT, this must be the identity.
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT = raft::identity_op>
void calc_centers_and_sizes(const raft::resources& handle,
                            raft::device_matrix_view<const DataT, IndexT> X,
                            raft::device_vector_view<const LabelT, IndexT> labels,
                            raft::device_matrix_view<MathT, IndexT> centroids,
                            raft::device_vector_view<CounterT, IndexT> cluster_sizes,
                            bool reset_counters = true,
                            MappingOpT mapping_op = raft::identity_op())
{
  // Validate that all views agree on their shared extents before dispatching.
  RAFT_EXPECTS(X.extent(0) == labels.extent(0),
               "Number of rows in dataset and labels are different");
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // Fixed typo in the error message: "clusyer_sizes" -> "cluster_sizes".
  RAFT_EXPECTS(centroids.extent(0) == cluster_sizes.extent(0),
               "Number of rows in centroids and cluster_sizes are different");

  // When reset_counters is false, existing centers/sizes act as weighted priors
  // (see the Doxygen block above for the exact update formula).
  detail::calc_centers_and_sizes(handle,
                                 centroids.data_handle(),
                                 cluster_sizes.data_handle(),
                                 centroids.extent(0),
                                 X.extent(1),
                                 X.data_handle(),
                                 X.extent(0),
                                 labels.data_handle(),
                                 reset_counters,
                                 mapping_op);
}
} // namespace helpers
} // namespace cuvs::cluster::kmeans_balanced
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/detail/connectivities.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/linalg/unary_op.cuh>
#include <rmm/device_uvector.hpp>
#include <cuvs/cluster/single_linkage_types.hpp>
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/neighbors/knn_graph.cuh>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
namespace cuvs::cluster::detail {
/**
 * Primary template for connectivities-graph construction, dispatched on the linkage
 * distance type. Only explicit specializations (e.g. for KNN_GRAPH below) define
 * run(); using an unspecialized dist_type therefore fails at link time.
 */
template <cuvs::cluster::LinkageDistance dist_type, typename value_idx, typename value_t>
struct distance_graph_impl {
  // Build the CSR connectivities graph (indptr/indices/data) over the m x n
  // row-major dataset X. `c` controls knn-graph connectivity in specializations
  // that use it. Declared here, defined only by specializations.
  void run(raft::resources const& handle,
           const value_t* X,
           size_t m,
           size_t n,
           cuvs::distance::DistanceType metric,
           rmm::device_uvector<value_idx>& indptr,
           rmm::device_uvector<value_idx>& indices,
           rmm::device_uvector<value_t>& data,
           int c);
};
/**
* Connectivities specialization to build a knn graph
* @tparam value_idx
* @tparam value_t
*/
template <typename value_idx, typename value_t>
struct distance_graph_impl<cuvs::cluster::LinkageDistance::KNN_GRAPH, value_idx, value_t> {
  // Build a knn graph over X and convert it to CSR (indptr/indices/data).
  // All device work is enqueued on the handle's stream; no synchronization here.
  void run(raft::resources const& handle,
           const value_t* X,
           size_t m,
           size_t n,
           cuvs::distance::DistanceType metric,
           rmm::device_uvector<value_idx>& indptr,
           rmm::device_uvector<value_idx>& indices,
           rmm::device_uvector<value_t>& data,
           int c)
  {
    auto stream        = resource::get_cuda_stream(handle);
    auto thrust_policy = resource::get_thrust_policy(handle);

    // Need to symmetrize knn into undirected graph
    raft::sparse::COO<value_t, value_idx> knn_graph_coo(stream);

    raft::sparse::neighbors::knn_graph(handle, X, m, n, metric, knn_graph_coo, c);

    // Output nnz is only known after the knn graph is built.
    indices.resize(knn_graph_coo.nnz, stream);
    data.resize(knn_graph_coo.nnz, stream);

    // self-loops get max distance: branchless select between max() (self-loop)
    // and the original distance, written back in place over the COO values.
    auto transform_in = thrust::make_zip_iterator(
      thrust::make_tuple(knn_graph_coo.rows(), knn_graph_coo.cols(), knn_graph_coo.vals()));

    thrust::transform(thrust_policy,
                      transform_in,
                      transform_in + knn_graph_coo.nnz,
                      knn_graph_coo.vals(),
                      [=] __device__(const thrust::tuple<value_idx, value_idx, value_t>& tup) {
                        bool self_loop = thrust::get<0>(tup) == thrust::get<1>(tup);
                        return (self_loop * std::numeric_limits<value_t>::max()) +
                               (!self_loop * thrust::get<2>(tup));
                      });

    // Rows are sorted by construction, so the row pointer can be derived directly.
    raft::sparse::convert::sorted_coo_to_csr(
      knn_graph_coo.rows(), knn_graph_coo.nnz, indptr.data(), m + 1, stream);

    // TODO: Wouldn't need to copy here if we could compute knn
    // graph directly on the device uvectors
    // ref: https://github.com/rapidsai/raft/issues/227
    raft::copy_async(indices.data(), knn_graph_coo.cols(), knn_graph_coo.nnz, stream);
    raft::copy_async(data.data(), knn_graph_coo.vals(), knn_graph_coo.nnz, stream);
  }
};
/**
 * Fills `indices` with the repeating pattern [0, 1, ..., m-1, 0, 1, ...],
 * i.e. the CSR column indices of a fully dense m x m adjacency matrix.
 * Launched with a 1D grid; every thread handles one entry and bounds-checks
 * against nnz.
 */
template <typename value_idx>
RAFT_KERNEL fill_indices2(value_idx* indices, size_t m, size_t nnz)
{
  // Promote to value_idx before multiplying: `blockIdx.x * blockDim.x` is
  // evaluated in `unsigned int` and would wrap for nnz >= 2^32 even when
  // value_idx is a 64-bit type.
  value_idx tid = static_cast<value_idx>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (tid >= static_cast<value_idx>(nnz)) return;
  indices[tid] = tid % static_cast<value_idx>(m);
}
/**
 * Compute connected CSR of pairwise distances
 * @tparam value_idx
 * @tparam value_t
 * @param handle raft handle
 * @param X dense row-major input matrix (m rows, n columns)
 * @param m number of rows (samples) in X
 * @param n number of columns (features) in X
 * @param metric distance metric to use
 * @param[out] indptr CSR indptr (m + 1 entries); row i spans [i*m, (i+1)*m)
 * @param[out] indices CSR column indices (m * m entries)
 * @param[out] data CSR values (m * m entries); diagonal (self-loop) entries
 *             are set to the maximum representable distance so downstream
 *             MST construction never selects them
 */
template <typename value_idx, typename value_t>
void pairwise_distances(const raft::resources& handle,
                        const value_t* X,
                        size_t m,
                        size_t n,
                        cuvs::distance::DistanceType metric,
                        value_idx* indptr,
                        value_idx* indices,
                        value_t* data)
{
  auto stream      = resource::get_cuda_stream(handle);
  auto exec_policy = resource::get_thrust_policy(handle);

  // The graph is fully dense: one edge per ordered (row, col) pair.
  // NOTE(review): assumes m * m fits in value_idx — TODO confirm callers
  // instantiate with a wide enough index type for large m.
  value_idx nnz = m * m;

  value_idx blocks = raft::ceildiv(nnz, (value_idx)256);
  fill_indices2<value_idx><<<blocks, 256, 0, stream>>>(indices, m, nnz);

  // Every CSR row holds exactly m entries: indptr = [0, m, 2m, ..., m*m].
  thrust::sequence(exec_policy, indptr, indptr + m, 0, (int)m);

  raft::update_device(indptr + m, &nnz, 1, stream);

  // TODO: It would ultimately be nice if the MST could accept
  // dense inputs directly so we don't need to double the memory
  // usage to hand it a sparse array here.
  distance::pairwise_distance<value_t, value_idx>(handle, X, X, data, m, m, n, metric);

  // self-loops get max distance.
  // Use a value_idx-typed counting iterator: the previous int-typed iterator
  // would overflow for nnz >= 2^31 even when value_idx is 64-bit.
  auto transform_in = thrust::make_zip_iterator(
    thrust::make_tuple(thrust::make_counting_iterator<value_idx>(0), data));

  thrust::transform(exec_policy,
                    transform_in,
                    transform_in + nnz,
                    data,
                    [=] __device__(const thrust::tuple<value_idx, value_t>& tup) {
                      value_idx idx  = thrust::get<0>(tup);
                      // Flat index idx maps to (row = idx / m, col = idx % m);
                      // equal row and col means a diagonal entry.
                      bool self_loop = idx % m == idx / m;
                      return (self_loop * std::numeric_limits<value_t>::max()) +
                             (!self_loop * thrust::get<1>(tup));
                    });
}
/**
 * Connectivities specialization for pairwise distances
 * @tparam value_idx
 * @tparam value_t
 */
template <typename value_idx, typename value_t>
struct distance_graph_impl<cuvs::cluster::LinkageDistance::PAIRWISE, value_idx, value_t> {
  // Materializes the full m x m pairwise-distance matrix as a dense CSR
  // graph. `c` is accepted for interface parity with the kNN specialization
  // but is not needed here.
  void run(const raft::resources& handle,
           const value_t* X,
           size_t m,
           size_t n,
           cuvs::distance::DistanceType metric,
           rmm::device_uvector<value_idx>& indptr,
           rmm::device_uvector<value_idx>& indices,
           rmm::device_uvector<value_t>& data,
           int c)
  {
    auto stream = resource::get_cuda_stream(handle);

    // Every ordered (row, col) pair is an edge.
    const size_t edge_count = m * m;
    data.resize(edge_count, stream);
    indices.resize(edge_count, stream);

    pairwise_distances(handle, X, m, n, metric, indptr.data(), indices.data(), data.data());
  }
};
/**
 * Returns a CSR connectivities graph based on the given linkage distance.
 * @tparam value_idx
 * @tparam value_t
 * @tparam dist_type
 * @param[in] handle raft handle
 * @param[in] X dense data for which to construct connectivites
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[in] metric distance metric to use
 * @param[out] indptr indptr array of connectivities graph
 * @param[out] indices column indices array of connectivities graph
 * @param[out] data distances array of connectivities graph
 * @param[in] c constant 'c' used for nearest neighbors-based distances
 *            which will guarantee k <= log(n) + c
 */
template <typename value_idx, typename value_t, cuvs::cluster::LinkageDistance dist_type>
void get_distance_graph(raft::resources const& handle,
                        const value_t* X,
                        size_t m,
                        size_t n,
                        cuvs::distance::DistanceType metric,
                        rmm::device_uvector<value_idx>& indptr,
                        rmm::device_uvector<value_idx>& indices,
                        rmm::device_uvector<value_t>& data,
                        int c)
{
  // A CSR indptr always has m + 1 entries, independent of dist_type.
  indptr.resize(m + 1, resource::get_cuda_stream(handle));

  // Dispatch at compile time to the KNN_GRAPH or PAIRWISE specialization.
  distance_graph_impl<dist_type, value_idx, value_t>{}.run(
    handle, X, m, n, metric, indptr, indices, data, c);
}
}; // namespace cuvs::cluster::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/detail/mst.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/sparse/neighbors/cross_component_nn.cuh>
#include <raft/sparse/op/sort.cuh>
#include <raft/sparse/solver/mst.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace cuvs::cluster::detail {
/**
 * Appends the edges of `coo2` to `coo1` in place, growing `coo1`'s device
 * buffers and updating its edge count. Copies are enqueued asynchronously on
 * `stream`; `coo2` is left unmodified.
 */
template <typename value_idx, typename value_t>
void merge_msts(sparse::solver::Graph_COO<value_idx, value_idx, value_t>& coo1,
                sparse::solver::Graph_COO<value_idx, value_idx, value_t>& coo2,
                cudaStream_t stream)
{
  /** Add edges to existing mst **/
  // Use the edge count's own type (via auto) rather than narrowing to `int`,
  // which could overflow for very large edge lists.
  auto final_nnz = coo1.n_edges + coo2.n_edges;

  coo1.src.resize(final_nnz, stream);
  coo1.dst.resize(final_nnz, stream);
  coo1.weights.resize(final_nnz, stream);

  /**
   * Construct final edge list: append coo2's edges after coo1's existing
   * n_edges entries (n_edges must be read before it is updated below).
   */
  raft::copy_async(coo1.src.data() + coo1.n_edges, coo2.src.data(), coo2.n_edges, stream);
  raft::copy_async(coo1.dst.data() + coo1.n_edges, coo2.dst.data(), coo2.n_edges, stream);
  raft::copy_async(coo1.weights.data() + coo1.n_edges, coo2.weights.data(), coo2.n_edges, stream);

  coo1.n_edges = final_nnz;
}
/**
 * Connect an unconnected knn graph (one in which mst returns an msf). The
 * device buffers underlying the Graph_COO object are modified in-place.
 * @tparam value_idx index type
 * @tparam value_t floating-point value type
 * @tparam red_op reduction functor forwarded to cross_component_nn
 * @param[in] handle raft handle
 * @param[in] X original dense data from which knn grpah was constructed
 * @param[inout] msf edge list containing the mst result
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[inout] color the color labels array returned from the mst invocation
 * @param[in] reduction_op reduction operation for cross-component 1-nn
 * @param[in] metric distance metric. NOTE(review): this parameter is not used
 *            anywhere in the body below — confirm whether cross_component_nn
 *            assumes L2SqrtExpanded before relying on other metrics.
 * @return updated MST edge list
 */
template <typename value_idx, typename value_t, typename red_op>
void connect_knn_graph(
  raft::resources const& handle,
  const value_t* X,
  sparse::solver::Graph_COO<value_idx, value_idx, value_t>& msf,
  size_t m,
  size_t n,
  value_idx* color,
  red_op reduction_op,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2SqrtExpanded)
{
  auto stream = resource::get_cuda_stream(handle);

  raft::sparse::COO<value_t, value_idx> connected_edges(stream);

  // default row and column batch sizes are chosen for computing cross component nearest neighbors.
  // Reference: PR #1445
  static constexpr size_t default_row_batch_size = 4096;
  static constexpr size_t default_col_batch_size = 16;

  // Find, for each disconnected component, its nearest neighbor in another
  // component; the resulting edges are candidates for reconnecting the graph.
  raft::sparse::neighbors::cross_component_nn<value_idx, value_t>(handle,
                                                                  connected_edges,
                                                                  X,
                                                                  color,
                                                                  m,
                                                                  n,
                                                                  reduction_op,
                                                                  min(m, default_row_batch_size),
                                                                  min(n, default_col_batch_size));

  // Convert the candidate edges to CSR for the MST solver.
  rmm::device_uvector<value_idx> indptr2(m + 1, stream);
  raft::sparse::convert::sorted_coo_to_csr(
    connected_edges.rows(), connected_edges.nnz, indptr2.data(), m + 1, stream);

  // On the second call, we hand the MST the original colors
  // and the new set of edges and let it restart the optimization process
  auto new_mst =
    raft::sparse::solver::mst<value_idx, value_idx, value_t, double>(handle,
                                                                     indptr2.data(),
                                                                     connected_edges.cols(),
                                                                     connected_edges.vals(),
                                                                     m,
                                                                     connected_edges.nnz,
                                                                     color,
                                                                     stream,
                                                                     false,
                                                                     false);

  // Append the newly-found connecting edges to the existing forest.
  merge_msts<value_idx, value_t>(msf, new_mst, stream);
}
/**
 * Constructs an MST and sorts the resulting edges in ascending
 * order by their weight.
 *
 * Hierarchical clustering heavily relies upon the ordering
 * and vertices returned in the MST. If the result of the
 * MST was actually a minimum-spanning forest, the CSR
 * being passed into the MST is not connected. In such a
 * case, this graph will be connected by performing a
 * KNN across the components.
 * @tparam value_idx
 * @tparam value_t
 * @tparam red_op reduction functor forwarded to connect_knn_graph
 * @param[in] handle raft handle
 * @param[in] X dense data the connectivities graph was built from
 * @param[in] indptr CSR indptr of connectivities graph
 * @param[in] indices CSR indices array of connectivities graph
 * @param[in] pw_dists CSR weights array of connectivities graph
 * @param[in] m number of rows in X / src vertices in connectivities graph
 * @param[in] n number of columns in X
 * @param[out] mst_src output src edges
 * @param[out] mst_dst output dst edges
 * @param[out] mst_weight output weights (distances)
 * @param[inout] color per-vertex component labels, written by the MST solver
 * @param[in] nnz number of nonzeros in the connectivities graph
 * @param[in] reduction_op reduction operation for cross-component 1-nn
 * @param[in] metric distance metric. NOTE(review): not forwarded to
 *            connect_knn_graph below, which therefore always uses its
 *            default (L2SqrtExpanded) — confirm this is intentional.
 * @param[in] max_iter maximum iterations to run knn graph connection. This
 * argument is really just a safeguard against the potential for infinite loops.
 */
template <typename value_idx, typename value_t, typename red_op>
void build_sorted_mst(
  raft::resources const& handle,
  const value_t* X,
  const value_idx* indptr,
  const value_idx* indices,
  const value_t* pw_dists,
  size_t m,
  size_t n,
  value_idx* mst_src,
  value_idx* mst_dst,
  value_t* mst_weight,
  value_idx* color,
  size_t nnz,
  red_op reduction_op,
  cuvs::distance::DistanceType metric = cuvs::distance::DistanceType::L2SqrtExpanded,
  int max_iter = 10)
{
  auto stream = resource::get_cuda_stream(handle);

  // We want to have MST initialize colors on first call.
  auto mst_coo = raft::sparse::solver::mst<value_idx, value_idx, value_t, double>(
    handle, indptr, indices, pw_dists, (value_idx)m, nnz, color, stream, false, true);

  int iters        = 1;
  int n_components = raft::sparse::neighbors::get_n_components(color, m, stream);

  // If the solver returned a forest (more than one component), repeatedly add
  // cross-component 1-nn edges and re-run the MST until connected.
  while (n_components > 1 && iters < max_iter) {
    connect_knn_graph<value_idx, value_t>(handle, X, mst_coo, m, n, color, reduction_op);

    iters++;

    n_components = raft::sparse::neighbors::get_n_components(color, m, stream);
  }

  /**
   * The `max_iter` argument was introduced only to prevent the potential for an infinite loop.
   * Ideally the log2(n) guarantees of the MST should be enough to connect KNN graphs with a
   * massive number of data samples in very few iterations. If it does not, there are 3 likely
   * reasons why (in order of their likelihood):
   * 1. There is a bug in this code somewhere
   * 2. Either the given KNN graph wasn't generated from X or the same metric is not being used
   * to generate the 1-nn (currently only L2SqrtExpanded is supported).
   * 3. max_iter was not large enough to connect the graph (less likely).
   *
   * Note that a KNN graph generated from 50 random isotropic balls (with significant overlap)
   * was able to be connected in a single iteration.
   */
  RAFT_EXPECTS(n_components == 1,
               "KNN graph could not be connected in %d iterations. "
               "Please verify that the input knn graph is generated from X "
               "(and the same distance metric used),"
               " or increase 'max_iter'",
               max_iter);

  // Hierarchical clustering consumes edges in ascending weight order.
  raft::sparse::op::coo_sort_by_weight(
    mst_coo.src.data(), mst_coo.dst.data(), mst_coo.weights.data(), mst_coo.n_edges, stream);

  raft::copy_async(mst_src, mst_coo.src.data(), mst_coo.n_edges, stream);
  raft::copy_async(mst_dst, mst_coo.dst.data(), mst_coo.n_edges, stream);
  raft::copy_async(mst_weight, mst_coo.weights.data(), mst_coo.n_edges, stream);
}
}; // namespace cuvs::cluster::detail | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/detail/kmeans.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <random>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/transform.h>
#include <cuvs/cluster/detail/kmeans_common.cuh>
#include <cuvs/cluster/kmeans_types.hpp>
#include <cuvs/distance/distance_types.hpp>
#include <raft/common/nvtx.hpp>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace cluster {
namespace detail {
// =========================================================
// Init functions
// =========================================================
// Selects 'n_clusters' samples randomly from X
//
// Gathers params.n_clusters rows of X, chosen uniformly at random with the
// configured RNG seed, into `centroids`. (The previous version declared an
// unused `stream` local; removed.)
template <typename DataT, typename IndexT>
void initRandom(raft::resources const& handle,
                const KMeansParams& params,
                raft::device_matrix_view<const DataT, IndexT> X,
                raft::device_matrix_view<DataT, IndexT> centroids)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("initRandom");
  detail::shuffleAndGather<DataT, IndexT>(
    handle, X, centroids, params.n_clusters, params.rng_state.seed);
}
/*
 * @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm.
 * @note This is the algorithm described in
 * "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S.
 * ACM-SIAM symposium on Discrete algorithms.
 *
 * Scalable kmeans++ pseudocode
 * 1: C = sample a point uniformly at random from X
 * 2: while |C| < k
 * 3: Sample x in X with probability p_x = d^2(x, C) / phi_X (C)
 * 4: C = C U {x}
 * 5: end for
 */
template <typename DataT, typename IndexT>
void kmeansPlusPlus(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                    rmm::device_uvector<char>& workspace)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("kmeansPlusPlus");
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  auto metric         = params.metric;

  // number of seeding trials for each center (except the first)
  auto n_trials = 2 + static_cast<int>(std::ceil(log(n_clusters)));

  RAFT_LOG_DEBUG(
    "Run sequential k-means++ to select %d centroids from %d input samples "
    "(%d seeding trials per iterations)",
    n_clusters,
    n_samples,
    n_trials);

  auto dataBatchSize = getDataBatchSize(params.batch_samples, n_samples);

  // temporary buffers
  auto indices            = raft::make_device_vector<IndexT, IndexT>(handle, n_trials);
  auto centroidCandidates = raft::make_device_matrix<DataT, IndexT>(handle, n_trials, n_features);
  auto costPerCandidate   = raft::make_device_vector<DataT, IndexT>(handle, n_trials);
  auto minClusterDistance = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  auto distBuffer         = raft::make_device_matrix<DataT, IndexT>(handle, n_trials, n_samples);

  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  rmm::device_scalar<DataT> clusterCost(stream);
  rmm::device_scalar<cub::KeyValuePair<int, DataT>> minClusterIndexAndDistance(stream);

  // Device and matrix views
  raft::device_vector_view<IndexT, IndexT> indices_view(indices.data_handle(), n_trials);
  auto const_weights_view =
    raft::make_device_vector_view<const DataT, IndexT>(minClusterDistance.data_handle(), n_samples);
  auto const_indices_view =
    raft::make_device_vector_view<const IndexT, IndexT>(indices.data_handle(), n_trials);
  auto const_X_view =
    raft::make_device_matrix_view<const DataT, IndexT>(X.data_handle(), n_samples, n_features);
  raft::device_matrix_view<DataT, IndexT> candidates_view(
    centroidCandidates.data_handle(), n_trials, n_features);

  // L2 norm of X: ||c||^2
  // Only needed by the expanded-form L2 distance computations below.
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (metric == cuvs::distance::DistanceType::L2Expanded ||
      metric == cuvs::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }

  // Host-side RNG picks the first centroid; device RNG drives the
  // distance-weighted sampling of all subsequent candidates.
  raft::random::RngState rng(params.rng_state.seed, params.rng_state.type);
  std::mt19937 gen(params.rng_state.seed);
  std::uniform_int_distribution<> dis(0, n_samples - 1);

  // <<< Step-1 >>>: C <-- sample a point uniformly at random from X
  auto initialCentroid = raft::make_device_matrix_view<const DataT, IndexT>(
    X.data_handle() + dis(gen) * n_features, 1, n_features);
  int n_clusters_picked = 1;

  // store the chosen centroid in the buffer
  raft::copy(
    centroidsRawData.data_handle(), initialCentroid.data_handle(), initialCentroid.size(), stream);

  // C = initial set of centroids
  auto centroids = raft::make_device_matrix_view<DataT, IndexT>(
    centroidsRawData.data_handle(), initialCentroid.extent(0), initialCentroid.extent(1));
  // <<< End of Step-1 >>>

  // Calculate cluster distance, d^2(x, C), for all the points x in X to the nearest centroid
  detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                   X,
                                                   centroids,
                                                   minClusterDistance.view(),
                                                   L2NormX.view(),
                                                   L2NormBuf_OR_DistBuf,
                                                   params.metric,
                                                   params.batch_samples,
                                                   params.batch_centroids,
                                                   workspace);

  RAFT_LOG_DEBUG(" k-means++ - Sampled %d/%d centroids", n_clusters_picked, n_clusters);

  // <<<< Step-2 >>> : while |C| < k
  while (n_clusters_picked < n_clusters) {
    // <<< Step-3 >>> : Sample x in X with probability p_x = d^2(x, C) / phi_X (C)
    // Choose 'n_trials' centroid candidates from X with probability proportional to the squared
    // distance to the nearest existing cluster
    raft::random::discrete(handle, rng, indices_view, const_weights_view);
    raft::matrix::gather(handle, const_X_view, const_indices_view, candidates_view);

    // Calculate pairwise distance between X and the centroid candidates
    // Output - pwd [n_trials x n_samples]
    auto pwd = distBuffer.view();
    detail::pairwise_distance_kmeans<DataT, IndexT>(
      handle, centroidCandidates.view(), X, pwd, workspace, metric);

    // Update nearest cluster distance for each centroid candidate
    // Note pwd and minDistBuf points to same buffer which currently holds pairwise distance values.
    // Outputs minDistanceBuf[n_trials x n_samples] where minDistance[i, :] contains updated
    // minClusterDistance that includes candidate-i
    auto minDistBuf = distBuffer.view();
    raft::linalg::matrixVectorOp(minDistBuf.data_handle(),
                                 pwd.data_handle(),
                                 minClusterDistance.data_handle(),
                                 pwd.extent(1),
                                 pwd.extent(0),
                                 true,
                                 true,
                                 raft::min_op{},
                                 stream);

    // Calculate costPerCandidate[n_trials] where costPerCandidate[i] is the cluster cost when using
    // centroid candidate-i
    raft::linalg::reduce(costPerCandidate.data_handle(),
                         minDistBuf.data_handle(),
                         minDistBuf.extent(1),
                         minDistBuf.extent(0),
                         static_cast<DataT>(0),
                         true,
                         true,
                         stream);

    // Greedy Choice - Choose the candidate that has minimum cluster cost
    // ArgMin operation below identifies the index of minimum cost in costPerCandidate
    {
      // Determine temporary device storage requirements
      size_t temp_storage_bytes = 0;
      cub::DeviceReduce::ArgMin(nullptr,
                                temp_storage_bytes,
                                costPerCandidate.data_handle(),
                                minClusterIndexAndDistance.data(),
                                costPerCandidate.extent(0),
                                stream);

      // Allocate temporary storage
      workspace.resize(temp_storage_bytes, stream);

      // Run argmin-reduction
      cub::DeviceReduce::ArgMin(workspace.data(),
                                temp_storage_bytes,
                                costPerCandidate.data_handle(),
                                minClusterIndexAndDistance.data(),
                                costPerCandidate.extent(0),
                                stream);

      int bestCandidateIdx = -1;
      raft::copy(&bestCandidateIdx, &minClusterIndexAndDistance.data()->key, 1, stream);
      // Host read of bestCandidateIdx below requires the copy to complete.
      resource::sync_stream(handle);
      /// <<< End of Step-3 >>>

      /// <<< Step-4 >>>: C = C U {x}
      // Update minimum cluster distance corresponding to the chosen centroid candidate
      raft::copy(minClusterDistance.data_handle(),
                 minDistBuf.data_handle() + bestCandidateIdx * n_samples,
                 n_samples,
                 stream);

      raft::copy(centroidsRawData.data_handle() + n_clusters_picked * n_features,
                 centroidCandidates.data_handle() + bestCandidateIdx * n_features,
                 n_features,
                 stream);

      ++n_clusters_picked;
      /// <<< End of Step-4 >>>
    }

    RAFT_LOG_DEBUG(" k-means++ - Sampled %d/%d centroids", n_clusters_picked, n_clusters);
  }  /// <<<< Step-5 >>>
}
/**
 * Computes new centroids as the weighted mean of the samples assigned to each
 * cluster, copying the previous centroid through for clusters whose total
 * weight is zero.
 *
 * @tparam DataT
 * @tparam IndexT
 * @tparam LabelsIterator iterator yielding the nearest-cluster index per sample
 * @param handle
 * @param[in] X input matrix (size n_samples, n_features)
 * @param[in] sample_weights weight of each sample (size n_samples)
 * @param[in] centroids matrix of current centroids (size n_clusters, n_features)
 * @param[in] cluster_labels iterator over each sample's assigned cluster
 * @param[out] weight_per_cluster sum of sample weights per cluster (size n_clusters)
 * @param[out] new_centroids updated centroids (size n_clusters, n_features)
 * @param[inout] workspace temporary device scratch buffer
 */
template <typename DataT, typename IndexT, typename LabelsIterator>
void update_centroids(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT, row_major> X,
                      raft::device_vector_view<const DataT, IndexT> sample_weights,
                      raft::device_matrix_view<const DataT, IndexT, row_major> centroids,
                      // TODO: Figure out how to best wrap iterator types in mdspan
                      LabelsIterator cluster_labels,
                      raft::device_vector_view<DataT, IndexT> weight_per_cluster,
                      raft::device_matrix_view<DataT, IndexT, row_major> new_centroids,
                      rmm::device_uvector<char>& workspace)
{
  auto n_clusters = centroids.extent(0);
  auto n_samples  = X.extent(0);

  workspace.resize(n_samples, resource::get_cuda_stream(handle));

  // Calculates weighted sum of all the samples assigned to cluster-i and stores the
  // result in new_centroids[i]
  raft::linalg::reduce_rows_by_key((DataT*)X.data_handle(),
                                   X.extent(1),
                                   cluster_labels,
                                   sample_weights.data_handle(),
                                   workspace.data(),
                                   X.extent(0),
                                   X.extent(1),
                                   n_clusters,
                                   new_centroids.data_handle(),
                                   resource::get_cuda_stream(handle));

  // Reduce weights by key to compute weight in each cluster
  raft::linalg::reduce_cols_by_key(sample_weights.data_handle(),
                                   cluster_labels,
                                   weight_per_cluster.data_handle(),
                                   (IndexT)1,
                                   (IndexT)sample_weights.extent(0),
                                   (IndexT)n_clusters,
                                   resource::get_cuda_stream(handle));

  // Computes new_centroids[i] = new_centroids[i]/weight_per_cluster[i] where
  //   new_centroids[n_clusters x n_features] - 2D array, new_centroids[i] has sum of all the
  //   samples assigned to cluster-i
  //   weight_per_cluster[n_clusters] - 1D array, weight_per_cluster[i] contains sum of weights in
  //   cluster-i.
  // Note - when weight_per_cluster[i] is 0, new_centroids[i] is reset to 0
  raft::linalg::matrixVectorOp(new_centroids.data_handle(),
                               new_centroids.data_handle(),
                               weight_per_cluster.data_handle(),
                               new_centroids.extent(1),
                               new_centroids.extent(0),
                               true,
                               false,
                               raft::div_checkzero_op{},
                               resource::get_cuda_stream(handle));

  // copy centroids[i] to new_centroids[i] when weight_per_cluster[i] is 0
  // (an empty cluster keeps its previous centroid instead of collapsing to 0)
  cub::ArgIndexInputIterator<DataT*> itr_wt(weight_per_cluster.data_handle());
  raft::matrix::gather_if(
    const_cast<DataT*>(centroids.data_handle()),
    static_cast<int>(centroids.extent(1)),
    static_cast<int>(centroids.extent(0)),
    itr_wt,
    itr_wt,
    static_cast<int>(weight_per_cluster.size()),
    new_centroids.data_handle(),
    [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> map) {  // predicate
      // copy when the sum of weights in the cluster is 0
      return map.value == 0;
    },
    raft::key_op{},
    resource::get_cuda_stream(handle));
}
// TODO: Resizing is needed to use mdarray instead of rmm::device_uvector

/**
 * Runs Lloyd's iterations on pre-initialized centroids: repeatedly assigns
 * each sample to its nearest centroid, recomputes centroids as weighted
 * means, and stops when the centroid movement (and optionally the inertia
 * ratio) falls below params.tol or max_iter is reached. Writes the final
 * weighted inertia to `inertia` and the iteration count to `n_iter`.
 */
template <typename DataT, typename IndexT>
void kmeans_fit_main(raft::resources const& handle,
                     const KMeansParams& params,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<const DataT, IndexT> weight,
                     raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                     raft::host_scalar_view<DataT> inertia,
                     raft::host_scalar_view<IndexT> n_iter,
                     rmm::device_uvector<char>& workspace)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("kmeans_fit_main");
  logger::get(RAFT_NAME).set_level(params.verbosity);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  auto metric         = params.metric;

  // stores (key, value) pair corresponding to each sample where
  //   - key is the index of nearest cluster
  //   - value is the distance to the nearest cluster
  auto minClusterAndDistance =
    raft::make_device_vector<raft::KeyValuePair<IndexT, DataT>, IndexT>(handle, n_samples);

  // temporary buffer to store L2 norm of centroids or distance matrix,
  // destructor releases the resource
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);

  // temporary buffer to store intermediate centroids, destructor releases the
  // resource
  auto newCentroids = raft::make_device_matrix<DataT, IndexT>(handle, n_clusters, n_features);

  // temporary buffer to store weights per cluster, destructor releases the
  // resource
  auto wtInCluster = raft::make_device_vector<DataT, IndexT>(handle, n_clusters);

  rmm::device_scalar<DataT> clusterCostD(stream);

  // L2 norm of X: ||x||^2
  // Only required for the expanded-form L2 metrics.
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  auto l2normx_view =
    raft::make_device_vector_view<const DataT, IndexT>(L2NormX.data_handle(), n_samples);

  if (metric == cuvs::distance::DistanceType::L2Expanded ||
      metric == cuvs::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }

  RAFT_LOG_DEBUG(
    "Calling KMeans.fit with %d samples of input data and the initialized "
    "cluster centers",
    n_samples);

  DataT priorClusteringCost = 0;
  for (n_iter[0] = 1; n_iter[0] <= params.max_iter; ++n_iter[0]) {
    RAFT_LOG_DEBUG(
      "KMeans.fit: Iteration-%d: fitting the model using the initialized "
      "cluster centers",
      n_iter[0]);

    auto centroids = raft::make_device_matrix_view<DataT, IndexT>(
      centroidsRawData.data_handle(), n_clusters, n_features);

    // computes minClusterAndDistance[0:n_samples) where
    // minClusterAndDistance[i] is a <key, value> pair where
    //   'key' is index to a sample in 'centroids' (index of the nearest
    //   centroid) and 'value' is the distance between the sample 'X[i]' and the
    //   'centroid[key]'
    detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                        X,
                                                        centroids,
                                                        minClusterAndDistance.view(),
                                                        l2normx_view,
                                                        L2NormBuf_OR_DistBuf,
                                                        params.metric,
                                                        params.batch_samples,
                                                        params.batch_centroids,
                                                        workspace);

    // Using TransformInputIteratorT to dereference an array of
    // raft::KeyValuePair and converting them to just return the Key to be used
    // in reduce_rows_by_key prims
    detail::KeyValueIndexOp<IndexT, DataT> conversion_op;
    cub::TransformInputIterator<IndexT,
                                detail::KeyValueIndexOp<IndexT, DataT>,
                                raft::KeyValuePair<IndexT, DataT>*>
      itr(minClusterAndDistance.data_handle(), conversion_op);

    // Recompute centroids as the weighted mean of their assigned samples.
    update_centroids(handle,
                     X,
                     weight,
                     raft::make_device_matrix_view<const DataT, IndexT>(
                       centroidsRawData.data_handle(), n_clusters, n_features),
                     itr,
                     wtInCluster.view(),
                     newCentroids.view(),
                     workspace);

    // compute the squared norm between the newCentroids and the original
    // centroids, destructor releases the resource
    auto sqrdNorm = raft::make_device_scalar(handle, DataT(0));
    raft::linalg::mapThenSumReduce(sqrdNorm.data_handle(),
                                   newCentroids.size(),
                                   raft::sqdiff_op{},
                                   stream,
                                   centroids.data_handle(),
                                   newCentroids.data_handle());

    DataT sqrdNormError = 0;
    raft::copy(&sqrdNormError, sqrdNorm.data_handle(), sqrdNorm.size(), stream);

    raft::copy(
      centroidsRawData.data_handle(), newCentroids.data_handle(), newCentroids.size(), stream);

    bool done = false;
    if (params.inertia_check) {
      // calculate cluster cost phi_x(C)
      detail::computeClusterCost(handle,
                                 minClusterAndDistance.view(),
                                 workspace,
                                 raft::make_device_scalar_view(clusterCostD.data()),
                                 raft::value_op{},
                                 raft::add_op{});

      DataT curClusteringCost = clusterCostD.value(stream);

      ASSERT(curClusteringCost != (DataT)0.0,
             "Too few points and centroids being found is getting 0 cost from "
             "centers");

      if (n_iter[0] > 1) {
        // Converged when the cost stops improving by more than params.tol
        // relative to the previous iteration.
        DataT delta = curClusteringCost / priorClusteringCost;
        if (delta > 1 - params.tol) done = true;
      }
      priorClusteringCost = curClusteringCost;
    }

    // sqrdNormError was copied asynchronously; sync before reading it on host.
    resource::sync_stream(handle, stream);
    if (sqrdNormError < params.tol) done = true;

    if (done) {
      RAFT_LOG_DEBUG("Threshold triggered after %d iterations. Terminating early.", n_iter[0]);
      break;
    }
  }

  // Final assignment pass with the converged centroids to compute inertia.
  auto centroids = raft::make_device_matrix_view<DataT, IndexT>(
    centroidsRawData.data_handle(), n_clusters, n_features);

  detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                      X,
                                                      centroids,
                                                      minClusterAndDistance.view(),
                                                      l2normx_view,
                                                      L2NormBuf_OR_DistBuf,
                                                      params.metric,
                                                      params.batch_samples,
                                                      params.batch_centroids,
                                                      workspace);

  // TODO: add different templates for InType of binaryOp to avoid thrust transform
  // Scale each sample's nearest-cluster distance by its weight before summing.
  thrust::transform(raft::resource::get_thrust_policy(handle),
                    minClusterAndDistance.data_handle(),
                    minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                    weight.data_handle(),
                    minClusterAndDistance.data_handle(),
                    [=] __device__(const raft::KeyValuePair<IndexT, DataT> kvp, DataT wt) {
                      raft::KeyValuePair<IndexT, DataT> res;
                      res.value = kvp.value * wt;
                      res.key   = kvp.key;
                      return res;
                    });

  // calculate cluster cost phi_x(C)
  detail::computeClusterCost(handle,
                             minClusterAndDistance.view(),
                             workspace,
                             raft::make_device_scalar_view(clusterCostD.data()),
                             raft::value_op{},
                             raft::add_op{});

  inertia[0] = clusterCostD.value(stream);

  RAFT_LOG_DEBUG("KMeans.fit: completed after %d iterations with %f inertia[0] ",
                 n_iter[0] > params.max_iter ? n_iter[0] - 1 : n_iter[0],
                 inertia[0]);
}
/*
* @brief Selects 'n_clusters' samples from X using scalable kmeans++ algorithm.
* @note This is the algorithm described in
* "Scalable K-Means++", 2012, Bahman Bahmani, Benjamin Moseley,
* Andrea Vattani, Ravi Kumar, Sergei Vassilvitskii,
* https://arxiv.org/abs/1203.6402
* Scalable kmeans++ pseudocode
* 1: C = sample a point uniformly at random from X
* 2: psi = phi_X (C)
* 3: for O( log(psi) ) times do
* 4: C' = sample each point x in X independently with probability
* p_x = l * (d^2(x, C) / phi_X (C) )
* 5: C = C U C'
* 6: end for
* 7: For x in C, set w_x to be the number of points in X closer to x than any
* other point in C
* 8: Recluster the weighted points in C into k clusters
* TODO: Resizing is needed to use mdarray instead of rmm::device_uvector
*/
template <typename DataT, typename IndexT>
void initScalableKMeansPlusPlus(raft::resources const& handle,
                                const KMeansParams& params,
                                raft::device_matrix_view<const DataT, IndexT> X,
                                raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                                rmm::device_uvector<char>& workspace)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "initScalableKMeansPlusPlus");
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  auto metric         = params.metric;
  raft::random::RngState rng(params.rng_state.seed, params.rng_state.type);
  // <<<< Step-1 >>> : C <- sample a point uniformly at random from X
  // NOTE(review): host-side std::mt19937 is seeded independently of the device
  // RngState above; both derive from params.rng_state.seed.
  std::mt19937 gen(params.rng_state.seed);
  std::uniform_int_distribution<> dis(0, n_samples - 1);
  auto cIdx            = dis(gen);
  auto initialCentroid = raft::make_device_matrix_view<const DataT, IndexT>(
    X.data_handle() + cIdx * n_features, 1, n_features);
  // flag the sample that is chosen as initial centroid
  std::vector<uint8_t> h_isSampleCentroid(n_samples);
  std::fill(h_isSampleCentroid.begin(), h_isSampleCentroid.end(), 0);
  h_isSampleCentroid[cIdx] = 1;
  // device buffer to flag the sample that is chosen as initial centroid
  auto isSampleCentroid = raft::make_device_vector<uint8_t, IndexT>(handle, n_samples);
  raft::copy(
    isSampleCentroid.data_handle(), h_isSampleCentroid.data(), isSampleCentroid.size(), stream);
  // Growable backing store for the candidate centroid set C; potentialCentroids
  // below is always a view over this buffer and is re-created after each resize.
  rmm::device_uvector<DataT> centroidsBuf(initialCentroid.size(), stream);
  // reset buffer to store the chosen centroid
  raft::copy(centroidsBuf.data(), initialCentroid.data_handle(), initialCentroid.size(), stream);
  auto potentialCentroids = raft::make_device_matrix_view<DataT, IndexT>(
    centroidsBuf.data(), initialCentroid.extent(0), initialCentroid.extent(1));
  // <<< End of Step-1 >>>
  // temporary buffer to store L2 norm of centroids or distance matrix,
  // destructor releases the resource
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // L2 norm of X: ||x||^2
  // Only needed for the expanded-L2 metrics, which reuse these norms.
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (metric == cuvs::distance::DistanceType::L2Expanded ||
      metric == cuvs::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }
  auto minClusterDistanceVec = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  auto uniformRands          = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  rmm::device_scalar<DataT> clusterCost(stream);
  // <<< Step-2 >>>: psi <- phi_X (C)
  detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                   X,
                                                   potentialCentroids,
                                                   minClusterDistanceVec.view(),
                                                   L2NormX.view(),
                                                   L2NormBuf_OR_DistBuf,
                                                   params.metric,
                                                   params.batch_samples,
                                                   params.batch_centroids,
                                                   workspace);
  // compute partial cluster cost from the samples in rank
  detail::computeClusterCost(handle,
                             minClusterDistanceVec.view(),
                             workspace,
                             raft::make_device_scalar_view(clusterCost.data()),
                             raft::identity_op{},
                             raft::add_op{});
  // Blocking device->host read of the total cost psi.
  auto psi = clusterCost.value(stream);
  // <<< End of Step-2 >>>
  // Scalable kmeans++ paper claims 8 rounds is sufficient
  resource::sync_stream(handle, stream);
  int niter = std::min(8, (int)ceil(log(psi)));
  RAFT_LOG_DEBUG("KMeans||: psi = %g, log(psi) = %g, niter = %d ", psi, log(psi), niter);
  // <<<< Step-3 >>> : for O( log(psi) ) times do
  for (int iter = 0; iter < niter; ++iter) {
    RAFT_LOG_DEBUG("KMeans|| - Iteration %d: # potential centroids sampled - %d",
                   iter,
                   potentialCentroids.extent(0));
    detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                     X,
                                                     potentialCentroids,
                                                     minClusterDistanceVec.view(),
                                                     L2NormX.view(),
                                                     L2NormBuf_OR_DistBuf,
                                                     params.metric,
                                                     params.batch_samples,
                                                     params.batch_centroids,
                                                     workspace);
    detail::computeClusterCost(handle,
                               minClusterDistanceVec.view(),
                               workspace,
                               raft::make_device_scalar_view<DataT>(clusterCost.data()),
                               raft::identity_op{},
                               raft::add_op{});
    psi = clusterCost.value(stream);
    // <<<< Step-4 >>> : Sample each point x in X independently and identify new
    // potentialCentroids
    raft::random::uniform(
      handle, rng, uniformRands.data_handle(), uniformRands.extent(0), (DataT)0, (DataT)1);
    // select_op keeps sample i with probability l * d^2(x_i, C) / psi, skipping
    // samples already flagged as centroids.
    detail::SamplingOp<DataT, IndexT> select_op(psi,
                                                params.oversampling_factor,
                                                n_clusters,
                                                uniformRands.data_handle(),
                                                isSampleCentroid.data_handle());
    rmm::device_uvector<DataT> CpRaw(0, stream);
    detail::sampleCentroids<DataT, IndexT>(handle,
                                           X,
                                           minClusterDistanceVec.view(),
                                           isSampleCentroid.view(),
                                           select_op,
                                           CpRaw,
                                           workspace);
    auto Cp = raft::make_device_matrix_view<DataT, IndexT>(
      CpRaw.data(), CpRaw.size() / n_features, n_features);
    /// <<<< End of Step-4 >>>>
    /// <<<< Step-5 >>> : C = C U C'
    // append the data in Cp to the buffer holding the potentialCentroids
    // NOTE: resize may reallocate; potentialCentroids is rebuilt from the new
    // base pointer immediately below, before any further use.
    centroidsBuf.resize(centroidsBuf.size() + Cp.size(), stream);
    raft::copy(
      centroidsBuf.data() + centroidsBuf.size() - Cp.size(), Cp.data_handle(), Cp.size(), stream);
    IndexT tot_centroids = potentialCentroids.extent(0) + Cp.extent(0);
    potentialCentroids =
      raft::make_device_matrix_view<DataT, IndexT>(centroidsBuf.data(), tot_centroids, n_features);
    /// <<<< End of Step-5 >>>
  }  /// <<<< Step-6 >>>
  RAFT_LOG_DEBUG("KMeans||: total # potential centroids sampled - %d",
                 potentialCentroids.extent(0));
  if ((int)potentialCentroids.extent(0) > n_clusters) {
    // <<< Step-7 >>>: For x in C, set w_x to be the number of pts closest to X
    // temporary buffer to store the sample count per cluster, destructor
    // releases the resource
    auto weight = raft::make_device_vector<DataT, IndexT>(handle, potentialCentroids.extent(0));
    detail::countSamplesInCluster<DataT, IndexT>(
      handle, params, X, L2NormX.view(), potentialCentroids, workspace, weight.view());
    // <<< end of Step-7 >>>
    // Step-8: Recluster the weighted points in C into k clusters
    // kmeans++ seeds the recluster; a short weighted Lloyd run refines it.
    detail::kmeansPlusPlus<DataT, IndexT>(
      handle, params, potentialCentroids, centroidsRawData, workspace);
    auto inertia = make_host_scalar<DataT>(0);
    auto n_iter  = make_host_scalar<IndexT>(0);
    KMeansParams default_params;
    default_params.n_clusters = params.n_clusters;
    detail::kmeans_fit_main<DataT, IndexT>(handle,
                                           default_params,
                                           potentialCentroids,
                                           weight.view(),
                                           centroidsRawData,
                                           inertia.view(),
                                           n_iter.view(),
                                           workspace);
  } else if ((int)potentialCentroids.extent(0) < n_clusters) {
    // supplement with random
    auto n_random_clusters = n_clusters - potentialCentroids.extent(0);
    RAFT_LOG_DEBUG(
      "[Warning!] KMeans||: found fewer than %d centroids during "
      "initialization (found %d centroids, remaining %d centroids will be "
      "chosen randomly from input samples)",
      n_clusters,
      potentialCentroids.extent(0),
      n_random_clusters);
    // generate `n_random_clusters` centroids
    KMeansParams rand_params;
    rand_params.init       = KMeansParams::InitMethod::Random;
    rand_params.n_clusters = n_random_clusters;
    // Random centroids fill the front of centroidsRawData; the kmeans||
    // candidates are appended after them.
    initRandom<DataT, IndexT>(handle, rand_params, X, centroidsRawData);
    // copy centroids generated during kmeans|| iteration to the buffer
    raft::copy(centroidsRawData.data_handle() + n_random_clusters * n_features,
               potentialCentroids.data_handle(),
               potentialCentroids.size(),
               stream);
  } else {
    // found the required n_clusters
    raft::copy(centroidsRawData.data_handle(),
               potentialCentroids.data_handle(),
               potentialCentroids.size(),
               stream);
  }
}
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. It must be noted
* that the data must be in row-major format and stored in device accessible
* location.
* (the number of samples and features are inferred from the extents
* of X)
* @param[in] sample_weight Optional weights for each observation in X.
* @param[inout] centroids [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers
* [out] Otherwise, generated centroids from the
* kmeans algorithm is stored at the address pointed by 'centroids'.
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
template <typename DataT, typename IndexT>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                raft::device_matrix_view<const DataT, IndexT> X,
                std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                raft::device_matrix_view<DataT, IndexT> centroids,
                raft::host_scalar_view<DataT> inertia,
                raft::host_scalar_view<IndexT> n_iter)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("kmeans_fit");
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // Check that parameters are valid
  if (sample_weight.has_value())
    RAFT_EXPECTS(sample_weight.value().extent(0) == n_samples,
                 "invalid parameter (sample_weight!=n_samples)");
  RAFT_EXPECTS(n_clusters > 0, "invalid parameter (n_clusters<=0)");
  RAFT_EXPECTS(params.tol > 0, "invalid parameter (tol<=0)");
  RAFT_EXPECTS(params.oversampling_factor >= 0, "invalid parameter (oversampling_factor<0)");
  RAFT_EXPECTS((int)centroids.extent(0) == params.n_clusters,
               "invalid parameter (centroids.extent(0) != n_clusters)");
  RAFT_EXPECTS(centroids.extent(1) == n_features,
               "invalid parameter (centroids.extent(1) != n_features)");
  // Display a message if the batch size is smaller than n_samples but will be ignored
  if (params.batch_samples < (int)n_samples &&
      (params.metric == cuvs::distance::DistanceType::L2Expanded ||
       params.metric == cuvs::distance::DistanceType::L2SqrtExpanded)) {
    RAFT_LOG_DEBUG(
      "batch_samples=%d was passed, but batch_samples=%d will be used (reason: "
      "batch_samples has no impact on the memory footprint when FusedL2NN can be used)",
      params.batch_samples,
      (int)n_samples);
  }
  // Display a message if batch_centroids is set and a fusedL2NN-compatible metric is used
  if (params.batch_centroids != 0 && params.batch_centroids != params.n_clusters &&
      (params.metric == cuvs::distance::DistanceType::L2Expanded ||
       params.metric == cuvs::distance::DistanceType::L2SqrtExpanded)) {
    RAFT_LOG_DEBUG(
      "batch_centroids=%d was passed, but batch_centroids=%d will be used (reason: "
      "batch_centroids has no impact on the memory footprint when FusedL2NN can be used)",
      params.batch_centroids,
      params.n_clusters);
  }
  logger::get(RAFT_NAME).set_level(params.verbosity);
  // Allocate memory
  rmm::device_uvector<char> workspace(0, stream);
  // Fix: specify IndexT explicitly so the weight vector's index type matches
  // X and the detail kernels (consistent with the identical code path in
  // kmeans_predict, which uses <DataT, IndexT>).
  auto weight = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (sample_weight.has_value())
    raft::copy(weight.data_handle(), sample_weight.value().data_handle(), n_samples, stream);
  else
    thrust::fill(raft::resource::get_thrust_policy(handle),
                 weight.data_handle(),
                 weight.data_handle() + weight.size(),
                 1);
  // check if weights sum up to n_samples
  checkWeight<DataT>(handle, weight.view(), workspace);
  auto centroidsRawData = raft::make_device_matrix<DataT, IndexT>(handle, n_clusters, n_features);
  auto n_init           = params.n_init;
  if (params.init == KMeansParams::InitMethod::Array && n_init != 1) {
    RAFT_LOG_DEBUG(
      "Explicit initial center position passed: performing only one init in "
      "k-means instead of n_init=%d",
      n_init);
    n_init = 1;
  }
  // Host RNG produces a distinct seed for each of the n_init restarts; the
  // best run (lowest inertia) wins.
  std::mt19937 gen(params.rng_state.seed);
  inertia[0] = std::numeric_limits<DataT>::max();
  for (auto seed_iter = 0; seed_iter < n_init; ++seed_iter) {
    KMeansParams iter_params   = params;
    iter_params.rng_state.seed = gen();
    DataT iter_inertia    = std::numeric_limits<DataT>::max();
    IndexT n_current_iter = 0;
    if (iter_params.init == KMeansParams::InitMethod::Random) {
      // initializing with random samples from input dataset
      RAFT_LOG_DEBUG(
        "KMeans.fit (Iteration-%d/%d): initialize cluster centers by "
        "randomly choosing from the "
        "input data.",
        seed_iter + 1,
        n_init);
      initRandom<DataT, IndexT>(handle, iter_params, X, centroidsRawData.view());
    } else if (iter_params.init == KMeansParams::InitMethod::KMeansPlusPlus) {
      // default method to initialize is kmeans++
      RAFT_LOG_DEBUG(
        "KMeans.fit (Iteration-%d/%d): initialize cluster centers using "
        "k-means++ algorithm.",
        seed_iter + 1,
        n_init);
      // oversampling_factor == 0 selects plain kmeans++; otherwise the
      // scalable kmeans|| variant is used.
      if (iter_params.oversampling_factor == 0)
        detail::kmeansPlusPlus<DataT, IndexT>(
          handle, iter_params, X, centroidsRawData.view(), workspace);
      else
        detail::initScalableKMeansPlusPlus<DataT, IndexT>(
          handle, iter_params, X, centroidsRawData.view(), workspace);
    } else if (iter_params.init == KMeansParams::InitMethod::Array) {
      RAFT_LOG_DEBUG(
        "KMeans.fit (Iteration-%d/%d): initialize cluster centers from "
        "the ndarray array input "
        "passed to init argument.",
        seed_iter + 1,
        n_init);
      raft::copy(
        centroidsRawData.data_handle(), centroids.data_handle(), n_clusters * n_features, stream);
    } else {
      THROW("unknown initialization method to select initial centers");
    }
    detail::kmeans_fit_main<DataT, IndexT>(handle,
                                           iter_params,
                                           X,
                                           weight.view(),
                                           centroidsRawData.view(),
                                           raft::make_host_scalar_view<DataT>(&iter_inertia),
                                           raft::make_host_scalar_view<IndexT>(&n_current_iter),
                                           workspace);
    // Keep the centroids of the best restart seen so far.
    if (iter_inertia < inertia[0]) {
      inertia[0] = iter_inertia;
      n_iter[0]  = n_current_iter;
      raft::copy(
        centroids.data_handle(), centroidsRawData.data_handle(), n_clusters * n_features, stream);
    }
    RAFT_LOG_DEBUG("KMeans.fit after iteration-%d/%d: inertia - %f, n_iter[0] - %d",
                   seed_iter + 1,
                   n_init,
                   inertia[0],
                   n_iter[0]);
  }
  RAFT_LOG_DEBUG("KMeans.fit: async call returned (fit could still be running on the device)");
}
/**
 * @brief Raw-pointer overload of kmeans_fit: wraps the device buffers in
 * mdspan views and forwards to the mdspan-based overload above.
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                const DataT* X,
                const DataT* sample_weight,
                DataT* centroids,
                IndexT n_samples,
                IndexT n_features,
                DataT& inertia,
                IndexT& n_iter)
{
  auto XView = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto centroidsView =
    raft::make_device_matrix_view<DataT, IndexT>(centroids, params.n_clusters, n_features);
  // Fix: the optional's view type must carry IndexT explicitly; the previous
  // declaration omitted it and fell back to the default index type, which does
  // not match detail::kmeans_fit's parameter when IndexT is non-default.
  // (Now consistent with the kmeans_predict raw-pointer wrapper.)
  std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weightView{std::nullopt};
  if (sample_weight)
    sample_weightView.emplace(
      raft::make_device_vector_view<const DataT, IndexT>(sample_weight, n_samples));
  auto inertiaView = raft::make_host_scalar_view(&inertia);
  auto n_iterView  = raft::make_host_scalar_view(&n_iter);
  detail::kmeans_fit<DataT, IndexT>(
    handle, params, XView, sample_weightView, centroidsView, inertiaView, n_iterView);
}
/**
 * @brief Predict the closest cluster for each sample in X, given trained
 * centroids; also reports the (weighted) inertia of the assignment.
 *
 * @param[in]  handle           raft resources handle
 * @param[in]  params           KMeans parameters (n_clusters, metric, batching)
 * @param[in]  X                input samples [n_samples x n_features], row-major
 * @param[in]  sample_weight    optional per-sample weights (defaults to 1)
 * @param[in]  centroids        trained centroids [n_clusters x n_features]
 * @param[out] labels           index of the nearest centroid per sample
 * @param[in]  normalize_weight if true, rescale weights to sum to n_samples
 * @param[out] inertia          sum of weighted distances to nearest centroids
 */
template <typename DataT, typename IndexT>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                    raft::device_matrix_view<const DataT, IndexT> centroids,
                    raft::device_vector_view<IndexT, IndexT> labels,
                    bool normalize_weight,
                    raft::host_scalar_view<DataT> inertia)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("kmeans_predict");
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // Check that parameters are valid
  if (sample_weight.has_value())
    RAFT_EXPECTS(sample_weight.value().extent(0) == n_samples,
                 "invalid parameter (sample_weight!=n_samples)");
  RAFT_EXPECTS(params.n_clusters > 0, "invalid parameter (n_clusters<=0)");
  RAFT_EXPECTS(params.tol > 0, "invalid parameter (tol<=0)");
  RAFT_EXPECTS(params.oversampling_factor >= 0, "invalid parameter (oversampling_factor<0)");
  RAFT_EXPECTS((int)centroids.extent(0) == params.n_clusters,
               "invalid parameter (centroids.extent(0) != n_clusters)");
  RAFT_EXPECTS(centroids.extent(1) == n_features,
               "invalid parameter (centroids.extent(1) != n_features)");
  logger::get(RAFT_NAME).set_level(params.verbosity);
  auto metric = params.metric;
  // Allocate memory
  // Device-accessible allocation of expandable storage used as temporary buffers
  rmm::device_uvector<char> workspace(0, stream);
  auto weight = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (sample_weight.has_value())
    raft::copy(weight.data_handle(), sample_weight.value().data_handle(), n_samples, stream);
  else
    thrust::fill(raft::resource::get_thrust_policy(handle),
                 weight.data_handle(),
                 weight.data_handle() + weight.size(),
                 1);
  // check if weights sum up to n_samples
  if (normalize_weight) checkWeight(handle, weight.view(), workspace);
  auto minClusterAndDistance =
    raft::make_device_vector<raft::KeyValuePair<IndexT, DataT>, IndexT>(handle, n_samples);
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // L2 norm of X: ||x||^2
  // Precomputed only for the expanded-L2 metrics, which reuse these norms.
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (metric == cuvs::distance::DistanceType::L2Expanded ||
      metric == cuvs::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }
  // computes minClusterAndDistance[0:n_samples) where minClusterAndDistance[i]
  // is a <key, value> pair where
  // 'key' is index to a sample in 'centroids' (index of the nearest
  // centroid) and 'value' is the distance between the sample 'X[i]' and the
  // 'centroid[key]'
  auto l2normx_view =
    raft::make_device_vector_view<const DataT, IndexT>(L2NormX.data_handle(), n_samples);
  detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                      X,
                                                      centroids,
                                                      minClusterAndDistance.view(),
                                                      l2normx_view,
                                                      L2NormBuf_OR_DistBuf,
                                                      params.metric,
                                                      params.batch_samples,
                                                      params.batch_centroids,
                                                      workspace);
  // calculate cluster cost phi_x(C)
  rmm::device_scalar<DataT> clusterCostD(stream);
  // TODO: add different templates for InType of binaryOp to avoid thrust transform
  // Scale each distance by its sample weight in place; the key (label) is
  // preserved so the label extraction below still works afterwards.
  thrust::transform(raft::resource::get_thrust_policy(handle),
                    minClusterAndDistance.data_handle(),
                    minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                    weight.data_handle(),
                    minClusterAndDistance.data_handle(),
                    [=] __device__(const raft::KeyValuePair<IndexT, DataT> kvp, DataT wt) {
                      raft::KeyValuePair<IndexT, DataT> res;
                      res.value = kvp.value * wt;
                      res.key   = kvp.key;
                      return res;
                    });
  // Reduce the weighted distances (value component) into the total inertia.
  detail::computeClusterCost(handle,
                             minClusterAndDistance.view(),
                             workspace,
                             raft::make_device_scalar_view(clusterCostD.data()),
                             raft::value_op{},
                             raft::add_op{});
  // Extract the key component (nearest-centroid index) as the output labels.
  thrust::transform(raft::resource::get_thrust_policy(handle),
                    minClusterAndDistance.data_handle(),
                    minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                    labels.data_handle(),
                    raft::key_op{});
  // Blocking device->host read of the reduced cost.
  inertia[0] = clusterCostD.value(stream);
}
/**
 * @brief Raw-pointer overload of kmeans_predict: wraps the device buffers in
 * mdspan views and forwards to the mdspan-based overload above.
 */
template <typename DataT, typename IndexT = int>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    const DataT* X,
                    const DataT* sample_weight,
                    const DataT* centroids,
                    IndexT n_samples,
                    IndexT n_features,
                    IndexT* labels,
                    bool normalize_weight,
                    DataT& inertia)
{
  auto x_view = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto centroids_view =
    raft::make_device_matrix_view<const DataT, IndexT>(centroids, params.n_clusters, n_features);
  auto labels_view  = raft::make_device_vector_view<IndexT, IndexT>(labels, n_samples);
  auto inertia_view = raft::make_host_scalar_view(&inertia);
  // A null sample_weight pointer maps to an empty optional (uniform weights).
  std::optional<raft::device_vector_view<const DataT, IndexT>> weight_view{std::nullopt};
  if (sample_weight != nullptr) {
    weight_view.emplace(
      raft::make_device_vector_view<const DataT, IndexT>(sample_weight, n_samples));
  }
  detail::kmeans_predict<DataT, IndexT>(handle,
                                        params,
                                        x_view,
                                        weight_view,
                                        centroids_view,
                                        labels_view,
                                        normalize_weight,
                                        inertia_view);
}
/**
 * @brief Fit k-means on X and then predict labels for the same data.
 * When no centroid buffer is supplied, a scratch one is allocated internally.
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        raft::device_matrix_view<const DataT, IndexT> X,
                        std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                        std::optional<raft::device_matrix_view<DataT, IndexT>> centroids,
                        raft::device_vector_view<IndexT, IndexT> labels,
                        raft::host_scalar_view<DataT> inertia,
                        raft::host_scalar_view<IndexT> n_iter)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("kmeans_fit_predict");
  if (centroids.has_value()) {
    // Caller provided centroid storage: fit into it, then predict with it.
    auto centroids_view = centroids.value();
    detail::kmeans_fit<DataT, IndexT>(
      handle, params, X, sample_weight, centroids_view, inertia, n_iter);
    detail::kmeans_predict<DataT, IndexT>(
      handle, params, X, sample_weight, centroids_view, labels, true, inertia);
  } else {
    // No centroid buffer supplied: allocate a temporary one for the fit.
    auto scratch_centroids =
      raft::make_device_matrix<DataT, IndexT>(handle, params.n_clusters, X.extent(1));
    detail::kmeans_fit<DataT, IndexT>(
      handle, params, X, sample_weight, scratch_centroids.view(), inertia, n_iter);
    detail::kmeans_predict<DataT, IndexT>(
      handle, params, X, sample_weight, scratch_centroids.view(), labels, true, inertia);
  }
}
/**
 * @brief Raw-pointer overload of kmeans_fit_predict: wraps the device buffers
 * in mdspan views and forwards to the mdspan-based overload above.
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        const DataT* X,
                        const DataT* sample_weight,
                        DataT* centroids,
                        IndexT n_samples,
                        IndexT n_features,
                        IndexT* labels,
                        DataT& inertia,
                        IndexT& n_iter)
{
  auto x_view      = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto labels_view = raft::make_device_vector_view<IndexT, IndexT>(labels, n_samples);
  // A null sample_weight pointer maps to an empty optional (uniform weights).
  std::optional<raft::device_vector_view<const DataT, IndexT>> weight_view{std::nullopt};
  if (sample_weight != nullptr) {
    weight_view.emplace(
      raft::make_device_vector_view<const DataT, IndexT>(sample_weight, n_samples));
  }
  // A null centroids pointer lets the callee allocate scratch centroids.
  std::optional<raft::device_matrix_view<DataT, IndexT>> centroids_view{std::nullopt};
  if (centroids != nullptr) {
    centroids_view.emplace(
      raft::make_device_matrix_view<DataT, IndexT>(centroids, params.n_clusters, n_features));
  }
  auto inertia_view = raft::make_host_scalar_view(&inertia);
  auto n_iter_view  = raft::make_host_scalar_view(&n_iter);
  detail::kmeans_fit_predict<DataT, IndexT>(handle,
                                            params,
                                            x_view,
                                            weight_view,
                                            centroids_view,
                                            labels_view,
                                            inertia_view,
                                            n_iter_view);
}
/**
* @brief Transform X to a cluster-distance space.
*
* @param[in] handle The handle to the cuML library context that
* manages the CUDA resources.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format
* @param[in] centroids Cluster centroids. The data must be in row-major format.
* @param[out] X_new X transformed in the new space.
*/
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      raft::device_matrix_view<const DataT> X,
                      raft::device_matrix_view<const DataT> centroids,
                      raft::device_matrix_view<DataT> X_new)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("kmeans_transform");
  logger::get(RAFT_NAME).set_level(params.verbosity);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  auto metric         = params.metric;
  // Device-accessible allocation of expandable storage used as temporary buffers
  rmm::device_uvector<char> workspace(0, stream);
  auto dataBatchSize = getDataBatchSize(params.batch_samples, n_samples);
  // tile over the input data and calculate distance matrix [n_samples x
  // n_clusters]
  // NOTE: X_new is addressed with row stride n_clusters below, i.e. the output
  // is an [n_samples x n_clusters] cluster-distance matrix.
  for (IndexT dIdx = 0; dIdx < (IndexT)n_samples; dIdx += dataBatchSize) {
    // # of samples for the current batch
    // (the last batch may be smaller when dataBatchSize does not divide n_samples)
    auto ns = std::min(static_cast<IndexT>(dataBatchSize), static_cast<IndexT>(n_samples - dIdx));
    // datasetView [ns x n_features] - view representing the current batch of
    // input dataset
    auto datasetView = raft::make_device_matrix_view<const DataT, IndexT>(
      X.data_handle() + n_features * dIdx, ns, n_features);
    // pairwiseDistanceView [ns x n_clusters]
    auto pairwiseDistanceView = raft::make_device_matrix_view<DataT, IndexT>(
      X_new.data_handle() + n_clusters * dIdx, ns, n_clusters);
    // calculate pairwise distance between cluster centroids and current batch
    // of input dataset
    pairwise_distance_kmeans<DataT, IndexT>(
      handle, datasetView, centroids, pairwiseDistanceView, workspace, metric);
  }
}
/**
 * @brief Raw-pointer overload of kmeans_transform: wraps the device buffers in
 * mdspan views and forwards to the mdspan-based overload above.
 */
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      const DataT* X,
                      const DataT* centroids,
                      IndexT n_samples,
                      IndexT n_features,
                      DataT* X_new)
{
  auto XView = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto centroidsView =
    raft::make_device_matrix_view<const DataT, IndexT>(centroids, params.n_clusters, n_features);
  // Fix: the transformed output lives in cluster-distance space, so it has
  // n_clusters columns (one distance per centroid), not n_features. The
  // mdspan-based implementation writes n_samples * n_clusters values into
  // X_new (it tiles views of shape [ns x n_clusters] at stride n_clusters).
  auto X_newView =
    raft::make_device_matrix_view<DataT, IndexT>(X_new, n_samples, params.n_clusters);
  detail::kmeans_transform<DataT, IndexT>(handle, params, XView, centroidsView, X_newView);
}
} // namespace detail
} // namespace cluster
} // namespace cuvs
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/detail/single_linkage.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <cuvs/cluster/detail/agglomerative.cuh>
#include <cuvs/cluster/detail/connectivities.cuh>
#include <cuvs/cluster/detail/mst.cuh>
#include <cuvs/cluster/single_linkage_types.hpp>
namespace cuvs::cluster::detail {
static const size_t EMPTY = 0;
/**
* Single-linkage clustering, capable of constructing a KNN graph to
* scale the algorithm beyond the n^2 memory consumption of implementations
* that use the fully-connected graph of pairwise distances by connecting
* a knn graph when k is not large enough to connect it.
* @tparam value_idx
* @tparam value_t
* @tparam dist_type method to use for constructing connectivities graph
* @param[in] handle raft handle
* @param[in] X dense input matrix in row-major layout
* @param[in] m number of rows in X
* @param[in] n number of columns in X
* @param[in] metric distance metrix to use when constructing connectivities graph
* @param[out] out struct containing output dendrogram and cluster assignments
* @param[in] c a constant used when constructing connectivities from knn graph. Allows the indirect
control
* of k. The algorithm will set `k = log(n) + c`
* @param[in] n_clusters number of clusters to assign data samples
*/
template <typename value_idx, typename value_t, LinkageDistance dist_type>
void single_linkage(raft::resources const& handle,
                    const value_t* X,
                    size_t m,
                    size_t n,
                    cuvs::distance::DistanceType metric,
                    linkage_output<value_idx>* out,
                    int c,
                    size_t n_clusters)
{
  ASSERT(n_clusters <= m, "n_clusters must be less than or equal to the number of data points");
  auto stream = resource::get_cuda_stream(handle);
  // CSR buffers for the connectivity graph; filled by get_distance_graph below.
  rmm::device_uvector<value_idx> indptr(EMPTY, stream);
  rmm::device_uvector<value_idx> indices(EMPTY, stream);
  rmm::device_uvector<value_t> pw_dists(EMPTY, stream);
  /**
   * 1. Construct distance graph
   */
  detail::get_distance_graph<value_idx, value_t, dist_type>(
    handle, X, m, n, metric, indptr, indices, pw_dists, c);
  // A spanning tree over m points has exactly m - 1 edges.
  rmm::device_uvector<value_idx> mst_rows(m - 1, stream);
  rmm::device_uvector<value_idx> mst_cols(m - 1, stream);
  rmm::device_uvector<value_t> mst_data(m - 1, stream);
  /**
   * 2. Construct MST, sorted by weights
   */
  rmm::device_uvector<value_idx> color(m, stream);
  // op repairs disconnected components when the knn graph is not fully connected.
  raft::sparse::neighbors::FixConnectivitiesRedOp<value_idx, value_t> op(m);
  detail::build_sorted_mst<value_idx, value_t>(handle,
                                               X,
                                               indptr.data(),
                                               indices.data(),
                                               pw_dists.data(),
                                               m,
                                               n,
                                               mst_rows.data(),
                                               mst_cols.data(),
                                               mst_data.data(),
                                               color.data(),
                                               indices.size(),
                                               op,
                                               metric);
  // Free the pairwise-distance buffer early; it is not needed past this point.
  pw_dists.release();
  /**
   * Perform hierarchical labeling
   */
  size_t n_edges = mst_rows.size();
  rmm::device_uvector<value_t> out_delta(n_edges, stream);
  rmm::device_uvector<value_idx> out_size(n_edges, stream);
  // Create dendrogram
  detail::build_dendrogram_host<value_idx, value_t>(handle,
                                                    mst_rows.data(),
                                                    mst_cols.data(),
                                                    mst_data.data(),
                                                    n_edges,
                                                    out->children,
                                                    out_delta.data(),
                                                    out_size.data());
  // Cut the dendrogram into n_clusters flat cluster labels.
  detail::extract_flattened_clusters(handle, out->labels, out->children, n_clusters, m);
  out->m                      = m;
  out->n_clusters             = n_clusters;
  out->n_leaves               = m;
  out->n_connected_components = 1;
}
}; // namespace cuvs::cluster::detail | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/detail/kmeans_common.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <random>
#include <cub/cub.cuh>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <cuvs/cluster/kmeans_types.hpp>
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/distance/fused_l2_nn.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/random/permute.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace cuvs {
namespace cluster {
namespace detail {
// Selection functor for the kmeans|| oversampling step: keeps sample i with
// probability l * k * d^2(x_i, C) / phi_X(C), skipping samples that are
// already centroids.
template <typename DataT, typename IndexT>
struct SamplingOp {
  DataT* rnd;                  // per-sample uniform random draws in [0, 1)
  uint8_t* flag;               // nonzero if the sample is already a centroid
  DataT cluster_cost;          // phi_X(C): total cost of the current centroid set
  double oversampling_factor;  // l in the scalable kmeans++ paper
  IndexT n_clusters;           // k
  CUB_RUNTIME_FUNCTION __forceinline__
  SamplingOp(DataT c, double l, IndexT k, DataT* rand, uint8_t* ptr)
    : cluster_cost(c), oversampling_factor(l), n_clusters(k), rnd(rand), flag(ptr)
  {
  }
  // a.key is the sample index, a.value its distance to the closest centroid.
  __host__ __device__ __forceinline__ bool operator()(
    const raft::KeyValuePair<ptrdiff_t, DataT>& a) const
  {
    DataT prob_threshold = (DataT)rnd[a.key];
    DataT prob_x = ((oversampling_factor * n_clusters * a.value) / cluster_cost);
    return !flag[a.key] && (prob_x > prob_threshold);
  }
};
// Projection functor: extracts the key (e.g. the nearest-centroid index) from
// a raft::KeyValuePair. Usable on both host and device.
template <typename IndexT, typename DataT>
struct KeyValueIndexOp {
  __host__ __device__ __forceinline__ IndexT
  operator()(const raft::KeyValuePair<IndexT, DataT>& a) const
  {
    return a.key;
  }
};
// Computes the intensity histogram from a sequence of labels
template <typename SampleIteratorT, typename CounterT, typename IndexT>
void countLabels(raft::resources const& handle,
                 SampleIteratorT labels,
                 CounterT* count,
                 IndexT n_samples,
                 IndexT n_clusters,
                 rmm::device_uvector<char>& workspace)
{
  // Counts, per cluster id in [0, n_clusters), how many of the n_samples
  // labels fall into it, writing the n_clusters counters to `count` (device).
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // CUB::DeviceHistogram requires a signed index type
  typedef typename std::make_signed_t<IndexT> CubIndexT;
  // n_clusters bins need n_clusters + 1 level boundaries.
  CubIndexT num_levels  = n_clusters + 1;
  CubIndexT lower_level = 0;
  CubIndexT upper_level = n_clusters;
  // Standard two-phase CUB pattern: first call only sizes the temp storage,
  // the second performs the histogram.
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
                                                    temp_storage_bytes,
                                                    labels,
                                                    count,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    static_cast<CubIndexT>(n_samples),
                                                    stream));
  workspace.resize(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
                                                    temp_storage_bytes,
                                                    labels,
                                                    count,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    static_cast<CubIndexT>(n_samples),
                                                    stream));
}
template <typename DataT, typename IndexT>
void checkWeight(raft::resources const& handle,
                 raft::device_vector_view<DataT, IndexT> weight,
                 rmm::device_uvector<char>& workspace)
{
  // Verifies that the sample weights sum to n_samples and, if not, rescales
  // them in place so they do. Blocks the host on `stream` to read the sum.
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto wt_aggr    = raft::make_device_scalar<DataT>(handle, 0);
  auto n_samples  = weight.extent(0);
  // Two-phase CUB reduce: size the temp storage, then sum the weights.
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceReduce::Sum(
    nullptr, temp_storage_bytes, weight.data_handle(), wt_aggr.data_handle(), n_samples, stream));
  workspace.resize(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceReduce::Sum(workspace.data(),
                                       temp_storage_bytes,
                                       weight.data_handle(),
                                       wt_aggr.data_handle(),
                                       n_samples,
                                       stream));
  DataT wt_sum = 0;
  raft::copy(&wt_sum, wt_aggr.data_handle(), 1, stream);
  resource::sync_stream(handle, stream);
  // NOTE(review): exact floating-point comparison — any drift from n_samples
  // (including rounding in the reduction) triggers the normalization below.
  if (wt_sum != n_samples) {
    RAFT_LOG_DEBUG(
      "[Warning!] KMeans: normalizing the user provided sample weight to "
      "sum up to %d samples",
      n_samples);
    auto scale = static_cast<DataT>(n_samples) / wt_sum;
    raft::linalg::unaryOp(weight.data_handle(),
                          weight.data_handle(),
                          n_samples,
                          raft::mul_const_op<DataT>{scale},
                          stream);
  }
}
// Effective sample-batch size: the smaller of `batch_samples` and `n_samples`,
// except that a value of 0 means "no batching" and yields the full n_samples.
template <typename IndexT>
IndexT getDataBatchSize(int batch_samples, IndexT n_samples)
{
  const IndexT requested = std::min(static_cast<IndexT>(batch_samples), n_samples);
  if (requested == 0) { return n_samples; }
  return requested;
}
// Effective centroid-batch size: the smaller of `batch_centroids` and
// `n_local_clusters`; 0 disables batching and yields all clusters at once.
template <typename IndexT>
IndexT getCentroidsBatchSize(int batch_centroids, IndexT n_local_clusters)
{
  const IndexT requested = std::min(static_cast<IndexT>(batch_centroids), n_local_clusters);
  if (requested == 0) { return n_local_clusters; }
  return requested;
}
// Reduces the per-sample minimum cluster distances into a single scalar cost:
// each element is first mapped through `main_op`, then all mapped values are
// folded with `reduction_op`; the result is written to the device scalar
// `clusterCost`.
template <typename InputT,
          typename OutputT,
          typename MainOpT,
          typename ReductionOpT,
          typename IndexT = int>
void computeClusterCost(raft::resources const& handle,
                        raft::device_vector_view<InputT, IndexT> minClusterDistance,
                        rmm::device_uvector<char>& workspace,
                        raft::device_scalar_view<OutputT> clusterCost,
                        MainOpT main_op,
                        ReductionOpT reduction_op)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // Lazily applies main_op to each element as CUB reads the input.
  cub::TransformInputIterator<OutputT, MainOpT, InputT*> mapped(minClusterDistance.data_handle(),
                                                                main_op);
  // Two-phase CUB protocol: size query with temp == nullptr, then the
  // actual reduction with the resized workspace.
  auto run_reduce = [&](void* temp, size_t& n_temp_bytes) {
    RAFT_CUDA_TRY(cub::DeviceReduce::Reduce(temp,
                                            n_temp_bytes,
                                            mapped,
                                            clusterCost.data_handle(),
                                            minClusterDistance.size(),
                                            reduction_op,
                                            OutputT(),
                                            stream));
  };
  size_t n_temp_bytes = 0;
  run_reduce(nullptr, n_temp_bytes);
  workspace.resize(n_temp_bytes, stream);
  run_reduce(workspace.data(), n_temp_bytes);
}
// Samples rows of X to serve as candidate centroids: row i is selected when
// its <index, minClusterDistance[i]> pair satisfies `select_op`. Selected rows
// are flagged in `isSampleCentroid` (set to 1) and copied into `inRankCp`,
// which is resized to hold [nSelected x n_features].
template <typename DataT, typename IndexT>
void sampleCentroids(raft::resources const& handle,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<DataT, IndexT> minClusterDistance,
                     raft::device_vector_view<uint8_t, IndexT> isSampleCentroid,
                     SamplingOp<DataT, IndexT>& select_op,
                     rmm::device_uvector<DataT>& inRankCp,
                     rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_local_samples = X.extent(0);
  auto n_features = X.extent(1);
  // Device-side counter receiving the number of rows DeviceSelect keeps.
  auto nSelected = raft::make_device_scalar<IndexT>(handle, 0);
  // Pair each distance with its row index so the selected entries remember
  // which row of X they came from.
  cub::ArgIndexInputIterator<DataT*> ip_itr(minClusterDistance.data_handle());
  auto sampledMinClusterDistance =
    raft::make_device_vector<raft::KeyValuePair<ptrdiff_t, DataT>, IndexT>(handle, n_local_samples);
  // Two-phase CUB protocol: query temp-storage size, then run the selection.
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceSelect::If(nullptr,
                                      temp_storage_bytes,
                                      ip_itr,
                                      sampledMinClusterDistance.data_handle(),
                                      nSelected.data_handle(),
                                      n_local_samples,
                                      select_op,
                                      stream));
  workspace.resize(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceSelect::If(workspace.data(),
                                      temp_storage_bytes,
                                      ip_itr,
                                      sampledMinClusterDistance.data_handle(),
                                      nSelected.data_handle(),
                                      n_local_samples,
                                      select_op,
                                      stream));
  // Bring the selected count to the host; sync before reading it.
  IndexT nPtsSampledInRank = 0;
  raft::copy(&nPtsSampledInRank, nSelected.data_handle(), 1, stream);
  resource::sync_stream(handle, stream);
  // Flag each selected row; val.key holds the original row index in X.
  uint8_t* rawPtr_isSampleCentroid = isSampleCentroid.data_handle();
  thrust::for_each_n(raft::resource::get_thrust_policy(handle),
                     sampledMinClusterDistance.data_handle(),
                     nPtsSampledInRank,
                     [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> val) {
                       rawPtr_isSampleCentroid[val.key] = 1;
                     });
  // Materialize the selected rows of X into inRankCp, gathering by the
  // stored keys (raft::key_op extracts the row index from each pair).
  inRankCp.resize(nPtsSampledInRank * n_features, stream);
  raft::matrix::gather((DataT*)X.data_handle(),
                       X.extent(1),
                       X.extent(0),
                       sampledMinClusterDistance.data_handle(),
                       nPtsSampledInRank,
                       inRankCp.data(),
                       raft::key_op{},
                       stream);
}
// Computes the pairwise distance matrix between every row of 'dataset[n x d]'
// and every row of 'centroids[k x d]'; the result is stored in
// 'pairwiseDistance[n x k]'. Neither input matrix is modified.
template <typename DataT, typename IndexT>
void pairwise_distance_kmeans(raft::resources const& handle,
                              raft::device_matrix_view<const DataT, IndexT> X,
                              raft::device_matrix_view<const DataT, IndexT> centroids,
                              raft::device_matrix_view<DataT, IndexT> pairwiseDistance,
                              rmm::device_uvector<char>& workspace,
                              cuvs::distance::DistanceType metric)
{
  // Both operands must live in the same feature space.
  ASSERT(X.extent(1) == centroids.extent(1),
         "# features in dataset and centroids are different (must be same)");

  cuvs::distance::pairwise_distance(handle,
                                    X.data_handle(),
                                    centroids.data_handle(),
                                    pairwiseDistance.data_handle(),
                                    X.extent(0),
                                    centroids.extent(0),
                                    X.extent(1),
                                    workspace,
                                    metric);
}
// Shuffles the row order of `in` and copies the first `n_samples_to_gather`
// rows of the shuffled order into `out`. The input matrix is not modified;
// only the index permutation and the gathered subset are materialized.
template <typename DataT, typename IndexT>
void shuffleAndGather(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT> in,
                      raft::device_matrix_view<DataT, IndexT> out,
                      uint32_t n_samples_to_gather,
                      uint64_t seed)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples  = in.extent(0);
  auto n_features = in.extent(1);

  // Holds a random permutation of [0, n_samples).
  auto indices = raft::make_device_vector<IndexT, IndexT>(handle, n_samples);

  // Shuffle indices on device; the data itself is not permuted here.
  // NOTE(review): `seed` is accepted but never forwarded to
  // raft::random::permute — confirm whether the permutation is expected to be
  // seed-reproducible.
  raft::random::permute<DataT, IndexT, IndexT>(indices.data_handle(),
                                               nullptr,
                                               nullptr,
                                               static_cast<IndexT>(n_features),
                                               static_cast<IndexT>(n_samples),
                                               true,
                                               stream);

  // Gather the first n_samples_to_gather shuffled rows into `out`.
  raft::matrix::gather((DataT*)in.data_handle(),
                       n_features,
                       n_samples,
                       indices.data_handle(),
                       static_cast<IndexT>(n_samples_to_gather),
                       out.data_handle(),
                       stream);
}
// Calculates a <key, value> pair for every sample in input 'X' where key is an
// index to an sample in 'centroids' (index of the nearest centroid) and 'value'
// is the distance between the sample and the 'centroid[key]'
template <typename DataT, typename IndexT>
void minClusterAndDistanceCompute(
  raft::resources const& handle,
  raft::device_matrix_view<const DataT, IndexT> X,
  raft::device_matrix_view<const DataT, IndexT> centroids,
  raft::device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT> minClusterAndDistance,
  raft::device_vector_view<const DataT, IndexT> L2NormX,
  rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
  cuvs::distance::DistanceType metric,
  int batch_samples,
  int batch_centroids,
  rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = centroids.extent(0);
  // todo(lsugy): change batch size computation when using fusedL2NN!
  // The fused kernel processes the whole dataset at once, so sample batching
  // only applies to the generic pairwise-distance path.
  bool is_fused = metric == cuvs::distance::DistanceType::L2Expanded ||
                  metric == cuvs::distance::DistanceType::L2SqrtExpanded;
  auto dataBatchSize = is_fused ? (IndexT)n_samples : getDataBatchSize(batch_samples, n_samples);
  auto centroidsBatchSize = getCentroidsBatchSize(batch_centroids, n_clusters);
  if (is_fused) {
    // Fused path: the shared buffer stores the centroid L2 norms.
    L2NormBuf_OR_DistBuf.resize(n_clusters, stream);
    raft::linalg::rowNorm(L2NormBuf_OR_DistBuf.data(),
                          centroids.data_handle(),
                          centroids.extent(1),
                          centroids.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  } else {
    // TODO: Unless pool allocator is used, passing in a workspace for this
    // isn't really increasing performance because this needs to do a re-allocation
    // anyways. ref https://github.com/rapidsai/raft/issues/930
    // Generic path: the shared buffer stores one batch of pairwise distances.
    L2NormBuf_OR_DistBuf.resize(dataBatchSize * centroidsBatchSize, stream);
  }
  // Note - pairwiseDistance and centroidsNorm share the same buffer
  // centroidsNorm [n_clusters] - tensor wrapper around centroids L2 Norm
  auto centroidsNorm =
    raft::make_device_vector_view<DataT, IndexT>(L2NormBuf_OR_DistBuf.data(), n_clusters);
  // pairwiseDistance[ns x nc] - tensor wrapper around the distance buffer
  auto pairwiseDistance = raft::make_device_matrix_view<DataT, IndexT>(
    L2NormBuf_OR_DistBuf.data(), dataBatchSize, centroidsBatchSize);
  // Seed the reduction with <centroid 0, +inf>.
  raft::KeyValuePair<IndexT, DataT> initial_value(0, std::numeric_limits<DataT>::max());
  thrust::fill(raft::resource::get_thrust_policy(handle),
               minClusterAndDistance.data_handle(),
               minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
               initial_value);
  // tile over the input dataset
  for (IndexT dIdx = 0; dIdx < n_samples; dIdx += dataBatchSize) {
    // # of samples for the current batch
    auto ns = std::min((IndexT)dataBatchSize, n_samples - dIdx);
    // datasetView [ns x n_features] - view representing the current batch of
    // input dataset
    auto datasetView = raft::make_device_matrix_view<const DataT, IndexT>(
      X.data_handle() + (dIdx * n_features), ns, n_features);
    // minClusterAndDistanceView [ns x n_clusters]
    auto minClusterAndDistanceView =
      raft::make_device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT>(
        minClusterAndDistance.data_handle() + dIdx, ns);
    auto L2NormXView =
      raft::make_device_vector_view<const DataT, IndexT>(L2NormX.data_handle() + dIdx, ns);
    if (is_fused) {
      // Scratch for the fused kernel (one entry per row of the batch).
      // NOTE(review): the sibling minClusterDistanceCompute sizes this with
      // sizeof(IndexT) instead of sizeof(int) — confirm which is intended.
      workspace.resize((sizeof(int)) * ns, stream);
      // todo(lsugy): remove cIdx
      cuvs::distance::fusedL2NNMinReduce<DataT, raft::KeyValuePair<IndexT, DataT>, IndexT>(
        minClusterAndDistanceView.data_handle(),
        datasetView.data_handle(),
        centroids.data_handle(),
        L2NormXView.data_handle(),
        centroidsNorm.data_handle(),
        ns,
        n_clusters,
        n_features,
        (void*)workspace.data(),
        metric != cuvs::distance::DistanceType::L2Expanded,
        false,
        stream);
    } else {
      // tile over the centroids
      for (IndexT cIdx = 0; cIdx < n_clusters; cIdx += centroidsBatchSize) {
        // # of centroids for the current batch
        auto nc = std::min((IndexT)centroidsBatchSize, n_clusters - cIdx);
        // centroidsView [nc x n_features] - view representing the current batch
        // of centroids
        auto centroidsView = raft::make_device_matrix_view<const DataT, IndexT>(
          centroids.data_handle() + (cIdx * n_features), nc, n_features);
        // pairwiseDistanceView [ns x nc] - view representing the pairwise
        // distance for current batch
        auto pairwiseDistanceView =
          raft::make_device_matrix_view<DataT, IndexT>(pairwiseDistance.data_handle(), ns, nc);
        // calculate pairwise distance between current tile of cluster centroids
        // and input dataset
        pairwise_distance_kmeans<DataT, IndexT>(
          handle, datasetView, centroidsView, pairwiseDistanceView, workspace, metric);
        // argmin reduction returning <index, value> pair
        // calculates the closest centroid and the distance to the closest
        // centroid
        raft::linalg::coalescedReduction(
          minClusterAndDistanceView.data_handle(),
          pairwiseDistanceView.data_handle(),
          pairwiseDistanceView.extent(1),
          pairwiseDistanceView.extent(0),
          initial_value,
          stream,
          true,
          // map: lift each distance to a pair keyed by its global centroid id
          [=] __device__(const DataT val, const IndexT i) {
            raft::KeyValuePair<IndexT, DataT> pair;
            pair.key = cIdx + i;
            pair.value = val;
            return pair;
          },
          raft::argmin_op{},
          raft::identity_op{});
      }
    }
  }
}
// Computes, for every sample in X, the distance to its nearest centroid
// (distance only — see minClusterAndDistanceCompute for the <index, distance>
// variant). Results are written to minClusterDistance [n_samples].
template <typename DataT, typename IndexT>
void minClusterDistanceCompute(raft::resources const& handle,
                               raft::device_matrix_view<const DataT, IndexT> X,
                               raft::device_matrix_view<DataT, IndexT> centroids,
                               raft::device_vector_view<DataT, IndexT> minClusterDistance,
                               raft::device_vector_view<DataT, IndexT> L2NormX,
                               rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
                               cuvs::distance::DistanceType metric,
                               int batch_samples,
                               int batch_centroids,
                               rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = centroids.extent(0);
  // The fused kernel processes the whole dataset at once, so sample batching
  // only applies to the generic pairwise-distance path.
  bool is_fused = metric == cuvs::distance::DistanceType::L2Expanded ||
                  metric == cuvs::distance::DistanceType::L2SqrtExpanded;
  auto dataBatchSize = is_fused ? (IndexT)n_samples : getDataBatchSize(batch_samples, n_samples);
  auto centroidsBatchSize = getCentroidsBatchSize(batch_centroids, n_clusters);
  if (is_fused) {
    // Fused path: the shared buffer stores the centroid L2 norms.
    L2NormBuf_OR_DistBuf.resize(n_clusters, stream);
    raft::linalg::rowNorm(L2NormBuf_OR_DistBuf.data(),
                          centroids.data_handle(),
                          centroids.extent(1),
                          centroids.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  } else {
    // Generic path: the shared buffer stores one batch of pairwise distances.
    L2NormBuf_OR_DistBuf.resize(dataBatchSize * centroidsBatchSize, stream);
  }
  // Note - pairwiseDistance and centroidsNorm share the same buffer
  // centroidsNorm [n_clusters] - tensor wrapper around centroids L2 Norm
  auto centroidsNorm =
    raft::make_device_vector_view<DataT, IndexT>(L2NormBuf_OR_DistBuf.data(), n_clusters);
  // pairwiseDistance[ns x nc] - tensor wrapper around the distance buffer
  auto pairwiseDistance = raft::make_device_matrix_view<DataT, IndexT>(
    L2NormBuf_OR_DistBuf.data(), dataBatchSize, centroidsBatchSize);
  // Seed the running minima with +inf.
  thrust::fill(raft::resource::get_thrust_policy(handle),
               minClusterDistance.data_handle(),
               minClusterDistance.data_handle() + minClusterDistance.size(),
               std::numeric_limits<DataT>::max());
  // tile over the input data and calculate distance matrix [n_samples x
  // n_clusters]
  for (IndexT dIdx = 0; dIdx < n_samples; dIdx += dataBatchSize) {
    // # of samples for the current batch
    auto ns = std::min((IndexT)dataBatchSize, n_samples - dIdx);
    // datasetView [ns x n_features] - view representing the current batch of
    // input dataset
    auto datasetView = raft::make_device_matrix_view<const DataT, IndexT>(
      X.data_handle() + dIdx * n_features, ns, n_features);
    // minClusterDistanceView [ns x n_clusters]
    auto minClusterDistanceView =
      raft::make_device_vector_view<DataT, IndexT>(minClusterDistance.data_handle() + dIdx, ns);
    auto L2NormXView =
      raft::make_device_vector_view<DataT, IndexT>(L2NormX.data_handle() + dIdx, ns);
    if (is_fused) {
      // Scratch for the fused kernel (one entry per row of the batch).
      // NOTE(review): the sibling minClusterAndDistanceCompute sizes this
      // with sizeof(int) instead of sizeof(IndexT) — confirm which is intended.
      workspace.resize((sizeof(IndexT)) * ns, stream);
      cuvs::distance::fusedL2NNMinReduce<DataT, DataT, IndexT>(
        minClusterDistanceView.data_handle(),
        datasetView.data_handle(),
        centroids.data_handle(),
        L2NormXView.data_handle(),
        centroidsNorm.data_handle(),
        ns,
        n_clusters,
        n_features,
        (void*)workspace.data(),
        metric != cuvs::distance::DistanceType::L2Expanded,
        false,
        stream);
    } else {
      // tile over the centroids
      for (IndexT cIdx = 0; cIdx < n_clusters; cIdx += centroidsBatchSize) {
        // # of centroids for the current batch
        auto nc = std::min((IndexT)centroidsBatchSize, n_clusters - cIdx);
        // centroidsView [nc x n_features] - view representing the current batch
        // of centroids
        auto centroidsView = raft::make_device_matrix_view<DataT, IndexT>(
          centroids.data_handle() + cIdx * n_features, nc, n_features);
        // pairwiseDistanceView [ns x nc] - view representing the pairwise
        // distance for current batch
        auto pairwiseDistanceView =
          raft::make_device_matrix_view<DataT, IndexT>(pairwiseDistance.data_handle(), ns, nc);
        // calculate pairwise distance between current tile of cluster centroids
        // and input dataset
        pairwise_distance_kmeans<DataT, IndexT>(
          handle, datasetView, centroidsView, pairwiseDistanceView, workspace, metric);
        // Row-wise min over this batch; the `true` flag requests in-place
        // accumulation with the previously stored minima.
        raft::linalg::coalescedReduction(minClusterDistanceView.data_handle(),
                                         pairwiseDistanceView.data_handle(),
                                         pairwiseDistanceView.extent(1),
                                         pairwiseDistanceView.extent(0),
                                         std::numeric_limits<DataT>::max(),
                                         stream,
                                         true,
                                         raft::identity_op{},
                                         raft::min_op{},
                                         raft::identity_op{});
      }
    }
  }
}
// Computes, for each cluster, the number of samples whose nearest centroid is
// that cluster. Results are written to sampleCountInCluster [n_clusters].
template <typename DataT, typename IndexT>
void countSamplesInCluster(raft::resources const& handle,
                           const KMeansParams& params,
                           raft::device_matrix_view<const DataT, IndexT> X,
                           raft::device_vector_view<const DataT, IndexT> L2NormX,
                           raft::device_matrix_view<DataT, IndexT> centroids,
                           rmm::device_uvector<char>& workspace,
                           raft::device_vector_view<DataT, IndexT> sampleCountInCluster)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = centroids.extent(0);
  // stores (key, value) pair corresponding to each sample where
  // - key is the index of nearest cluster
  // - value is the distance to the nearest cluster
  auto minClusterAndDistance =
    raft::make_device_vector<raft::KeyValuePair<IndexT, DataT>, IndexT>(handle, n_samples);
  // temporary buffer to store distance matrix, destructor releases the resource
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // computes minClusterAndDistance[0:n_samples) where minClusterAndDistance[i]
  // is a <key, value> pair where
  // 'key' is index to an sample in 'centroids' (index of the nearest
  // centroid) and 'value' is the distance between the sample 'X[i]' and the
  // 'centroid[key]'
  detail::minClusterAndDistanceCompute(handle,
                                       X,
                                       (raft::device_matrix_view<const DataT, IndexT>)centroids,
                                       minClusterAndDistance.view(),
                                       L2NormX,
                                       L2NormBuf_OR_DistBuf,
                                       params.metric,
                                       params.batch_samples,
                                       params.batch_centroids,
                                       workspace);
  // Using TransformInputIteratorT to dereference an array of raft::KeyValuePair
  // and converting them to just return the Key to be used in reduce_rows_by_key
  // prims
  detail::KeyValueIndexOp<IndexT, DataT> conversion_op;
  cub::TransformInputIterator<IndexT,
                              detail::KeyValueIndexOp<IndexT, DataT>,
                              raft::KeyValuePair<IndexT, DataT>*>
    itr(minClusterAndDistance.data_handle(), conversion_op);
  // count # of samples in each cluster via a histogram over the nearest-
  // centroid indices
  countLabels(handle,
              itr,
              sampleCountInCluster.data_handle(),
              (IndexT)n_samples,
              (IndexT)n_clusters,
              workspace);
}
} // namespace detail
} // namespace cluster
} // namespace cuvs
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <cstddef>
namespace cuvs::cluster::detail {
/**
 * Host-side union-find (disjoint set) over 2N-1 nodes: the N original leaves
 * plus the N-1 internal "merge" labels created by perform_union. Used to
 * build the dendrogram from a sorted MST edge list.
 */
template <typename value_idx, typename value_t>
class UnionFind {
 public:
  value_idx next_label;           // next internal label to assign on union
  std::vector<value_idx> parent;  // parent[i] == -1 marks a root
  std::vector<value_idx> size;    // number of leaves under node i
  value_idx n_indices;            // total node count: 2N - 1

  UnionFind(value_idx N_)
    : n_indices(2 * N_ - 1), parent(2 * N_ - 1, -1), size(2 * N_ - 1, 1), next_label(N_)
  {
    // Leaves start with size 1; the N-1 not-yet-created merge labels start at 0.
    memset(size.data() + N_, 0, (size.size() - N_) * sizeof(value_idx));
  }

  /** Returns the root of n's tree, compressing the path from n to it. */
  value_idx find(value_idx n)
  {
    value_idx p = n;

    while (parent[n] != -1)
      n = parent[n];

    // Path compression. The `p == -1 ? n_indices - 1 : p` expression mirrors
    // the Python reference implementation's negative indexing (index -1 maps
    // to the last element). Fix: the loop condition now applies the same
    // guard; previously `parent[p]` was evaluated with p == -1 (an
    // out-of-bounds read) whenever find() was called on a node that is
    // already a root.
    while (parent[p == -1 ? n_indices - 1 : p] != n) {
      p = parent[p == -1 ? n_indices - 1 : p];
      parent[p == -1 ? n_indices - 1 : p] = n;
    }
    return n;
  }

  /** Merges the trees rooted at m and n under a freshly allocated label. */
  void perform_union(value_idx m, value_idx n)
  {
    size[next_label] = size[m] + size[n];
    parent[m] = next_label;
    parent[n] = next_label;

    next_label += 1;
  }
};
/**
* Agglomerative labeling on host. This has not been found to be a bottleneck
* in the algorithm. A parallel version of this can be done using a parallel
* variant of Kruskal's MST algorithm
* (ref http://cucis.ece.northwestern.edu/publications/pdf/HenPat12.pdf),
* which breaks apart the sorted MST results into overlapping subsets and
* independently runs Kruskal's algorithm on each subset, merging them back
* together into a single hierarchy when complete. Unfortunately,
* this is nontrivial and the speedup wouldn't be useful until this
* becomes a bottleneck.
*
* @tparam value_idx
* @tparam value_t
* @param[in] handle the raft handle
* @param[in] rows src edges of the sorted MST
* @param[in] cols dst edges of the sorted MST
* @param[in] nnz the number of edges in the sorted MST
* @param[out] out_src parents of output
* @param[out] out_dst children of output
* @param[out] out_delta distances of output
* @param[out] out_size cluster sizes of output
*/
template <typename value_idx, typename value_t>
void build_dendrogram_host(raft::resources const& handle,
                           const value_idx* rows,
                           const value_idx* cols,
                           const value_t* data,
                           size_t nnz,
                           value_idx* children,
                           value_t* out_delta,
                           value_idx* out_size)
{
  auto stream = resource::get_cuda_stream(handle);
  value_idx n_edges = nnz;
  // Stage the sorted MST edge list on the host; the labeling runs on the CPU.
  std::vector<value_idx> mst_src_h(n_edges);
  std::vector<value_idx> mst_dst_h(n_edges);
  std::vector<value_t> mst_weights_h(n_edges);
  update_host(mst_src_h.data(), rows, n_edges, stream);
  update_host(mst_dst_h.data(), cols, n_edges, stream);
  update_host(mst_weights_h.data(), data, n_edges, stream);
  // The copies above are asynchronous: sync before touching the host vectors.
  resource::sync_stream(handle, stream);
  std::vector<value_idx> children_h(n_edges * 2);
  std::vector<value_idx> out_size_h(n_edges);
  std::vector<value_t> out_delta_h(n_edges);
  // An MST with nnz edges spans nnz + 1 vertices (the dendrogram leaves).
  UnionFind<value_idx, value_t> U(nnz + 1);
  // Kruskal-style pass over the pre-sorted edges: each edge merges the two
  // current clusters of its endpoints into a new dendrogram node.
  for (std::size_t i = 0; i < nnz; i++) {
    value_idx a = mst_src_h[i];
    value_idx b = mst_dst_h[i];
    value_t delta = mst_weights_h[i];
    // Current cluster representatives of the edge endpoints.
    value_idx aa = U.find(a);
    value_idx bb = U.find(b);
    value_idx children_idx = i * 2;
    children_h[children_idx] = aa;
    children_h[children_idx + 1] = bb;
    out_delta_h[i] = delta;
    out_size_h[i] = U.size[aa] + U.size[bb];
    U.perform_union(aa, bb);
  }
  // Push the completed dendrogram back to the device.
  raft::update_device(children, children_h.data(), n_edges * 2, stream);
  raft::update_device(out_size, out_size_h.data(), n_edges, stream);
  raft::update_device(out_delta, out_delta_h.data(), n_edges, stream);
}
// One thread per entry of `children`: entry t belongs to merge level t / 2,
// so record that level as the "parent" level of the child node stored there.
template <typename value_idx>
RAFT_KERNEL write_levels_kernel(const value_idx* children, value_idx* parents, value_idx n_vertices)
{
  value_idx gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= n_vertices) { return; }
  parents[children[gid]] = gid / 2;
}
/**
 * Instead of propagating a label from roots to children,
 * the children each iterate up the tree until they find
 * the label of their parent. This increases the potential
 * parallelism.
 * @tparam value_idx
 * @param children   dendrogram children array (2 entries per merge level)
 * @param levels     levels[node] = merge level at which `node` appears as a
 *                   child (as written by write_levels_kernel)
 * @param n_leaves   number of leaves in the dendrogram
 * @param labels     in: -1 everywhere except the cluster roots; out: every
 *                   node at or below the cut level carries its root's label
 * @param cut_level  merge level at which the dendrogram is cut
 * @param n_vertices total number of entries in `children`
 */
template <typename value_idx>
RAFT_KERNEL inherit_labels(const value_idx* children,
                           const value_idx* levels,
                           std::size_t n_leaves,
                           value_idx* labels,
                           int cut_level,
                           value_idx n_vertices)
{
  value_idx tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < n_vertices) {
    value_idx node = children[tid];
    value_idx cur_level = tid / 2;
    /**
     * Any roots above the cut level should be ignored.
     * Any leaves at the cut level should already be labeled
     */
    if (cur_level > cut_level) return;
    value_idx cur_parent = node;
    value_idx label = labels[cur_parent];
    // Walk upward: the merge at cur_level is node (cur_level + n_leaves);
    // repeat until an ancestor with an assigned (!= -1) label is found.
    while (label == -1) {
      cur_parent = cur_level + n_leaves;
      cur_level = levels[cur_parent];
      label = labels[cur_parent];
    }
    labels[node] = label;
  }
}
// Functor for thrust::for_each over zip(counting iterator, root ids):
// assigns sequential label i to the i-th cluster root.
template <typename value_idx>
struct init_label_roots {
  init_label_roots(value_idx* labels_) : labels(labels_) {}

  template <typename Tuple>
  __host__ __device__ void operator()(Tuple t)
  {
    const auto new_label = thrust::get<0>(t);
    const auto root_node = thrust::get<1>(t);
    labels[root_node] = new_label;
  }

 private:
  value_idx* labels;
};
/**
* Cuts the dendrogram at a particular level where the number of nodes
* is equal to n_clusters, then propagates the resulting labels
* to all the children.
*
* @tparam value_idx
* @param handle
* @param labels
* @param children
* @param n_clusters
* @param n_leaves
*/
template <typename value_idx, int tpb = 256>
void extract_flattened_clusters(raft::resources const& handle,
                                value_idx* labels,
                                const value_idx* children,
                                size_t n_clusters,
                                size_t n_leaves)
{
  auto stream = resource::get_cuda_stream(handle);
  auto thrust_policy = resource::get_thrust_policy(handle);
  // Handle special case where n_clusters == 1: every leaf gets label 0.
  if (n_clusters == 1) {
    thrust::fill(thrust_policy, labels, labels + n_leaves, 0);
  } else {
    /**
     * Compute levels for each node
     *
     * 1. Initialize "levels" array of size n_leaves * 2
     *
     * 2. For each entry in children, write parent
     * out for each of the children
     */
    auto n_edges = (n_leaves - 1) * 2;
    thrust::device_ptr<const value_idx> d_ptr = thrust::device_pointer_cast(children);
    // Largest node id referenced by the dendrogram, +1 to turn it into a count.
    value_idx n_vertices = *(thrust::max_element(thrust_policy, d_ptr, d_ptr + n_edges)) + 1;
    // Prevent potential infinite loop from labeling disconnected
    // connectivities graph.
    RAFT_EXPECTS(n_leaves > 0, "n_leaves must be positive");
    RAFT_EXPECTS(
      static_cast<std::size_t>(n_vertices) == static_cast<std::size_t>((n_leaves - 1) * 2),
      "Multiple components found in MST or MST is invalid. "
      "Cannot find single-linkage solution.");
    rmm::device_uvector<value_idx> levels(n_vertices, stream);
    value_idx n_blocks = ceildiv(n_vertices, (value_idx)tpb);
    write_levels_kernel<<<n_blocks, tpb, 0, stream>>>(children, levels.data(), n_vertices);
    /**
     * Step 1: Find label roots:
     *
     *     1. Copying children[children.size()-(n_clusters-1):] entries to
     *        separate array
     *     2. sort array
     *     3. take first n_clusters entries
     */
    value_idx child_size = (n_clusters - 1) * 2;
    rmm::device_uvector<value_idx> label_roots(child_size, stream);
    value_idx children_cpy_start = n_edges - child_size;
    raft::copy_async(label_roots.data(), children + children_cpy_start, child_size, stream);
    // Descending sort, so the n_clusters root candidates end up at the tail.
    thrust::sort(thrust_policy,
                 label_roots.data(),
                 label_roots.data() + (child_size),
                 thrust::greater<value_idx>());
    rmm::device_uvector<value_idx> tmp_labels(n_vertices, stream);
    // Init labels to -1
    thrust::fill(thrust_policy, tmp_labels.data(), tmp_labels.data() + n_vertices, -1);
    // Write labels for cluster roots to "labels"
    thrust::counting_iterator<uint> first(0);
    auto z_iter = thrust::make_zip_iterator(
      thrust::make_tuple(first, label_roots.data() + (label_roots.size() - n_clusters)));
    thrust::for_each(
      thrust_policy, z_iter, z_iter + n_clusters, init_label_roots<value_idx>(tmp_labels.data()));
    /**
     * Step 2: Propagate labels by having children iterate through their parents
     *     1. Initialize labels to -1
     *     2. For each element in levels array, propagate until parent's
     *        label is !=-1
     */
    value_idx cut_level = (n_edges / 2) - (n_clusters - 1);
    inherit_labels<<<n_blocks, tpb, 0, stream>>>(
      children, levels.data(), n_leaves, tmp_labels.data(), cut_level, n_vertices);
    // copy tmp labels to actual labels
    raft::copy_async(labels, tmp_labels.data(), n_leaves, stream);
  }
}
}; // namespace cuvs::cluster::detail
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <type_traits>
#include <cuvs/cluster/detail/kmeans_common.cuh>
#include <cuvs/cluster/kmeans_balanced_types.hpp>
#include <cuvs/distance/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/distance/fused_l2_nn.cuh>
#include <raft/common/nvtx.hpp>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/add.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/map.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/normalize.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/argmin.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/device_atomics.cuh>
#include <raft/util/integer_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/gather.h>
#include <thrust/transform.h>
#include <tuple>
namespace cuvs::cluster::detail {
constexpr static inline float kAdjustCentersWeight = 7.0f;
/**
* @brief Predict labels for the dataset; floating-point types only.
*
* NB: no minibatch splitting is done here, it may require large amount of temporary memory (n_rows
* * n_cluster * sizeof(MathT)).
*
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
*
* @param[in] handle The raft handle.
* @param[in] params Structure containing the hyper-parameters
* @param[in] centers Pointer to the row-major matrix of cluster centers [n_clusters, dim]
* @param[in] n_clusters Number of clusters/centers
* @param[in] dim Dimensionality of the data
* @param[in] dataset Pointer to the data [n_rows, dim]
* @param[in] dataset_norm Pointer to the precomputed norm (for L2 metrics only) [n_rows]
* @param[in] n_rows Number samples in the `dataset`
* @param[out] labels Output predictions [n_rows]
* @param[inout] mr (optional) Memory resource to use for temporary allocations
*/
template <typename MathT, typename IdxT, typename LabelT>
inline std::enable_if_t<std::is_floating_point_v<MathT>> predict_core(
  const raft::resources& handle,
  const kmeans_balanced_params& params,
  const MathT* centers,
  IdxT n_clusters,
  IdxT dim,
  const MathT* dataset,
  const MathT* dataset_norm,
  IdxT n_rows,
  LabelT* labels,
  rmm::mr::device_memory_resource* mr)
{
  auto stream = resource::get_cuda_stream(handle);
  switch (params.metric) {
    case cuvs::distance::DistanceType::L2Expanded:
    case cuvs::distance::DistanceType::L2SqrtExpanded: {
      // L2 metrics: fused nearest-neighbor kernel producing a
      // <centroid index, distance> pair per row; needs an int-per-row scratch.
      auto workspace = raft::make_device_mdarray<char, IdxT>(
        handle, mr, make_extents<IdxT>((sizeof(int)) * n_rows));
      auto minClusterAndDistance = raft::make_device_mdarray<raft::KeyValuePair<IdxT, MathT>, IdxT>(
        handle, mr, make_extents<IdxT>(n_rows));
      // Seed the reduction with <centroid 0, +inf>.
      raft::KeyValuePair<IdxT, MathT> initial_value(0, std::numeric_limits<MathT>::max());
      thrust::fill(raft::resource::get_thrust_policy(handle),
                   minClusterAndDistance.data_handle(),
                   minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                   initial_value);
      // The fused kernel also needs the L2 norms of the cluster centers.
      auto centroidsNorm =
        raft::make_device_mdarray<MathT, IdxT>(handle, mr, make_extents<IdxT>(n_clusters));
      raft::linalg::rowNorm<MathT, IdxT>(
        centroidsNorm.data_handle(), centers, dim, n_clusters, raft::linalg::L2Norm, true, stream);
      cuvs::distance::fusedL2NNMinReduce<MathT, raft::KeyValuePair<IdxT, MathT>, IdxT>(
        minClusterAndDistance.data_handle(),
        dataset,
        centers,
        dataset_norm,
        centroidsNorm.data_handle(),
        n_rows,
        n_clusters,
        dim,
        (void*)workspace.data_handle(),
        // apply the square root only for the "sqrt" variant of the metric
        (params.metric == cuvs::distance::DistanceType::L2Expanded) ? false : true,
        false,
        stream);
      // todo(lsugy): use KVP + iterator in caller.
      // Copy keys to output labels
      thrust::transform(raft::resource::get_thrust_policy(handle),
                        minClusterAndDistance.data_handle(),
                        minClusterAndDistance.data_handle() + n_rows,
                        labels,
                        raft::compose_op<raft::cast_op<LabelT>, raft::key_op>());
      break;
    }
    case cuvs::distance::DistanceType::InnerProduct: {
      // TODO: pass buffer
      // Inner product: GEMM with alpha = -1 yields negated inner products, so
      // the per-row argmin below selects the center with the LARGEST inner
      // product.
      rmm::device_uvector<MathT> distances(n_rows * n_clusters, stream, mr);
      MathT alpha = -1.0;
      MathT beta = 0.0;
      linalg::gemm(handle,
                   true,
                   false,
                   n_clusters,
                   n_rows,
                   dim,
                   &alpha,
                   centers,
                   dim,
                   dataset,
                   dim,
                   &beta,
                   distances.data(),
                   n_clusters,
                   stream);
      auto distances_const_view = raft::make_device_matrix_view<const MathT, IdxT, row_major>(
        distances.data(), n_rows, n_clusters);
      auto labels_view = raft::make_device_vector_view<LabelT, IdxT>(labels, n_rows);
      raft::matrix::argmin(handle, distances_const_view, labels_view);
      break;
    }
    default: {
      RAFT_FAIL("The chosen distance metric is not supported (%d)", int(params.metric));
    }
  }
}
/**
* @brief Suggest a minibatch size for kmeans prediction.
*
* This function is used as a heuristic to split the work over a large dataset
* to reduce the size of temporary memory allocations.
*
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
*
* @param[in] n_clusters number of clusters in kmeans clustering
* @param[in] n_rows Number of samples in the dataset
* @param[in] dim Number of features in the dataset
* @param[in] metric Distance metric
* @param[in] needs_conversion Whether the data needs to be converted to MathT
* @return A suggested minibatch size and the expected memory cost per-row (in bytes)
*/
template <typename MathT, typename IdxT>
constexpr auto calc_minibatch_size(IdxT n_clusters,
                                   IdxT n_rows,
                                   IdxT dim,
                                   cuvs::distance::DistanceType metric,
                                   bool needs_conversion) -> std::tuple<IdxT, size_t>
{
  // Guard against division by zero further down when no clusters are requested.
  n_clusters = std::max<IdxT>(1, n_clusters);
  // Estimate the temporary-memory footprint of one batch element (one dataset row).
  size_t mem_per_row       = 0;
  const bool uses_fused_l2 = metric == distance::DistanceType::L2Expanded ||
                             metric == distance::DistanceType::L2SqrtExpanded;
  if (uses_fused_l2) {
    // fusedL2NN keeps a mutex (int) and a key-value pair for each row.
    mem_per_row += sizeof(int) + sizeof(raft::KeyValuePair<IdxT, MathT>);
  } else {
    // Any other metric materializes one full row of the distance matrix.
    mem_per_row += sizeof(MathT) * n_clusters;
  }
  // NB: callers pass `std::is_same_v<T, MathT>` for `needs_conversion`, so `false`
  // means the batch must be converted and we reserve room for the converted copy.
  if (!needs_conversion) { mem_per_row += sizeof(MathT) * dim; }
  // Heuristic: target at most 1 GiB of temporary memory, round the batch size up to
  // a multiple of 64, and never exceed the dataset size itself.
  IdxT minibatch_size = (1 << 30) / mem_per_row;
  minibatch_size      = 64 * div_rounding_up_safe(minibatch_size, IdxT{64});
  minibatch_size      = std::min<IdxT>(minibatch_size, n_rows);
  return std::make_tuple(minibatch_size, mem_per_row);
}
/**
* @brief Given the data and labels, calculate cluster centers and sizes in one sweep.
*
* @note all pointers must be accessible on the device.
*
* @tparam T element type
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
* @tparam CounterT counter type supported by CUDA's native atomicAdd
* @tparam MappingOpT type of the mapping operation
*
* @param[in] handle The raft handle.
* @param[inout] centers Pointer to the output [n_clusters, dim]
* @param[inout] cluster_sizes Number of rows in each cluster [n_clusters]
* @param[in] n_clusters Number of clusters/centers
* @param[in] dim Dimensionality of the data
* @param[in] dataset Pointer to the data [n_rows, dim]
* @param[in] n_rows Number of samples in the `dataset`
* @param[in] labels Output predictions [n_rows]
* @param[in] reset_counters Whether to clear the output arrays before calculating.
* When set to `false`, this function may be used to update existing centers and sizes using
* the weighted average principle.
* @param[in] mapping_op Mapping operation from T to MathT
* @param[inout] mr (optional) Memory resource to use for temporary allocations on the device
*/
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void calc_centers_and_sizes(const raft::resources& handle,
                            MathT* centers,
                            CounterT* cluster_sizes,
                            IdxT n_clusters,
                            IdxT dim,
                            const T* dataset,
                            IdxT n_rows,
                            const LabelT* labels,
                            bool reset_counters,
                            MappingOpT mapping_op,
                            rmm::mr::device_memory_resource* mr = nullptr)
{
  auto stream = resource::get_cuda_stream(handle);
  if (mr == nullptr) { mr = resource::get_workspace_resource(handle); }
  // When updating existing centers (reset_counters == false), first de-normalize them:
  // multiply each center by its current cluster size so it becomes a running sum again.
  if (!reset_counters) {
    raft::linalg::matrixVectorOp(
      centers, centers, cluster_sizes, dim, n_clusters, true, false, raft::mul_op(), stream);
  }
  rmm::device_uvector<char> workspace(0, stream, mr);
  // If we reset the counters, we can compute directly the new sizes in cluster_sizes.
  // If we don't reset, we compute in a temporary buffer and add in a separate step.
  rmm::device_uvector<CounterT> temp_cluster_sizes(0, stream, mr);
  CounterT* temp_sizes = cluster_sizes;
  if (!reset_counters) {
    temp_cluster_sizes.resize(n_clusters, stream);
    temp_sizes = temp_cluster_sizes.data();
  }
  // Apply mapping only when the data and math types are different.
  if constexpr (std::is_same_v<T, MathT>) {
    // Sum the rows belonging to each cluster into `centers`; when reset_counters is
    // false the running sums restored above are accumulated into, not overwritten.
    raft::linalg::reduce_rows_by_key(
      dataset, dim, labels, nullptr, n_rows, dim, n_clusters, centers, stream, reset_counters);
  } else {
    // todo(lsugy): use iterator from KV output of fusedL2NN
    cub::TransformInputIterator<MathT, MappingOpT, const T*> mapping_itr(dataset, mapping_op);
    raft::linalg::reduce_rows_by_key(
      mapping_itr, dim, labels, nullptr, n_rows, dim, n_clusters, centers, stream, reset_counters);
  }
  // Compute weight of each cluster
  cuvs::cluster::detail::countLabels(handle, labels, temp_sizes, n_rows, n_clusters, workspace);
  // Add previous sizes if necessary
  if (!reset_counters) {
    raft::linalg::add(cluster_sizes, cluster_sizes, temp_sizes, n_clusters, stream);
  }
  // Normalize: divide each center (currently a sum) by its cluster size.
  // div_checkzero_op leaves empty clusters as zero vectors instead of NaN.
  raft::linalg::matrixVectorOp(centers,
                               centers,
                               cluster_sizes,
                               dim,
                               n_clusters,
                               true,
                               false,
                               raft::div_checkzero_op(),
                               stream);
}
/** Computes the L2 norm of the dataset, converting to MathT if necessary */
template <typename T, typename MathT, typename IdxT, typename MappingOpT>
void compute_norm(const raft::resources& handle,
                  MathT* dataset_norm,
                  const T* dataset,
                  IdxT dim,
                  IdxT n_rows,
                  MappingOpT mapping_op,
                  rmm::mr::device_memory_resource* mr = nullptr)
{
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope("compute_norm");
  auto stream = resource::get_cuda_stream(handle);
  if (mr == nullptr) { mr = resource::get_workspace_resource(handle); }
  // Scratch buffer for the converted copy of the data; stays empty when T == MathT.
  rmm::device_uvector<MathT> converted(0, stream, mr);
  const MathT* norm_input = nullptr;
  if (std::is_same_v<MathT, T>) {
    // Same representation: read the dataset directly.
    norm_input = reinterpret_cast<const MathT*>(dataset);
  } else {
    // Map the dataset element-wise into MathT before taking the norms.
    converted.resize(n_rows * dim, stream);
    linalg::unaryOp(converted.data(), dataset, n_rows * dim, mapping_op, stream);
    norm_input = static_cast<const MathT*>(converted.data());
  }
  // One squared-L2 norm per row, written to dataset_norm.
  raft::linalg::rowNorm<MathT, IdxT>(
    dataset_norm, norm_input, dim, n_rows, raft::linalg::L2Norm, true, stream);
}
/**
* @brief Predict labels for the dataset.
*
* @tparam T element type
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
* @tparam MappingOpT type of the mapping operation
*
* @param[in] handle The raft handle
* @param[in] params Structure containing the hyper-parameters
* @param[in] centers Pointer to the row-major matrix of cluster centers [n_clusters, dim]
* @param[in] n_clusters Number of clusters/centers
* @param[in] dim Dimensionality of the data
* @param[in] dataset Pointer to the data [n_rows, dim]
* @param[in] n_rows Number samples in the `dataset`
* @param[out] labels Output predictions [n_rows]
* @param[in] mapping_op Mapping operation from T to MathT
* @param[inout] mr (optional) memory resource to use for temporary allocations
* @param[in] dataset_norm (optional) Pre-computed norms of each row in the dataset [n_rows]
*/
template <typename T, typename MathT, typename IdxT, typename LabelT, typename MappingOpT>
void predict(const raft::resources& handle,
             const kmeans_balanced_params& params,
             const MathT* centers,
             IdxT n_clusters,
             IdxT dim,
             const T* dataset,
             IdxT n_rows,
             LabelT* labels,
             MappingOpT mapping_op,
             rmm::mr::device_memory_resource* mr = nullptr,
             const MathT* dataset_norm = nullptr)
{
  auto stream = resource::get_cuda_stream(handle);
  // NB: cast both varargs to size_t for "%zu"; passing a 64-bit IdxT to "%u"
  // (as before) is undefined behavior in the variadic call.
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "predict(%zu, %zu)", static_cast<size_t>(n_rows), static_cast<size_t>(n_clusters));
  if (mr == nullptr) { mr = resource::get_workspace_resource(handle); }
  // Process the dataset in minibatches to bound temporary memory usage.
  auto [max_minibatch_size, _mem_per_row] =
    calc_minibatch_size<MathT>(n_clusters, n_rows, dim, params.metric, std::is_same_v<T, MathT>);
  // Buffer for the batch converted to MathT (empty when no conversion is needed).
  rmm::device_uvector<MathT> cur_dataset(
    std::is_same_v<T, MathT> ? 0 : max_minibatch_size * dim, stream, mr);
  // The expanded L2 metrics need row norms; compute them per-batch unless precomputed.
  bool need_compute_norm =
    dataset_norm == nullptr && (params.metric == cuvs::distance::DistanceType::L2Expanded ||
                                params.metric == cuvs::distance::DistanceType::L2SqrtExpanded);
  rmm::device_uvector<MathT> cur_dataset_norm(
    need_compute_norm ? max_minibatch_size : 0, stream, mr);
  const MathT* dataset_norm_ptr = nullptr;
  auto cur_dataset_ptr          = cur_dataset.data();
  for (IdxT offset = 0; offset < n_rows; offset += max_minibatch_size) {
    IdxT minibatch_size = std::min<IdxT>(max_minibatch_size, n_rows - offset);
    if constexpr (std::is_same_v<T, MathT>) {
      // No conversion needed: point straight into the input batch.
      cur_dataset_ptr = const_cast<MathT*>(dataset + offset * dim);
    } else {
      // Convert the current batch into the MathT scratch buffer.
      linalg::unaryOp(
        cur_dataset_ptr, dataset + offset * dim, minibatch_size * dim, mapping_op, stream);
    }
    // Compute the norm now if it hasn't been pre-computed.
    if (need_compute_norm) {
      compute_norm(
        handle, cur_dataset_norm.data(), cur_dataset_ptr, dim, minibatch_size, mapping_op, mr);
      dataset_norm_ptr = cur_dataset_norm.data();
    } else if (dataset_norm != nullptr) {
      dataset_norm_ptr = dataset_norm + offset;
    }
    // Assign each row of the batch to its nearest center.
    predict_core(handle,
                 params,
                 centers,
                 n_clusters,
                 dim,
                 cur_dataset_ptr,
                 dataset_norm_ptr,
                 minibatch_size,
                 labels + offset,
                 mr);
  }
}
/**
 * Kernel: move the center of each small cluster towards a data point sampled
 * from a larger cluster.
 *
 * Expected launch layout (see the host-side `adjust_centers`): blockDim.x == WarpSize,
 * so one warp handles one cluster; the cluster index comes from the y dimensions only.
 * `count` is a single global counter shared by all blocks, combined with `seed`
 * to generate pseudo-random row indices. The caller chooses `seed` co-prime with
 * `n_rows`, so the sequence `(seed * k) % n_rows` eventually visits every row.
 */
template <uint32_t BlockDimY,
          typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
__launch_bounds__((WarpSize * BlockDimY)) RAFT_KERNEL
  adjust_centers_kernel(MathT* centers, // [n_clusters, dim]
                        IdxT n_clusters,
                        IdxT dim,
                        const T* dataset, // [n_rows, dim]
                        IdxT n_rows,
                        const LabelT* labels, // [n_rows]
                        const CounterT* cluster_sizes, // [n_clusters]
                        MathT threshold,
                        IdxT average,
                        IdxT seed,
                        IdxT* count,
                        MappingOpT mapping_op)
{
  // One cluster per (threadIdx.y, blockIdx.y); threadIdx.x lanes sweep the feature dim.
  IdxT l = threadIdx.y + BlockDimY * static_cast<IdxT>(blockIdx.y);
  if (l >= n_clusters) return;
  auto csize = static_cast<IdxT>(cluster_sizes[l]);
  // skip big clusters
  if (csize > static_cast<IdxT>(average * threshold)) return;
  // choose a "random" i that belongs to a rather large cluster
  IdxT i;
  IdxT j = laneId();
  if (j == 0) {
    // Lane 0 probes pseudo-random rows until it finds one whose cluster is
    // at least average-sized; `count` advances globally across all warps.
    do {
      auto old = atomicAdd(count, IdxT{1});
      i = (seed * (old + 1)) % n_rows;
    } while (static_cast<IdxT>(cluster_sizes[labels[i]]) < average);
  }
  // Broadcast the chosen row index from lane 0 to the whole warp
  // (i is uninitialized in the other lanes until this shuffle).
  i = raft::shfl(i, 0);
  // Adjust the center of the selected smaller cluster to gravitate towards
  // a sample from the selected larger cluster.
  const IdxT li = static_cast<IdxT>(labels[i]);
  // Weight of the current center for the weighted average.
  // We dump it for anomalously small clusters, but keep constant otherwise.
  const MathT wc = min(static_cast<MathT>(csize), static_cast<MathT>(kAdjustCentersWeight));
  // Weight for the datapoint used to shift the center.
  const MathT wd = 1.0;
  // Warp-strided weighted average over the feature dimension.
  for (; j < dim; j += raft::WarpSize) {
    MathT val = 0;
    val += wc * centers[j + dim * li];
    val += wd * mapping_op(dataset[j + dim * i]);
    val /= wc + wd;
    centers[j + dim * l] = val;
  }
}
/**
* @brief Adjust centers for clusters that have small number of entries.
*
* For each cluster, where the cluster size is not bigger than a threshold, the center is moved
* towards a data point that belongs to a large cluster.
*
* NB: if this function returns `true`, you should update the labels.
*
* NB: all pointers must be on the device side.
*
* @tparam T element type
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
* @tparam CounterT counter type supported by CUDA's native atomicAdd
* @tparam MappingOpT type of the mapping operation
*
* @param[inout] centers cluster centers [n_clusters, dim]
* @param[in] n_clusters number of rows in `centers`
* @param[in] dim number of columns in `centers` and `dataset`
* @param[in] dataset a host pointer to the row-major data matrix [n_rows, dim]
* @param[in] n_rows number of rows in `dataset`
* @param[in] labels a host pointer to the cluster indices [n_rows]
* @param[in] cluster_sizes number of rows in each cluster [n_clusters]
* @param[in] threshold defines a criterion for adjusting a cluster
* (cluster_sizes <= average_size * threshold)
* 0 <= threshold < 1
* @param[in] mapping_op Mapping operation from T to MathT
* @param[in] stream CUDA stream
* @param[inout] device_memory memory resource to use for temporary allocations
*
* @return whether any of the centers has been updated (and thus, `labels` need to be recalculated).
*/
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
auto adjust_centers(MathT* centers,
                    IdxT n_clusters,
                    IdxT dim,
                    const T* dataset,
                    IdxT n_rows,
                    const LabelT* labels,
                    const CounterT* cluster_sizes,
                    MathT threshold,
                    MappingOpT mapping_op,
                    rmm::cuda_stream_view stream,
                    rmm::mr::device_memory_resource* device_memory) -> bool
{
  // NB: cast to size_t for "%zu"; passing a 64-bit IdxT to "%u" (as before) is UB.
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "adjust_centers(%zu, %zu)", static_cast<size_t>(n_rows), static_cast<size_t>(n_clusters));
  if (n_clusters == 0) { return false; }
  constexpr static std::array kPrimes{29, 71, 113, 173, 229, 281, 349, 409, 463, 541,
                                      601, 659, 733, 809, 863, 941, 1013, 1069, 1151, 1223,
                                      1291, 1373, 1451, 1511, 1583, 1657, 1733, 1811, 1889, 1987,
                                      2053, 2129, 2213, 2287, 2357, 2423, 2531, 2617, 2687, 2741};
  // NB: the static counter makes consecutive calls pick different offsets, but also
  // makes this function not thread-safe on the host side.
  // (An unused `static IdxT i` previously declared here has been removed.)
  static IdxT i_primes = 0;
  bool adjusted = false;
  IdxT average = n_rows / n_clusters;
  IdxT ofst;
  // Pick a prime offset that does not divide n_rows; being prime and not a divisor,
  // it is co-prime with n_rows, so `(ofst * k) % n_rows` visits every row.
  do {
    i_primes = (i_primes + 1) % kPrimes.size();
    ofst = kPrimes[i_primes];
  } while (n_rows % ofst == 0);
  // One warp per cluster: blockDim.x == WarpSize, kBlockDimY clusters per block.
  constexpr uint32_t kBlockDimY = 4;
  const dim3 block_dim(WarpSize, kBlockDimY, 1);
  const dim3 grid_dim(1, raft::ceildiv(n_clusters, static_cast<IdxT>(kBlockDimY)), 1);
  // Global counter the kernel uses both for pseudo-random sampling and to report
  // how many adjustments were attempted.
  rmm::device_scalar<IdxT> update_count(0, stream, device_memory);
  adjust_centers_kernel<kBlockDimY><<<grid_dim, block_dim, 0, stream>>>(centers,
                                                                        n_clusters,
                                                                        dim,
                                                                        dataset,
                                                                        n_rows,
                                                                        labels,
                                                                        cluster_sizes,
                                                                        threshold,
                                                                        average,
                                                                        ofst,
                                                                        update_count.data(),
                                                                        mapping_op);
  // NB: rmm scalar performs the sync; kernel launch/execution errors surface here too.
  adjusted = update_count.value(stream) > 0;
  return adjusted;
}
/**
* @brief Expectation-maximization-balancing combined in an iterative process.
*
* Note, the `cluster_centers` is assumed to be already initialized here.
* Thus, this function can be used for fine-tuning existing clusters;
* to train from scratch, use `build_clusters` function below.
*
* @tparam T element type
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
* @tparam CounterT counter type supported by CUDA's native atomicAdd
* @tparam MappingOpT type of the mapping operation
*
* @param[in] handle The raft handle
* @param[in] params Structure containing the hyper-parameters
* @param[in] n_iters Requested number of iterations (can differ from params.n_iter!)
* @param[in] dim Dimensionality of the dataset
* @param[in] dataset Pointer to a managed row-major array [n_rows, dim]
* @param[in] dataset_norm Pointer to the precomputed norm (for L2 metrics only) [n_rows]
* @param[in] n_rows Number of rows in the dataset
 * @param[in] n_clusters Requested number of clusters
* @param[inout] cluster_centers Pointer to a managed row-major array [n_clusters, dim]
* @param[out] cluster_labels Pointer to a managed row-major array [n_rows]
* @param[out] cluster_sizes Pointer to a managed row-major array [n_clusters]
* @param[in] balancing_pullback
* if the cluster centers are rebalanced on this number of iterations,
* one extra iteration is performed (this could happen several times) (default should be `2`).
 * In other words, the first and then every `balancing_pullback`-th rebalancing operation adds
* one more iteration to the main cycle.
* @param[in] balancing_threshold
* the rebalancing takes place if any cluster is smaller than `avg_size * balancing_threshold`
* on a given iteration (default should be `~ 0.25`).
* @param[in] mapping_op Mapping operation from T to MathT
* @param[inout] device_memory
* A memory resource for device allocations (makes sense to provide a memory pool here)
*/
template <typename T,
typename MathT,
typename IdxT,
typename LabelT,
typename CounterT,
typename MappingOpT>
void balancing_em_iters(const raft::resources& handle,
const kmeans_balanced_params& params,
uint32_t n_iters,
IdxT dim,
const T* dataset,
const MathT* dataset_norm,
IdxT n_rows,
IdxT n_clusters,
MathT* cluster_centers,
LabelT* cluster_labels,
CounterT* cluster_sizes,
uint32_t balancing_pullback,
MathT balancing_threshold,
MappingOpT mapping_op,
rmm::mr::device_memory_resource* device_memory)
{
auto stream = resource::get_cuda_stream(handle);
uint32_t balancing_counter = balancing_pullback;
for (uint32_t iter = 0; iter < n_iters; iter++) {
// Balancing step - move the centers around to equalize cluster sizes
// (but not on the first iteration)
if (iter > 0 && adjust_centers(cluster_centers,
n_clusters,
dim,
dataset,
n_rows,
cluster_labels,
cluster_sizes,
balancing_threshold,
mapping_op,
stream,
device_memory)) {
if (balancing_counter++ >= balancing_pullback) {
balancing_counter -= balancing_pullback;
n_iters++;
}
}
switch (params.metric) {
// For some metrics, cluster calculation and adjustment tends to favor zero center vectors.
// To avoid converging to zero, we normalize the center vectors on every iteration.
case cuvs::distance::DistanceType::InnerProduct:
case cuvs::distance::DistanceType::CosineExpanded:
case cuvs::distance::DistanceType::CorrelationExpanded: {
auto clusters_in_view = raft::make_device_matrix_view<const MathT, IdxT, raft::row_major>(
cluster_centers, n_clusters, dim);
auto clusters_out_view = raft::make_device_matrix_view<MathT, IdxT, raft::row_major>(
cluster_centers, n_clusters, dim);
raft::linalg::row_normalize(
handle, clusters_in_view, clusters_out_view, raft::linalg::L2Norm);
break;
}
default: break;
}
// E: Expectation step - predict labels
predict(handle,
params,
cluster_centers,
n_clusters,
dim,
dataset,
n_rows,
cluster_labels,
mapping_op,
device_memory,
dataset_norm);
// M: Maximization step - calculate optimal cluster centers
calc_centers_and_sizes(handle,
cluster_centers,
cluster_sizes,
n_clusters,
dim,
dataset,
n_rows,
cluster_labels,
true,
mapping_op,
device_memory);
}
}
/** Randomly initialize cluster centers and then call `balancing_em_iters`. */
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void build_clusters(const raft::resources& handle,
                    const kmeans_balanced_params& params,
                    IdxT dim,
                    const T* dataset,
                    IdxT n_rows,
                    IdxT n_clusters,
                    MathT* cluster_centers,
                    LabelT* cluster_labels,
                    CounterT* cluster_sizes,
                    MappingOpT mapping_op,
                    rmm::mr::device_memory_resource* device_memory,
                    const MathT* dataset_norm = nullptr)
{
  // (An unused `auto stream = resource::get_cuda_stream(handle);` was removed here:
  // every call below takes the handle and obtains the stream itself.)
  //
  // "randomly" initialize labels: row i gets label (i % n_clusters), which spreads
  // the initial assignment evenly across all clusters.
  auto labels_view = raft::make_device_vector_view<LabelT, IdxT>(cluster_labels, n_rows);
  linalg::map_offset(
    handle,
    labels_view,
    raft::compose_op(raft::cast_op<LabelT>(), raft::mod_const_op<IdxT>(n_clusters)));
  // update centers to match the initialized labels.
  calc_centers_and_sizes(handle,
                         cluster_centers,
                         cluster_sizes,
                         n_clusters,
                         dim,
                         dataset,
                         n_rows,
                         cluster_labels,
                         true,
                         mapping_op,
                         device_memory);
  // run EM with pullback = 2 and balancing threshold = 0.25 (the defaults
  // suggested in the balancing_em_iters documentation).
  balancing_em_iters(handle,
                     params,
                     params.n_iters,
                     dim,
                     dataset,
                     dataset_norm,
                     n_rows,
                     n_clusters,
                     cluster_centers,
                     cluster_labels,
                     cluster_sizes,
                     2,
                     MathT{0.25},
                     mapping_op,
                     device_memory);
}
/** Calculate how many fine clusters should belong to each mesocluster. */
template <typename IdxT, typename CounterT>
inline auto arrange_fine_clusters(IdxT n_clusters,
                                  IdxT n_mesoclusters,
                                  IdxT n_rows,
                                  const CounterT* mesocluster_sizes)
{
  // Returns (max mesocluster size, max fine-cluster count per mesocluster,
  // fine-cluster counts per mesocluster, exclusive prefix sum of those counts).
  // NB: `mesocluster_sizes` is dereferenced on the host below, so it must point
  // to host-accessible (e.g. managed) memory.
  std::vector<IdxT> fine_clusters_nums(n_mesoclusters);
  std::vector<IdxT> fine_clusters_csum(n_mesoclusters + 1);
  fine_clusters_csum[0] = 0;
  IdxT n_lists_rem = n_clusters;
  IdxT n_nonempty_ms_rem = 0;
  // Count mesoclusters that still contain at least one point.
  for (IdxT i = 0; i < n_mesoclusters; i++) {
    n_nonempty_ms_rem += mesocluster_sizes[i] > CounterT{0} ? 1 : 0;
  }
  IdxT n_rows_rem = n_rows;
  CounterT mesocluster_size_sum = 0;
  CounterT mesocluster_size_max = 0;
  IdxT fine_clusters_nums_max = 0;
  for (IdxT i = 0; i < n_mesoclusters; i++) {
    if (i < n_mesoclusters - 1) {
      // Although the algorithm is meant to produce balanced clusters, when something
      // goes wrong, we may get empty clusters (e.g. during development/debugging).
      // The code below ensures a proportional arrangement of fine cluster numbers
      // per mesocluster, even if some clusters are empty.
      if (mesocluster_sizes[i] == 0) {
        fine_clusters_nums[i] = 0;
      } else {
        n_nonempty_ms_rem--;
        // Proportional share of the remaining fine clusters, rounded to nearest;
        // then clamped so that every remaining non-empty mesocluster can get >= 1.
        auto s = static_cast<IdxT>(
          static_cast<double>(n_lists_rem * mesocluster_sizes[i]) / n_rows_rem + .5);
        s = std::min<IdxT>(s, n_lists_rem - n_nonempty_ms_rem);
        fine_clusters_nums[i] = std::max(s, IdxT{1});
      }
    } else {
      // The last mesocluster absorbs whatever is left, so the totals always add up.
      fine_clusters_nums[i] = n_lists_rem;
    }
    n_lists_rem -= fine_clusters_nums[i];
    n_rows_rem -= mesocluster_sizes[i];
    mesocluster_size_max = max(mesocluster_size_max, mesocluster_sizes[i]);
    mesocluster_size_sum += mesocluster_sizes[i];
    fine_clusters_nums_max = max(fine_clusters_nums_max, fine_clusters_nums[i]);
    fine_clusters_csum[i + 1] = fine_clusters_csum[i] + fine_clusters_nums[i];
  }
  // Sanity checks: all rows and all requested clusters must be accounted for.
  RAFT_EXPECTS(static_cast<IdxT>(mesocluster_size_sum) == n_rows,
               "mesocluster sizes do not add up (%zu) to the total trainset size (%zu)",
               static_cast<size_t>(mesocluster_size_sum),
               static_cast<size_t>(n_rows));
  RAFT_EXPECTS(fine_clusters_csum[n_mesoclusters] == n_clusters,
               "fine cluster numbers do not add up (%zu) to the total number of clusters (%zu)",
               static_cast<size_t>(fine_clusters_csum[n_mesoclusters]),
               static_cast<size_t>(n_clusters));
  return std::make_tuple(static_cast<IdxT>(mesocluster_size_max),
                         fine_clusters_nums_max,
                         std::move(fine_clusters_nums),
                         std::move(fine_clusters_csum));
}
/**
* Given the (coarse) mesoclusters and the distribution of fine clusters within them,
* build the fine clusters.
*
* Processing one mesocluster at a time:
* 1. Copy mesocluster data into a separate buffer
* 2. Predict fine cluster
 * 3. Refine the fine cluster centers
*
* As a result, the fine clusters are what is returned by `build_hierarchical`;
* this function returns the total number of fine clusters, which can be checked to be
* the same as the requested number of clusters.
*
* Note: this function uses at most `fine_clusters_nums_max` points per mesocluster for training;
* if one of the clusters is larger than that (as given by `mesocluster_sizes`), the extra data
* is ignored and a warning is reported.
*/
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
auto build_fine_clusters(const raft::resources& handle,
                         const kmeans_balanced_params& params,
                         IdxT dim,
                         const T* dataset_mptr,
                         const MathT* dataset_norm_mptr,
                         const LabelT* labels_mptr,
                         IdxT n_rows,
                         const IdxT* fine_clusters_nums,
                         const IdxT* fine_clusters_csum,
                         const CounterT* mesocluster_sizes,
                         IdxT n_mesoclusters,
                         IdxT mesocluster_size_max,
                         IdxT fine_clusters_nums_max,
                         MathT* cluster_centers,
                         MappingOpT mapping_op,
                         rmm::mr::device_memory_resource* managed_memory,
                         rmm::mr::device_memory_resource* device_memory) -> IdxT
{
  auto stream = resource::get_cuda_stream(handle);
  // NB: labels_mptr and mesocluster_sizes are read on the host below, and
  // mc_trainset_ids is filled on the host — hence the managed-memory resource.
  rmm::device_uvector<IdxT> mc_trainset_ids_buf(mesocluster_size_max, stream, managed_memory);
  rmm::device_uvector<MathT> mc_trainset_buf(mesocluster_size_max * dim, stream, device_memory);
  rmm::device_uvector<MathT> mc_trainset_norm_buf(mesocluster_size_max, stream, device_memory);
  auto mc_trainset_ids = mc_trainset_ids_buf.data();
  auto mc_trainset = mc_trainset_buf.data();
  auto mc_trainset_norm = mc_trainset_norm_buf.data();
  // label (cluster ID) of each vector
  rmm::device_uvector<LabelT> mc_trainset_labels(mesocluster_size_max, stream, device_memory);
  rmm::device_uvector<MathT> mc_trainset_ccenters(
    fine_clusters_nums_max * dim, stream, device_memory);
  // number of vectors in each cluster
  rmm::device_uvector<CounterT> mc_trainset_csizes_tmp(
    fine_clusters_nums_max, stream, device_memory);
  // Training clusters in each meso-cluster
  IdxT n_clusters_done = 0;
  for (IdxT i = 0; i < n_mesoclusters; i++) {
    // Collect (on the host) the row ids belonging to mesocluster i, truncating at
    // mesocluster_size_max: extra points beyond the cap are ignored for training.
    IdxT k = 0;
    for (IdxT j = 0; j < n_rows && k < mesocluster_size_max; j++) {
      if (labels_mptr[j] == LabelT(i)) { mc_trainset_ids[k++] = j; }
    }
    if (k != static_cast<IdxT>(mesocluster_sizes[i]))
      RAFT_LOG_WARN("Incorrect mesocluster size at %d. %zu vs %zu",
                    static_cast<int>(i),
                    static_cast<size_t>(k),
                    static_cast<size_t>(mesocluster_sizes[i]));
    if (k == 0) {
      RAFT_LOG_DEBUG("Empty cluster %d", i);
      RAFT_EXPECTS(fine_clusters_nums[i] == 0,
                   "Number of fine clusters must be zero for the empty mesocluster (got %d)",
                   static_cast<int>(fine_clusters_nums[i]));
      continue;
    } else {
      RAFT_EXPECTS(fine_clusters_nums[i] > 0,
                   "Number of fine clusters must be non-zero for a non-empty mesocluster");
    }
    // Gather the members of this mesocluster (converted to MathT on the fly)
    // into a dense training buffer.
    cub::TransformInputIterator<MathT, MappingOpT, const T*> mapping_itr(dataset_mptr, mapping_op);
    raft::matrix::gather(mapping_itr, dim, n_rows, mc_trainset_ids, k, mc_trainset, stream);
    if (params.metric == cuvs::distance::DistanceType::L2Expanded ||
        params.metric == cuvs::distance::DistanceType::L2SqrtExpanded) {
      // The expanded L2 metrics also need the precomputed row norms of the members.
      thrust::gather(raft::resource::get_thrust_policy(handle),
                     mc_trainset_ids,
                     mc_trainset_ids + k,
                     dataset_norm_mptr,
                     mc_trainset_norm);
    }
    // Run balanced k-means on the members to produce this mesocluster's fine centers.
    build_clusters(handle,
                   params,
                   dim,
                   mc_trainset,
                   k,
                   fine_clusters_nums[i],
                   mc_trainset_ccenters.data(),
                   mc_trainset_labels.data(),
                   mc_trainset_csizes_tmp.data(),
                   mapping_op,
                   device_memory,
                   mc_trainset_norm);
    // Publish the fine centers into the global output at their cumulative offset.
    raft::copy(cluster_centers + (dim * fine_clusters_csum[i]),
               mc_trainset_ccenters.data(),
               fine_clusters_nums[i] * dim,
               stream);
    // Wait for the device so the host can safely refill the managed id buffer
    // in the next iteration.
    resource::sync_stream(handle, stream);
    n_clusters_done += fine_clusters_nums[i];
  }
  return n_clusters_done;
}
/**
* @brief Hierarchical balanced k-means
*
* @tparam T element type
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
* @tparam MappingOpT type of the mapping operation
*
* @param[in] handle The raft handle.
* @param[in] params Structure containing the hyper-parameters
 * @param[in] dim number of columns in `cluster_centers` and `dataset`
 * @param[in] dataset a device pointer to the source dataset [n_rows, dim]
 * @param[in] n_rows number of rows in the input
 * @param[out] cluster_centers a device pointer to the found cluster centers [n_clusters, dim]
 * @param[in] n_clusters the number of clusters to build (the distance metric is taken
 *   from `params.metric`)
 * @param[in] mapping_op Mapping operation from T to MathT
*/
template <typename T, typename MathT, typename IdxT, typename MappingOpT>
void build_hierarchical(const raft::resources& handle,
                        const kmeans_balanced_params& params,
                        IdxT dim,
                        const T* dataset,
                        IdxT n_rows,
                        MathT* cluster_centers,
                        IdxT n_clusters,
                        MappingOpT mapping_op)
{
  auto stream = resource::get_cuda_stream(handle);
  using LabelT = uint32_t;
  // NOTE(review): "%u" assumes n_clusters fits an unsigned int; confirm IdxT width here.
  raft::common::nvtx::range<raft::common::nvtx::domain::raft> fun_scope(
    "build_hierarchical(%zu, %u)", static_cast<size_t>(n_rows), n_clusters);
  // Coarse level of the hierarchy: roughly sqrt(n_clusters) mesoclusters.
  IdxT n_mesoclusters = std::min(n_clusters, static_cast<IdxT>(std::sqrt(n_clusters) + 0.5));
  RAFT_LOG_DEBUG("build_hierarchical: n_mesoclusters: %u", n_mesoclusters);
  // Managed memory for buffers read on both host and device
  // (see arrange_fine_clusters / build_fine_clusters).
  rmm::mr::managed_memory_resource managed_memory;
  rmm::mr::device_memory_resource* device_memory = resource::get_workspace_resource(handle);
  auto [max_minibatch_size, mem_per_row] =
    calc_minibatch_size<MathT>(n_clusters, n_rows, dim, params.metric, std::is_same_v<T, MathT>);
  // Make sure the workspace allocator can serve the per-batch temporaries.
  auto pool_guard =
    raft::get_pool_memory_resource(device_memory, mem_per_row * size_t(max_minibatch_size));
  if (pool_guard) {
    RAFT_LOG_DEBUG("build_hierarchical: using pool memory resource with initial size %zu bytes",
                   mem_per_row * size_t(max_minibatch_size));
  }
  // Precompute the L2 norm of the dataset if relevant.
  const MathT* dataset_norm = nullptr;
  rmm::device_uvector<MathT> dataset_norm_buf(0, stream, device_memory);
  if (params.metric == cuvs::distance::DistanceType::L2Expanded ||
      params.metric == cuvs::distance::DistanceType::L2SqrtExpanded) {
    dataset_norm_buf.resize(n_rows, stream);
    // Batched so the conversion buffer inside compute_norm stays bounded.
    for (IdxT offset = 0; offset < n_rows; offset += max_minibatch_size) {
      IdxT minibatch_size = std::min<IdxT>(max_minibatch_size, n_rows - offset);
      compute_norm(handle,
                   dataset_norm_buf.data() + offset,
                   dataset + dim * offset,
                   dim,
                   minibatch_size,
                   mapping_op,
                   device_memory);
    }
    dataset_norm = (const MathT*)dataset_norm_buf.data();
  }
  /* Temporary workaround to cub::DeviceHistogram not supporting any type that isn't natively
   * supported by atomicAdd: find a supported CounterT based on the IdxT. */
  typedef typename std::conditional_t<sizeof(IdxT) == 8, unsigned long long int, unsigned int>
    CounterT;
  // build coarse clusters (mesoclusters)
  rmm::device_uvector<LabelT> mesocluster_labels_buf(n_rows, stream, &managed_memory);
  rmm::device_uvector<CounterT> mesocluster_sizes_buf(n_mesoclusters, stream, &managed_memory);
  {
    // Scoped so the mesocluster centers are freed before the fine-cluster stage.
    rmm::device_uvector<MathT> mesocluster_centers_buf(n_mesoclusters * dim, stream, device_memory);
    build_clusters(handle,
                   params,
                   dim,
                   dataset,
                   n_rows,
                   n_mesoclusters,
                   mesocluster_centers_buf.data(),
                   mesocluster_labels_buf.data(),
                   mesocluster_sizes_buf.data(),
                   mapping_op,
                   device_memory,
                   dataset_norm);
  }
  auto mesocluster_sizes = mesocluster_sizes_buf.data();
  auto mesocluster_labels = mesocluster_labels_buf.data();
  // Sync so the managed buffers are safe to read on the host below.
  resource::sync_stream(handle, stream);
  // build fine clusters
  auto [mesocluster_size_max, fine_clusters_nums_max, fine_clusters_nums, fine_clusters_csum] =
    arrange_fine_clusters(n_clusters, n_mesoclusters, n_rows, mesocluster_sizes);
  // Cap the per-mesocluster training-set size at twice the balanced average.
  const IdxT mesocluster_size_max_balanced = div_rounding_up_safe<size_t>(
    2lu * size_t(n_rows), std::max<size_t>(size_t(n_mesoclusters), 1lu));
  if (mesocluster_size_max > mesocluster_size_max_balanced) {
    RAFT_LOG_WARN(
      "build_hierarchical: built unbalanced mesoclusters (max_mesocluster_size == %u > %u). "
      "At most %u points will be used for training within each mesocluster. "
      "Consider increasing the number of training iterations `n_iters`.",
      mesocluster_size_max,
      mesocluster_size_max_balanced,
      mesocluster_size_max_balanced);
    RAFT_LOG_TRACE_VEC(mesocluster_sizes, n_mesoclusters);
    RAFT_LOG_TRACE_VEC(fine_clusters_nums.data(), n_mesoclusters);
    mesocluster_size_max = mesocluster_size_max_balanced;
  }
  auto n_clusters_done = build_fine_clusters(handle,
                                             params,
                                             dim,
                                             dataset,
                                             dataset_norm,
                                             mesocluster_labels,
                                             n_rows,
                                             fine_clusters_nums.data(),
                                             fine_clusters_csum.data(),
                                             mesocluster_sizes,
                                             n_mesoclusters,
                                             mesocluster_size_max,
                                             fine_clusters_nums_max,
                                             cluster_centers,
                                             mapping_op,
                                             &managed_memory,
                                             device_memory);
  RAFT_EXPECTS(n_clusters_done == n_clusters, "Didn't process all clusters.");
  rmm::device_uvector<CounterT> cluster_sizes(n_clusters, stream, device_memory);
  rmm::device_uvector<LabelT> labels(n_rows, stream, device_memory);
  // Fine-tuning k-means for all clusters
  //
  // (*) Since the likely cluster centroids have been calculated hierarchically already, the number
  // of iterations for fine-tuning kmeans for whole clusters should be reduced. However, there is a
  // possibility that the clusters could be unbalanced here, in which case the actual number of
  // iterations would be increased.
  //
  balancing_em_iters(handle,
                     params,
                     std::max<uint32_t>(params.n_iters / 10, 2),
                     dim,
                     dataset,
                     dataset_norm,
                     n_rows,
                     n_clusters,
                     cluster_centers,
                     labels.data(),
                     cluster_sizes.data(),
                     5,
                     MathT{0.2},
                     mapping_op,
                     device_memory);
}
} // namespace cuvs::cluster::detail
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster | rapidsai_public_repos/cuvs/cpp/include/cuvs/cluster/detail/kmeans_auto_find_k.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <thrust/host_vector.h>
#include <raft/core/logger.hpp>
#include <cuvs/cluster/detail/kmeans.cuh>
#include <raft/core/error.hpp>
#include <cuvs/stats/dispersion.cuh>
#include <raft/core/resources.hpp>
namespace cuvs::cluster::detail {
/**
 * @brief Fit k-means with `val` clusters and record the residual and the
 * cluster dispersion for this choice of k.
 *
 * The result for this k is stored at index `val` of the host-side buffers:
 * resultsView[val] receives the k-means residual and clusterDispertionView[val]
 * the dispersion of the fitted centroids.
 *
 * @param[in] handle raft handle
 * @param[in] X input data [n, d]
 * @param[inout] params k-means hyper-parameters; params.n_clusters is overwritten with `val`
 * @param[out] centroids_view buffer for the fitted centroids (first `val` rows are used)
 * @param[out] labels per-sample cluster assignments [n]
 * @param[out] clusterSizes per-cluster sample counts (first `val` entries are used)
 * @param[inout] workspace temporary device buffer for label counting
 * @param[out] clusterDispertionView host buffer of dispersions, indexed by k
 * @param[out] resultsView host buffer of residuals, indexed by k
 * @param[out] residual residual of this fit (also copied into resultsView[val])
 * @param[out] n_iter number of k-means iterations performed
 * @param[in] val the number of clusters (k) to evaluate
 * @param[in] n number of samples in X
 * @param[in] d number of features in X
 */
template <typename value_t, typename idx_t>
void compute_dispersion(raft::resources const& handle,
                        raft::device_matrix_view<const value_t, idx_t> X,
                        KMeansParams& params,
                        raft::device_matrix_view<value_t, idx_t> centroids_view,
                        raft::device_vector_view<idx_t> labels,
                        raft::device_vector_view<idx_t> clusterSizes,
                        rmm::device_uvector<char>& workspace,
                        raft::host_vector_view<value_t> clusterDispertionView,
                        raft::host_vector_view<value_t> resultsView,
                        raft::host_scalar_view<value_t> clusterDispersion_placeholder_residual,
                        raft::host_scalar_view<idx_t> n_iter,
                        int val,
                        idx_t n,
                        idx_t d)
{
  auto centroids_const_view =
    raft::make_device_matrix_view<const value_t, idx_t>(centroids_view.data_handle(), val, d);
  idx_t* clusterSizes_ptr = clusterSizes.data_handle();
  auto cluster_sizes_view =
    raft::make_device_vector_view<const idx_t, idx_t>(clusterSizes_ptr, val);
  params.n_clusters = val;
  // Fit k-means with k = val and predict labels for all samples.
  cuvs::cluster::detail::kmeans_fit_predict<value_t, idx_t>(
    handle, params, X, std::nullopt, std::make_optional(centroids_view), labels, residual, n_iter);
  detail::countLabels(handle, labels.data_handle(), clusterSizes.data_handle(), n, val, workspace);
  resultsView[val] = residual[0];
  // NOTE(review): this calls raft::stats::cluster_dispersion while the file includes
  // <cuvs/stats/dispersion.cuh> — confirm the intended namespace after the cuvs port.
  clusterDispertionView[val] = raft::stats::cluster_dispersion(
    handle, centroids_const_view, cluster_sizes_view, std::nullopt, n);
}
/**
 * @brief Find a good number of clusters `k` for k-means on X.
 *
 * Performs a binary search over k in [kmin, kmax], fitting k-means at each
 * probed k and scoring it with a Calinski-Harabasz-style objective built from
 * cluster dispersion and residual. On return:
 *   - best_k[0]  : the chosen number of clusters
 *   - residual[0]: inertia of the last k-means fit performed
 *   - n_iter[0]  : iteration count of the last k-means fit performed
 *
 * @param handle raft resources handle
 * @param X data matrix [n_samples, n_features] (device)
 * @param best_k [out] selected k (host scalar)
 * @param residual [out] inertia of the final fit (host scalar)
 * @param n_iter [out] iterations of the final fit (host scalar)
 * @param kmax upper bound (inclusive) of the search range; must be <= n
 * @param kmin lower bound of the search range (values < 2 are bumped to 2)
 * @param maxiter maximum k-means iterations per fit
 * @param tol k-means convergence tolerance
 */
template <typename idx_t, typename value_t>
void find_k(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t> X,
            raft::host_scalar_view<idx_t> best_k,
            raft::host_scalar_view<value_t> residual,
            raft::host_scalar_view<idx_t> n_iter,
            idx_t kmax,
            idx_t kmin    = 1,
            idx_t maxiter = 100,
            value_t tol   = 1e-2)
{
  idx_t n = X.extent(0);
  idx_t d = X.extent(1);

  RAFT_EXPECTS(n >= 1, "n must be >= 1");
  RAFT_EXPECTS(d >= 1, "d must be >= 1");
  RAFT_EXPECTS(kmin >= 1, "kmin must be >= 1");
  RAFT_EXPECTS(kmax <= n, "kmax must be <= number of data samples in X");
  RAFT_EXPECTS(tol >= 0, "tolerance must be >= 0");
  RAFT_EXPECTS(maxiter >= 0, "maxiter must be >= 0");

  // Device memory: one centroid buffer sized for the largest k probed.
  auto centroids    = raft::make_device_matrix<value_t, idx_t>(handle, kmax, X.extent(1));
  auto clusterSizes = raft::make_device_vector<idx_t>(handle, kmax);
  auto labels       = raft::make_device_vector<idx_t>(handle, n);

  // Scratch space grown on demand by countLabels. Fully qualify the resource
  // accessor: this namespace is cuvs::cluster::detail, so an unqualified
  // `resource::get_cuda_stream` would not resolve to raft::resource.
  rmm::device_uvector<char> workspace(0, raft::resource::get_cuda_stream(handle));

  // Host memory: metric vectors are indexed directly by k, hence size kmax + 1.
  auto results           = raft::make_host_vector<value_t>(kmax + 1);
  auto clusterDispersion = raft::make_host_vector<value_t>(kmax + 1);

  auto clusterDispertionView = clusterDispersion.view();
  auto resultsView           = results.view();

  // Binary search for the best k.
  int left   = kmin;  // must be at least 2
  int right  = kmax;  // int(floor(len(data)/2)) #assumption of clusters of size 2 at least
  int mid    = ((unsigned int)left + (unsigned int)right) >> 1;
  int oldmid = mid;
  int tests  = 0;
  double objective[3];      // 0 = left of mid, 1 = right of mid, 2 = at mid
  if (left == 1) left = 2;  // at least do 2 clusters

  KMeansParams params;
  params.max_iter = maxiter;
  params.tol      = tol;

  // Evaluate the left edge of the search interval.
  auto centroids_view =
    raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), left, d);
  compute_dispersion<value_t, idx_t>(handle,
                                     X,
                                     params,
                                     centroids_view,
                                     labels.view(),
                                     clusterSizes.view(),
                                     workspace,
                                     clusterDispertionView,
                                     resultsView,
                                     residual,
                                     n_iter,
                                     left,
                                     n,
                                     d);

  // Evaluate the right edge; retry (up to 3 times) while its residual beats the left's.
  resultsView[right] = 1e20;
  while (resultsView[right] > resultsView[left] && tests < 3) {
    centroids_view =
      raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), right, d);
    compute_dispersion<value_t, idx_t>(handle,
                                       X,
                                       params,
                                       centroids_view,
                                       labels.view(),
                                       clusterSizes.view(),
                                       workspace,
                                       clusterDispertionView,
                                       resultsView,
                                       residual,
                                       n_iter,
                                       right,
                                       n,
                                       d);
    tests += 1;
  }

  // NOTE(review): (n - left) / (left - 1) is integer (idx_t) division before the
  // value_t multiply, matching the upstream implementation — confirm the
  // truncation is intended.
  objective[0] = (n - left) / (left - 1) * clusterDispertionView[left] / resultsView[left];
  objective[1] = (n - right) / (right - 1) * clusterDispertionView[right] / resultsView[right];
  while (left < right - 1) {
    resultsView[mid] = 1e20;
    tests            = 0;
    while (resultsView[mid] > resultsView[left] && tests < 3) {
      centroids_view =
        raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), mid, d);
      compute_dispersion<value_t, idx_t>(handle,
                                         X,
                                         params,
                                         centroids_view,
                                         labels.view(),
                                         clusterSizes.view(),
                                         workspace,
                                         clusterDispertionView,
                                         resultsView,
                                         residual,
                                         n_iter,
                                         mid,
                                         n,
                                         d);
      // Nudge mid toward a k whose residual is competitive before giving up.
      if (resultsView[mid] > resultsView[left] && (mid + 1) < right) {
        mid += 1;
        resultsView[mid] = 1e20;
      } else if (resultsView[mid] > resultsView[left] && (mid - 1) > left) {
        mid -= 1;
        resultsView[mid] = 1e20;
      }
      tests += 1;
    }

    // Maximize Calinski-Harabasz Index; minimize residual per cluster.
    objective[0] = (n - left) / (left - 1) * clusterDispertionView[left] / resultsView[left];
    objective[1] = (n - right) / (right - 1) * clusterDispertionView[right] / resultsView[right];
    objective[2] = (n - mid) / (mid - 1) * clusterDispertionView[mid] / resultsView[mid];
    objective[0] = (objective[2] - objective[0]) / (mid - left);
    objective[1] = (objective[1] - objective[2]) / (right - mid);

    if (objective[0] > 0 && objective[1] < 0) {
      // our point is in the left-of-mid side
      right = mid;
    } else {
      left = mid;
    }
    oldmid = mid;
    mid    = ((unsigned int)right + (unsigned int)left) >> 1;
  }

  best_k[0]    = right;
  objective[0] = (n - left) / (left - 1) * clusterDispertionView[left] / resultsView[left];
  objective[1] = (n - oldmid) / (oldmid - 1) * clusterDispertionView[oldmid] / resultsView[oldmid];
  if (objective[1] < objective[0]) { best_k[0] = left; }

  // If best_k isn't what we just ran, re-run to get correct centroids and dist data on
  // return -> this saves memory
  if (best_k[0] != oldmid) {
    auto centroids_view =
      raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), best_k[0], d);

    params.n_clusters = best_k[0];
    cuvs::cluster::detail::kmeans_fit_predict<value_t, idx_t>(handle,
                                                              params,
                                                              X,
                                                              std::nullopt,
                                                              std::make_optional(centroids_view),
                                                              labels.view(),
                                                              residual,
                                                              n_iter);
  }
}
} // namespace cuvs::cluster::detail | 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/distance/pairwise_distance.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/distance/distance_types.hpp>
namespace raft::runtime::distance {

/**
 * @defgroup pairwise_distance_runtime Pairwise Distances Runtime API
 * @{
 */

/**
 * @brief Compute all pairwise distances between two sets of float vectors.
 *
 * @param[in]  handle     raft resources handle
 * @param[in]  x          first set of vectors (device); presumably `m x k` given the
 *                        gemm-style m/n/k parameters — confirm against the implementation
 * @param[in]  y          second set of vectors (device); presumably `n x k`
 * @param[out] dists      output distances (device); presumably `m x n`
 * @param[in]  m          number of vectors in x
 * @param[in]  n          number of vectors in y
 * @param[in]  k          vector dimensionality
 * @param[in]  metric     distance metric to compute
 * @param[in]  isRowMajor whether x/y/dists are row-major
 * @param[in]  metric_arg metric-dependent extra argument (e.g. the Minkowski power) —
 *                        TODO confirm which metrics consume it
 */
void pairwise_distance(raft::resources const& handle,
                       float* x,
                       float* y,
                       float* dists,
                       int m,
                       int n,
                       int k,
                       cuvs::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg);

/** @brief Double-precision overload of pairwise_distance; same contract as above. */
void pairwise_distance(raft::resources const& handle,
                       double* x,
                       double* y,
                       double* dists,
                       int m,
                       int n,
                       int k,
                       cuvs::distance::DistanceType metric,
                       bool isRowMajor,
                       float metric_arg);

/** @} */  // end group pairwise_distance_runtime

}  // namespace raft::runtime::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/distance/fused_l2_nn.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/resources.hpp>
namespace raft::runtime::distance {

/**
 * @defgroup fused_l2_nn_min_arg_runtime Fused L2 1NN Runtime API
 * @{
 */

/**
 * @brief Wrapper around fusedL2NN with minimum reduction operators.
 *
 * fusedL2NN cannot be compiled in the distance library due to the lambda
 * operators, so this wrapper covers the most common case (minimum).
 *
 * NOTE(review): given the `int*` output type and the "_min_arg" name, `min`
 * appears to hold the argmin index of each row's nearest neighbor rather than
 * a distance — confirm against the implementation.
 *
 * @param[in] handle raft handle
 * @param[out] min will contain the reduced output (Length = `m`)
 * (on device)
 * @param[in] x first matrix. Row major. Dim = `m x k`.
 * (on device).
 * @param[in] y second matrix. Row major. Dim = `n x k`.
 * (on device).
 * @param[in] m gemm m
 * @param[in] n gemm n
 * @param[in] k gemm k
 * @param[in] sqrt Whether the output `minDist` should contain L2-sqrt
 */
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const float* x,
                         const float* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt);

/** @brief Double-precision overload of fused_l2_nn_min_arg; same contract as above. */
void fused_l2_nn_min_arg(raft::resources const& handle,
                         int* min,
                         const double* x,
                         const double* y,
                         int m,
                         int n,
                         int k,
                         bool sqrt);

/** @} */  // end group fused_l2_nn_min_arg_runtime

}  // end namespace raft::runtime::distance
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/neighbors/ivf_pq.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/ivf_pq_types.hpp>
namespace raft::runtime::neighbors::ivf_pq {

// We define overloads for build and extend with void return type. This is used in the Cython
// wrappers, where exception handling is not compatible with return type that has nontrivial
// constructor.

// Declares, per (T, IdxT) pair:
//   - build:  returning-by-value and output-pointer overloads
//   - extend: returning-by-value and output-pointer overloads
// (Comments must stay outside the macro body: '//' would swallow the line
// continuations.)
#define RAFT_DECL_BUILD_EXTEND(T, IdxT)                            \
  [[nodiscard]] cuvs::neighbors::ivf_pq::index<IdxT> build(        \
    raft::resources const& handle,                                 \
    const cuvs::neighbors::ivf_pq::index_params& params,           \
    raft::device_matrix_view<const T, IdxT, row_major> dataset);   \
                                                                   \
  void build(raft::resources const& handle,                        \
             const cuvs::neighbors::ivf_pq::index_params& params,  \
             raft::device_matrix_view<const T, IdxT, row_major> dataset, \
             cuvs::neighbors::ivf_pq::index<IdxT>* idx);           \
                                                                   \
  [[nodiscard]] cuvs::neighbors::ivf_pq::index<IdxT> extend(       \
    raft::resources const& handle,                                 \
    raft::device_matrix_view<const T, IdxT, row_major> new_vectors, \
    std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
    const cuvs::neighbors::ivf_pq::index<IdxT>& idx);              \
                                                                   \
  void extend(raft::resources const& handle,                       \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors, \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
              cuvs::neighbors::ivf_pq::index<IdxT>* idx);

// Instantiate declarations for the supported element types (index type is int64_t).
RAFT_DECL_BUILD_EXTEND(float, int64_t);
RAFT_DECL_BUILD_EXTEND(int8_t, int64_t);
RAFT_DECL_BUILD_EXTEND(uint8_t, int64_t);

#undef RAFT_DECL_BUILD_EXTEND

// Declares search() for a given (T, IdxT): queries in, neighbor ids + float distances out.
#define RAFT_DECL_SEARCH(T, IdxT)                                    \
  void search(raft::resources const& handle,                         \
              const cuvs::neighbors::ivf_pq::search_params& params,  \
              const cuvs::neighbors::ivf_pq::index<IdxT>& idx,       \
              raft::device_matrix_view<const T, IdxT, row_major> queries, \
              raft::device_matrix_view<IdxT, IdxT, row_major> neighbors, \
              raft::device_matrix_view<float, IdxT, row_major> distances);

RAFT_DECL_SEARCH(float, int64_t);
RAFT_DECL_SEARCH(int8_t, int64_t);
RAFT_DECL_SEARCH(uint8_t, int64_t);

#undef RAFT_DECL_SEARCH

/**
 * Save the index to file.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] filename the filename for saving the index
 * @param[in] index IVF-PQ index
 *
 */
void serialize(raft::resources const& handle,
               const std::string& filename,
               const cuvs::neighbors::ivf_pq::index<int64_t>& index);

/**
 * Load index from file.
 *
 * Experimental, both the API and the serialization format are subject to change.
 *
 * @param[in] handle the raft handle
 * @param[in] filename the name of the file that stores the index
 * @param[out] index IVF-PQ index (populated from the file)
 *
 */
void deserialize(raft::resources const& handle,
                 const std::string& filename,
                 cuvs::neighbors::ivf_pq::index<int64_t>* index);

}  // namespace raft::runtime::neighbors::ivf_pq
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/neighbors/ivf_flat.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/ivf_flat_types.hpp>
#include <string>
namespace raft::runtime::neighbors::ivf_flat {

// We define overloads for build and extend with void return type. This is used in the Cython
// wrappers, where exception handling is not compatible with return type that has nontrivial
// constructor.

// Declares, per (T, IdxT) pair: build/extend (by-value and output-parameter
// forms) plus file- and string-based (de)serialization entry points.
// (Comments must stay outside the macro body: '//' would swallow the line
// continuations.)
#define RAFT_INST_BUILD_EXTEND(T, IdxT)                                       \
  auto build(raft::resources const& handle,                                   \
             const cuvs::neighbors::ivf_flat::index_params& params,           \
             raft::device_matrix_view<const T, IdxT, row_major> dataset)      \
    ->cuvs::neighbors::ivf_flat::index<T, IdxT>;                              \
                                                                              \
  auto extend(raft::resources const& handle,                                  \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors, \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
              const cuvs::neighbors::ivf_flat::index<T, IdxT>& orig_index)    \
    ->cuvs::neighbors::ivf_flat::index<T, IdxT>;                              \
                                                                              \
  void build(raft::resources const& handle,                                   \
             const cuvs::neighbors::ivf_flat::index_params& params,           \
             raft::device_matrix_view<const T, IdxT, row_major> dataset,      \
             cuvs::neighbors::ivf_flat::index<T, IdxT>& idx);                 \
                                                                              \
  void extend(raft::resources const& handle,                                  \
              raft::device_matrix_view<const T, IdxT, row_major> new_vectors, \
              std::optional<raft::device_vector_view<const IdxT, IdxT>> new_indices, \
              cuvs::neighbors::ivf_flat::index<T, IdxT>* idx);                \
                                                                              \
  void serialize_file(raft::resources const& handle,                          \
                      const std::string& filename,                            \
                      const cuvs::neighbors::ivf_flat::index<T, IdxT>& index); \
                                                                              \
  void deserialize_file(raft::resources const& handle,                        \
                        const std::string& filename,                          \
                        cuvs::neighbors::ivf_flat::index<T, IdxT>* index);    \
  void serialize(raft::resources const& handle,                               \
                 std::string& str,                                            \
                 const cuvs::neighbors::ivf_flat::index<T, IdxT>& index);     \
  void deserialize(raft::resources const& handle,                             \
                   const std::string& str,                                    \
                   cuvs::neighbors::ivf_flat::index<T, IdxT>*);

// Instantiate declarations for the supported element types (index type is int64_t).
RAFT_INST_BUILD_EXTEND(float, int64_t)
RAFT_INST_BUILD_EXTEND(int8_t, int64_t)
RAFT_INST_BUILD_EXTEND(uint8_t, int64_t)

#undef RAFT_INST_BUILD_EXTEND

// Declares search() for a given (T, IdxT): queries in, neighbor ids + float distances out.
#define RAFT_INST_SEARCH(T, IdxT)                              \
  void search(raft::resources const&,                          \
              cuvs::neighbors::ivf_flat::search_params const&, \
              cuvs::neighbors::ivf_flat::index<T, IdxT> const&, \
              raft::device_matrix_view<const T, IdxT, row_major>, \
              raft::device_matrix_view<IdxT, IdxT, row_major>, \
              raft::device_matrix_view<float, IdxT, row_major>);

RAFT_INST_SEARCH(float, int64_t);
RAFT_INST_SEARCH(int8_t, int64_t);
RAFT_INST_SEARCH(uint8_t, int64_t);

#undef RAFT_INST_SEARCH

}  // namespace raft::runtime::neighbors::ivf_flat
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/neighbors/refine.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
// #include <raft/core/host_mdspan.hpp>
namespace raft::runtime::neighbors {

// Declares refine() overloads (device- and host-resident data) for a given
// (IDX_T, DATA_T) pair: re-ranks `neighbor_candidates` against `queries` using
// exact distances over `dataset`, writing the refined ids and distances.
// (Comments must stay outside the macro body: '//' would swallow the line
// continuations.)
#define RAFT_INST_REFINE(IDX_T, DATA_T)                                                     \
  void refine(raft::resources const& handle,                                                \
              raft::device_matrix_view<const DATA_T, int64_t, row_major> dataset,           \
              raft::device_matrix_view<const DATA_T, int64_t, row_major> queries,           \
              raft::device_matrix_view<const IDX_T, int64_t, row_major> neighbor_candidates, \
              raft::device_matrix_view<IDX_T, int64_t, row_major> indices,                  \
              raft::device_matrix_view<float, int64_t, row_major> distances,                \
              distance::DistanceType metric);                                               \
                                                                                            \
  void refine(raft::resources const& handle,                                                \
              raft::host_matrix_view<const DATA_T, int64_t, row_major> dataset,             \
              raft::host_matrix_view<const DATA_T, int64_t, row_major> queries,             \
              raft::host_matrix_view<const IDX_T, int64_t, row_major> neighbor_candidates,  \
              raft::host_matrix_view<IDX_T, int64_t, row_major> indices,                    \
              raft::host_matrix_view<float, int64_t, row_major> distances,                  \
              distance::DistanceType metric);

// Instantiate declarations for the supported data types (index type is int64_t).
RAFT_INST_REFINE(int64_t, float);
RAFT_INST_REFINE(int64_t, uint8_t);
RAFT_INST_REFINE(int64_t, int8_t);

#undef RAFT_INST_REFINE

}  // namespace raft::runtime::neighbors
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/neighbors/brute_force.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
namespace raft::runtime::neighbors::brute_force {

// Declares knn() for one (index type, data type, matrix index type, layout) combination:
// exact brute-force k-nearest-neighbor search of `search` rows against `index` rows.
// Default arguments (metric, metric_arg, global_id_offset) are baked into the generated
// declaration. (Comments must stay outside the macro body: '//' would swallow the line
// continuations.)
#define RAFT_INST_BFKNN(IDX_T, DATA_T, MATRIX_IDX_T, INDEX_LAYOUT, SEARCH_LAYOUT)  \
  void knn(raft::resources const& handle,                                          \
           raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, INDEX_LAYOUT> index, \
           raft::device_matrix_view<const DATA_T, MATRIX_IDX_T, SEARCH_LAYOUT> search, \
           raft::device_matrix_view<IDX_T, MATRIX_IDX_T, row_major> indices,       \
           raft::device_matrix_view<DATA_T, MATRIX_IDX_T, row_major> distances,    \
           distance::DistanceType metric  = distance::DistanceType::L2Unexpanded,  \
           std::optional<float> metric_arg = std::make_optional<float>(2.0f),      \
           std::optional<IDX_T> global_id_offset = std::nullopt);

// Currently only float data with int64_t indices, row-major on both sides.
RAFT_INST_BFKNN(int64_t, float, int64_t, raft::row_major, raft::row_major);

#undef RAFT_INST_BFKNN

}  // namespace raft::runtime::neighbors::brute_force
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/neighbors/cagra.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/neighbors/cagra_types.hpp>
#include <cuvs/neighbors/ivf_pq_types.hpp>
#include <string>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_device_accessor.hpp>
#include <raft/core/mdspan.hpp>
namespace raft::runtime::neighbors::cagra {

// Using device and host_matrix_view avoids needing to typedef mutltiple mdspans based on accessors

// Declares, per (T, IdxT) pair: build from device or host data (by-value and
// output-parameter forms), search, and file/string (de)serialization.
// (Comments must stay outside the macro body: '//' would swallow the line
// continuations.)
#define RAFT_INST_CAGRA_FUNCS(T, IdxT)                                      \
  auto build(raft::resources const& handle,                                 \
             const cuvs::neighbors::cagra::index_params& params,            \
             raft::device_matrix_view<const T, int64_t, row_major> dataset) \
    ->cuvs::neighbors::cagra::index<T, IdxT>;                               \
                                                                            \
  auto build(raft::resources const& handle,                                 \
             const cuvs::neighbors::cagra::index_params& params,            \
             raft::host_matrix_view<const T, int64_t, row_major> dataset)   \
    ->cuvs::neighbors::cagra::index<T, IdxT>;                               \
                                                                            \
  void build_device(raft::resources const& handle,                          \
                    const cuvs::neighbors::cagra::index_params& params,     \
                    raft::device_matrix_view<const T, int64_t, row_major> dataset, \
                    cuvs::neighbors::cagra::index<T, IdxT>& idx);           \
                                                                            \
  void build_host(raft::resources const& handle,                            \
                  const cuvs::neighbors::cagra::index_params& params,       \
                  raft::host_matrix_view<const T, int64_t, row_major> dataset, \
                  cuvs::neighbors::cagra::index<T, IdxT>& idx);             \
                                                                            \
  void search(raft::resources const& handle,                                \
              cuvs::neighbors::cagra::search_params const& params,          \
              const cuvs::neighbors::cagra::index<T, IdxT>& index,          \
              raft::device_matrix_view<const T, int64_t, row_major> queries, \
              raft::device_matrix_view<IdxT, int64_t, row_major> neighbors, \
              raft::device_matrix_view<float, int64_t, row_major> distances); \
  void serialize_file(raft::resources const& handle,                        \
                      const std::string& filename,                          \
                      const cuvs::neighbors::cagra::index<T, IdxT>& index,  \
                      bool include_dataset = true);                         \
                                                                            \
  void deserialize_file(raft::resources const& handle,                      \
                        const std::string& filename,                        \
                        cuvs::neighbors::cagra::index<T, IdxT>* index);     \
  void serialize(raft::resources const& handle,                             \
                 std::string& str,                                          \
                 const cuvs::neighbors::cagra::index<T, IdxT>& index,       \
                 bool include_dataset = true);                              \
                                                                            \
  void deserialize(raft::resources const& handle,                           \
                   const std::string& str,                                  \
                   cuvs::neighbors::cagra::index<T, IdxT>* index);

// Instantiate declarations for the supported element types (graph index type is uint32_t).
RAFT_INST_CAGRA_FUNCS(float, uint32_t);
RAFT_INST_CAGRA_FUNCS(int8_t, uint32_t);
RAFT_INST_CAGRA_FUNCS(uint8_t, uint32_t);

#undef RAFT_INST_CAGRA_FUNCS

// Declares graph-optimization entry points that prune a dense knn graph
// (device- or host-resident input) into the final CAGRA search graph.
#define RAFT_INST_CAGRA_OPTIMIZE(IdxT)                                          \
  void optimize_device(raft::resources const& res,                              \
                       raft::device_matrix_view<IdxT, int64_t, row_major> knn_graph, \
                       raft::host_matrix_view<IdxT, int64_t, row_major> new_graph); \
                                                                                \
  void optimize_host(raft::resources const& res,                                \
                     raft::host_matrix_view<IdxT, int64_t, row_major> knn_graph, \
                     raft::host_matrix_view<IdxT, int64_t, row_major> new_graph);

RAFT_INST_CAGRA_OPTIMIZE(uint32_t);

#undef RAFT_INST_CAGRA_OPTIMIZE

}  // namespace raft::runtime::neighbors::cagra
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/matrix/select_k.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <optional>
namespace raft::runtime::matrix {

/**
 * @brief Row-wise top-k selection.
 *
 * Selects k values per row of `in_val` (k is implied by out_val.extent(1)) and
 * writes them to `out_val` with their source indices in `out_idx`.
 *
 * @param[in]  handle     raft resources handle
 * @param[in]  in_val     input values, one selection problem per row (device)
 * @param[in]  in_idx     optional explicit indices paired with in_val; when absent,
 *                        positions within the row are presumably used — TODO confirm
 * @param[out] out_val    selected values, [n_rows, k] (device)
 * @param[out] out_idx    indices of the selected values (device)
 * @param[in]  select_min true to select the k smallest values, false for the k largest
 */
void select_k(const resources& handle,
              raft::device_matrix_view<const float, int64_t, row_major> in_val,
              std::optional<raft::device_matrix_view<const int64_t, int64_t, row_major>> in_idx,
              raft::device_matrix_view<float, int64_t, row_major> out_val,
              raft::device_matrix_view<int64_t, int64_t, row_major> out_idx,
              bool select_min);

}  // namespace raft::runtime::matrix
| 0 |
rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime | rapidsai_public_repos/cuvs/cpp/include/cuvs_runtime/cluster/kmeans.hpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuvs/distance/distance_types.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <cuvs/cluster/kmeans_types.hpp>
namespace cuvs::runtime::cluster::kmeans {

/**
 * @defgroup kmeans_runtime Kmeans Runtime API
 * @{
 */

/**
 * @brief Single centroid-update step of Lloyd's algorithm.
 *
 * @param[in]  handle             raft resources handle
 * @param[in]  X                  data, presumably row-major [n_samples, n_features] (device)
 * @param[in]  n_samples          number of rows in X
 * @param[in]  n_features         number of columns in X
 * @param[in]  n_clusters         number of centroids
 * @param[in]  sample_weights     optional per-sample weights (device); semantics of
 *                                nullptr are defined by the implementation — TODO confirm
 * @param[in]  centroids          current centroids [n_clusters, n_features] (device)
 * @param[in]  labels             per-sample cluster assignment (device)
 * @param[out] new_centroids      updated centroids (device)
 * @param[out] weight_per_cluster accumulated weight per cluster (device)
 */
void update_centroids(raft::resources const& handle,
                      const float* X,
                      int n_samples,
                      int n_features,
                      int n_clusters,
                      const float* sample_weights,
                      const float* centroids,
                      const int* labels,
                      float* new_centroids,
                      float* weight_per_cluster);

/** @brief Double-precision overload of update_centroids; same contract as above. */
void update_centroids(raft::resources const& handle,
                      const double* X,
                      int n_samples,
                      int n_features,
                      int n_clusters,
                      const double* sample_weights,
                      const double* centroids,
                      const int* labels,
                      double* new_centroids,
                      double* weight_per_cluster);

// NOTE: this header lives in namespace cuvs::runtime (unlike the sibling runtime
// headers in raft::runtime), so `row_major` must be qualified as raft::row_major
// to resolve.

/**
 * @brief Fit k-means clustering on X.
 *
 * @param[in]    X         data matrix (device)
 * @param[in]    sample_weight optional per-sample weights
 * @param[inout] centroids initial centroids in, fitted centroids out
 * @param[out]   inertia   final sum of squared distances to closest centroid
 * @param[out]   n_iter    number of iterations executed
 */
void fit(raft::resources const& handle,
         const cuvs::cluster::kmeans::KMeansParams& params,
         raft::device_matrix_view<const float, int, raft::row_major> X,
         std::optional<raft::device_vector_view<const float, int>> sample_weight,
         raft::device_matrix_view<float, int, raft::row_major> centroids,
         raft::host_scalar_view<float, int> inertia,
         raft::host_scalar_view<int, int> n_iter);

/** @brief Double-precision overload of fit; same contract as above. */
void fit(raft::resources const& handle,
         const cuvs::cluster::kmeans::KMeansParams& params,
         raft::device_matrix_view<const double, int, raft::row_major> X,
         std::optional<raft::device_vector_view<const double, int>> sample_weight,
         raft::device_matrix_view<double, int, raft::row_major> centroids,
         raft::host_scalar_view<double, int> inertia,
         raft::host_scalar_view<int, int> n_iter);

/**
 * @brief k-means++ centroid initialization.
 *
 * @param[in]  X         data matrix (device)
 * @param[out] centroids initialized centroids (device)
 */
void init_plus_plus(raft::resources const& handle,
                    const cuvs::cluster::kmeans::KMeansParams& params,
                    raft::device_matrix_view<const float, int, raft::row_major> X,
                    raft::device_matrix_view<float, int, raft::row_major> centroids);

/** @brief Double-precision overload of init_plus_plus; same contract as above. */
void init_plus_plus(raft::resources const& handle,
                    const cuvs::cluster::kmeans::KMeansParams& params,
                    raft::device_matrix_view<const double, int, raft::row_major> X,
                    raft::device_matrix_view<double, int, raft::row_major> centroids);

/**
 * @brief Compute the k-means clustering cost of X against fixed centroids.
 *
 * @param[out] cost total cost (host or device scope per implementation — TODO confirm)
 */
void cluster_cost(raft::resources const& handle,
                  const float* X,
                  int n_samples,
                  int n_features,
                  int n_clusters,
                  const float* centroids,
                  float* cost);

/** @brief Double-precision overload of cluster_cost; same contract as above. */
void cluster_cost(raft::resources const& handle,
                  const double* X,
                  int n_samples,
                  int n_features,
                  int n_clusters,
                  const double* centroids,
                  double* cost);

/** @} */  // end group kmeans_runtime

}  // namespace cuvs::runtime::cluster::kmeans
| 0 |
rapidsai_public_repos/cuvs/cpp/bench | rapidsai_public_repos/cuvs/cpp/bench/ann/CMakeLists.txt | # =============================================================================
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# ##################################################################################################
# * benchmark options ------------------------------------------------------------------------------
option(CUVS_BENCH_USE_FAISS_GPU_FLAT "Include faiss' brute-force knn algorithm in benchmark" ON)
option(CUVS_BENCH_USE_FAISS_GPU_IVF_FLAT "Include faiss' ivf flat algorithm in benchmark" ON)
option(CUVS_BENCH_USE_FAISS_GPU_IVF_PQ "Include faiss' ivf pq algorithm in benchmark" ON)
# Fix: this option was declared twice with slightly different descriptions; keep one.
option(CUVS_BENCH_USE_FAISS_CPU_FLAT "Include faiss' cpu brute-force knn algorithm in benchmark" ON)
option(CUVS_BENCH_USE_FAISS_CPU_IVF_FLAT "Include faiss' cpu ivf flat algorithm in benchmark" ON)
option(CUVS_BENCH_USE_FAISS_CPU_IVF_PQ "Include faiss' cpu ivf pq algorithm in benchmark" ON)
option(CUVS_BENCH_USE_RAFT_IVF_FLAT "Include raft's ivf flat algorithm in benchmark" ON)
option(CUVS_BENCH_USE_RAFT_IVF_PQ "Include raft's ivf pq algorithm in benchmark" ON)
# Fix: this flag is referenced later (CUVS_BENCH_USE_RAFT computation and the
# RAFT_BRUTE_FORCE target) but was never declared. Default OFF preserves the
# previous behavior, where the undefined variable evaluated as false.
option(CUVS_BENCH_USE_RAFT_BRUTE_FORCE "Include raft's brute-force knn algorithm in benchmark" OFF)
option(CUVS_BENCH_USE_RAFT_CAGRA "Include raft's CAGRA in benchmark" ON)
# Fix: description was a copy-paste of the plain CAGRA option.
option(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB "Include raft's CAGRA with hnswlib search in benchmark"
       ON
)
option(CUVS_BENCH_USE_HNSWLIB "Include hnsw algorithm in benchmark" ON)
option(CUVS_BENCH_USE_GGNN "Include ggnn algorithm in benchmark" ON)
option(CUVS_BENCH_SINGLE_EXE "Make a single executable with benchmark as shared library modules"
       OFF
)
# ##################################################################################################
# * Process options ----------------------------------------------------------
find_package(Threads REQUIRED)

if(BUILD_CPU_ONLY)
  # Include necessary logging dependencies
  include(cmake/thirdparty/get_fmt.cmake)
  include(cmake/thirdparty/get_spdlog.cmake)
  # Force all GPU-dependent algorithms off for CPU-only builds.
  # NOTE(review): the FAISS_CPU_* and HNSWLIB options are intentionally left on
  # here since they do not need CUDA — confirm this matches the build matrix.
  set(CUVS_FAISS_ENABLE_GPU OFF)
  set(CUVS_BENCH_USE_FAISS_GPU_FLAT OFF)
  set(CUVS_BENCH_USE_FAISS_GPU_IVF_FLAT OFF)
  set(CUVS_BENCH_USE_FAISS_GPU_IVF_PQ OFF)
  set(CUVS_BENCH_USE_RAFT_IVF_FLAT OFF)
  set(CUVS_BENCH_USE_RAFT_IVF_PQ OFF)
  set(CUVS_BENCH_USE_RAFT_CAGRA OFF)
  set(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB OFF)
  set(CUVS_BENCH_USE_GGNN OFF)
else()
  # Disable faiss benchmarks on CUDA 12 since faiss is not yet CUDA 12-enabled.
  # https://github.com/rapidsai/raft/issues/1627
  if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.0.0)
    set(CUVS_FAISS_ENABLE_GPU OFF)
    set(CUVS_BENCH_USE_FAISS_GPU_FLAT OFF)
    set(CUVS_BENCH_USE_FAISS_GPU_IVF_FLAT OFF)
    set(CUVS_BENCH_USE_FAISS_GPU_IVF_PQ OFF)
    set(CUVS_BENCH_USE_FAISS_CPU_FLAT OFF)
    set(CUVS_BENCH_USE_FAISS_CPU_IVF_PQ OFF)
    set(CUVS_BENCH_USE_FAISS_CPU_IVF_FLAT OFF)
  else()
    set(CUVS_FAISS_ENABLE_GPU ON)
  endif()
endif()

# Aggregate flags: any faiss variant enables the faiss dependency fetch below.
set(CUVS_BENCH_USE_FAISS OFF)
if(CUVS_BENCH_USE_FAISS_GPU_FLAT
   OR CUVS_BENCH_USE_FAISS_GPU_IVF_PQ
   OR CUVS_BENCH_USE_FAISS_GPU_IVF_FLAT
   OR CUVS_BENCH_USE_FAISS_CPU_FLAT
   OR CUVS_BENCH_USE_FAISS_CPU_IVF_PQ
   OR CUVS_BENCH_USE_FAISS_CPU_IVF_FLAT
)
  set(CUVS_BENCH_USE_FAISS ON)
  set(RAFT_USE_FAISS_STATIC ON)
endif()

# Aggregate flag: any raft-backed algorithm requires the raft/cuvs targets.
set(CUVS_BENCH_USE_RAFT OFF)
if(CUVS_BENCH_USE_RAFT_IVF_PQ
   OR CUVS_BENCH_USE_RAFT_BRUTE_FORCE
   OR CUVS_BENCH_USE_RAFT_IVF_FLAT
   OR CUVS_BENCH_USE_RAFT_CAGRA
   OR CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB
)
  set(CUVS_BENCH_USE_RAFT ON)
endif()
# ##################################################################################################
# * Fetch requirements -------------------------------------------------------------
# Fetch third-party dependencies only for the algorithms actually enabled.
if(CUVS_BENCH_USE_HNSWLIB OR CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB)
  include(cmake/thirdparty/get_hnswlib.cmake)
endif()

# JSON parsing is needed by every benchmark (config files), fetch unconditionally.
include(cmake/thirdparty/get_nlohmann_json.cmake)

if(CUVS_BENCH_USE_GGNN)
  include(cmake/thirdparty/get_ggnn.cmake)
endif()

if(CUVS_BENCH_USE_FAISS)
  # We need to ensure that faiss has all the conda information. So we currently use the very ugly
  # hammer of `link_libraries` to ensure that all targets in this directory and the faiss directory
  # will have the conda includes/link dirs
  link_libraries($<TARGET_NAME_IF_EXISTS:conda_env>)
  include(cmake/thirdparty/get_faiss.cmake)
endif()
# ##################################################################################################
# * Configure tests function-------------------------------------------------------------
# Declares one benchmark target named <NAME>_CUVS_BENCH. Depending on
# CUVS_BENCH_SINGLE_EXE it is either a standalone executable with its own
# gbench main, or a shared-library plugin loaded by the single CUVS_BENCH
# executable. Arguments: NAME (algorithm id), PATH (sources), LINKS (extra
# libraries), CXXFLAGS (extra compile options), INCLUDES (extra include dirs).
function(ConfigureAnnBench)

  set(oneValueArgs NAME)
  set(multiValueArgs PATH LINKS CXXFLAGS INCLUDES)

  if(NOT BUILD_CPU_ONLY)
    set(GPU_BUILD ON)
  endif()

  # NOTE(review): `${options}` is never set in this function, so no flag-style
  # arguments are parsed — confirm this is intentional.
  cmake_parse_arguments(
    ConfigureAnnBench "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}
  )

  set(BENCH_NAME ${ConfigureAnnBench_NAME}_CUVS_BENCH)

  if(CUVS_BENCH_SINGLE_EXE)
    # Plugin mode: the module is dlopen'ed by the CUVS_BENCH driver.
    add_library(${BENCH_NAME} SHARED ${ConfigureAnnBench_PATH})
    string(TOLOWER ${BENCH_NAME} BENCH_LIB_NAME)
    set_target_properties(${BENCH_NAME} PROPERTIES OUTPUT_NAME ${BENCH_LIB_NAME})
    add_dependencies(${BENCH_NAME} CUVS_BENCH)
  else()
    # Standalone mode: the source defines its own main under CUVS_BENCH_BUILD_MAIN.
    add_executable(${BENCH_NAME} ${ConfigureAnnBench_PATH})
    target_compile_definitions(${BENCH_NAME} PRIVATE CUVS_BENCH_BUILD_MAIN)
    target_link_libraries(${BENCH_NAME} PRIVATE benchmark::benchmark)
  endif()

  target_link_libraries(
    ${BENCH_NAME}
    PRIVATE raft::raft
            nlohmann_json::nlohmann_json
            ${ConfigureAnnBench_LINKS}
            Threads::Threads
            $<$<BOOL:${GPU_BUILD}>:${RAFT_CTK_MATH_DEPENDENCIES}>
            $<TARGET_NAME_IF_EXISTS:OpenMP::OpenMP_CXX>
            $<TARGET_NAME_IF_EXISTS:conda_env>
            -static-libgcc
            -static-libstdc++
            $<$<BOOL:${BUILD_CPU_ONLY}>:fmt::fmt-header-only>
            $<$<BOOL:${BUILD_CPU_ONLY}>:spdlog::spdlog_header_only>
  )

  set_target_properties(
    ${BENCH_NAME}
    PROPERTIES # set target compile options
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
               BUILD_RPATH "\$ORIGIN"
               INSTALL_RPATH "\$ORIGIN"
  )

  set(${ConfigureAnnBench_CXXFLAGS} ${RAFT_CXX_FLAGS} ${ConfigureAnnBench_CXXFLAGS})

  target_compile_options(
    ${BENCH_NAME} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${ConfigureAnnBench_CXXFLAGS}>"
                          "$<$<COMPILE_LANGUAGE:CUDA>:${RAFT_CUDA_FLAGS}>"
  )

  # Expose the per-algorithm feature macro so the sources can gate their code.
  if(CUVS_BENCH_USE_${ConfigureAnnBench_NAME})
    target_compile_definitions(
      ${BENCH_NAME}
      PUBLIC CUVS_BENCH_USE_${ConfigureAnnBench_NAME}=CUVS_BENCH_USE_${ConfigureAnnBench_NAME}
    )
  endif()

  target_include_directories(
    ${BENCH_NAME}
    PUBLIC "$<BUILD_INTERFACE:${RAFT_SOURCE_DIR}/include>"
    PRIVATE ${ConfigureAnnBench_INCLUDES}
  )

  install(
    TARGETS ${BENCH_NAME}
    COMPONENT ann_bench
    DESTINATION bin/ann
  )
endfunction()
# ##################################################################################################
# * Configure tests-------------------------------------------------------------
# Each block below registers one benchmark target when its option is enabled.
if(CUVS_BENCH_USE_HNSWLIB)
  ConfigureAnnBench(
    NAME HNSWLIB PATH bench/ann/src/hnswlib/hnswlib_benchmark.cpp INCLUDES
    ${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src/hnswlib CXXFLAGS "${HNSW_CXX_FLAGS}"
  )
endif()

if(CUVS_BENCH_USE_RAFT_IVF_PQ)
  ConfigureAnnBench(
    NAME RAFT_IVF_PQ PATH bench/ann/src/raft/raft_benchmark.cu
    $<$<BOOL:${CUVS_BENCH_USE_RAFT_IVF_PQ}>:bench/ann/src/raft/raft_ivf_pq.cu> LINKS cuvs
  )
endif()

if(CUVS_BENCH_USE_RAFT_IVF_FLAT)
  ConfigureAnnBench(
    NAME RAFT_IVF_FLAT PATH bench/ann/src/raft/raft_benchmark.cu
    $<$<BOOL:${CUVS_BENCH_USE_RAFT_IVF_FLAT}>:bench/ann/src/raft/raft_ivf_flat.cu> LINKS cuvs
  )
endif()

if(CUVS_BENCH_USE_RAFT_BRUTE_FORCE)
  ConfigureAnnBench(NAME RAFT_BRUTE_FORCE PATH bench/ann/src/raft/raft_benchmark.cu LINKS cuvs)
endif()

if(CUVS_BENCH_USE_RAFT_CAGRA)
  ConfigureAnnBench(
    NAME RAFT_CAGRA PATH bench/ann/src/raft/raft_benchmark.cu
    $<$<BOOL:${CUVS_BENCH_USE_RAFT_CAGRA}>:bench/ann/src/raft/raft_cagra.cu> LINKS cuvs
  )
endif()

if(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB)
  ConfigureAnnBench(
    NAME RAFT_CAGRA_HNSWLIB PATH bench/ann/src/raft/raft_cagra_hnswlib.cu INCLUDES
    ${CMAKE_CURRENT_BINARY_DIR}/_deps/hnswlib-src/hnswlib LINKS cuvs CXXFLAGS "${HNSW_CXX_FLAGS}"
  )
endif()

# Prefer the AVX2-optimized faiss target when the faiss build provides it.
set(RAFT_FAISS_TARGETS faiss::faiss)
if(TARGET faiss::faiss_avx2)
  set(RAFT_FAISS_TARGETS faiss::faiss_avx2)
endif()

message("RAFT_FAISS_TARGETS: ${RAFT_FAISS_TARGETS}")
message("CUDAToolkit_LIBRARY_DIR: ${CUDAToolkit_LIBRARY_DIR}")

if(CUVS_BENCH_USE_FAISS_CPU_FLAT)
  ConfigureAnnBench(
    NAME FAISS_CPU_FLAT PATH bench/ann/src/faiss/faiss_cpu_benchmark.cpp LINKS
    ${RAFT_FAISS_TARGETS}
  )
endif()

if(CUVS_BENCH_USE_FAISS_CPU_IVF_FLAT)
  ConfigureAnnBench(
    NAME FAISS_CPU_IVF_FLAT PATH bench/ann/src/faiss/faiss_cpu_benchmark.cpp LINKS
    ${RAFT_FAISS_TARGETS}
  )
endif()

if(CUVS_BENCH_USE_FAISS_CPU_IVF_PQ)
  ConfigureAnnBench(
    NAME FAISS_CPU_IVF_PQ PATH bench/ann/src/faiss/faiss_cpu_benchmark.cpp LINKS
    ${RAFT_FAISS_TARGETS}
  )
endif()

if(CUVS_BENCH_USE_FAISS_GPU_IVF_FLAT)
  ConfigureAnnBench(
    NAME FAISS_GPU_IVF_FLAT PATH bench/ann/src/faiss/faiss_gpu_benchmark.cu LINKS
    ${RAFT_FAISS_TARGETS}
  )
endif()

if(CUVS_BENCH_USE_FAISS_GPU_IVF_PQ)
  ConfigureAnnBench(
    NAME FAISS_GPU_IVF_PQ PATH bench/ann/src/faiss/faiss_gpu_benchmark.cu LINKS
    ${RAFT_FAISS_TARGETS}
  )
endif()

if(CUVS_BENCH_USE_FAISS_GPU_FLAT)
  ConfigureAnnBench(
    NAME FAISS_GPU_FLAT PATH bench/ann/src/faiss/faiss_gpu_benchmark.cu LINKS ${RAFT_FAISS_TARGETS}
  )
endif()

if(CUVS_BENCH_USE_GGNN)
  # ggnn needs glog for logging; fetched lazily here.
  include(cmake/thirdparty/get_glog.cmake)
  ConfigureAnnBench(
    NAME GGNN PATH bench/ann/src/ggnn/ggnn_benchmark.cu INCLUDES
    ${CMAKE_CURRENT_BINARY_DIR}/_deps/ggnn-src/include LINKS glog::glog
  )
endif()
# ##################################################################################################
# * Dynamically-loading CUVS_BENCH executable
# -------------------------------------------------------
if(CUVS_BENCH_SINGLE_EXE)
  # Driver executable that dlopen()s the per-algorithm shared modules.
  add_executable(CUVS_BENCH bench/ann/src/common/benchmark.cpp)

  # Build and link static version of the GBench to keep CUVS_BENCH self-contained.
  get_target_property(TMP_PROP benchmark::benchmark SOURCES)
  add_library(benchmark_static STATIC ${TMP_PROP})
  get_target_property(TMP_PROP benchmark::benchmark INCLUDE_DIRECTORIES)
  target_include_directories(benchmark_static PUBLIC ${TMP_PROP})
  get_target_property(TMP_PROP benchmark::benchmark LINK_LIBRARIES)
  target_link_libraries(benchmark_static PUBLIC ${TMP_PROP})

  target_include_directories(CUVS_BENCH PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})

  # `dl` is required for loading the plugin modules at runtime.
  target_link_libraries(
    CUVS_BENCH PRIVATE nlohmann_json::nlohmann_json benchmark_static dl -static-libgcc
                       -static-libstdc++ CUDA::nvtx3
  )
  set_target_properties(
    CUVS_BENCH
    PROPERTIES # set target compile options
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
               BUILD_RPATH "\$ORIGIN"
               INSTALL_RPATH "\$ORIGIN"
  )

  # Disable NVTX when the nvtx3 headers are missing
  set(_CMAKE_REQUIRED_INCLUDES_ORIG ${CMAKE_REQUIRED_INCLUDES})
  get_target_property(CMAKE_REQUIRED_INCLUDES CUVS_BENCH INCLUDE_DIRECTORIES)
  CHECK_INCLUDE_FILE_CXX(nvtx3/nvToolsExt.h NVTX3_HEADERS_FOUND)
  set(CMAKE_REQUIRED_INCLUDES ${_CMAKE_REQUIRED_INCLUDES_ORIG})

  # NOTE(review): the quoted CUVS_BENCH_LINK_CUDART value below spans two
  # lines; confirm against upstream that the embedded line break is intended.
  target_compile_definitions(
    CUVS_BENCH
    PRIVATE
      $<$<BOOL:${CUDAToolkit_FOUND}>:CUVS_BENCH_LINK_CUDART="libcudart.so.${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}.${CUDAToolkit_VERSION_PATCH}
      ">
      $<$<BOOL:${NVTX3_HEADERS_FOUND}>:CUVS_BENCH_NVTX3_HEADERS_FOUND>
  )

  # Export symbols so the dlopen'ed plugin modules can resolve driver symbols.
  target_link_options(CUVS_BENCH PRIVATE -export-dynamic)

  install(
    TARGETS CUVS_BENCH
    COMPONENT ann_bench
    DESTINATION bin/ann
    EXCLUDE_FROM_ALL
  )
endif()
| 0 |
rapidsai_public_repos/cuvs/cpp/bench | rapidsai_public_repos/cuvs/cpp/bench/ann/README.md | # RAFT CUDA ANN Benchmarks
Please see the [ANN Benchmarks](https://docs.rapids.ai/api/raft/stable/cuda_ann_benchmarks.html) section of the RAFT documentation for instructions on building and using the ANN benchmarks. | 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_ivf_flat_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/ivf_flat.cuh>
#include <cuvs/neighbors/ivf_flat_types.hpp>
#include <fstream>
#include <iostream>
#include <memory>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "../common/ann_types.hpp"
#include "raft_ann_bench_utils.h"
#include <raft/util/cudart_utils.hpp>
namespace cuvs::bench {
// Benchmark wrapper around cuVS IVF-Flat: trains/loads an index and answers
// batched k-NN queries. Dataset and queries are expected in device memory
// (see get_preference()).
template <typename T, typename IdxT>
class RaftIvfFlatGpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;

  // Search-time knobs (n_probes, ...) forwarded to ivf_flat::search.
  struct SearchParam : public AnnSearchParam {
    cuvs::neighbors::ivf_flat::search_params ivf_flat_params;
  };

  using BuildParam = cuvs::neighbors::ivf_flat::index_params;

  RaftIvfFlatGpu(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim), index_params_(param), dimension_(dim)
  {
    index_params_.metric = parse_metric_type(metric);
    // Avoid over-allocating per-list storage up front.
    index_params_.conservative_memory_allocation = true;
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
  }

  ~RaftIvfFlatGpu() noexcept {}

  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;

  void set_search_param(const AnnSearchParam& param) override;

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Device;
    property.query_memory_type   = MemoryType::Device;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;

 private:
  raft::device_resources handle_;
  BuildParam index_params_;
  cuvs::neighbors::ivf_flat::search_params search_params_;
  // Empty until build() or load() has been called.
  std::optional<cuvs::neighbors::ivf_flat::index<T, IdxT>> index_;
  int device_;     // CUDA device id captured at construction
  int dimension_;  // feature dimensionality of the dataset
};
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t)
{
  // Train and populate the IVF-Flat index from the raw device pointer. The
  // stream parameter is unused; work runs on the handle's stream.
  auto built =
    cuvs::neighbors::ivf_flat::build(handle_, index_params_, dataset, IdxT(nrow), dimension_);
  index_.emplace(std::move(built));
}
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::set_search_param(const AnnSearchParam& param)
{
  // Downcast to this wrapper's concrete type; throws std::bad_cast on mismatch.
  const auto& sp = dynamic_cast<const SearchParam&>(param);
  search_params_ = sp.ivf_flat_params;
  // Probing more lists than exist would be a configuration error.
  assert(search_params_.n_probes <= index_params_.n_lists);
}
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::save(const std::string& file) const
{
  // Persist the trained index so a later run can load() it instead of rebuilding.
  cuvs::neighbors::ivf_flat::serialize(handle_, file, *index_);
}
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::load(const std::string& file)
{
  // Replace any existing index with the one deserialized from disk.
  index_ = cuvs::neighbors::ivf_flat::deserialize<T, IdxT>(handle_, file);
}
template <typename T, typename IdxT>
void RaftIvfFlatGpu<T, IdxT>::search(
  const T* queries, int batch_size, int k, size_t* neighbors, float* distances, cudaStream_t) const
{
  // The caller-facing neighbor type is size_t; reinterpreting the output buffer
  // as IdxT* is only valid when the two types have identical size.
  static_assert(sizeof(size_t) == sizeof(IdxT), "IdxT is incompatible with size_t");
  cuvs::neighbors::ivf_flat::search(
    handle_, search_params_, *index_, queries, batch_size, k, (IdxT*)neighbors, distances);
  // Fix: `resource` is not a namespace reachable from cuvs::bench; fully
  // qualify raft::resource so the stream synchronization resolves.
  raft::resource::sync_stream(handle_);
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_cagra_hnswlib.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common/ann_types.hpp"
#include "raft_ann_bench_param_parser.h"
#include "raft_cagra_hnswlib_wrapper.h"
#include <rmm/mr/device/pool_memory_resource.hpp>
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::RaftCagraHnswlib<T, IdxT>::SearchParam& param)
{
  // "ef" is required; "numThreads" optionally overrides the search thread count.
  param.ef = conf.at("ef");
  if (auto it = conf.find("numThreads"); it != conf.end()) { param.num_threads = *it; }
}
// Factory: instantiates the requested algorithm wrapper for element type T.
// Throws std::runtime_error when `algo` is unknown or unsupported for T.
template <typename T>
std::unique_ptr<cuvs::bench::ANN<T>> create_algo(const std::string& algo,
                                                 const std::string& distance,
                                                 int dim,
                                                 const nlohmann::json& conf,
                                                 const std::vector<int>& dev_list)
{
  // Multi-GPU device lists are accepted for interface parity but unused here.
  (void)dev_list;

  const cuvs::bench::Metric metric = parse_metric(distance);
  std::unique_ptr<cuvs::bench::ANN<T>> result;

  // Only float and uint8 element types are wired up for this algorithm.
  if constexpr (std::is_same_v<T, float> || std::is_same_v<T, std::uint8_t>) {
    if (algo == "raft_cagra_hnswlib") {
      typename cuvs::bench::RaftCagraHnswlib<T, uint32_t>::BuildParam build_param;
      parse_build_param<T, uint32_t>(conf, build_param);
      result = std::make_unique<cuvs::bench::RaftCagraHnswlib<T, uint32_t>>(metric, dim, build_param);
    }
  }

  if (!result) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return result;
}
// Factory: builds the search-parameter object for the requested algorithm.
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  // Only one algorithm is registered in this translation unit.
  if (algo != "raft_cagra_hnswlib") { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  auto param =
    std::make_unique<typename cuvs::bench::RaftCagraHnswlib<T, uint32_t>::SearchParam>();
  parse_search_param<T, uint32_t>(conf, *param);
  return param;
}
} // namespace cuvs::bench
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef CUVS_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv)
{
  // Install a pooled CUDA allocator for the whole benchmark run so timed
  // sections are not dominated by cudaMalloc/cudaFree.
  rmm::mr::cuda_memory_resource cuda_mr;
  // Construct a resource that uses a coalescing best-fit pool allocator
  rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{&cuda_mr};
  rmm::mr::set_current_device_resource(
    &pool_mr);  // Updates the current device resource pointer to `pool_mr`
  // NOTE(review): `mr` is fetched but never used afterwards — presumably kept
  // for documentation/debugging; confirm before removing.
  rmm::mr::device_memory_resource* mr =
    rmm::mr::get_current_device_resource();  // Points to `pool_mr`
  return cuvs::bench::run_main(argc, argv);
}
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_cagra.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "raft_cagra_wrapper.h"
namespace cuvs::bench {
// Explicit instantiations for the element types used by the benchmark driver;
// matching `extern template` declarations live in raft_ann_bench_param_parser.h.
template class RaftCagra<uint8_t, uint32_t>;
template class RaftCagra<int8_t, uint32_t>;
template class RaftCagra<float, uint32_t>;
}  // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_cagra_hnswlib_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../hnswlib/hnswlib_wrapper.h"
#include "raft_cagra_wrapper.h"
#include <memory>
namespace cuvs::bench {
// Hybrid wrapper: builds a CAGRA graph on the GPU, serializes it in hnswlib
// format, and serves searches through a CPU-side hnswlib index.
template <typename T, typename IdxT>
class RaftCagraHnswlib : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  using BuildParam  = typename RaftCagra<T, IdxT>::BuildParam;
  using SearchParam = typename HnswLib<T>::SearchParam;

  RaftCagraHnswlib(Metric metric, int dim, const BuildParam& param, int concurrent_searches = 1)
    // Fix: keep the initializer list in member-declaration order (handle_ is
    // declared first); members are initialized in declaration order anyway, so
    // the previous ordering was misleading and triggered -Wreorder.
    : ANN<T>(metric, dim),
      handle_(cudaStreamPerThread),
      metric_(metric),
      index_params_(param),
      dimension_(dim)
  {
    // Kept for interface compatibility; this wrapper does not use it.
    (void)concurrent_searches;
  }

  ~RaftCagraHnswlib() noexcept {}

  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;

  void set_search_param(const AnnSearchParam& param) override;

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::HostMmap;
    property.query_memory_type   = MemoryType::Host;
    return property;
  }

  void save(const std::string& file) const override;
  void load(const std::string&) override;

 private:
  raft::device_resources handle_;
  Metric metric_;
  BuildParam index_params_;
  int dimension_;
  std::unique_ptr<RaftCagra<T, IdxT>> cagra_build_;   // GPU-side builder (lazy)
  std::unique_ptr<HnswLib<T>> hnswlib_search_;        // CPU-side searcher (set by load())
  // NOTE(review): appears unused in this wrapper — confirm before removing.
  Objective metric_objective_;
};
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  // Lazily create the CAGRA builder on first use, then delegate to it.
  if (!cagra_build_) {
    cagra_build_ = std::make_unique<RaftCagra<T, IdxT>>(metric_, dimension_, index_params_);
  }
  cagra_build_->build(dataset, nrow, stream);
}
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::set_search_param(const AnnSearchParam& param_)
{
  // Delegates to the hnswlib searcher. Assumes load() has been called first so
  // hnswlib_search_ is non-null — NOTE(review): confirm callers guarantee this,
  // otherwise this dereferences a null unique_ptr.
  hnswlib_search_->set_search_param(param_);
}
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::save(const std::string& file) const
{
  // Serialize the CAGRA graph in hnswlib's on-disk format so load() can hand
  // it straight to the hnswlib searcher.
  cagra_build_->save_to_hnswlib(file);
}
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::load(const std::string& file)
{
  // The build parameters are placeholders: the index is deserialized from
  // `file`, never built through hnswlib itself.
  typename HnswLib<T>::BuildParam dummy_param;
  dummy_param.M               = 50;
  dummy_param.ef_construction = 100;

  if (!hnswlib_search_) {
    hnswlib_search_ = std::make_unique<HnswLib<T>>(metric_, dimension_, dummy_param);
  }
  hnswlib_search_->load(file);
  // Restrict search to the base layer (the CAGRA-exported graph).
  hnswlib_search_->set_base_layer_only();
}
template <typename T, typename IdxT>
void RaftCagraHnswlib<T, IdxT>::search(
  const T* queries, int batch_size, int k, size_t* neighbors, float* distances, cudaStream_t) const
{
  // Pure CPU search via hnswlib; the CUDA stream argument is ignored.
  hnswlib_search_->search(queries, batch_size, k, neighbors, distances);
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_ivf_pq.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "raft_ivf_pq_wrapper.h"
namespace cuvs::bench {
// Explicit instantiations for the element types used by the benchmark driver;
// matching `extern template` declarations live in raft_ann_bench_param_parser.h.
template class RaftIvfPQ<float, int64_t>;
template class RaftIvfPQ<uint8_t, int64_t>;
template class RaftIvfPQ<int8_t, int64_t>;
}  // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_ann_bench_param_parser.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
#undef WARP_SIZE
#ifdef CUVS_BENCH_USE_RAFT_BFKNN
#include "raft_wrapper.h"
#endif
#ifdef CUVS_BENCH_USE_RAFT_IVF_FLAT
#include "raft_ivf_flat_wrapper.h"
extern template class cuvs::bench::RaftIvfFlatGpu<float, int64_t>;
extern template class cuvs::bench::RaftIvfFlatGpu<uint8_t, int64_t>;
extern template class cuvs::bench::RaftIvfFlatGpu<int8_t, int64_t>;
#endif
#if defined(CUVS_BENCH_USE_RAFT_IVF_PQ) || defined(CUVS_BENCH_USE_RAFT_CAGRA) || \
defined(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB)
#include "raft_ivf_pq_wrapper.h"
#endif
#ifdef CUVS_BENCH_USE_RAFT_IVF_PQ
extern template class cuvs::bench::RaftIvfPQ<float, int64_t>;
extern template class cuvs::bench::RaftIvfPQ<uint8_t, int64_t>;
extern template class cuvs::bench::RaftIvfPQ<int8_t, int64_t>;
#endif
#if defined(CUVS_BENCH_USE_RAFT_CAGRA) || defined(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB)
#include "raft_cagra_wrapper.h"
#endif
#ifdef CUVS_BENCH_USE_RAFT_CAGRA
extern template class cuvs::bench::RaftCagra<float, uint32_t>;
extern template class cuvs::bench::RaftCagra<uint8_t, uint32_t>;
extern template class cuvs::bench::RaftCagra<int8_t, uint32_t>;
#endif
#ifdef CUVS_BENCH_USE_RAFT_IVF_FLAT
// Translate the JSON build config into IVF-Flat index parameters.
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::RaftIvfFlatGpu<T, IdxT>::BuildParam& param)
{
  // "nlist" is required; the remaining knobs keep the library defaults.
  param.n_lists = conf.at("nlist");
  if (auto it = conf.find("niter"); it != conf.end()) { param.kmeans_n_iters = *it; }
  if (auto it = conf.find("ratio"); it != conf.end()) {
    // "ratio" is the inverse of the k-means trainset fraction.
    param.kmeans_trainset_fraction = 1.0 / it->get<double>();
  }
}
// Translate the JSON search config into IVF-Flat search parameters.
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::RaftIvfFlatGpu<T, IdxT>::SearchParam& param)
{
  // "nprobe" is mandatory: the number of IVF lists scanned per query.
  param.ivf_flat_params.n_probes = conf.at("nprobe");
}
#endif
#if defined(CUVS_BENCH_USE_RAFT_IVF_PQ) || defined(CUVS_BENCH_USE_RAFT_CAGRA) || \
defined(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB)
// Translate the JSON build config into IVF-PQ index parameters.
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::RaftIvfPQ<T, IdxT>::BuildParam& param)
{
  // Every knob is optional; unset fields keep the library defaults.
  if (auto it = conf.find("nlist"); it != conf.end()) { param.n_lists = *it; }
  if (auto it = conf.find("niter"); it != conf.end()) { param.kmeans_n_iters = *it; }
  if (auto it = conf.find("ratio"); it != conf.end()) {
    // "ratio" is the inverse of the k-means trainset fraction.
    param.kmeans_trainset_fraction = 1.0 / it->get<double>();
  }
  if (auto it = conf.find("pq_bits"); it != conf.end()) { param.pq_bits = *it; }
  if (auto it = conf.find("pq_dim"); it != conf.end()) { param.pq_dim = *it; }
  if (auto it = conf.find("codebook_kind"); it != conf.end()) {
    const std::string kind = *it;
    if (kind == "cluster") {
      param.codebook_kind = cuvs::neighbors::ivf_pq::codebook_gen::PER_CLUSTER;
    } else if (kind == "subspace") {
      param.codebook_kind = cuvs::neighbors::ivf_pq::codebook_gen::PER_SUBSPACE;
    } else {
      throw std::runtime_error("codebook_kind: '" + kind +
                               "', should be either 'cluster' or 'subspace'");
    }
  }
}
// Translate the JSON search config into IVF-PQ search parameters.
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::RaftIvfPQ<T, IdxT>::SearchParam& param)
{
  // Number of IVF lists probed per query.
  if (conf.contains("nprobe")) { param.pq_param.n_probes = conf.at("nprobe"); }
  // Numeric type used for distance accumulation inside the search kernel.
  if (conf.contains("internalDistanceDtype")) {
    std::string type = conf.at("internalDistanceDtype");
    if (type == "float") {
      param.pq_param.internal_distance_dtype = CUDA_R_32F;
    } else if (type == "half") {
      param.pq_param.internal_distance_dtype = CUDA_R_16F;
    } else {
      throw std::runtime_error("internalDistanceDtype: '" + type +
                               "', should be either 'float' or 'half'");
    }
  } else {
    // set half as default type
    param.pq_param.internal_distance_dtype = CUDA_R_16F;
  }

  // Element type of the shared-memory lookup table; 'fp8' is the most compact.
  if (conf.contains("smemLutDtype")) {
    std::string type = conf.at("smemLutDtype");
    if (type == "float") {
      param.pq_param.lut_dtype = CUDA_R_32F;
    } else if (type == "half") {
      param.pq_param.lut_dtype = CUDA_R_16F;
    } else if (type == "fp8") {
      param.pq_param.lut_dtype = CUDA_R_8U;
    } else {
      throw std::runtime_error("smemLutDtype: '" + type +
                               "', should be either 'float', 'half' or 'fp8'");
    }
  } else {
    // set half as default
    param.pq_param.lut_dtype = CUDA_R_16F;
  }
  // Optional exact re-ranking: candidates are over-fetched by this factor.
  if (conf.contains("refine_ratio")) {
    param.refine_ratio = conf.at("refine_ratio");
    if (param.refine_ratio < 1.0f) { throw std::runtime_error("refine_ratio should be >= 1.0"); }
  }
}
#endif
#if defined(CUVS_BENCH_USE_RAFT_CAGRA) || defined(CUVS_BENCH_USE_RAFT_CAGRA_HNSWLIB)
// Translate the JSON build config into NN-Descent index parameters.
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       cuvs::neighbors::experimental::nn_descent::index_params& param)
{
  if (auto it = conf.find("graph_degree"); it != conf.end()) { param.graph_degree = *it; }
  if (auto it = conf.find("intermediate_graph_degree"); it != conf.end()) {
    param.intermediate_graph_degree = *it;
  }
  // "niter" is accepted as shorthand; an explicit "max_iterations" wins.
  if (auto it = conf.find("niter"); it != conf.end()) { param.max_iterations = *it; }
  if (auto it = conf.find("max_iterations"); it != conf.end()) { param.max_iterations = *it; }
  if (auto it = conf.find("termination_threshold"); it != conf.end()) {
    param.termination_threshold = *it;
  }
}
// Collects every top-level key of `conf` that starts with `prefix` into a new
// JSON object, optionally stripping the prefix from the copied keys.
nlohmann::json collect_conf_with_prefix(const nlohmann::json& conf,
                                        const std::string& prefix,
                                        bool remove_prefix = true)
{
  nlohmann::json out;
  for (const auto& item : conf.items()) {
    const std::string& key = item.key();
    // rfind(prefix, 0) == 0 is true exactly when `key` begins with `prefix`.
    if (key.rfind(prefix, 0) != 0) { continue; }
    out[remove_prefix ? key.substr(prefix.size()) : key] = item.value();
  }
  return out;
}
// Translate the JSON build config into CAGRA build parameters, including the
// nested IVF-PQ and NN-Descent sub-configurations (keys prefixed with
// "ivf_pq_build_", "ivf_pq_search_" and "nn_descent_").
template <typename T, typename IdxT>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::RaftCagra<T, IdxT>::BuildParam& param)
{
  if (conf.contains("graph_degree")) {
    param.cagra_params.graph_degree = conf.at("graph_degree");
    // Default the intermediate degree to 2x unless explicitly overridden below.
    param.cagra_params.intermediate_graph_degree = param.cagra_params.graph_degree * 2;
  }
  if (conf.contains("intermediate_graph_degree")) {
    param.cagra_params.intermediate_graph_degree = conf.at("intermediate_graph_degree");
  }
  // NOTE(review): an unrecognized graph_build_algo value is silently ignored
  // here (library default is kept) — confirm this is intended.
  if (conf.contains("graph_build_algo")) {
    if (conf.at("graph_build_algo") == "IVF_PQ") {
      param.cagra_params.build_algo = cuvs::neighbors::cagra::graph_build_algo::IVF_PQ;
    } else if (conf.at("graph_build_algo") == "NN_DESCENT") {
      param.cagra_params.build_algo = cuvs::neighbors::cagra::graph_build_algo::NN_DESCENT;
    }
  }
  // Nested IVF-PQ build configuration ("ivf_pq_build_*" keys).
  nlohmann::json ivf_pq_build_conf = collect_conf_with_prefix(conf, "ivf_pq_build_");
  if (!ivf_pq_build_conf.empty()) {
    cuvs::neighbors::ivf_pq::index_params bparam;
    parse_build_param<T, IdxT>(ivf_pq_build_conf, bparam);
    param.ivf_pq_build_params = bparam;
  }
  // Nested IVF-PQ search configuration ("ivf_pq_search_*" keys).
  nlohmann::json ivf_pq_search_conf = collect_conf_with_prefix(conf, "ivf_pq_search_");
  if (!ivf_pq_search_conf.empty()) {
    typename cuvs::bench::RaftIvfPQ<T, IdxT>::SearchParam sparam;
    parse_search_param<T, IdxT>(ivf_pq_search_conf, sparam);
    param.ivf_pq_search_params = sparam.pq_param;
    param.ivf_pq_refine_rate   = sparam.refine_ratio;
  }
  // Nested NN-Descent configuration ("nn_descent_*" keys).
  nlohmann::json nn_descent_conf = collect_conf_with_prefix(conf, "nn_descent_");
  if (!nn_descent_conf.empty()) {
    cuvs::neighbors::experimental::nn_descent::index_params nn_param;
    nn_param.intermediate_graph_degree = 1.5 * param.cagra_params.intermediate_graph_degree;
    parse_build_param<T, IdxT>(nn_descent_conf, nn_param);
    // NN-Descent's output degree must match CAGRA's intermediate degree.
    if (nn_param.graph_degree != param.cagra_params.intermediate_graph_degree) {
      nn_param.graph_degree = param.cagra_params.intermediate_graph_degree;
    }
    param.nn_descent_params = nn_param;
  }
}
/**
 * Map a memory-type string from the benchmark config to an AllocatorType.
 * Accepted values: "device", "host_pinned", "host_huge_page"; anything else
 * throws via the benchmark's THROW macro.
 */
cuvs::bench::AllocatorType parse_allocator(std::string mem_type)
{
  if (mem_type == "device") {
    return cuvs::bench::AllocatorType::Device;
  } else if (mem_type == "host_pinned") {
    return cuvs::bench::AllocatorType::HostPinned;
  } else if (mem_type == "host_huge_page") {
    return cuvs::bench::AllocatorType::HostHugePage;
  }
  // Fixed: the error message previously lacked the closing ']' of the list.
  THROW(
    "Invalid value for memory type %s, must be one of [\"device\", \"host_pinned\", "
    "\"host_huge_page\"]",
    mem_type.c_str());
}
/**
 * Fill CAGRA search parameters from a JSON config.
 * Throws (via THROW) on an unrecognized "algo" value; memory-type strings are
 * delegated to parse_allocator.
 */
template <typename T, typename IdxT>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::RaftCagra<T, IdxT>::SearchParam& param)
{
  using search_algo = cuvs::neighbors::experimental::cagra::search_algo;
  if (conf.contains("itopk")) { param.p.itopk_size = conf.at("itopk"); }
  if (conf.contains("search_width")) { param.p.search_width = conf.at("search_width"); }
  if (conf.contains("max_iterations")) { param.p.max_iterations = conf.at("max_iterations"); }
  if (conf.contains("algo")) {
    const std::string algo = conf.at("algo");
    if (algo == "single_cta") {
      param.p.algo = search_algo::SINGLE_CTA;
    } else if (algo == "multi_cta") {
      param.p.algo = search_algo::MULTI_CTA;
    } else if (algo == "multi_kernel") {
      param.p.algo = search_algo::MULTI_KERNEL;
    } else if (algo == "auto") {
      param.p.algo = search_algo::AUTO;
    } else {
      THROW("Invalid value for algo: %s", algo.c_str());
    }
  }
  if (conf.contains("graph_memory_type")) {
    param.graph_mem = parse_allocator(conf.at("graph_memory_type"));
  }
  if (conf.contains("internal_dataset_memory_type")) {
    param.dataset_mem = parse_allocator(conf.at("internal_dataset_memory_type"));
  }
}
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_ivf_flat.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "raft_ivf_flat_wrapper.h"
namespace cuvs::bench {
// Explicit instantiations of the IVF-Flat wrapper for the element types the
// benchmark supports (float / uint8 / int8, all with 64-bit indices).
template class RaftIvfFlatGpu<float, int64_t>;
template class RaftIvfFlatGpu<uint8_t, int64_t>;
template class RaftIvfFlatGpu<int8_t, int64_t>;
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_ann_bench_utils.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cuvs/distance/distance_types.hpp>
#include <fstream>
#include <iostream>
#include <memory>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace cuvs::bench {
/**
 * Translate a benchmark Metric into the corresponding RAFT/cuVS DistanceType.
 * Throws std::runtime_error for metrics the wrappers do not support.
 */
inline cuvs::distance::DistanceType parse_metric_type(cuvs::bench::Metric metric)
{
  switch (metric) {
    case cuvs::bench::Metric::kInnerProduct: return cuvs::distance::DistanceType::InnerProduct;
    // Even for L2 expanded RAFT IVF Flat uses unexpanded formula
    case cuvs::bench::Metric::kEuclidean: return cuvs::distance::DistanceType::L2Expanded;
    default: throw std::runtime_error("raft supports only metric type of inner product and L2");
  }
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_benchmark.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common/ann_types.hpp"
#include "raft_ann_bench_param_parser.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <raft/core/logger.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
/**
 * Factory: construct the requested ANN algorithm wrapper.
 *
 * @param algo algorithm name from the benchmark config (e.g. "raft_ivf_pq")
 * @param distance metric name, translated via parse_metric
 * @param dim dataset dimensionality
 * @param conf build-parameter JSON for the chosen algorithm
 * @param dev_list unused here (multi-GPU not supported by these wrappers)
 * @throws std::runtime_error if `algo` matches no compiled-in wrapper
 */
template <typename T>
std::unique_ptr<cuvs::bench::ANN<T>> create_algo(const std::string& algo,
                                                 const std::string& distance,
                                                 int dim,
                                                 const nlohmann::json& conf,
                                                 const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;
  cuvs::bench::Metric metric = parse_metric(distance);
  std::unique_ptr<cuvs::bench::ANN<T>> ann;
  // Brute-force KNN is only instantiated for float.
  if constexpr (std::is_same_v<T, float>) {
#ifdef CUVS_BENCH_USE_RAFT_BFKNN
    if (algo == "raft_bfknn") { ann = std::make_unique<cuvs::bench::RaftGpu<T>>(metric, dim); }
#endif
  }
  // Intentionally empty: no uint8_t-only algorithms at present.
  if constexpr (std::is_same_v<T, uint8_t>) {}
#ifdef CUVS_BENCH_USE_RAFT_IVF_FLAT
  if (algo == "raft_ivf_flat") {
    typename cuvs::bench::RaftIvfFlatGpu<T, int64_t>::BuildParam param;
    parse_build_param<T, int64_t>(conf, param);
    ann = std::make_unique<cuvs::bench::RaftIvfFlatGpu<T, int64_t>>(metric, dim, param);
  }
#endif
#ifdef CUVS_BENCH_USE_RAFT_IVF_PQ
  if (algo == "raft_ivf_pq") {
    typename cuvs::bench::RaftIvfPQ<T, int64_t>::BuildParam param;
    parse_build_param<T, int64_t>(conf, param);
    ann = std::make_unique<cuvs::bench::RaftIvfPQ<T, int64_t>>(metric, dim, param);
  }
#endif
#ifdef CUVS_BENCH_USE_RAFT_CAGRA
  if (algo == "raft_cagra") {
    // CAGRA uses 32-bit graph indices.
    typename cuvs::bench::RaftCagra<T, uint32_t>::BuildParam param;
    parse_build_param<T, uint32_t>(conf, param);
    ann = std::make_unique<cuvs::bench::RaftCagra<T, uint32_t>>(metric, dim, param);
  }
#endif
  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return ann;
}
/**
 * Factory: construct (and parse) the search-parameter object for `algo`.
 * @throws std::runtime_error if `algo` matches no compiled-in wrapper.
 */
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
#ifdef CUVS_BENCH_USE_RAFT_BFKNN
  // NOTE(review): create_algo() matches "raft_bfknn" but this matches
  // "raft_brute_force" — confirm which name the configs use; with this
  // mismatch one of the two lookups can never succeed for the same config.
  if (algo == "raft_brute_force") {
    auto param = std::make_unique<typename cuvs::bench::ANN<T>::AnnSearchParam>();
    return param;
  }
#endif
#ifdef CUVS_BENCH_USE_RAFT_IVF_FLAT
  if (algo == "raft_ivf_flat") {
    auto param = std::make_unique<typename cuvs::bench::RaftIvfFlatGpu<T, int64_t>::SearchParam>();
    parse_search_param<T, int64_t>(conf, *param);
    return param;
  }
#endif
#ifdef CUVS_BENCH_USE_RAFT_IVF_PQ
  if (algo == "raft_ivf_pq") {
    auto param = std::make_unique<typename cuvs::bench::RaftIvfPQ<T, int64_t>::SearchParam>();
    parse_search_param<T, int64_t>(conf, *param);
    return param;
  }
#endif
#ifdef CUVS_BENCH_USE_RAFT_CAGRA
  if (algo == "raft_cagra") {
    auto param = std::make_unique<typename cuvs::bench::RaftCagra<T, uint32_t>::SearchParam>();
    parse_search_param<T, uint32_t>(conf, *param);
    return param;
  }
#endif
  // else
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
}; // namespace cuvs::bench
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef CUVS_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv)
{
  // Put a coalescing best-fit pool allocator on top of the plain CUDA
  // resource so the many small device allocations made while benchmarking
  // are cheap, and route all RMM allocations through it for the process
  // lifetime. (Removed: a dead local that merely re-read the resource.)
  rmm::mr::cuda_memory_resource cuda_mr;
  rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> pool_mr{&cuda_mr};
  rmm::mr::set_current_device_resource(&pool_mr);
  return cuvs::bench::run_main(argc, argv);
}
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_ivf_pq_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/ivf_pq_types.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft_runtime/neighbors/ivf_pq.hpp>
#include <raft_runtime/neighbors/refine.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <type_traits>
#include "../common/ann_types.hpp"
#include "raft_ann_bench_utils.h"
#include <raft/util/cudart_utils.hpp>
namespace cuvs::bench {
// Benchmark wrapper around RAFT's IVF-PQ index with optional re-ranking
// ("refinement") of the compressed-domain candidates against the raw dataset.
template <typename T, typename IdxT>
class RaftIvfPQ : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  using ANN<T>::dim_;
  // Search-time parameters; refinement is active when refine_ratio > 1.
  struct SearchParam : public AnnSearchParam {
    cuvs::neighbors::ivf_pq::search_params pq_param;
    float refine_ratio = 1.0f;
    // The raw dataset is only needed when candidates are re-ranked.
    auto needs_dataset() const -> bool override { return refine_ratio > 1.0f; }
  };
  using BuildParam = cuvs::neighbors::ivf_pq::index_params;
  // Forces the index metric to match the benchmark metric and creates the
  // event used to order work between the RAFT stream and the bench stream.
  RaftIvfPQ(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim), index_params_(param), dimension_(dim)
  {
    index_params_.metric = parse_metric_type(metric);
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
    RAFT_CUDA_TRY(cudaEventCreate(&sync_, cudaEventDisableTiming));
  }
  ~RaftIvfPQ() noexcept { RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(sync_)); }
  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;
  void set_search_param(const AnnSearchParam& param) override;
  void set_search_dataset(const T* dataset, size_t nrow) override;
  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;
  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type = MemoryType::Device;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;

 private:
  raft::device_resources handle_;
  // Event used to hand work off from the RAFT stream to the bench stream.
  cudaEvent_t sync_{nullptr};
  BuildParam index_params_;
  cuvs::neighbors::ivf_pq::search_params search_params_;
  std::optional<cuvs::neighbors::ivf_pq::index<IdxT>> index_;
  int device_;
  int dimension_;
  float refine_ratio_ = 1.0;
  // Non-owning view of the raw dataset; used only by the refinement step.
  // NOTE(review): assumes the underlying buffer outlives all searches.
  raft::device_matrix_view<const T, IdxT> dataset_;
  // Record an event at the current tip of the RAFT stream and make `stream`
  // wait on it (asynchronous stream-to-stream ordering).
  void stream_wait(cudaStream_t stream) const
  {
    RAFT_CUDA_TRY(cudaEventRecord(sync_, resource::get_cuda_stream(handle_)));
    RAFT_CUDA_TRY(cudaStreamWaitEvent(stream, sync_));
  }
};
// Serialize the built index to `file` (index must have been built/loaded).
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::save(const std::string& file) const
{
  raft::runtime::neighbors::ivf_pq::serialize(handle_, file, *index_);
}
// Deserialize an index from `file` into this wrapper, replacing any
// previously held index.
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::load(const std::string& file)
{
  // Build an empty index shell first, then fill it from disk and move it
  // into the member optional.
  cuvs::neighbors::ivf_pq::index<IdxT> loaded(handle_, index_params_, dimension_);
  raft::runtime::neighbors::ivf_pq::deserialize(handle_, file, &loaded);
  index_.emplace(std::move(loaded));
}
// Build the IVF-PQ index from `nrow` device-resident vectors. The build runs
// on the RAFT handle's stream; `stream` is made to wait on its completion.
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  auto dataset_v = raft::make_device_matrix_view<const T, IdxT>(dataset, IdxT(nrow), dim_);
  index_.emplace(raft::runtime::neighbors::ivf_pq::build(handle_, index_params_, dataset_v));
  stream_wait(stream);
}
// Install search parameters for subsequent search() calls.
// Throws std::bad_cast if `param` is not a RaftIvfPQ::SearchParam.
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::set_search_param(const AnnSearchParam& param)
{
  const auto& sp = dynamic_cast<const SearchParam&>(param);
  search_params_ = sp.pq_param;
  refine_ratio_  = sp.refine_ratio;
  // Probing more lists than exist makes no sense.
  assert(search_params_.n_probes <= index_params_.n_lists);
}
// Keep a non-owning view of the raw dataset for the refinement step.
// NOTE(review): `dataset` must stay alive for as long as searches run.
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::set_search_dataset(const T* dataset, size_t nrow)
{
  dataset_ = raft::make_device_matrix_view<const T, IdxT>(dataset, nrow, index_->dim());
}
/**
 * Search the index for the `k` nearest neighbors of each query.
 *
 * With refine_ratio > 1, k0 = refine_ratio * k candidates are over-fetched
 * and re-ranked against the raw dataset (on device or host, depending on
 * where set_search_dataset() placed it). Results are delivered on `stream`.
 */
template <typename T, typename IdxT>
void RaftIvfPQ<T, IdxT>::search(const T* queries,
                                int batch_size,
                                int k,
                                size_t* neighbors,
                                float* distances,
                                cudaStream_t stream) const
{
  if (refine_ratio_ > 1.0f) {
    uint32_t k0 = static_cast<uint32_t>(refine_ratio_ * k);
    auto queries_v =
      raft::make_device_matrix_view<const T, IdxT>(queries, batch_size, index_->dim());
    auto distances_tmp = raft::make_device_matrix<float, IdxT>(handle_, batch_size, k0);
    auto candidates    = raft::make_device_matrix<IdxT, IdxT>(handle_, batch_size, k0);
    raft::runtime::neighbors::ivf_pq::search(
      handle_, search_params_, *index_, queries_v, candidates.view(), distances_tmp.view());
    if (raft::get_device_for_address(dataset_.data_handle()) >= 0) {
      // Dataset is device-resident: refine entirely on the GPU.
      auto queries_v =
        raft::make_device_matrix_view<const T, IdxT>(queries, batch_size, index_->dim());
      auto neighbors_v = raft::make_device_matrix_view<IdxT, IdxT>((IdxT*)neighbors, batch_size, k);
      auto distances_v = raft::make_device_matrix_view<float, IdxT>(distances, batch_size, k);
      raft::runtime::neighbors::refine(handle_,
                                       dataset_,
                                       queries_v,
                                       candidates.view(),
                                       neighbors_v,
                                       distances_v,
                                       index_->metric());
      stream_wait(stream);  // RAFT stream -> bench stream
    } else {
      // Dataset is on the host: stage queries and candidates to host memory,
      // refine on the CPU, then copy results back on the bench stream.
      auto queries_host    = raft::make_host_matrix<T, IdxT>(batch_size, index_->dim());
      auto candidates_host = raft::make_host_matrix<IdxT, IdxT>(batch_size, k0);
      auto neighbors_host  = raft::make_host_matrix<IdxT, IdxT>(batch_size, k);
      auto distances_host  = raft::make_host_matrix<float, IdxT>(batch_size, k);
      raft::copy(queries_host.data_handle(), queries, queries_host.size(), stream);
      raft::copy(candidates_host.data_handle(),
                 candidates.data_handle(),
                 candidates_host.size(),
                 resource::get_cuda_stream(handle_));
      auto dataset_v = raft::make_host_matrix_view<const T, IdxT>(
        dataset_.data_handle(), dataset_.extent(0), dataset_.extent(1));
      // Wait for the candidate copy on the RAFT stream AND the query copy on
      // `stream` before refining on the host.
      // Fixed: the previous code recorded `sync_` on both streams and then
      // synchronized once; re-recording an event replaces its earlier
      // capture, so only the second stream was actually awaited. Record and
      // synchronize each stream in turn instead.
      RAFT_CUDA_TRY(cudaEventRecord(sync_, resource::get_cuda_stream(handle_)));
      RAFT_CUDA_TRY(cudaEventSynchronize(sync_));
      RAFT_CUDA_TRY(cudaEventRecord(sync_, stream));
      RAFT_CUDA_TRY(cudaEventSynchronize(sync_));
      raft::runtime::neighbors::refine(handle_,
                                       dataset_v,
                                       queries_host.view(),
                                       candidates_host.view(),
                                       neighbors_host.view(),
                                       distances_host.view(),
                                       index_->metric());
      raft::copy(neighbors, (size_t*)neighbors_host.data_handle(), neighbors_host.size(), stream);
      raft::copy(distances, distances_host.data_handle(), distances_host.size(), stream);
    }
  } else {
    // No refinement: search straight into the caller's buffers.
    auto queries_v =
      raft::make_device_matrix_view<const T, IdxT>(queries, batch_size, index_->dim());
    auto neighbors_v = raft::make_device_matrix_view<IdxT, IdxT>((IdxT*)neighbors, batch_size, k);
    auto distances_v = raft::make_device_matrix_view<float, IdxT>(distances, batch_size, k);
    raft::runtime::neighbors::ivf_pq::search(
      handle_, search_params_, *index_, queries_v, neighbors_v, distances_v);
    stream_wait(stream);  // RAFT stream -> bench stream
  }
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_cagra_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/neighbors/cagra.cuh>
#include <cuvs/neighbors/cagra_serialize.cuh>
#include <cuvs/neighbors/cagra_types.hpp>
#include <cuvs/neighbors/detail/cagra/cagra_build.cuh>
#include <cuvs/neighbors/ivf_pq_types.hpp>
#include <cuvs/neighbors/nn_descent_types.hpp>
#include <fstream>
#include <iostream>
#include <memory>
#include <optional>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <stdexcept>
#include <string>
#include <type_traits>
#include "../common/ann_types.hpp"
#include "raft_ann_bench_utils.h"
#include <raft/util/cudart_utils.hpp>
#include "../common/cuda_huge_page_resource.hpp"
#include "../common/cuda_pinned_resource.hpp"
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
namespace cuvs::bench {
enum class AllocatorType { HostPinned, HostHugePage, Device };
// Benchmark wrapper around RAFT's CAGRA graph index. The graph and dataset
// can be migrated between device / pinned-host / huge-page-host memory at
// search-parameter time.
template <typename T, typename IdxT>
class RaftCagra : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  // Search-time knobs, including which memory space holds the graph/dataset.
  struct SearchParam : public AnnSearchParam {
    cuvs::neighbors::experimental::cagra::search_params p;
    AllocatorType graph_mem   = AllocatorType::Device;
    AllocatorType dataset_mem = AllocatorType::Device;
    auto needs_dataset() const -> bool override { return true; }
  };
  // Build-time parameters. The optional members configure the intermediate
  // knn-graph build (IVF-PQ or NN-descent) and stay nullopt unless the
  // benchmark config provides them.
  struct BuildParam {
    cuvs::neighbors::cagra::index_params cagra_params;
    std::optional<cuvs::neighbors::experimental::nn_descent::index_params> nn_descent_params =
      std::nullopt;
    std::optional<float> ivf_pq_refine_rate                                    = std::nullopt;
    std::optional<cuvs::neighbors::ivf_pq::index_params> ivf_pq_build_params   = std::nullopt;
    std::optional<cuvs::neighbors::ivf_pq::search_params> ivf_pq_search_params = std::nullopt;
  };
  RaftCagra(Metric metric, int dim, const BuildParam& param, int concurrent_searches = 1)
    : ANN<T>(metric, dim),
      index_params_(param),
      dimension_(dim),
      handle_(cudaStreamPerThread),
      need_dataset_update_(true),
      dataset_(make_device_matrix<T, int64_t>(handle_, 0, 0)),
      graph_(make_device_matrix<IdxT, int64_t>(handle_, 0, 0)),
      input_dataset_v_(nullptr, 0, 0),
      graph_mem_(AllocatorType::Device),
      dataset_mem_(AllocatorType::Device)
  {
    index_params_.cagra_params.metric = parse_metric_type(metric);
    // Fixed: ivf_pq_build_params defaults to std::nullopt; the previous
    // unconditional operator-> on the empty optional was undefined behavior.
    if (index_params_.ivf_pq_build_params) {
      index_params_.ivf_pq_build_params->metric = parse_metric_type(metric);
    }
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
  }
  ~RaftCagra() noexcept {}
  void build(const T* dataset, size_t nrow, cudaStream_t stream) final;
  void set_search_param(const AnnSearchParam& param) override;
  void set_search_dataset(const T* dataset, size_t nrow) override;
  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;
  // Dataset is consumed via host mmap; queries are expected on the device.
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::HostMmap;
    property.query_memory_type   = MemoryType::Device;
    return property;
  }
  void save(const std::string& file) const override;
  void load(const std::string&) override;
  void save_to_hnswlib(const std::string& file) const;

 private:
  // Map an AllocatorType to the memory resource used for (re)allocations.
  inline rmm::mr::device_memory_resource* get_mr(AllocatorType mem_type)
  {
    switch (mem_type) {
      case (AllocatorType::HostPinned): return &mr_pinned_;
      case (AllocatorType::HostHugePage): return &mr_huge_page_;
      default: return rmm::mr::get_current_device_resource();
    }
  }
  raft ::mr::cuda_pinned_resource mr_pinned_;
  raft ::mr::cuda_huge_page_resource mr_huge_page_;
  raft::device_resources handle_;
  AllocatorType graph_mem_;
  AllocatorType dataset_mem_;
  BuildParam index_params_;
  bool need_dataset_update_;
  cuvs::neighbors::cagra::search_params search_params_;
  std::optional<cuvs::neighbors::cagra::index<T, IdxT>> index_;
  int device_;
  int dimension_;
  // Owning copies kept alive because the index only stores views of them.
  raft::device_matrix<IdxT, int64_t, row_major> graph_;
  raft::device_matrix<T, int64_t, row_major> dataset_;
  raft::device_matrix_view<const T, int64_t, row_major> input_dataset_v_;
};
// Build the CAGRA index from `nrow` host-resident vectors, delegating to the
// detail builder so the optional NN-descent / IVF-PQ sub-parameters can be
// forwarded. The bench stream argument is unused (build syncs internally).
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::build(const T* dataset, size_t nrow, cudaStream_t)
{
  auto dataset_view =
    raft::make_host_matrix_view<const T, int64_t>(dataset, IdxT(nrow), dimension_);
  auto& params = index_params_.cagra_params;
  index_.emplace(cuvs::neighbors::cagra::detail::build(handle_,
                                                       params,
                                                       dataset_view,
                                                       index_params_.nn_descent_params,
                                                       index_params_.ivf_pq_refine_rate,
                                                       index_params_.ivf_pq_build_params,
                                                       index_params_.ivf_pq_search_params));
  return;
}
// Human-readable name for an AllocatorType (used in log messages only).
inline std::string allocator_to_string(AllocatorType mem_type)
{
  switch (mem_type) {
    case AllocatorType::Device: return "device";
    case AllocatorType::HostPinned: return "host_pinned";
    case AllocatorType::HostHugePage: return "host_huge_page";
    default: return "<invalid allocator type>";
  }
}
// Install search parameters and, if the requested memory spaces changed,
// migrate the graph and/or dataset to the new allocator.
// Throws std::bad_cast if `param` is not a RaftCagra::SearchParam.
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::set_search_param(const AnnSearchParam& param)
{
  auto search_param = dynamic_cast<const SearchParam&>(param);
  search_params_ = search_param.p;
  if (search_param.graph_mem != graph_mem_) {
    // Move graph to correct memory space
    graph_mem_ = search_param.graph_mem;
    RAFT_LOG_INFO("moving graph to new memory space: %s", allocator_to_string(graph_mem_).c_str());
    // We create a new graph and copy to it from existing graph
    auto mr        = get_mr(graph_mem_);
    auto new_graph = make_device_mdarray<IdxT, int64_t>(
      handle_, mr, make_extents<int64_t>(index_->graph().extent(0), index_->graph_degree()));
    raft::copy(new_graph.data_handle(),
               index_->graph().data_handle(),
               index_->graph().size(),
               resource::get_cuda_stream(handle_));
    index_->update_graph(handle_, make_const_mdspan(new_graph.view()));
    // update_graph() only stores a view in the index. We need to keep the graph object alive.
    graph_ = std::move(new_graph);
  }
  if (search_param.dataset_mem != dataset_mem_ || need_dataset_update_) {
    dataset_mem_ = search_param.dataset_mem;
    // First free up existing memory
    dataset_ = make_device_matrix<T, int64_t>(handle_, 0, 0);
    index_->update_dataset(handle_, make_const_mdspan(dataset_.view()));
    // Allocate space using the correct memory resource.
    RAFT_LOG_INFO("moving dataset to new memory space: %s",
                  allocator_to_string(dataset_mem_).c_str());
    auto mr = get_mr(dataset_mem_);
    // Copies with row padding so the search kernels see an aligned layout.
    cuvs::neighbors::cagra::detail::copy_with_padding(handle_, dataset_, input_dataset_v_, mr);
    index_->update_dataset(handle_, make_const_mdspan(dataset_.view()));
    // Ideally, instead of dataset_.view(), we should pass a strided matrix view to update.
    // See Issue https://github.com/rapidsai/raft/issues/1972 for details.
    // auto dataset_view = make_device_strided_matrix_view<const T, int64_t>(
    //   dataset_.data_handle(), dataset_.extent(0), this->dim_, dataset_.extent(1));
    // index_->update_dataset(handle_, dataset_view);
    need_dataset_update_ = false;
  }
}
// Record the dataset to attach at the next set_search_param() call.
// A re-used algo object may already hold this dataset, so only flag an
// update when the pointer or the row count actually changed.
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::set_search_dataset(const T* dataset, size_t nrow)
{
  const bool unchanged = input_dataset_v_.data_handle() == dataset &&
                         static_cast<size_t>(input_dataset_v_.extent(0)) == nrow;
  if (!unchanged) {
    input_dataset_v_     = make_device_matrix_view<const T, int64_t>(dataset, nrow, this->dim_);
    need_dataset_update_ = true;
  }
}
// Serialize the built index to `file` in CAGRA's native format.
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::save(const std::string& file) const
{
  cuvs::neighbors::cagra::serialize<T, IdxT>(handle_, file, *index_);
}
// Serialize the built index to `file` in hnswlib's on-disk format, so the
// hnswlib CPU search wrapper can load a GPU-built graph.
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::save_to_hnswlib(const std::string& file) const
{
  cuvs::neighbors::cagra::serialize_to_hnswlib<T, IdxT>(handle_, file, *index_);
}
// Deserialize a previously saved index from `file`.
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::load(const std::string& file)
{
  index_ = cuvs::neighbors::cagra::deserialize<T, IdxT>(handle_, file);
}
// Search the graph for the k nearest neighbors of each query. CAGRA produces
// IdxT (e.g. uint32_t) ids; when the caller's size_t buffer has a different
// type, a scratch buffer is used and the ids are widened afterwards.
// The bench stream argument is unused: work runs on the handle's stream and
// the call blocks until results are ready (sync_stream below).
template <typename T, typename IdxT>
void RaftCagra<T, IdxT>::search(
  const T* queries, int batch_size, int k, size_t* neighbors, float* distances, cudaStream_t) const
{
  IdxT* neighbors_IdxT;
  rmm::device_uvector<IdxT> neighbors_storage(0, resource::get_cuda_stream(handle_));
  if constexpr (std::is_same<IdxT, size_t>::value) {
    // Same width: write ids directly into the caller's buffer.
    neighbors_IdxT = neighbors;
  } else {
    neighbors_storage.resize(batch_size * k, resource::get_cuda_stream(handle_));
    neighbors_IdxT = neighbors_storage.data();
  }
  auto queries_view =
    raft::make_device_matrix_view<const T, int64_t>(queries, batch_size, dimension_);
  auto neighbors_view = raft::make_device_matrix_view<IdxT, int64_t>(neighbors_IdxT, batch_size, k);
  auto distances_view = raft::make_device_matrix_view<float, int64_t>(distances, batch_size, k);
  cuvs::neighbors::cagra::search(
    handle_, search_params_, *index_, queries_view, neighbors_view, distances_view);
  if (!std::is_same<IdxT, size_t>::value) {
    // Widen IdxT ids to size_t element-wise into the caller's buffer.
    raft::linalg::unaryOp(neighbors,
                          neighbors_IdxT,
                          batch_size * k,
                          raft::cast_op<size_t>(),
                          raft::resource::get_cuda_stream(handle_));
  }
  handle_.sync_stream();
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/raft/raft_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cassert>
#include <cstdio>
#include <fstream>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <cuvs/distance/detail/distance.cuh>
#include <cuvs/distance/distance_types.hpp>
#include <cuvs/spatial/knn/detail/fused_l2_knn.cuh>
#include "../common/ann_types.hpp"
namespace raft_temp {

// Translate a benchmark Metric into the corresponding cuVS DistanceType.
// Throws std::runtime_error for metrics the brute-force wrapper can't serve.
inline cuvs::distance::DistanceType parse_metric_type(cuvs::bench::Metric metric)
{
  switch (metric) {
    case cuvs::bench::Metric::kInnerProduct: return cuvs::distance::DistanceType::InnerProduct;
    case cuvs::bench::Metric::kEuclidean: return cuvs::distance::DistanceType::L2Expanded;
    default: throw std::runtime_error("raft supports only metric type of inner product and L2");
  }
}
}  // namespace raft_temp
namespace cuvs::bench {
// brute force fused L2 KNN - RAFT
// Exact (brute-force) k-NN wrapper using RAFT's fused L2 kernel; no index is
// built, the dataset is scanned at search time.
template <typename T>
class RaftGpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  RaftGpu(Metric metric, int dim);
  void build(const T*, size_t, cudaStream_t) final;
  void set_search_param(const AnnSearchParam& param) override;
  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const final;
  // to enable dataset access from GPU memory
  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Device;
    property.query_memory_type = MemoryType::Device;
    return property;
  }
  void set_search_dataset(const T* dataset, size_t nrow) override;
  void save(const std::string& file) const override;
  // Nothing to load: brute force keeps no index on disk.
  void load(const std::string&) override { return; };

 protected:
  cuvs::distance::DistanceType metric_type_;
  int device_;
  // Non-owning pointer to the device-resident dataset set via
  // set_search_dataset(); must outlive all search() calls.
  const T* dataset_;
  size_t nrow_;
};
// Only float data and the L2 metric are supported by the fused kernel;
// both constraints are enforced at compile/assert time.
template <typename T>
RaftGpu<T>::RaftGpu(Metric metric, int dim)
  : ANN<T>(metric, dim), metric_type_(raft_temp::parse_metric_type(metric))
{
  static_assert(std::is_same_v<T, float>, "raft support only float type");
  assert(metric_type_ == cuvs::distance::DistanceType::L2Expanded);
  RAFT_CUDA_TRY(cudaGetDevice(&device_));
}
// Brute-force search keeps no index, so building is a no-op.
template <typename T>
void RaftGpu<T>::build(const T*, size_t, cudaStream_t)
{
}
// Brute-force search has no tunable parameters; intentionally a no-op.
template <typename T>
void RaftGpu<T>::set_search_param(const AnnSearchParam&)
{
  // Nothing to set here as it is brute force implementation
}
// Remember the device-resident dataset to scan at search time.
// NOTE(review): stores a raw pointer — `dataset` must outlive searches.
template <typename T>
void RaftGpu<T>::set_search_dataset(const T* dataset, size_t nrow)
{
  dataset_ = dataset;
  nrow_    = nrow;
}
// Brute force has no index state; create an empty file so the benchmark
// harness's "index exists" check passes.
template <typename T>
void RaftGpu<T>::save(const std::string& file) const
{
  std::ofstream fp(file, std::ios::out);
  if (!fp) {
    // Fixed: report the failure (with the offending path) on stderr instead
    // of a bare "Error in creating file!!!" on stdout; also removed a stray
    // empty statement.
    std::fprintf(stderr, "RaftGpu::save: failed to create file '%s'\n", file.c_str());
    return;
  }
  fp.close();
}
// Exact k-NN over the stored dataset via RAFT's fused L2 kernel, writing
// results asynchronously on `stream`.
template <typename T>
void RaftGpu<T>::search(const T* queries,
                        int batch_size,
                        int k,
                        size_t* neighbors,
                        float* distances,
                        cudaStream_t stream) const
{
  // TODO: Integrate new `raft::brute_force::index` (from
  // https://github.com/rapidsai/raft/pull/1817)
  // NOTE(review): the kernel writes int64_t ids into the caller's size_t
  // buffer via reinterpret_cast — valid only where sizeof(size_t) == 8;
  // confirm no 32-bit targets are supported.
  cuvs::spatial::knn::detail::fusedL2Knn(this->dim_,
                                         reinterpret_cast<int64_t*>(neighbors),
                                         distances,
                                         dataset_,
                                         queries,
                                         nrow_,
                                         static_cast<size_t>(batch_size),
                                         k,
                                         true,
                                         true,
                                         stream,
                                         metric_type_);
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/hnswlib/hnswlib_benchmark.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common/ann_types.hpp"
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "hnswlib_wrapper.h"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
/**
 * Populate HnswLib build parameters from a JSON configuration.
 * Required keys: "efConstruction", "M" (json::at throws when absent).
 * Optional key: "numThreads" (keeps the struct default when absent).
 */
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::HnswLib<T>::BuildParam& param)
{
  param.ef_construction = conf.at("efConstruction");
  param.M               = conf.at("M");
  if (auto it = conf.find("numThreads"); it != conf.end()) { param.num_threads = *it; }
}

/**
 * Populate HnswLib search parameters from a JSON configuration.
 * Required key: "ef"; optional key: "numThreads".
 */
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::HnswLib<T>::SearchParam& param)
{
  param.ef = conf.at("ef");
  if (auto it = conf.find("numThreads"); it != conf.end()) { param.num_threads = *it; }
}
// Construct an algorithm wrapper from a JSON build configuration.
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  return std::make_unique<Algo<T>>(metric, dim, param);
}

// Overload taking a device list for interface parity with multi-GPU
// algorithms; hnswlib is CPU-only, so the list is ignored.
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf,
                                               const std::vector<int>& dev_list)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  (void)dev_list;
  return std::make_unique<Algo<T>>(metric, dim, param);
}
/**
 * Factory: build the named algorithm for element type T.
 * Only "hnswlib" is known here, and only for float and uint8_t elements;
 * anything else results in a runtime_error.
 */
template <typename T>
std::unique_ptr<cuvs::bench::ANN<T>> create_algo(const std::string& algo,
                                                 const std::string& distance,
                                                 int dim,
                                                 const nlohmann::json& conf,
                                                 const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;

  cuvs::bench::Metric metric = parse_metric(distance);
  std::unique_ptr<cuvs::bench::ANN<T>> ann;

  // float and uint8_t share the same instantiation path.
  if constexpr (std::is_same_v<T, float> || std::is_same_v<T, uint8_t>) {
    if (algo == "hnswlib") { ann = make_algo<T, cuvs::bench::HnswLib>(metric, dim, conf); }
  }

  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }
  return ann;
}
/**
 * Factory: build a search-parameter object for the named algorithm from JSON.
 * Throws runtime_error for any algorithm other than "hnswlib".
 */
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo != "hnswlib") { throw std::runtime_error("invalid algo: '" + algo + "'"); }

  auto param = std::make_unique<typename cuvs::bench::HnswLib<T>::SearchParam>();
  parse_search_param<T>(conf, *param);
  return param;
}
}; // namespace cuvs::bench
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef CUVS_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv) { return cuvs::bench::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/hnswlib/hnswlib_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <ctime>
#include <future>
#include <memory>
#include <mutex>
#include <numeric>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>
#include "../common/ann_types.hpp"
#include "../common/thread_pool.hpp"
#include <hnswlib.h>
namespace cuvs::bench {
// Maps the benchmark element type to the distance accumulator type hnswlib
// uses internally: float data -> float distances, uint8_t data -> int distances.
template <typename T>
struct hnsw_dist_t {
  using type = void;
};

template <>
struct hnsw_dist_t<float> {
  using type = float;
};

template <>
struct hnsw_dist_t<uint8_t> {
  using type = int;
};

// Benchmark wrapper around hnswlib's HierarchicalNSW index. This is a CPU
// algorithm; the cudaStream_t parameters exist only to satisfy the common
// ANN<T> interface and are ignored.
template <typename T>
class HnswLib : public ANN<T> {
 public:
  // https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
  struct BuildParam {
    int M;                // max out-degree per node
    int ef_construction;  // candidate-list size during construction
    int num_threads = omp_get_num_procs();  // build parallelism
  };

  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    int ef;               // candidate-list size during search
    int num_threads = 1;  // query parallelism (used in LATENCY mode only)
  };

  HnswLib(Metric metric, int dim, const BuildParam& param);

  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) override;

  void set_search_param(const AnnSearchParam& param) override;
  void search(const T* query,
              int batch_size,
              int k,
              size_t* indices,
              float* distances,
              cudaStream_t stream = 0) const override;

  void save(const std::string& path_to_index) const override;
  void load(const std::string& path_to_index) override;

  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type   = MemoryType::Host;
    return property;
  }

  // Restrict traversal to the base layer (skips the hierarchy).
  void set_base_layer_only() { appr_alg_->base_layer_only = true; }

 private:
  // Single-query kNN; results are written best-first into indices/distances.
  void get_search_knn_results_(const T* query, int k, size_t* indices, float* distances) const;

  std::unique_ptr<hnswlib::HierarchicalNSW<typename hnsw_dist_t<T>::type>> appr_alg_;
  std::unique_ptr<hnswlib::SpaceInterface<typename hnsw_dist_t<T>::type>> space_;

  using ANN<T>::metric_;
  using ANN<T>::dim_;
  int ef_construction_;
  int m_;
  int num_threads_;
  std::unique_ptr<FixedThreadPool> thread_pool_;
  Objective metric_objective_;
};
// Constructor: validates the type/metric combination and stashes build params.
// uint8_t data is restricted to Euclidean because hnswlib's integer space
// (L2SpaceI) only implements L2.
template <typename T>
HnswLib<T>::HnswLib(Metric metric, int dim, const BuildParam& param) : ANN<T>(metric, dim)
{
  assert(dim_ > 0);
  static_assert(std::is_same_v<T, float> || std::is_same_v<T, uint8_t>);
  if constexpr (std::is_same_v<T, uint8_t>) {
    if (metric_ != Metric::kEuclidean) {
      throw std::runtime_error("hnswlib<uint8_t> only supports Euclidean distance");
    }
  }

  ef_construction_ = param.ef_construction;
  m_               = param.M;
  num_threads_     = param.num_threads;
}

// Build the index by inserting all rows in parallel via the thread pool.
// NOTE(review): the lambda captures `dataset` and locals by reference, which
// assumes FixedThreadPool::submit blocks until all items complete — confirm.
template <typename T>
void HnswLib<T>::build(const T* dataset, size_t nrow, cudaStream_t)
{
  // Pick the hnswlib distance space matching the metric and element type.
  if constexpr (std::is_same_v<T, float>) {
    if (metric_ == Metric::kInnerProduct) {
      space_ = std::make_unique<hnswlib::InnerProductSpace>(dim_);
    } else {
      space_ = std::make_unique<hnswlib::L2Space>(dim_);
    }
  } else if constexpr (std::is_same_v<T, uint8_t>) {
    space_ = std::make_unique<hnswlib::L2SpaceI>(dim_);
  }

  appr_alg_ = std::make_unique<hnswlib::HierarchicalNSW<typename hnsw_dist_t<T>::type>>(
    space_.get(), nrow, m_, ef_construction_);

  thread_pool_                  = std::make_unique<FixedThreadPool>(num_threads_);
  // items_per_thread only bounds how often progress is logged below.
  const size_t items_per_thread = nrow / (num_threads_ + 1);

  thread_pool_->submit(
    [&](size_t i) {
      // Periodic progress log with a wall-clock timestamp.
      if (i < items_per_thread && i % 10000 == 0) {
        char buf[20];
        std::time_t now = std::time(nullptr);
        std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
        printf("%s building %zu / %zu\n", buf, i, items_per_thread);
        fflush(stdout);
      }

      appr_alg_->addPoint(dataset + i * dim_, i);
    },
    nrow);
}

// Apply search parameters; lazily creates a query thread pool when latency
// mode with multiple threads is requested.
template <typename T>
void HnswLib<T>::set_search_param(const AnnSearchParam& param_)
{
  auto param        = dynamic_cast<const SearchParam&>(param_);
  appr_alg_->ef_    = param.ef;
  metric_objective_ = param.metric_objective;
  num_threads_      = param.num_threads;

  // Create a pool if multiple query threads have been set and the pool hasn't been created already
  bool create_pool = (metric_objective_ == Objective::LATENCY && num_threads_ > 1 && !thread_pool_);
  if (create_pool) { thread_pool_ = std::make_unique<FixedThreadPool>(num_threads_); }
}

// Batched search: dispatches per-query lookups either across the thread pool
// (latency mode) or serially on the calling thread (throughput mode).
template <typename T>
void HnswLib<T>::search(
  const T* query, int batch_size, int k, size_t* indices, float* distances, cudaStream_t) const
{
  auto f = [&](int i) {
    // hnsw can only handle a single vector at a time.
    get_search_knn_results_(query + i * dim_, k, indices + i * k, distances + i * k);
  };
  if (metric_objective_ == Objective::LATENCY && num_threads_ > 1) {
    thread_pool_->submit(f, batch_size);
  } else {
    for (int i = 0; i < batch_size; i++) {
      f(i);
    }
  }
}

// Persist the index to disk in hnswlib's native format.
template <typename T>
void HnswLib<T>::save(const std::string& path_to_index) const
{
  appr_alg_->saveIndex(std::string(path_to_index));
}

// Load a previously saved index; the distance space must be recreated first
// because HierarchicalNSW's loading constructor needs it.
template <typename T>
void HnswLib<T>::load(const std::string& path_to_index)
{
  if constexpr (std::is_same_v<T, float>) {
    if (metric_ == Metric::kInnerProduct) {
      space_ = std::make_unique<hnswlib::InnerProductSpace>(dim_);
    } else {
      space_ = std::make_unique<hnswlib::L2Space>(dim_);
    }
  } else if constexpr (std::is_same_v<T, uint8_t>) {
    space_ = std::make_unique<hnswlib::L2SpaceI>(dim_);
  }

  appr_alg_ = std::make_unique<hnswlib::HierarchicalNSW<typename hnsw_dist_t<T>::type>>(
    space_.get(), path_to_index);
}

// Single-query kNN. searchKnn returns a max-heap (worst result on top), so
// the heap is drained back-to-front to emit results in best-first order.
template <typename T>
void HnswLib<T>::get_search_knn_results_(const T* query,
                                         int k,
                                         size_t* indices,
                                         float* distances) const
{
  auto result = appr_alg_->searchKnn(query, k);
  assert(result.size() >= static_cast<size_t>(k));

  for (int i = k - 1; i >= 0; --i) {
    indices[i]   = result.top().second;
    distances[i] = result.top().first;
    result.pop();
  }
}
}; // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/ggnn/ggnn_benchmark.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "../common/ann_types.hpp"
#include "ggnn_wrapper.cuh"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
/**
 * Fill GGNN build parameters from JSON. "k" is required (GGNN must know the
 * query-time k when building); every other key is optional and leaves the
 * struct's default untouched when absent.
 */
template <typename T>
void parse_build_param(const nlohmann::json& conf, typename cuvs::bench::Ggnn<T>::BuildParam& param)
{
  param.k = conf.at("k");

  const auto read_optional = [&conf](const char* key, auto& dst) {
    if (conf.contains(key)) { dst = conf.at(key); }
  };
  read_optional("k_build", param.k_build);
  read_optional("segment_size", param.segment_size);
  read_optional("num_layers", param.num_layers);
  read_optional("tau", param.tau);
  read_optional("refine_iterations", param.refine_iterations);
}

/**
 * Fill GGNN search parameters from JSON. "tau" is required; the remaining
 * keys are optional tuning knobs that keep their defaults when absent.
 */
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::Ggnn<T>::SearchParam& param)
{
  param.tau = conf.at("tau");

  const auto read_optional = [&conf](const char* key, auto& dst) {
    if (conf.contains(key)) { dst = conf.at(key); }
  };
  read_optional("block_dim", param.block_dim);
  read_optional("max_iterations", param.max_iterations);
  read_optional("cache_size", param.cache_size);
  read_optional("sorted_size", param.sorted_size);
}
// Construct an algorithm wrapper from a JSON build configuration.
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  return std::make_unique<Algo<T>>(metric, dim, param);
}

// Overload taking a device list for interface parity with multi-GPU
// algorithms; this GGNN wrapper is single-GPU, so the list is ignored.
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf,
                                               const std::vector<int>& dev_list)
{
  typename Algo<T>::BuildParam param;
  parse_build_param<T>(conf, param);
  (void)dev_list;
  return std::make_unique<Algo<T>>(metric, dim, param);
}
/**
 * Factory: build the named algorithm for element type T.
 * Only "ggnn" is recognized; anything else raises runtime_error.
 * Removed two empty `if constexpr (std::is_same_v<T, ...>) {}` statements
 * that were dead code (they contained no body and had no effect).
 */
template <typename T>
std::unique_ptr<cuvs::bench::ANN<T>> create_algo(const std::string& algo,
                                                 const std::string& distance,
                                                 int dim,
                                                 const nlohmann::json& conf,
                                                 const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;

  cuvs::bench::Metric metric = parse_metric(distance);
  std::unique_ptr<cuvs::bench::ANN<T>> ann;

  if (algo == "ggnn") { ann = make_algo<T, cuvs::bench::Ggnn>(metric, dim, conf); }
  if (!ann) { throw std::runtime_error("invalid algo: '" + algo + "'"); }

  return ann;
}
/**
 * Factory: build a search-parameter object for the named algorithm from JSON.
 * Throws runtime_error for any algorithm other than "ggnn".
 */
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo != "ggnn") { throw std::runtime_error("invalid algo: '" + algo + "'"); }

  auto param = std::make_unique<typename cuvs::bench::Ggnn<T>::SearchParam>();
  parse_search_param<T>(conf, *param);
  return param;
}
} // namespace cuvs::bench
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);
#ifdef CUVS_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
int main(int argc, char** argv) { return cuvs::bench::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/ggnn/ggnn_wrapper.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../common/ann_types.hpp"
#include <ggnn/cuda_knn_ggnn_gpu_instance.cuh>
#include <raft/util/cudart_utils.hpp>
#include <memory>
#include <stdexcept>
namespace cuvs::bench {
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
class GgnnImpl;
// Type-erasing facade over GgnnImpl. GGNN bakes dim/KBuild/KQuery/segment
// size into template parameters, so this class forwards every call to a
// concrete GgnnImpl instantiation selected at construction time.
template <typename T>
class Ggnn : public ANN<T> {
 public:
  struct BuildParam {
    int k_build{24};      // KBuild
    int segment_size{32};  // S
    int num_layers{4};     // L
    float tau{0.5};
    int refine_iterations{2};
    int k;  // GGNN requires to know k during building
  };

  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    float tau;
    int block_dim{32};
    int max_iterations{400};
    int cache_size{512};
    int sorted_size{256};
    auto needs_dataset() const -> bool override { return true; }
  };

  Ggnn(Metric metric, int dim, const BuildParam& param);
  // NOTE(review): impl_ is a raw owning pointer freed here; copying a Ggnn
  // would double-delete. Confirm instances are never copied, or delete the
  // copy operations.
  ~Ggnn() { delete impl_; }

  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) override
  {
    impl_->build(dataset, nrow, stream);
  }

  void set_search_param(const AnnSearchParam& param) override { impl_->set_search_param(param); }
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override
  {
    impl_->search(queries, batch_size, k, neighbors, distances, stream);
  }

  void save(const std::string& file) const override { impl_->save(file); }
  void load(const std::string& file) override { impl_->load(file); }

  AlgoProperty get_preference() const override { return impl_->get_preference(); }

  void set_search_dataset(const T* dataset, size_t nrow) override
  {
    impl_->set_search_dataset(dataset, nrow);
  };

 private:
  ANN<T>* impl_;  // owned; concrete GgnnImpl chosen by the constructor
};

// Select the GgnnImpl template instantiation matching (metric, dim, k_build,
// k, segment_size). Only the combinations listed below are compiled in; any
// other configuration throws.
template <typename T>
Ggnn<T>::Ggnn(Metric metric, int dim, const BuildParam& param) : ANN<T>(metric, dim)
{
  // ggnn/src/sift1m.cu
  if (metric == Metric::kEuclidean && dim == 128 && param.k_build == 24 && param.k == 10 &&
      param.segment_size == 32) {
    impl_ = new GgnnImpl<T, Euclidean, 128, 24, 10, 32>(metric, dim, param);
  }
  // ggnn/src/deep1b_multi_gpu.cu, and adapt it deep1B
  else if (metric == Metric::kEuclidean && dim == 96 && param.k_build == 24 && param.k == 10 &&
           param.segment_size == 32) {
    impl_ = new GgnnImpl<T, Euclidean, 96, 24, 10, 32>(metric, dim, param);
  } else if (metric == Metric::kInnerProduct && dim == 96 && param.k_build == 24 && param.k == 10 &&
             param.segment_size == 32) {
    impl_ = new GgnnImpl<T, Cosine, 96, 24, 10, 32>(metric, dim, param);
  } else if (metric == Metric::kInnerProduct && dim == 96 && param.k_build == 96 && param.k == 10 &&
             param.segment_size == 64) {
    impl_ = new GgnnImpl<T, Cosine, 96, 96, 10, 64>(metric, dim, param);
  }
  // ggnn/src/glove200.cu, adapt it to glove100
  else if (metric == Metric::kInnerProduct && dim == 100 && param.k_build == 96 && param.k == 10 &&
           param.segment_size == 64) {
    impl_ = new GgnnImpl<T, Cosine, 100, 96, 10, 64>(metric, dim, param);
  } else {
    throw std::runtime_error(
      "ggnn: not supported combination of metric, dim and build param; "
      "see Ggnn's constructor in ggnn_wrapper.cuh for available combinations");
  }
}
// Concrete GGNN wrapper: all structural parameters (distance measure, dim D,
// build degree KBuild, query k KQuery, segment size S) are compile-time.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
class GgnnImpl : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;

  GgnnImpl(Metric metric, int dim, const typename Ggnn<T>::BuildParam& param);

  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) override;

  void set_search_param(const AnnSearchParam& param) override;
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const override;

  void save(const std::string& file) const override;
  void load(const std::string& file) override;

  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    property.dataset_memory_type = MemoryType::Device;
    property.query_memory_type   = MemoryType::Device;
    return property;
  }

  void set_search_dataset(const T* dataset, size_t nrow) override;

 private:
  using ANN<T>::metric_;
  using ANN<T>::dim_;

  // Instantiation of GGNN's GPU engine; KF (forward links) is fixed at KBuild/2.
  using GGNNGPUInstance = GGNNGPUInstance<measure,
                                          int64_t /* KeyT */,
                                          float /* ValueT */,
                                          size_t /* GAddrT */,
                                          T /* BaseT */,
                                          size_t /* BAddrT */,
                                          D,
                                          KBuild,
                                          KBuild / 2 /* KF */,
                                          KQuery,
                                          S>;
  std::unique_ptr<GGNNGPUInstance> ggnn_;
  typename Ggnn<T>::BuildParam build_param_;
  typename Ggnn<T>::SearchParam search_param_;
};

// Constructor: cross-checks the runtime metric/dim against the compile-time
// measure/D template arguments (inner product maps to GGNN's Cosine).
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
GgnnImpl<T, measure, D, KBuild, KQuery, S>::GgnnImpl(Metric metric,
                                                     int dim,
                                                     const typename Ggnn<T>::BuildParam& param)
  : ANN<T>(metric, dim), build_param_(param)
{
  if (metric_ == Metric::kInnerProduct) {
    if (measure != Cosine) { throw std::runtime_error("mis-matched metric"); }
  } else if (metric_ == Metric::kEuclidean) {
    if (measure != Euclidean) { throw std::runtime_error("mis-matched metric"); }
  } else {
    throw std::runtime_error(
      "ggnn supports only metric type of InnerProduct, Cosine and Euclidean");
  }

  if (dim != D) { throw std::runtime_error("mis-matched dim"); }
}

// Build the GGNN graph on the current device, then run the configured number
// of refinement passes.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::build(const T* dataset,
                                                       size_t nrow,
                                                       cudaStream_t stream)
{
  int device;
  RAFT_CUDA_TRY(cudaGetDevice(&device));
  ggnn_ = std::make_unique<GGNNGPUInstance>(
    device, nrow, build_param_.num_layers, true, build_param_.tau);

  ggnn_->set_base_data(dataset);
  ggnn_->set_stream(stream);
  ggnn_->build(0);
  for (int i = 0; i < build_param_.refine_iterations; ++i) {
    ggnn_->refine();
  }
}

// Point GGNN at a (device-resident) dataset for searching a loaded index.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::set_search_dataset(const T* dataset, size_t nrow)
{
  ggnn_->set_base_data(dataset);
}

// Cache search parameters; they are applied per-call in search().
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::set_search_param(const AnnSearchParam& param)
{
  search_param_ = dynamic_cast<const typename Ggnn<T>::SearchParam&>(param);
}
// Run a batched query. queryLayer's block_dim/max_iterations/cache_size/
// sorted_size are template parameters, so only the pre-instantiated tuning
// combinations below are accepted; anything else throws.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::search(const T* queries,
                                                        int batch_size,
                                                        int k,
                                                        size_t* neighbors,
                                                        float* distances,
                                                        cudaStream_t stream) const
{
  static_assert(sizeof(size_t) == sizeof(int64_t), "sizes of size_t and GGNN's KeyT are different");
  if (k != KQuery) {
    throw std::runtime_error(
      "k = " + std::to_string(k) +
      ", but this GGNN instance only supports k = " + std::to_string(KQuery));
  }

  ggnn_->set_stream(stream);
  // tau lives in device constant memory; upload the current value.
  RAFT_CUDA_TRY(cudaMemcpyToSymbol(c_tau_query, &search_param_.tau, sizeof(float)));

  const int block_dim      = search_param_.block_dim;
  const int max_iterations = search_param_.max_iterations;
  const int cache_size     = search_param_.cache_size;
  const int sorted_size    = search_param_.sorted_size;
  // default value
  if (block_dim == 32 && max_iterations == 400 && cache_size == 512 && sorted_size == 256) {
    ggnn_->template queryLayer<32, 400, 512, 256, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // ggnn/src/sift1m.cu
  else if (block_dim == 32 && max_iterations == 200 && cache_size == 256 && sorted_size == 64) {
    ggnn_->template queryLayer<32, 200, 256, 64, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // ggnn/src/sift1m.cu
  else if (block_dim == 32 && max_iterations == 400 && cache_size == 448 && sorted_size == 64) {
    ggnn_->template queryLayer<32, 400, 448, 64, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // ggnn/src/glove200.cu
  else if (block_dim == 128 && max_iterations == 2000 && cache_size == 2048 && sorted_size == 32) {
    ggnn_->template queryLayer<128, 2000, 2048, 32, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  }
  // for glove100
  else if (block_dim == 64 && max_iterations == 400 && cache_size == 512 && sorted_size == 32) {
    ggnn_->template queryLayer<64, 400, 512, 32, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  } else if (block_dim == 128 && max_iterations == 2000 && cache_size == 1024 &&
             sorted_size == 32) {
    ggnn_->template queryLayer<128, 2000, 1024, 32, false>(
      queries, batch_size, reinterpret_cast<int64_t*>(neighbors), distances);
  } else {
    throw std::runtime_error("ggnn: not supported search param");
  }
}

// Persist the graph: download shard 0 from the device into the CPU buffer,
// wait for the copy, then write it to disk.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::save(const std::string& file) const
{
  auto& ggnn_host   = ggnn_->ggnn_cpu_buffers.at(0);
  auto& ggnn_device = ggnn_->ggnn_shards.at(0);
  ggnn_->set_stream(0);

  ggnn_host.downloadAsync(ggnn_device);
  RAFT_CUDA_TRY(cudaStreamSynchronize(ggnn_device.stream));
  ggnn_host.store(file);
}

// Reverse of save(): read the graph from disk and upload it to shard 0.
template <typename T, DistanceMeasure measure, int D, int KBuild, int KQuery, int S>
void GgnnImpl<T, measure, D, KBuild, KQuery, S>::load(const std::string& file)
{
  auto& ggnn_host   = ggnn_->ggnn_cpu_buffers.at(0);
  auto& ggnn_device = ggnn_->ggnn_shards.at(0);
  ggnn_->set_stream(0);

  ggnn_host.load(file);
  ggnn_host.uploadAsync(ggnn_device);
  RAFT_CUDA_TRY(cudaStreamSynchronize(ggnn_device.stream));
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/faiss/faiss_gpu_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FAISS_WRAPPER_H_
#define FAISS_WRAPPER_H_
#include "../common/ann_types.hpp"
#include <raft/core/logger.hpp>
#include <raft/util/cudart_utils.hpp>
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFFlat.h>
#include <faiss/IndexIVFPQ.h>
#include <faiss/IndexRefine.h>
#include <faiss/IndexScalarQuantizer.h>
#include <faiss/gpu/GpuIndexFlat.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>
#include <faiss/gpu/GpuIndexIVFPQ.h>
#include <faiss/gpu/GpuIndexIVFScalarQuantizer.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/impl/ScalarQuantizer.h>
#include <faiss/index_io.h>
#include <omp.h>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/stream_view.hpp>
#include <cassert>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace {

// Map the benchmark's metric enum to FAISS's; only inner product and L2 are
// supported by this wrapper.
faiss::MetricType parse_metric_type(cuvs::bench::Metric metric)
{
  if (metric == cuvs::bench::Metric::kInnerProduct) {
    return faiss::METRIC_INNER_PRODUCT;
  } else if (metric == cuvs::bench::Metric::kEuclidean) {
    return faiss::METRIC_L2;
  } else {
    throw std::runtime_error("faiss supports only metric type of inner product and L2");
  }
}

// RAII guard that forces single-threaded OpenMP for its lifetime, then
// restores the previous thread count (used around FAISS train/serialize
// calls so timings are not skewed by CPU parallelism).
// note BLAS library can still use multi-threading, and
// setting environment variable like OPENBLAS_NUM_THREADS can control it
class OmpSingleThreadScope {
 public:
  OmpSingleThreadScope()
  {
    max_threads_ = omp_get_max_threads();
    omp_set_num_threads(1);
  }
  ~OmpSingleThreadScope()
  {
    // the best we can do
    omp_set_num_threads(max_threads_);
  }

 private:
  int max_threads_;  // thread count to restore on destruction
};

}  // namespace
namespace cuvs::bench {
// Common base for FAISS GPU index wrappers (IVF-Flat, IVF-PQ, ...).
// Owns the FAISS GPU resources and synchronizes FAISS's internal stream with
// the benchmark's stream via a CUDA event (see stream_wait).
template <typename T>
class FaissGpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    int nprobe;                // number of IVF lists probed per query
    float refine_ratio = 1.0;  // >1 enables re-ranking against raw vectors
    auto needs_dataset() const -> bool override { return refine_ratio > 1.0f; }
  };

  struct BuildParam {
    int nlist = 1;  // number of IVF lists
    int ratio = 2;  // training uses 1/ratio of the dataset
  };

  FaissGpu(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim),
      metric_type_(parse_metric_type(metric)),
      nlist_{param.nlist},
      training_sample_fraction_{1.0 / double(param.ratio)}
  {
    static_assert(std::is_same_v<T, float>, "faiss support only float type");
    RAFT_CUDA_TRY(cudaGetDevice(&device_));
    RAFT_CUDA_TRY(cudaEventCreate(&sync_, cudaEventDisableTiming));
    faiss_default_stream_ = gpu_resource_.getDefaultStream(device_);
    raft::resource::set_cuda_stream(handle_, faiss_default_stream_);
  }

  virtual ~FaissGpu() noexcept { RAFT_CUDA_TRY_NO_THROW(cudaEventDestroy(sync_)); }

  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) final;

  virtual void set_search_param(const FaissGpu<T>::AnnSearchParam& param) {}

  void set_search_dataset(const T* dataset, size_t nrow) override { dataset_ = dataset; }

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const final;

  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    // to enable building big dataset which is larger than GPU memory
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type   = MemoryType::Host;
    return property;
  }

 protected:
  // Serialize the GPU index to disk by copying it to its CPU counterpart first.
  template <typename GpuIndex, typename CpuIndex>
  void save_(const std::string& file) const;

  // Deserialize a CPU index from disk and copy it into the GPU index.
  template <typename GpuIndex, typename CpuIndex>
  void load_(const std::string& file);

  // Make `stream` wait until work queued on FAISS's default stream completes.
  void stream_wait(cudaStream_t stream) const
  {
    RAFT_CUDA_TRY(cudaEventRecord(sync_, faiss_default_stream_));
    RAFT_CUDA_TRY(cudaStreamWaitEvent(stream, sync_));
  }

  mutable faiss::gpu::StandardGpuResources gpu_resource_;
  std::unique_ptr<faiss::gpu::GpuIndex> index_;
  std::unique_ptr<faiss::IndexRefineFlat> index_refine_{nullptr};
  faiss::MetricType metric_type_;
  int nlist_;
  int device_;
  cudaEvent_t sync_{nullptr};               // event used by stream_wait()
  cudaStream_t faiss_default_stream_{nullptr};
  double training_sample_fraction_;         // 1/ratio from BuildParam
  std::unique_ptr<faiss::SearchParameters> search_params_;
  const T* dataset_;                        // non-owning; needed for refinement
  raft::device_resources handle_;
  float refine_ratio_ = 1.0;
};
// Train (for IVF variants) and populate the index. For IVF indexes, the
// clustering min/max points-per-centroid are adjusted so FAISS uses exactly
// the training sample implied by BuildParam::ratio.
template <typename T>
void FaissGpu<T>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  OmpSingleThreadScope omp_single_thread;
  auto index_ivf = dynamic_cast<faiss::gpu::GpuIndexIVF*>(index_.get());
  if (index_ivf != nullptr) {
    // set the min/max training size for clustering to use the whole provided training set.
    double trainset_size       = training_sample_fraction_ * static_cast<double>(nrow);
    double points_per_centroid = trainset_size / static_cast<double>(nlist_);
    int max_ppc                = std::ceil(points_per_centroid);
    int min_ppc                = std::floor(points_per_centroid);
    if (min_ppc < index_ivf->cp.min_points_per_centroid) {
      RAFT_LOG_WARN(
        "The suggested training set size %zu (data size %zu, training sample ratio %f) yields %d "
        "points per cluster (n_lists = %d). This is smaller than the FAISS default "
        "min_points_per_centroid = %d.",
        static_cast<size_t>(trainset_size),
        nrow,
        training_sample_fraction_,
        min_ppc,
        nlist_,
        index_ivf->cp.min_points_per_centroid);
    }
    index_ivf->cp.max_points_per_centroid = max_ppc;
    index_ivf->cp.min_points_per_centroid = min_ppc;
  }
  index_->train(nrow, dataset);  // faiss::gpu::GpuIndexFlat::train() will do nothing
  assert(index_->is_trained);
  index_->add(nrow, dataset);
  stream_wait(stream);
}

// Batched search. Refinement (refine_ratio_ > 1) is currently rejected; see
// the FAISS issue referenced below. Neighbor ids are written as faiss::idx_t
// through the size_t buffer (sizes are static_assert'ed equal).
template <typename T>
void FaissGpu<T>::search(const T* queries,
                         int batch_size,
                         int k,
                         size_t* neighbors,
                         float* distances,
                         cudaStream_t stream) const
{
  static_assert(sizeof(size_t) == sizeof(faiss::idx_t),
                "sizes of size_t and faiss::idx_t are different");

  if (this->refine_ratio_ > 1.0) {
    // TODO: FAISS changed their search APIs to accept the search parameters as a struct object
    // but their refine API doesn't allow the struct to be passed in. Once this is fixed, we
    // need to re-enable refinement below
    // index_refine_->search(batch_size, queries, k, distances,
    // reinterpret_cast<faiss::idx_t*>(neighbors), this->search_params_.get()); Related FAISS issue:
    // https://github.com/facebookresearch/faiss/issues/3118
    throw std::runtime_error(
      "FAISS doesn't support refinement in their new APIs so this feature is disabled in the "
      "benchmarks for the time being.");
  } else {
    index_->search(batch_size,
                   queries,
                   k,
                   distances,
                   reinterpret_cast<faiss::idx_t*>(neighbors),
                   this->search_params_.get());
  }
  stream_wait(stream);  // wait for the async copy to finish
}

// Serialize: copy the GPU index into a freshly constructed CPU index of the
// matching type, then write that to disk.
template <typename T>
template <typename GpuIndex, typename CpuIndex>
void FaissGpu<T>::save_(const std::string& file) const
{
  OmpSingleThreadScope omp_single_thread;

  auto cpu_index = std::make_unique<CpuIndex>();
  dynamic_cast<GpuIndex*>(index_.get())->copyTo(cpu_index.get());
  faiss::write_index(cpu_index.get(), file.c_str());
}
// Deserialize a CPU index from disk and copy it into the GPU index.
// Fixes: (1) the index returned by faiss::read_index leaked when the
// dynamic_cast to CpuIndex failed; (2) a failed cast left cpu_index null and
// was only guarded by an assert, dereferencing nullptr in NDEBUG builds;
// (3) errors were reported on stdout instead of stderr.
template <typename T>
template <typename GpuIndex, typename CpuIndex>
void FaissGpu<T>::load_(const std::string& file)
{
  OmpSingleThreadScope omp_single_thread;

  // Take ownership of the deserialized index first so it cannot leak if the
  // downcast below fails.
  std::unique_ptr<faiss::Index> loaded(faiss::read_index(file.c_str()));
  auto* cpu_index = dynamic_cast<CpuIndex*>(loaded.get());
  if (cpu_index == nullptr) {
    std::cerr << "Error loading index file '" << file << "': unexpected index type" << std::endl;
    return;
  }
  try {
    dynamic_cast<GpuIndex*>(index_.get())->copyFrom(cpu_index);
  } catch (const std::exception& e) {
    std::cerr << "Error loading index file: " << e.what() << std::endl;
  }
}
// GPU IVF-Flat wrapper: inverted lists store raw vectors (no compression).
template <typename T>
class FaissGpuIVFFlat : public FaissGpu<T> {
 public:
  using typename FaissGpu<T>::BuildParam;

  FaissGpuIVFFlat(Metric metric, int dim, const BuildParam& param) : FaissGpu<T>(metric, dim, param)
  {
    faiss::gpu::GpuIndexIVFFlatConfig config;
    config.device = this->device_;
    this->index_  = std::make_unique<faiss::gpu::GpuIndexIVFFlat>(
      &(this->gpu_resource_), dim, param.nlist, this->metric_type_, config);
  }

  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe        = search_param.nprobe;
    // Fixed: `nlist_` was referenced unqualified from the dependent base
    // class, which only compiled when NDEBUG elided the assert entirely.
    assert(nprobe <= this->nlist_);

    faiss::IVFSearchParameters faiss_search_params;
    faiss_search_params.nprobe = nprobe;
    this->search_params_       = std::make_unique<faiss::IVFSearchParameters>(faiss_search_params);
    this->refine_ratio_        = search_param.refine_ratio;
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexIVFFlat, faiss::IndexIVFFlat>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexIVFFlat, faiss::IndexIVFFlat>(file);
  }
};
/** IVF index with product-quantization compression on the GPU. */
template <typename T>
class FaissGpuIVFPQ : public FaissGpu<T> {
 public:
  struct BuildParam : public FaissGpu<T>::BuildParam {
    int M;                // number of PQ sub-quantizers
    bool useFloat16;      // enables fp16 lookup tables in the config
    bool usePrecomputed;  // enables precomputed tables in the config
  };

  FaissGpuIVFPQ(Metric metric, int dim, const BuildParam& param) : FaissGpu<T>(metric, dim, param)
  {
    faiss::gpu::GpuIndexIVFPQConfig config;
    config.useFloat16LookupTables = param.useFloat16;
    config.usePrecomputedTables   = param.usePrecomputed;
    config.device                 = this->device_;
    this->index_ =
      std::make_unique<faiss::gpu::GpuIndexIVFPQ>(&(this->gpu_resource_),
                                                  dim,
                                                  param.nlist,
                                                  param.M,
                                                  8,  // FAISS only supports bitsPerCode=8
                                                  this->metric_type_,
                                                  config);
  }

  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe        = search_param.nprobe;
    // `nlist_` is a member of the dependent base FaissGpu<T>; it must be
    // qualified with `this->` to be found during template name lookup.
    assert(nprobe <= this->nlist_);
    this->refine_ratio_ = search_param.refine_ratio;
    faiss::IVFPQSearchParameters faiss_search_params;
    faiss_search_params.nprobe = nprobe;
    this->search_params_ = std::make_unique<faiss::IVFPQSearchParameters>(faiss_search_params);
    if (search_param.refine_ratio > 1.0) {
      this->index_refine_ =
        std::make_unique<faiss::IndexRefineFlat>(this->index_.get(), this->dataset_);
      this->index_refine_.get()->k_factor = search_param.refine_ratio;
    }
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexIVFPQ, faiss::IndexIVFPQ>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexIVFPQ, faiss::IndexIVFPQ>(file);
  }
};
// TODO: Enable this in cmake
// ref: https://github.com/rapidsai/raft/issues/1876
/** IVF index with scalar quantization (fp16 or int8 codes) on the GPU. */
template <typename T>
class FaissGpuIVFSQ : public FaissGpu<T> {
 public:
  struct BuildParam : public FaissGpu<T>::BuildParam {
    std::string quantizer_type;  // "fp16" or "int8"
  };

  FaissGpuIVFSQ(Metric metric, int dim, const BuildParam& param) : FaissGpu<T>(metric, dim, param)
  {
    faiss::ScalarQuantizer::QuantizerType qtype;
    if (param.quantizer_type == "fp16") {
      qtype = faiss::ScalarQuantizer::QT_fp16;
    } else if (param.quantizer_type == "int8") {
      qtype = faiss::ScalarQuantizer::QT_8bit;
    } else {
      throw std::runtime_error("FaissGpuIVFSQ supports only fp16 and int8 but got " +
                               param.quantizer_type);
    }
    faiss::gpu::GpuIndexIVFScalarQuantizerConfig config;
    config.device = this->device_;
    this->index_  = std::make_unique<faiss::gpu::GpuIndexIVFScalarQuantizer>(
      &(this->gpu_resource_), dim, param.nlist, qtype, this->metric_type_, true, config);
  }

  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    auto search_param = dynamic_cast<const typename FaissGpu<T>::SearchParam&>(param);
    int nprobe        = search_param.nprobe;
    // `nlist_` lives in the dependent base class and needs `this->` to be found
    // by template name lookup.
    assert(nprobe <= this->nlist_);
    faiss::IVFSearchParameters faiss_search_params;
    faiss_search_params.nprobe = nprobe;
    this->search_params_ = std::make_unique<faiss::IVFSearchParameters>(faiss_search_params);
    this->refine_ratio_  = search_param.refine_ratio;
    if (search_param.refine_ratio > 1.0) {
      this->index_refine_ =
        std::make_unique<faiss::IndexRefineFlat>(this->index_.get(), this->dataset_);
      this->index_refine_.get()->k_factor = search_param.refine_ratio;
    }
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexIVFScalarQuantizer, faiss::IndexIVFScalarQuantizer>(
      file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexIVFScalarQuantizer, faiss::IndexIVFScalarQuantizer>(
      file);
  }
};
/** Brute-force (flat) GPU index; has no IVF parameters. */
template <typename T>
class FaissGpuFlat : public FaissGpu<T> {
 public:
  FaissGpuFlat(Metric metric, int dim)
    : FaissGpu<T>(metric, dim, typename FaissGpu<T>::BuildParam{})
  {
    faiss::gpu::GpuIndexFlatConfig config;
    config.device = this->device_;
    this->index_  = std::make_unique<faiss::gpu::GpuIndexFlat>(
      &(this->gpu_resource_), dim, this->metric_type_, config);
  }

  void set_search_param(const typename FaissGpu<T>::AnnSearchParam& param) override
  {
    // A flat index has no nprobe, and the benchmark driver passes a plain
    // AnnSearchParam for "faiss_gpu_flat". The previous reference dynamic_cast
    // to FaissGpu<T>::SearchParam& would therefore throw std::bad_cast at
    // runtime; only a default SearchParameters object is actually needed.
    (void)param;
    this->search_params_ = std::make_unique<faiss::SearchParameters>();
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::gpu::GpuIndexFlat, faiss::IndexFlat>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::gpu::GpuIndexFlat, faiss::IndexFlat>(file);
  }
};
} // namespace cuvs::bench
#endif | 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/faiss/faiss_cpu_wrapper.h | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../common/ann_types.hpp"
#include "../common/thread_pool.hpp"
#include <raft/core/logger.hpp>
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFFlat.h>
#include <faiss/IndexIVFPQ.h>
#include <faiss/IndexRefine.h>
#include <faiss/IndexScalarQuantizer.h>
#include <faiss/index_io.h>
#include <cassert>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace {
// Map the benchmark's metric enum onto FAISS's metric type. The wrapped FAISS
// index types are only exercised with inner-product and L2 here.
faiss::MetricType parse_metric_type(cuvs::bench::Metric metric)
{
  switch (metric) {
    case cuvs::bench::Metric::kInnerProduct: return faiss::METRIC_INNER_PRODUCT;
    case cuvs::bench::Metric::kEuclidean: return faiss::METRIC_L2;
    default: throw std::runtime_error("faiss supports only metric type of inner product and L2");
  }
}
}  // namespace
namespace cuvs::bench {
/**
 * Base class for all FAISS CPU index wrappers used by the ANN benchmark.
 * Owns the FAISS index, an optional coarse quantizer, an optional flat
 * refinement layer, and a thread pool used for single-query searches.
 */
template <typename T>
class FaissCpu : public ANN<T> {
 public:
  using typename ANN<T>::AnnSearchParam;
  struct SearchParam : public AnnSearchParam {
    int nprobe;                // number of inverted lists probed per query
    float refine_ratio = 1.0;  // > 1.0 enables re-ranking through index_refine_
    int num_threads = omp_get_num_procs();  // size of the search thread pool
  };

  struct BuildParam {
    int nlist = 1;  // number of inverted lists (clusters)
    int ratio = 2;  // training uses 1/ratio of the dataset (see build())
  };

  FaissCpu(Metric metric, int dim, const BuildParam& param)
    : ANN<T>(metric, dim),
      metric_type_(parse_metric_type(metric)),
      nlist_{param.nlist},
      training_sample_fraction_{1.0 / double(param.ratio)}
  {
    static_assert(std::is_same_v<T, float>, "faiss support only float type");
  }

  virtual ~FaissCpu() noexcept {}

  // `stream` is accepted for interface parity with the GPU wrappers; the CPU
  // implementations here do not use it.
  void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) final;

  void set_search_param(const AnnSearchParam& param) override;

  // Creates the coarse quantizer matching metric_type_ (L2 or inner product).
  // Must be called by subclasses before constructing an IVF index over it.
  void init_quantizer(int dim)
  {
    if (this->metric_type_ == faiss::MetricType::METRIC_L2) {
      this->quantizer_ = std::make_unique<faiss::IndexFlatL2>(dim);
    } else if (this->metric_type_ == faiss::MetricType::METRIC_INNER_PRODUCT) {
      this->quantizer_ = std::make_unique<faiss::IndexFlatIP>(dim);
    }
  }

  // TODO: if the number of results is less than k, the remaining elements of 'neighbors'
  // will be filled with (size_t)-1
  void search(const T* queries,
              int batch_size,
              int k,
              size_t* neighbors,
              float* distances,
              cudaStream_t stream = 0) const final;

  AlgoProperty get_preference() const override
  {
    AlgoProperty property;
    // to enable building big dataset which is larger than memory
    property.dataset_memory_type = MemoryType::Host;
    property.query_memory_type = MemoryType::Host;
    return property;
  }

 protected:
  // Serialization helpers; Index is the concrete FAISS type on disk.
  template <typename Index>
  void save_(const std::string& file) const;
  template <typename Index>
  void load_(const std::string& file);

  std::unique_ptr<faiss::Index> index_;               // the wrapped FAISS index
  std::unique_ptr<faiss::Index> quantizer_;           // coarse quantizer (IVF only)
  std::unique_ptr<faiss::IndexRefineFlat> index_refine_;  // created in build()
  faiss::MetricType metric_type_;
  int nlist_;
  double training_sample_fraction_;
  // num_threads_ is only read after thread_pool_ is set (see set_search_param),
  // so it is intentionally left uninitialized here.
  int num_threads_;
  std::unique_ptr<FixedThreadPool> thread_pool_;
};
// Train (IVF indices only) and populate the index with `nrow` vectors; also
// creates the refinement layer over the raw dataset so refine_ratio > 1 can be
// used at search time. `stream` is unused on the CPU path.
template <typename T>
void FaissCpu<T>::build(const T* dataset, size_t nrow, cudaStream_t stream)
{
  auto index_ivf = dynamic_cast<faiss::IndexIVF*>(index_.get());
  if (index_ivf != nullptr) {
    // set the min/max training size for clustering to use the whole provided training set.
    double trainset_size = training_sample_fraction_ * static_cast<double>(nrow);
    double points_per_centroid = trainset_size / static_cast<double>(nlist_);
    int max_ppc = std::ceil(points_per_centroid);
    int min_ppc = std::floor(points_per_centroid);
    if (min_ppc < index_ivf->cp.min_points_per_centroid) {
      RAFT_LOG_WARN(
        "The suggested training set size %zu (data size %zu, training sample ratio %f) yields %d "
        "points per cluster (n_lists = %d). This is smaller than the FAISS default "
        "min_points_per_centroid = %d.",
        static_cast<size_t>(trainset_size),
        nrow,
        training_sample_fraction_,
        min_ppc,
        nlist_,
        index_ivf->cp.min_points_per_centroid);
    }
    index_ivf->cp.max_points_per_centroid = max_ppc;
    index_ivf->cp.min_points_per_centroid = min_ppc;
  }
  // Training must precede add() for IVF indices.
  index_->train(nrow, dataset);  // faiss::IndexFlat::train() will do nothing
  assert(index_->is_trained);
  index_->add(nrow, dataset);
  // NOTE(review): index_refine_ keeps the raw `dataset` pointer; the caller
  // must keep the dataset alive for the lifetime of this wrapper — confirm.
  index_refine_ = std::make_unique<faiss::IndexRefineFlat>(this->index_.get(), dataset);
}
// Apply nprobe / refinement / threading settings for the next searches.
// Throws std::bad_cast if `param` is not a FaissCpu<T>::SearchParam.
template <typename T>
void FaissCpu<T>::set_search_param(const AnnSearchParam& param)
{
  auto search_param = dynamic_cast<const SearchParam&>(param);
  int nprobe = search_param.nprobe;
  assert(nprobe <= nlist_);
  // Guard the downcast: dereferencing a failed dynamic_cast (e.g. if a non-IVF
  // index ever reaches this base implementation) is undefined behavior.
  auto* index_ivf = dynamic_cast<faiss::IndexIVF*>(index_.get());
  if (index_ivf == nullptr) {
    throw std::runtime_error("FaissCpu::set_search_param: index is not an IVF index");
  }
  index_ivf->nprobe = nprobe;
  if (search_param.refine_ratio > 1.0) {
    // index_refine_ is created in build(); guard against calls before build().
    if (!index_refine_) {
      throw std::runtime_error("refinement requested but the index has not been built");
    }
    index_refine_->k_factor = search_param.refine_ratio;
  }
  if (!thread_pool_ || num_threads_ != search_param.num_threads) {
    num_threads_ = search_param.num_threads;
    thread_pool_ = std::make_unique<FixedThreadPool>(num_threads_);
  }
}
template <typename T>
void FaissCpu<T>::search(const T* queries,
                         int batch_size,
                         int k,
                         size_t* neighbors,
                         float* distances,
                         cudaStream_t stream) const
{
  static_assert(sizeof(size_t) == sizeof(faiss::idx_t),
                "sizes of size_t and faiss::idx_t are different");
  // For batch_size == 1 the thread pool provides parallelism across queries;
  // for larger batches FAISS multi-threads internally, hence a single task.
  auto run_search = [&](int /* task id, unused */) {
    index_->search(batch_size, queries, k, distances, reinterpret_cast<faiss::idx_t*>(neighbors));
  };
  thread_pool_->submit(run_search, 1);
}
template <typename T>
template <typename Index>
void FaissCpu<T>::save_(const std::string& file) const
{
  // CPU indices serialize directly; the Index template parameter exists only
  // for symmetry with load_(), which needs it for the downcast.
  faiss::write_index(index_.get(), file.c_str());
}
template <typename T>
template <typename Index>
void FaissCpu<T>::load_(const std::string& file)
{
  // read_index returns a base faiss::Index*; downcast to the concrete type
  // before taking ownership.
  auto* raw_index = faiss::read_index(file.c_str());
  index_          = std::unique_ptr<Index>(dynamic_cast<Index*>(raw_index));
}
// IVF index with uncompressed ("flat") vector storage.
template <typename T>
class FaissCpuIVFFlat : public FaissCpu<T> {
 public:
  using typename FaissCpu<T>::BuildParam;

  FaissCpuIVFFlat(Metric metric, int dim, const BuildParam& param) : FaissCpu<T>(metric, dim, param)
  {
    // The coarse quantizer must exist before the IVF index that references it.
    this->init_quantizer(dim);
    this->index_ = std::make_unique<faiss::IndexIVFFlat>(
      this->quantizer_.get(), dim, param.nlist, this->metric_type_);
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexIVFFlat>(file);
  }
  void load(const std::string& file) override { this->template load_<faiss::IndexIVFFlat>(file); }
};
// IVF index with product-quantization compression of the stored vectors.
template <typename T>
class FaissCpuIVFPQ : public FaissCpu<T> {
 public:
  struct BuildParam : public FaissCpu<T>::BuildParam {
    int M;                // number of PQ sub-quantizers
    int bitsPerCode;      // bits per PQ code
    bool usePrecomputed;  // parsed from config but not consumed by this wrapper
  };

  FaissCpuIVFPQ(Metric metric, int dim, const BuildParam& param) : FaissCpu<T>(metric, dim, param)
  {
    // Coarse quantizer first, then the IVF-PQ index built over it.
    this->init_quantizer(dim);
    this->index_ = std::make_unique<faiss::IndexIVFPQ>(
      this->quantizer_.get(), dim, param.nlist, param.M, param.bitsPerCode, this->metric_type_);
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexIVFPQ>(file);
  }
  void load(const std::string& file) override { this->template load_<faiss::IndexIVFPQ>(file); }
};
// TODO: Enable this in cmake
// ref: https://github.com/rapidsai/raft/issues/1876
// IVF index with scalar quantization of the stored vectors (fp16 or int8).
template <typename T>
class FaissCpuIVFSQ : public FaissCpu<T> {
 public:
  struct BuildParam : public FaissCpu<T>::BuildParam {
    std::string quantizer_type;  // "fp16" or "int8"
  };

  FaissCpuIVFSQ(Metric metric, int dim, const BuildParam& param) : FaissCpu<T>(metric, dim, param)
  {
    // Translate the config string into the FAISS quantizer enum; anything
    // other than the two supported encodings is rejected up front.
    faiss::ScalarQuantizer::QuantizerType qtype;
    if (param.quantizer_type == "fp16") {
      qtype = faiss::ScalarQuantizer::QT_fp16;
    } else if (param.quantizer_type == "int8") {
      qtype = faiss::ScalarQuantizer::QT_8bit;
    } else {
      throw std::runtime_error("FaissCpuIVFSQ supports only fp16 and int8 but got " +
                               param.quantizer_type);
    }
    this->init_quantizer(dim);
    this->index_ = std::make_unique<faiss::IndexIVFScalarQuantizer>(
      this->quantizer_.get(), dim, param.nlist, qtype, this->metric_type_, true);
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexIVFScalarQuantizer>(file);
  }
  void load(const std::string& file) override
  {
    this->template load_<faiss::IndexIVFScalarQuantizer>(file);
  }
};
/** Brute-force (flat) CPU index; only the thread pool needs configuring. */
template <typename T>
class FaissCpuFlat : public FaissCpu<T> {
 public:
  FaissCpuFlat(Metric metric, int dim)
    : FaissCpu<T>(metric, dim, typename FaissCpu<T>::BuildParam{})
  {
    this->index_ = std::make_unique<faiss::IndexFlat>(dim, this->metric_type_);
  }

  // class FaissCpu is more like a IVF class, so need special treating here
  void set_search_param(const typename ANN<T>::AnnSearchParam& param) override
  {
    // The driver may pass a plain AnnSearchParam for the flat index; a
    // reference dynamic_cast would then throw std::bad_cast. Use a pointer
    // cast and fall back to the default thread count when the derived type
    // is not provided.
    auto* search_param = dynamic_cast<const typename FaissCpu<T>::SearchParam*>(&param);
    int num_threads    = search_param != nullptr ? search_param->num_threads : omp_get_num_procs();
    if (!this->thread_pool_ || this->num_threads_ != num_threads) {
      this->num_threads_ = num_threads;
      this->thread_pool_ = std::make_unique<FixedThreadPool>(this->num_threads_);
    }
  }

  void save(const std::string& file) const override
  {
    this->template save_<faiss::IndexFlat>(file);
  }
  void load(const std::string& file) override { this->template load_<faiss::IndexFlat>(file); }
};
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/faiss/faiss_gpu_benchmark.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "../common/ann_types.hpp"
#undef WARP_SIZE
#include "faiss_gpu_wrapper.h"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
template <typename T>
void parse_base_build_param(const nlohmann::json& conf,
                            typename cuvs::bench::FaissGpu<T>::BuildParam& param)
{
  // Settings shared by every FAISS GPU index: mandatory "nlist" plus the
  // optional training-subsample divisor "ratio" (keeps its default if absent).
  param.nlist = conf.at("nlist");
  param.ratio = conf.value("ratio", param.ratio);
}
// IVF-Flat has no build parameters beyond the shared nlist/ratio settings.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::FaissGpuIVFFlat<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
}
// IVF-PQ build parameters: mandatory "M" (PQ sub-quantizers), optional
// "usePrecomputed" and "useFloat16" (both default to false).
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::FaissGpuIVFPQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.M = conf.at("M");
  // json::value() collapses each contains()/at() pair into a single lookup
  // with a default, preserving the original behavior.
  param.usePrecomputed = conf.value("usePrecomputed", false);
  param.useFloat16     = conf.value("useFloat16", false);
}
// IVF-SQ additionally requires "quantizer_type" ("fp16" or "int8",
// validated by the FaissGpuIVFSQ constructor).
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::FaissGpuIVFSQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.quantizer_type = conf.at("quantizer_type");
}
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::FaissGpu<T>::SearchParam& param)
{
  // "nprobe" is mandatory; "refine_ratio" keeps its default when absent.
  param.nprobe       = conf.at("nprobe");
  param.refine_ratio = conf.value("refine_ratio", param.refine_ratio);
}
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf)
{
  // Parse the algorithm-specific build parameters, then construct the wrapper.
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf,
                                               const std::vector<int>& dev_list)
{
  // The device list is accepted for interface parity but not used here.
  (void)dev_list;
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
template <typename T>
std::unique_ptr<cuvs::bench::ANN<T>> create_algo(const std::string& algo,
                                                 const std::string& distance,
                                                 int dim,
                                                 const nlohmann::json& conf,
                                                 const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;
  if constexpr (std::is_same_v<T, float>) {
    const auto metric = parse_metric(distance);
    if (algo == "faiss_gpu_ivf_flat") {
      return make_algo<T, cuvs::bench::FaissGpuIVFFlat>(metric, dim, conf, dev_list);
    }
    if (algo == "faiss_gpu_ivf_pq") {
      return make_algo<T, cuvs::bench::FaissGpuIVFPQ>(metric, dim, conf);
    }
    if (algo == "faiss_gpu_ivf_sq") {
      return make_algo<T, cuvs::bench::FaissGpuIVFSQ>(metric, dim, conf);
    }
    if (algo == "faiss_gpu_flat") {
      return std::make_unique<cuvs::bench::FaissGpuFlat<T>>(metric, dim);
    }
  }
  // No non-float instantiations are provided (uint8_t intentionally empty).
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  // All IVF variants share the same search-parameter schema.
  const bool is_ivf =
    algo == "faiss_gpu_ivf_flat" || algo == "faiss_gpu_ivf_pq" || algo == "faiss_gpu_ivf_sq";
  if (is_ivf) {
    auto param = std::make_unique<typename cuvs::bench::FaissGpu<T>::SearchParam>();
    parse_search_param<T>(conf, *param);
    return param;
  }
  if (algo == "faiss_gpu_flat") {
    return std::make_unique<typename cuvs::bench::ANN<T>::AnnSearchParam>();
  }
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
} // namespace cuvs::bench
// Instantiate the benchmark entry points for each supported data type.
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);

#ifdef CUVS_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
// Standalone executable entry point; delegates to the shared benchmark driver.
int main(int argc, char** argv) { return cuvs::bench::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/faiss/faiss_cpu_benchmark.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include "../common/ann_types.hpp"
#include "faiss_cpu_wrapper.h"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
template <typename T>
void parse_base_build_param(const nlohmann::json& conf,
                            typename cuvs::bench::FaissCpu<T>::BuildParam& param)
{
  // Settings shared by every FAISS CPU index: mandatory "nlist" plus the
  // optional training-subsample divisor "ratio" (keeps its default if absent).
  param.nlist = conf.at("nlist");
  param.ratio = conf.value("ratio", param.ratio);
}
// IVF-Flat has no build parameters beyond the shared nlist/ratio settings.
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::FaissCpuIVFFlat<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
}
// IVF-PQ build parameters: mandatory "M" (PQ sub-quantizers), optional
// "usePrecomputed" (default false) and "bitsPerCode" (default 8).
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::FaissCpuIVFPQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.M = conf.at("M");
  // json::value() collapses each contains()/at() pair into a single lookup
  // with a default, preserving the original behavior.
  param.usePrecomputed = conf.value("usePrecomputed", false);
  param.bitsPerCode    = conf.value("bitsPerCode", 8);
}
// IVF-SQ additionally requires "quantizer_type" ("fp16" or "int8",
// validated by the FaissCpuIVFSQ constructor).
template <typename T>
void parse_build_param(const nlohmann::json& conf,
                       typename cuvs::bench::FaissCpuIVFSQ<T>::BuildParam& param)
{
  parse_base_build_param<T>(conf, param);
  param.quantizer_type = conf.at("quantizer_type");
}
template <typename T>
void parse_search_param(const nlohmann::json& conf,
                        typename cuvs::bench::FaissCpu<T>::SearchParam& param)
{
  // "nprobe" is mandatory; "refine_ratio" and "numThreads" keep their
  // defaults when absent from the configuration.
  param.nprobe       = conf.at("nprobe");
  param.refine_ratio = conf.value("refine_ratio", param.refine_ratio);
  param.num_threads  = conf.value("numThreads", param.num_threads);
}
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf)
{
  // Parse the algorithm-specific build parameters, then construct the wrapper.
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
template <typename T, template <typename> class Algo>
std::unique_ptr<cuvs::bench::ANN<T>> make_algo(cuvs::bench::Metric metric,
                                               int dim,
                                               const nlohmann::json& conf,
                                               const std::vector<int>& dev_list)
{
  // The device list is accepted for interface parity but not used here.
  (void)dev_list;
  typename Algo<T>::BuildParam build_param;
  parse_build_param<T>(conf, build_param);
  return std::make_unique<Algo<T>>(metric, dim, build_param);
}
template <typename T>
std::unique_ptr<cuvs::bench::ANN<T>> create_algo(const std::string& algo,
                                                 const std::string& distance,
                                                 int dim,
                                                 const nlohmann::json& conf,
                                                 const std::vector<int>& dev_list)
{
  // stop compiler warning; not all algorithms support multi-GPU so it may not be used
  (void)dev_list;
  if constexpr (std::is_same_v<T, float>) {
    const auto metric = parse_metric(distance);
    if (algo == "faiss_cpu_ivf_flat") {
      return make_algo<T, cuvs::bench::FaissCpuIVFFlat>(metric, dim, conf, dev_list);
    }
    if (algo == "faiss_cpu_ivf_pq") {
      return make_algo<T, cuvs::bench::FaissCpuIVFPQ>(metric, dim, conf);
    }
    if (algo == "faiss_cpu_ivf_sq") {
      return make_algo<T, cuvs::bench::FaissCpuIVFSQ>(metric, dim, conf);
    }
    if (algo == "faiss_cpu_flat") {
      return std::make_unique<cuvs::bench::FaissCpuFlat<T>>(metric, dim);
    }
  }
  // No non-float instantiations are provided (uint8_t intentionally empty).
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  if (algo == "faiss_cpu_ivf_flat" || algo == "faiss_cpu_ivf_pq" || algo == "faiss_cpu_ivf_sq") {
    auto param = std::make_unique<typename cuvs::bench::FaissCpu<T>::SearchParam>();
    parse_search_param<T>(conf, *param);
    return param;
  } else if (algo == "faiss_cpu_flat") {
    // FaissCpuFlat::set_search_param reads num_threads from a
    // FaissCpu<T>::SearchParam; handing it a plain AnnSearchParam made its
    // reference dynamic_cast throw std::bad_cast. Build the derived type and
    // honor an optional "numThreads" key (nprobe/refine_ratio are meaningless
    // for a flat index and are left at their defaults).
    auto param = std::make_unique<typename cuvs::bench::FaissCpu<T>::SearchParam>();
    if (conf.contains("numThreads")) { param->num_threads = conf.at("numThreads"); }
    return param;
  }
  // else
  throw std::runtime_error("invalid algo: '" + algo + "'");
}
} // namespace cuvs::bench
// Instantiate the benchmark entry points for each supported data type.
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);

#ifdef CUVS_BENCH_BUILD_MAIN
#include "../common/benchmark.hpp"
// Standalone executable entry point; delegates to the shared benchmark driver.
int main(int argc, char** argv) { return cuvs::bench::run_main(argc, argv); }
#endif
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/cuda_huge_page_resource.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <sys/mman.h>
#include <cstddef>
namespace raft::mr {
/**
* @brief `device_memory_resource` derived class that uses mmap to allocate memory.
* This class enables memory allocation using huge pages.
* It is assumed that the allocated memory is directly accessible on device. This currently only
* works on GH systems.
*
* TODO(tfeher): consider improving or removing this helper once we made progress with
* https://github.com/rapidsai/raft/issues/1819
*/
class cuda_huge_page_resource final : public rmm::mr::device_memory_resource {
 public:
  cuda_huge_page_resource() = default;
  ~cuda_huge_page_resource() override = default;
  cuda_huge_page_resource(cuda_huge_page_resource const&) = default;
  cuda_huge_page_resource(cuda_huge_page_resource&&) = default;
  cuda_huge_page_resource& operator=(cuda_huge_page_resource const&) = default;
  cuda_huge_page_resource& operator=(cuda_huge_page_resource&&) = default;
  /**
   * @brief Query whether the resource supports use of non-null CUDA streams for
   * allocation/deallocation. `cuda_huge_page_resource` does not support streams.
   *
   * @returns bool false
   */
  [[nodiscard]] bool supports_streams() const noexcept override { return false; }
  /**
   * @brief Query whether the resource supports the get_mem_info API.
   *
   * @return true
   */
  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return true; }

 private:
  /**
   * @brief Allocates memory of size at least `bytes` using `mmap` and advises
   * the kernel to back it with huge pages (`MADV_HUGEPAGE`).
   *
   * The returned pointer is page-aligned (mmap guarantees alignment to the
   * system page size).
   *
   * @note Stream argument is ignored
   *
   * @throws `rmm::logic_error` (via RAFT_FAIL) if mmap or madvise fails
   *
   * @param bytes The size, in bytes, of the allocation
   * @return void* Pointer to the newly allocated memory
   */
  void* do_allocate(std::size_t bytes, rmm::cuda_stream_view) override
  {
    void* _addr{nullptr};
    _addr = mmap(NULL, bytes, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (_addr == MAP_FAILED) { RAFT_FAIL("huge_page_resource::MAP FAILED"); }
    if (madvise(_addr, bytes, MADV_HUGEPAGE) == -1) {
      munmap(_addr, bytes);
      RAFT_FAIL("huge_page_resource::madvise MADV_HUGEPAGE");
    }
    // NOTE(review): MAP_ANONYMOUS memory is already zero-filled; this memset
    // presumably exists to fault the pages in eagerly after the huge-page
    // advice — confirm before removing.
    memset(_addr, 0, bytes);
    return _addr;
  }
  /**
   * @brief Deallocate memory pointed to by \p p.
   *
   * @note Stream argument is ignored.
   *
   * @throws Nothing.
   *
   * @param p Pointer to be deallocated
   */
  void do_deallocate(void* ptr, std::size_t size, rmm::cuda_stream_view) override
  {
    if (munmap(ptr, size) == -1) { RAFT_FAIL("huge_page_resource::munmap"); }
  }
  /**
   * @brief Compare this resource to another.
   *
   * Two cuda_huge_page_resources always compare equal, because they can each
   * deallocate memory allocated by the other.
   *
   * @throws Nothing.
   *
   * @param other The other resource to compare to
   * @return true If the two resources are equivalent
   * @return false If the two resources are not equal
   */
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    return dynamic_cast<cuda_huge_page_resource const*>(&other) != nullptr;
  }
  /**
   * @brief Get free and available memory for memory resource
   *
   * @throws `rmm::cuda_error` if unable to retrieve memory info.
   *
   * @return std::pair containing free_size and total_size of memory
   */
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    rmm::cuda_stream_view) const override
  {
    std::size_t free_size{};
    std::size_t total_size{};
    RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    return std::make_pair(free_size, total_size);
  }
};
} // namespace raft::mr | 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/util.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include "cuda_stub.hpp" // cuda-related utils
#ifdef CUVS_BENCH_NVTX3_HEADERS_FOUND
#include <nvtx3/nvToolsExt.h>
#endif
#include <sys/stat.h>
#include <sys/types.h>
#include <chrono>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <sstream>
#include <string>
#include <vector>
#include <filesystem>
#include <functional>
namespace cuvs::bench {
template <typename T>
struct buf {
MemoryType memory_type;
std::size_t size;
T* data;
buf(MemoryType memory_type, std::size_t size)
: memory_type(memory_type), size(size), data(nullptr)
{
switch (memory_type) {
#ifndef BUILD_CPU_ONLY
case MemoryType::Device: {
cudaMalloc(reinterpret_cast<void**>(&data), size * sizeof(T));
cudaMemset(data, 0, size * sizeof(T));
} break;
#endif
default: {
data = reinterpret_cast<T*>(malloc(size * sizeof(T)));
std::memset(data, 0, size * sizeof(T));
}
}
}
~buf() noexcept
{
if (data == nullptr) { return; }
switch (memory_type) {
#ifndef BUILD_CPU_ONLY
case MemoryType::Device: {
cudaFree(data);
} break;
#endif
default: {
free(data);
}
}
}
[[nodiscard]] auto move(MemoryType target_memory_type) -> buf<T>
{
buf<T> r{target_memory_type, size};
#ifndef BUILD_CPU_ONLY
if ((memory_type == MemoryType::Device && target_memory_type != MemoryType::Device) ||
(memory_type != MemoryType::Device && target_memory_type == MemoryType::Device)) {
cudaMemcpy(r.data, data, size * sizeof(T), cudaMemcpyDefault);
return r;
}
#endif
std::swap(data, r.data);
return r;
}
};
/**
 * Accumulating GPU timer: owns a non-blocking CUDA stream and a pair of
 * events; each lap() measures one stream-ordered interval and adds it to
 * total_time() (seconds). In CPU-only builds all operations are no-ops.
 */
struct cuda_timer {
 private:
  cudaStream_t stream_{nullptr};
  cudaEvent_t start_{nullptr};
  cudaEvent_t stop_{nullptr};
  double total_time_{0};

 public:
  // RAII lap: records `start_` on construction and `stop_` on destruction,
  // then folds the elapsed time into the owning timer's total.
  struct cuda_lap {
   private:
    cudaStream_t stream_;
    cudaEvent_t start_;
    cudaEvent_t stop_;
    double& total_time_;

   public:
    cuda_lap(cudaStream_t stream, cudaEvent_t start, cudaEvent_t stop, double& total_time)
      // Initializer list follows member declaration order (stream_, start_,
      // stop_, total_time_); the original listed start_/stop_ before stream_,
      // which triggers -Wreorder and misstates the actual init order.
      : stream_(stream), start_(start), stop_(stop), total_time_(total_time)
    {
#ifndef BUILD_CPU_ONLY
      cudaStreamSynchronize(stream_);
      cudaEventRecord(start_, stream_);
#endif
    }
    cuda_lap() = delete;

    ~cuda_lap() noexcept
    {
#ifndef BUILD_CPU_ONLY
      cudaEventRecord(stop_, stream_);
      cudaEventSynchronize(stop_);
      float milliseconds = 0.0f;
      cudaEventElapsedTime(&milliseconds, start_, stop_);
      total_time_ += milliseconds / 1000.0;
#endif
    }
  };

  cuda_timer()
  {
#ifndef BUILD_CPU_ONLY
    cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking);
    cudaEventCreate(&stop_);
    cudaEventCreate(&start_);
#endif
  }

  ~cuda_timer() noexcept
  {
#ifndef BUILD_CPU_ONLY
    cudaEventDestroy(start_);
    cudaEventDestroy(stop_);
    cudaStreamDestroy(stream_);
#endif
  }

  // Non-copyable: the destructor destroys the owned stream and events, so a
  // copy would lead to double destruction of CUDA resources.
  cuda_timer(const cuda_timer&) = delete;
  cuda_timer& operator=(const cuda_timer&) = delete;

  [[nodiscard]] auto stream() const -> cudaStream_t { return stream_; }
  [[nodiscard]] auto total_time() const -> double { return total_time_; }
  [[nodiscard]] auto lap() -> cuda_timer::cuda_lap
  {
    return cuda_lap{stream_, start_, stop_, total_time_};
  }
};
// Collect (key, value) pairs describing the current CUDA device and the
// driver/runtime versions, for inclusion in benchmark result metadata.
// Returns an empty vector in BUILD_CPU_ONLY builds.
// NOTE(review): cudaDeviceProp::clockRate/memoryClockRate are deprecated in
// newer CUDA toolkits — confirm against the toolkit version this builds with.
inline auto cuda_info()
{
  std::vector<std::tuple<std::string, std::string>> props;
#ifndef BUILD_CPU_ONLY
  int dev, driver = 0, runtime = 0;
  cudaDriverGetVersion(&driver);
  cudaRuntimeGetVersion(&runtime);

  cudaDeviceProp device_prop;
  cudaGetDevice(&dev);
  cudaGetDeviceProperties(&device_prop, dev);
  props.emplace_back("gpu_name", std::string(device_prop.name));
  props.emplace_back("gpu_sm_count", std::to_string(device_prop.multiProcessorCount));
  // clockRate/memoryClockRate are in kHz; * 1e3 converts to Hz.
  props.emplace_back("gpu_sm_freq", std::to_string(device_prop.clockRate * 1e3));
  props.emplace_back("gpu_mem_freq", std::to_string(device_prop.memoryClockRate * 1e3));
  props.emplace_back("gpu_mem_bus_width", std::to_string(device_prop.memoryBusWidth));
  props.emplace_back("gpu_mem_global_size", std::to_string(device_prop.totalGlobalMem));
  props.emplace_back("gpu_mem_shared_size", std::to_string(device_prop.sharedMemPerMultiprocessor));
  // CUDA encodes versions as 1000*major + 10*minor.
  props.emplace_back("gpu_driver_version",
                     std::to_string(driver / 1000) + "." + std::to_string((driver % 100) / 10));
  props.emplace_back("gpu_runtime_version",
                     std::to_string(runtime / 1000) + "." + std::to_string((runtime % 100) / 10));
#endif
  return props;
}
/**
 * RAII NVTX annotation for one benchmark case: opens a named range in a
 * dedicated "ANN benchmark" domain for the lifetime of the object, and
 * lap() opens a nested, per-iteration range. Compiles to a no-op when the
 * NVTX3 headers were not found.
 */
struct nvtx_case {
#ifdef CUVS_BENCH_NVTX3_HEADERS_FOUND
 private:
  std::string case_name_;
  // Buffer for the formatted per-lap label; NVTX keeps only a pointer, so the
  // storage must outlive the range.
  std::array<char, 32> iter_name_{0};
  nvtxDomainHandle_t domain_;
  int64_t iteration_ = 0;
  nvtxEventAttributes_t case_attrib_{0};
  nvtxEventAttributes_t iter_attrib_{0};
#endif

 public:
  /** RAII wrapper for a single nested NVTX range (one lap). */
  struct nvtx_lap {
#ifdef CUVS_BENCH_NVTX3_HEADERS_FOUND
   private:
    nvtxDomainHandle_t domain_;

   public:
    nvtx_lap(nvtxDomainHandle_t domain, nvtxEventAttributes_t* attr) : domain_(domain)
    {
      nvtxDomainRangePushEx(domain_, attr);
    }
    nvtx_lap() = delete;
    ~nvtx_lap() noexcept { nvtxDomainRangePop(domain_); }
#endif
  };

#ifdef CUVS_BENCH_NVTX3_HEADERS_FOUND
  explicit nvtx_case(std::string case_name)
    : case_name_(std::move(case_name)), domain_(nvtxDomainCreateA("ANN benchmark"))
  {
    case_attrib_.version       = NVTX_VERSION;
    iter_attrib_.version       = NVTX_VERSION;
    case_attrib_.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
    iter_attrib_.size          = NVTX_EVENT_ATTRIB_STRUCT_SIZE;
    case_attrib_.colorType     = NVTX_COLOR_ARGB;
    iter_attrib_.colorType     = NVTX_COLOR_ARGB;
    case_attrib_.messageType   = NVTX_MESSAGE_TYPE_ASCII;
    iter_attrib_.messageType   = NVTX_MESSAGE_TYPE_ASCII;
    case_attrib_.message.ascii = case_name_.c_str();
    // Derive a stable pseudo-random color from the case name; OR-ing with
    // 0xA0A0A0 keeps it bright enough to read in the profiler.
    auto c             = std::hash<std::string>{}(case_name_);
    case_attrib_.color = c | 0xA0A0A0;
    nvtxDomainRangePushEx(domain_, &case_attrib_);
  }

  ~nvtx_case()
  {
    nvtxDomainRangePop(domain_);
    nvtxDomainDestroy(domain_);
  }
#else
  explicit nvtx_case(std::string) {}
#endif

  [[nodiscard]] auto lap() -> nvtx_case::nvtx_lap
  {
#ifdef CUVS_BENCH_NVTX3_HEADERS_FOUND
    auto i     = iteration_++;
    // Cycle through 5 slightly different shades so adjacent laps are
    // distinguishable in the timeline.
    uint32_t c = (i % 5);
    uint32_t r = 150 + c * 20;
    uint32_t g = 200 + c * 10;
    uint32_t b = 220 + c * 5;
    // NOTE(review): "%zd" formats ssize_t while `i` is int64_t — identical on
    // LP64 platforms, but PRId64 would be strictly portable; confirm targets.
    std::snprintf(iter_name_.data(), iter_name_.size(), "Lap %zd", i);
    iter_attrib_.message.ascii = iter_name_.data();
    iter_attrib_.color         = (r << 16) + (g << 8) + b;
    return nvtx_lap{domain_, &iter_attrib_};
#else
    return nvtx_lap{};
#endif
  }
};
// Split `s` on `delimiter`, dropping empty tokens (so leading, trailing and
// repeated delimiters produce no empty strings in the result).
inline std::vector<std::string> split(const std::string& s, char delimiter)
{
  std::vector<std::string> parts;
  std::string::size_type begin = 0;
  while (begin <= s.size()) {
    const auto hit  = s.find(delimiter, begin);
    const auto stop = (hit == std::string::npos) ? s.size() : hit;
    if (stop > begin) { parts.push_back(s.substr(begin, stop - begin)); }
    begin = stop + 1;
  }
  return parts;
}
// True iff `filename` names an existing *regular file* (directories, sockets,
// etc. yield false, as does any stat() failure).
inline bool file_exists(const std::string& filename)
{
  struct stat statbuf;
  return stat(filename.c_str(), &statbuf) == 0 && S_ISREG(statbuf.st_mode);
}
// True iff `dir` names an existing directory (any stat() failure or a
// non-directory entry yields false).
inline bool dir_exists(const std::string& dir)
{
  struct stat statbuf;
  return stat(dir.c_str(), &statbuf) == 0 && S_ISDIR(statbuf.st_mode);
}
// Create `dir` and all missing parent directories (like `mkdir -p`).
// Returns false as soon as one mkdir() call fails, true otherwise.
inline bool create_dir(const std::string& dir)
{
  std::string prefix;
  if (!dir.empty() && dir.front() == '/') { prefix = "/"; }
  for (const auto& segment : split(dir, '/')) {
    prefix += segment + "/";
    if (dir_exists(prefix)) { continue; }
    if (mkdir(prefix.c_str(), S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH) != 0) {
      return false;
    }
  }
  return true;
}
// Ensure the directory component of `file_path` exists, creating it (and any
// missing parents) when needed. A path without '/' has no parent to create.
inline void make_sure_parent_dir_exists(const std::string& file_path)
{
  const auto last_slash = file_path.rfind('/');
  if (last_slash == std::string::npos) { return; }
  const auto parent = file_path.substr(0, last_slash);
  if (!dir_exists(parent)) { create_dir(parent); }
}
// Join a directory and a relative path with the platform path separator,
// returning the result as a std::string.
inline auto combine_path(const std::string& dir, const std::string& path)
{
  return (std::filesystem::path{dir} / std::filesystem::path{path}).string();
}
// Print one log line: "<timestamp> [<level>] <message>\n" and flush stdout.
// With a single argument the message is printed verbatim via "%s" (so stray
// '%' characters are safe); with several arguments the first is treated as a
// printf-style format string.
template <typename... Ts>
void log_(const char* level, const Ts&... vs)
{
  char stamp[20];
  const std::time_t now = std::time(nullptr);
  std::strftime(stamp, sizeof(stamp), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
  printf("%s [%s] ", stamp, level);
  if constexpr (sizeof...(Ts) == 1) {
    printf("%s", vs...);
  } else {
    printf(vs...);
  }
  putchar('\n');
  fflush(stdout);
}
// Log a message at "info" severity; arguments are forwarded to log_().
template <typename... Ts>
void log_info(Ts&&... vs)
{
  log_("info", std::forward<Ts>(vs)...);
}
// Log a message at "warn" severity; arguments are forwarded to log_().
template <typename... Ts>
void log_warn(Ts&&... vs)
{
  log_("warn", std::forward<Ts>(vs)...);
}
// Log a message at "error" severity; arguments are forwarded to log_().
template <typename... Ts>
void log_error(Ts&&... vs)
{
  log_("error", std::forward<Ts>(vs)...);
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/conf.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "util.hpp"
#include <iostream>
#include <optional>
#include <string>
#include <unordered_set>
#include <vector>
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
namespace cuvs::bench {
/**
 * Parses a benchmark configuration JSON (dataset description plus a list of
 * index definitions). Comments are allowed in the JSON file.
 */
class Configuration {
 public:
  /** One ANN index to build/search, together with its parameter sets. */
  struct Index {
    std::string name;
    std::string algo;
    nlohmann::json build_param;
    std::string file;
    std::vector<int> dev_list;

    int batch_size;
    int k;
    std::vector<nlohmann::json> search_params;
  };

  /** Which dataset files to use and the optional row subset of the base set. */
  struct DatasetConf {
    std::string name;
    std::string base_file;
    // use only a subset of base_file,
    // the range of rows is [subset_first_row, subset_first_row + subset_size)
    // however, subset_size = 0 means using all rows after subset_first_row
    // that is, the subset is [subset_first_row, #rows in base_file)
    size_t subset_first_row{0};
    size_t subset_size{0};
    std::string query_file;
    std::string distance;
    std::optional<std::string> groundtruth_neighbors_file{std::nullopt};
    // data type of input dataset, possible values ["float", "int8", "uint8"]
    std::string dtype;
  };

  /**
   * Parse a full configuration from `conf_stream`.
   * @throws nlohmann::json exceptions on malformed input or missing keys.
   */
  explicit inline Configuration(std::istream& conf_stream)
  {
    // to enable comments in json
    auto conf = nlohmann::json::parse(conf_stream, nullptr, true, true);

    parse_dataset_(conf.at("dataset"));
    parse_index_(conf.at("index"), conf.at("search_basic_param"));
  }

  [[nodiscard]] inline auto get_dataset_conf() const -> DatasetConf { return dataset_conf_; }
  [[nodiscard]] inline auto get_indices() const -> std::vector<Index> { return indices_; };

 private:
  // Fill dataset_conf_ from the "dataset" section; when "dtype" is absent the
  // type is inferred from the base file's suffix.
  inline void parse_dataset_(const nlohmann::json& conf)
  {
    dataset_conf_.name       = conf.at("name");
    dataset_conf_.base_file  = conf.at("base_file");
    dataset_conf_.query_file = conf.at("query_file");
    dataset_conf_.distance   = conf.at("distance");
    if (conf.contains("groundtruth_neighbors_file")) {
      dataset_conf_.groundtruth_neighbors_file = conf.at("groundtruth_neighbors_file");
    }
    if (conf.contains("subset_first_row")) {
      dataset_conf_.subset_first_row = conf.at("subset_first_row");
    }
    if (conf.contains("subset_size")) { dataset_conf_.subset_size = conf.at("subset_size"); }

    if (conf.contains("dtype")) {
      dataset_conf_.dtype = conf.at("dtype");
    } else {
      // Bounds-safe suffix check; the previous compare(size() - N, N, ...)
      // threw std::out_of_range for file names shorter than the suffix.
      const auto ends_with = [](const std::string& str, const std::string& suffix) {
        return str.size() >= suffix.size() &&
               str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
      };
      const auto& filename = dataset_conf_.base_file;
      if (ends_with(filename, "fbin")) {
        dataset_conf_.dtype = "float";
      } else if (ends_with(filename, "u8bin")) {
        dataset_conf_.dtype = "uint8";
      } else if (ends_with(filename, "i8bin")) {
        dataset_conf_.dtype = "int8";
      } else {
        log_error("Could not determine data type of the dataset %s", filename.c_str());
      }
    }
  }

  // Expand each entry of the "index" section into an Index, merging in the
  // defaults from "search_basic_param".
  inline void parse_index_(const nlohmann::json& index_conf,
                           const nlohmann::json& search_basic_conf)
  {
    const int batch_size = search_basic_conf.at("batch_size");
    const int k          = search_basic_conf.at("k");

    for (const auto& conf : index_conf) {
      Index index;
      index.name        = conf.at("name");
      index.algo        = conf.at("algo");
      index.build_param = conf.at("build_param");
      index.file        = conf.at("file");
      index.batch_size  = batch_size;
      index.k           = k;

      if (conf.contains("multigpu")) {
        for (auto it : conf.at("multigpu")) {
          index.dev_list.push_back(it);
        }
        if (index.dev_list.empty()) { throw std::runtime_error("dev_list shouldn't be empty!"); }
        index.dev_list.shrink_to_fit();
        index.build_param["multigpu"] = conf["multigpu"];
      }

      for (auto param : conf.at("search_params")) {
        /*  ### Special parameters for backward compatibility ###

          - Local values of `k` and `n_queries` take priority.
          - The legacy "batch_size" renamed to `n_queries`.
          - Basic search params are used otherwise.
        */
        if (!param.contains("k")) { param["k"] = k; }
        if (!param.contains("n_queries")) {
          if (param.contains("batch_size")) {
            param["n_queries"] = param["batch_size"];
            param.erase("batch_size");
          } else {
            param["n_queries"] = batch_size;
          }
        }
        index.search_params.push_back(param);
      }

      indices_.push_back(index);
    }
  }

  DatasetConf dataset_conf_;
  std::vector<Index> indices_;
};
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/ann_types.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "cuda_stub.hpp" // cudaStream_t
#include <stdexcept>
#include <string>
#include <vector>
namespace cuvs::bench {
// What the search benchmark loop optimizes its measurement for.
enum Objective {
  THROUGHPUT,  // See how many vectors we can push through
  LATENCY      // See how fast we can push a vector through
};
// Where a dataset/query buffer lives.
enum class MemoryType {
  Host,      // ordinary host allocation
  HostMmap,  // memory-mapped file on the host
  Device,    // GPU device memory
};
// Distance metrics accepted by the benchmark configuration.
enum class Metric {
  kInnerProduct,
  kEuclidean,
};
// Map a configuration metric string to the Metric enum.
// @throws std::runtime_error for any unknown metric name.
inline auto parse_metric(const std::string& metric_str) -> Metric
{
  if (metric_str == "inner_product") { return cuvs::bench::Metric::kInnerProduct; }
  if (metric_str == "euclidean") { return cuvs::bench::Metric::kEuclidean; }
  throw std::runtime_error("invalid metric: '" + metric_str + "'");
}
// Map a configuration memory-type string to the MemoryType enum.
// @throws std::runtime_error for any unknown memory-type name.
inline auto parse_memory_type(const std::string& memory_type) -> MemoryType
{
  if (memory_type == "host") { return MemoryType::Host; }
  if (memory_type == "mmap") { return MemoryType::HostMmap; }
  if (memory_type == "device") { return MemoryType::Device; }
  throw std::runtime_error("invalid memory type: '" + memory_type + "'");
}
// Declares which memory spaces an algorithm expects its dataset and query
// buffers in; the benchmark harness uses this to place the data accordingly.
class AlgoProperty {
 public:
  inline AlgoProperty() {}
  inline AlgoProperty(MemoryType dataset_memory_type_, MemoryType query_memory_type_)
    : dataset_memory_type(dataset_memory_type_), query_memory_type(query_memory_type_)
  {
  }
  MemoryType dataset_memory_type;
  // neighbors/distances should have same memory type as queries
  MemoryType query_memory_type;
  virtual ~AlgoProperty() = default;
};
// Common non-template base for all wrapped ANN algorithms: holds the distance
// metric and the dataset dimensionality.
class AnnBase {
 public:
  inline AnnBase(Metric metric, int dim) : metric_(metric), dim_(dim) {}
  virtual ~AnnBase() = default;

 protected:
  Metric metric_;
  int dim_;
};
/**
 * Abstract interface every benchmarked ANN algorithm implements for element
 * type T: build an index, configure search parameters, run batched k-NN
 * search, and (de)serialize the index.
 */
template <typename T>
class ANN : public AnnBase {
 public:
  // Base class for per-algorithm search parameters; algorithms subclass this
  // and downcast in set_search_param().
  struct AnnSearchParam {
    Objective metric_objective = Objective::LATENCY;
    virtual ~AnnSearchParam() = default;
    // Whether search additionally needs the raw dataset (see the note below).
    [[nodiscard]] virtual auto needs_dataset() const -> bool { return false; };
  };

  inline ANN(Metric metric, int dim) : AnnBase(metric, dim) {}

  // Build the index from `nrow` vectors of dimension dim_ (set at construction).
  virtual void build(const T* dataset, size_t nrow, cudaStream_t stream = 0) = 0;

  virtual void set_search_param(const AnnSearchParam& param) = 0;
  // TODO: this assumes that an algorithm can always return k results.
  // This is not always possible.
  // Writes batch_size * k entries into `neighbors` and `distances`.
  virtual void search(const T* queries,
                      int batch_size,
                      int k,
                      size_t* neighbors,
                      float* distances,
                      cudaStream_t stream = 0) const = 0;

  virtual void save(const std::string& file) const = 0;
  virtual void load(const std::string& file)       = 0;

  // Where this algorithm wants its dataset/query buffers (host vs device).
  virtual AlgoProperty get_preference() const = 0;

  // Some algorithms don't save the building dataset in their indices.
  // So they should be given the access to that dataset during searching.
  // The advantage of this way is that index has smaller size
  // and many indices can share one dataset.
  //
  // SearchParam::needs_dataset() of such algorithm should be true,
  // and set_search_dataset() should save the passed-in pointer somewhere.
  // The client code should call set_search_dataset() before searching,
  // and should not release dataset before searching is finished.
  virtual void set_search_dataset(const T* /*dataset*/, size_t /*nrow*/){};
};
} // namespace cuvs::bench
// Explicitly instantiates the plugin entry points create_algo<DataT> and
// create_search_param<DataT> for one dataset element type. Every algorithm
// shared library must expand this macro so the benchmark launcher can resolve
// the symbols at runtime via dlsym (see benchmark.cpp).
#define REGISTER_ALGO_INSTANCE(DataT)                                                              \
  template auto cuvs::bench::create_algo<DataT>(                                                   \
    const std::string&, const std::string&, int, const nlohmann::json&, const std::vector<int>&)   \
    ->std::unique_ptr<cuvs::bench::ANN<DataT>>;                                                    \
  template auto cuvs::bench::create_search_param<DataT>(const std::string&, const nlohmann::json&) \
    ->std::unique_ptr<typename cuvs::bench::ANN<DataT>::AnnSearchParam>;
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/thread_pool.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <atomic>
#include <condition_variable>
#include <future>
#include <memory>
#include <mutex>
#include <omp.h>
#include <stdexcept>
#include <thread>
#include <utility>
#include <vector>
/**
 * A minimal fixed-size thread pool for running an indexed task in parallel.
 *
 * Constructing with num_threads == 1 creates no worker threads at all;
 * submit() then runs everything inline on the calling thread.
 */
class FixedThreadPool {
 public:
  /**
   * @param num_threads number of workers; must be >= 1.
   * @throws std::runtime_error when num_threads < 1.
   */
  FixedThreadPool(int num_threads)
  {
    if (num_threads < 1) {
      throw std::runtime_error("num_threads must >= 1");
    } else if (num_threads == 1) {
      // Degenerate pool: no workers, submit() runs on the caller's thread.
      return;
    }

    tasks_ = new Task_[num_threads];

    threads_.reserve(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      threads_.emplace_back([&, i] {
        auto& task = tasks_[i];
        while (true) {
          std::unique_lock<std::mutex> lock(task.mtx);
          task.cv.wait(lock,
                       [&] { return task.has_task || finished_.load(std::memory_order_relaxed); });
          if (finished_.load(std::memory_order_relaxed)) { break; }

          task.task();
          task.has_task = false;
        }
      });
    }
  }

  ~FixedThreadPool()
  {
    if (threads_.empty()) { return; }

    finished_.store(true, std::memory_order_relaxed);
    for (unsigned i = 0; i < threads_.size(); ++i) {
      auto& task = tasks_[i];
      {
        // Briefly taking the lock guarantees the worker is either before its
        // wait-predicate check (and will see finished_) or fully blocked in
        // wait(), so the notification below cannot be lost. The previous code
        // constructed an *unnamed temporary* lock_guard here, which locked
        // and unlocked within the same statement by accident; keep the lock
        // named and explicitly scoped.
        std::lock_guard<std::mutex> lock(task.mtx);
      }
      task.cv.notify_one();
      threads_[i].join();
    }

    delete[] tasks_;
  }

  /**
   * Run f(i) for every i in [0, len), distributing the work over the pool.
   * Blocks until all invocations have completed. With a single-thread pool
   * the calls happen inline on the caller's thread.
   */
  template <typename Func, typename IdxT>
  void submit(Func f, IdxT len)
  {
    // Run functions in main thread if thread pool has no threads
    if (threads_.empty()) {
      for (IdxT i = 0; i < len; ++i) {
        f(i);
      }
      return;
    }

    const int num_threads = static_cast<int>(threads_.size());
    // one extra part for competition among threads
    const IdxT items_per_thread = len / (num_threads + 1);
    std::atomic<IdxT> cnt(items_per_thread * num_threads);

    // Each worker first processes its fixed range, then competes (via cnt)
    // for the leftover tail so faster threads pick up the slack.
    auto wrapped_f = [&](IdxT start, IdxT end) {
      for (IdxT i = start; i < end; ++i) {
        f(i);
      }

      while (true) {
        IdxT i = cnt.fetch_add(1, std::memory_order_relaxed);
        if (i >= len) { break; }
        f(i);
      }
    };

    std::vector<std::future<void>> futures;
    futures.reserve(num_threads);
    for (int i = 0; i < num_threads; ++i) {
      IdxT start = i * items_per_thread;
      auto& task = tasks_[i];
      {
        std::lock_guard lock(task.mtx);
        (void)lock;  // stop nvcc warning
        task.task = std::packaged_task<void()>([=] { wrapped_f(start, start + items_per_thread); });
        futures.push_back(task.task.get_future());
        task.has_task = true;
      }
      task.cv.notify_one();
    }

    for (auto& fut : futures) {
      fut.wait();
    }
    return;
  }

 private:
  // Per-worker mailbox; alignas(64) keeps mailboxes on separate cache lines.
  struct alignas(64) Task_ {
    std::mutex mtx;
    std::condition_variable cv;
    bool has_task = false;
    std::packaged_task<void()> task;
  };

  Task_* tasks_{nullptr};  // nullptr for the degenerate single-thread pool
  std::vector<std::thread> threads_;
  std::atomic<bool> finished_{false};
};
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/benchmark.cpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_stub.hpp" // must go first
#include "ann_types.hpp"
#define JSON_DIAGNOSTICS 1
#include <nlohmann/json.hpp>
#include <memory>
#include <unordered_map>
#include <dlfcn.h>
#include <filesystem>
namespace cuvs::bench {
/**
 * RAII wrapper around a dlopen()'ed shared-library handle.
 *
 * Move-only: the original was implicitly copyable, and copying would make two
 * destructors dlclose() the same handle. Copies are deleted and move
 * operations transfer ownership instead.
 *
 * @throws std::runtime_error when the library cannot be loaded; the dlerror()
 *         text is appended to the message when available.
 */
struct lib_handle {
  void* handle{nullptr};
  explicit lib_handle(const std::string& name)
  {
    handle = dlopen(name.c_str(), RTLD_LAZY | RTLD_LOCAL);
    if (handle == nullptr) {
      auto error_msg = "Failed to load " + name;
      auto err       = dlerror();
      if (err != nullptr && err[0] != '\0') { error_msg += ": " + std::string(err); }
      throw std::runtime_error(error_msg);
    }
  }
  lib_handle(const lib_handle&)                    = delete;
  auto operator=(const lib_handle&) -> lib_handle& = delete;
  lib_handle(lib_handle&& other) noexcept : handle{other.handle} { other.handle = nullptr; }
  auto operator=(lib_handle&& other) noexcept -> lib_handle&
  {
    if (this != &other) {
      if (handle != nullptr) { dlclose(handle); }
      handle       = other.handle;
      other.handle = nullptr;
    }
    return *this;
  }
  ~lib_handle() noexcept
  {
    if (handle != nullptr) { dlclose(handle); }
  }
};
// Return the dlopen handle for algorithm `algo`, loading
// "lib<algo>_ann_bench.so" on first use. Handles are cached in a
// function-local static map and stay open for the process lifetime.
auto load_lib(const std::string& algo) -> void*
{
  static std::unordered_map<std::string, lib_handle> libs{};
  auto found = libs.find(algo);
  if (found != libs.end()) { return found->second.handle; }
  return libs.emplace(algo, "lib" + algo + "_ann_bench.so").first->second.handle;
}
// Reverse-lookup the exported symbol name for `addr` through the dynamic
// linker (dladdr). Used to recover the mangled name of a template
// instantiation so the same symbol can be dlsym'ed in a plugin library.
// @throws std::logic_error when the address cannot be resolved to a name.
auto get_fun_name(void* addr) -> std::string
{
  Dl_info dl_info;
  const bool resolved = dladdr(addr, &dl_info) != 0;
  if (resolved && dl_info.dli_sname != nullptr && dl_info.dli_sname[0] != '\0') {
    return std::string{dl_info.dli_sname};
  }
  throw std::logic_error("Failed to find out name of the looked up function");
}
/**
 * Dispatch create_algo<T> into the plugin library implementing `algo`.
 *
 * The (mangled) symbol name of THIS instantiation is recovered once via
 * dladdr, then resolved with dlsym inside the algorithm's shared library,
 * which must export an identically-typed function (see
 * REGISTER_ALGO_INSTANCE in ann_types.hpp).
 *
 * @throws std::runtime_error when the plugin does not export the symbol.
 */
template <typename T>
auto create_algo(const std::string& algo,
                 const std::string& distance,
                 int dim,
                 const nlohmann::json& conf,
                 const std::vector<int>& dev_list) -> std::unique_ptr<cuvs::bench::ANN<T>>
{
  static auto fname = get_fun_name(reinterpret_cast<void*>(&create_algo<T>));
  auto handle       = load_lib(algo);
  auto fun_addr     = dlsym(handle, fname.c_str());
  if (fun_addr == nullptr) {
    throw std::runtime_error("Couldn't load the create_algo function (" + algo + ")");
  }
  auto fun = reinterpret_cast<decltype(&create_algo<T>)>(fun_addr);
  return fun(algo, distance, dim, conf, dev_list);
}
/**
 * Dispatch create_search_param<T> into the plugin library implementing
 * `algo`, using the same dladdr/dlsym symbol-forwarding trick as
 * create_algo<T>.
 *
 * @throws std::runtime_error when the plugin does not export the symbol.
 */
template <typename T>
std::unique_ptr<typename cuvs::bench::ANN<T>::AnnSearchParam> create_search_param(
  const std::string& algo, const nlohmann::json& conf)
{
  static auto fname = get_fun_name(reinterpret_cast<void*>(&create_search_param<T>));
  auto handle       = load_lib(algo);
  auto fun_addr     = dlsym(handle, fname.c_str());
  if (fun_addr == nullptr) {
    throw std::runtime_error("Couldn't load the create_search_param function (" + algo + ")");
  }
  auto fun = reinterpret_cast<decltype(&create_search_param<T>)>(fun_addr);
  return fun(algo, conf);
}
}; // namespace cuvs::bench
// Instantiate the dispatcher templates for every supported dataset type.
REGISTER_ALGO_INSTANCE(float);
REGISTER_ALGO_INSTANCE(std::int8_t);
REGISTER_ALGO_INSTANCE(std::uint8_t);

#include "benchmark.hpp"

// Entry point: all argument parsing and benchmark driving lives in run_main().
int main(int argc, char** argv) { return cuvs::bench::run_main(argc, argv); }
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/dataset.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "util.hpp"
#ifndef BUILD_CPU_ONLY
#include <cuda_fp16.h>
#else
typedef uint16_t half;
#endif
#include <errno.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <optional>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>
namespace cuvs::bench {
// http://big-ann-benchmarks.com/index.html:
// binary format that starts with 8 bytes of data consisting of num_points(uint32_t)
// num_dimensions(uint32) followed by num_pts x num_dimensions x sizeof(type) bytes of
// data stored one vector after another.
// Data files will have suffixes .fbin, .u8bin, and .i8bin to represent float32, uint8
// and int8 type data.
// As extensions for this benchmark, half and int data files will have suffixes .f16bin
// and .ibin, respectively.
template <typename T>
class BinFile {
public:
BinFile(const std::string& file,
const std::string& mode,
uint32_t subset_first_row = 0,
uint32_t subset_size = 0);
~BinFile()
{
if (mapped_ptr_ != nullptr) { unmap(); }
if (fp_ != nullptr) { fclose(fp_); }
}
BinFile(const BinFile&) = delete;
BinFile& operator=(const BinFile&) = delete;
void get_shape(size_t* nrows, int* ndims) const
{
assert(read_mode_);
if (!fp_) { open_file_(); }
*nrows = nrows_;
*ndims = ndims_;
}
void read(T* data) const
{
assert(read_mode_);
if (!fp_) { open_file_(); }
size_t total = static_cast<size_t>(nrows_) * ndims_;
if (fread(data, sizeof(T), total, fp_) != total) {
throw std::runtime_error("fread() BinFile " + file_ + " failed");
}
}
void write(const T* data, uint32_t nrows, uint32_t ndims)
{
assert(!read_mode_);
if (!fp_) { open_file_(); }
if (fwrite(&nrows, sizeof(uint32_t), 1, fp_) != 1) {
throw std::runtime_error("fwrite() BinFile " + file_ + " failed");
}
if (fwrite(&ndims, sizeof(uint32_t), 1, fp_) != 1) {
throw std::runtime_error("fwrite() BinFile " + file_ + " failed");
}
size_t total = static_cast<size_t>(nrows) * ndims;
if (fwrite(data, sizeof(T), total, fp_) != total) {
throw std::runtime_error("fwrite() BinFile " + file_ + " failed");
}
}
T* map() const
{
assert(read_mode_);
if (!fp_) { open_file_(); }
int fid = fileno(fp_);
mapped_ptr_ = mmap(nullptr, file_size_, PROT_READ, MAP_PRIVATE, fid, 0);
if (mapped_ptr_ == MAP_FAILED) {
mapped_ptr_ = nullptr;
throw std::runtime_error("mmap error: Value of errno " + std::to_string(errno) + ", " +
std::string(strerror(errno)));
}
return reinterpret_cast<T*>(reinterpret_cast<uint8_t*>(mapped_ptr_) + 2 * sizeof(uint32_t) +
subset_first_row_ * ndims_ * sizeof(T));
}
void unmap() const
{
if (munmap(mapped_ptr_, file_size_) == -1) {
throw std::runtime_error("munmap error: " + std::string(strerror(errno)));
}
}
private:
void check_suffix_();
void open_file_() const;
std::string file_;
bool read_mode_;
uint32_t subset_first_row_;
uint32_t subset_size_;
mutable FILE* fp_{nullptr};
mutable uint32_t nrows_;
mutable uint32_t ndims_;
mutable size_t file_size_;
mutable void* mapped_ptr_{nullptr};
};
// Validate mode and suffix up front; the file itself is opened lazily.
// Subset selection is only meaningful in read mode, hence the checks below.
template <typename T>
BinFile<T>::BinFile(const std::string& file,
                    const std::string& mode,
                    uint32_t subset_first_row,
                    uint32_t subset_size)
  : file_(file),
    read_mode_(mode == "r"),
    subset_first_row_(subset_first_row),
    subset_size_(subset_size),
    fp_(nullptr)
{
  check_suffix_();

  if (!read_mode_) {
    if (mode == "w") {
      if (subset_first_row != 0) {
        throw std::runtime_error("subset_first_row should be zero for write mode");
      }
      if (subset_size != 0) {
        throw std::runtime_error("subset_size should be zero for write mode");
      }
    } else {
      throw std::runtime_error("BinFile's mode must be either 'r' or 'w': " + file_);
    }
  }
}
// Lazily open the file. In read mode: parse the 8-byte header, validate the
// file size and the requested subset, then seek to the subset's first row and
// shrink nrows_ accordingly (so nrows_/ndims_ describe the subset afterwards).
// NOTE(review): fopen uses text-mode "r"/"w" — identical to binary mode on
// POSIX, but "rb"/"wb" would be portable; confirm target platforms.
template <typename T>
void BinFile<T>::open_file_() const
{
  fp_ = fopen(file_.c_str(), read_mode_ ? "r" : "w");
  if (!fp_) { throw std::runtime_error("open BinFile failed: " + file_); }

  if (read_mode_) {
    struct stat statbuf;
    if (stat(file_.c_str(), &statbuf) != 0) { throw std::runtime_error("stat() failed: " + file_); }
    file_size_ = statbuf.st_size;

    uint32_t header[2];
    if (fread(header, sizeof(uint32_t), 2, fp_) != 2) {
      throw std::runtime_error("read header of BinFile failed: " + file_);
    }
    nrows_ = header[0];
    ndims_ = header[1];

    // The file must contain exactly header + nrows * ndims elements.
    size_t expected_file_size =
      2 * sizeof(uint32_t) + static_cast<size_t>(nrows_) * ndims_ * sizeof(T);
    if (file_size_ != expected_file_size) {
      throw std::runtime_error("expected file size of " + file_ + " is " +
                               std::to_string(expected_file_size) + ", however, actual size is " +
                               std::to_string(file_size_));
    }

    if (subset_first_row_ >= nrows_) {
      throw std::runtime_error(file_ + ": subset_first_row (" + std::to_string(subset_first_row_) +
                               ") >= nrows (" + std::to_string(nrows_) + ")");
    }
    if (subset_first_row_ + subset_size_ > nrows_) {
      throw std::runtime_error(file_ + ": subset_first_row (" + std::to_string(subset_first_row_) +
                               ") + subset_size (" + std::to_string(subset_size_) + ") > nrows (" +
                               std::to_string(nrows_) + ")");
    }

    if (subset_first_row_) {
      static_assert(sizeof(long) == 8, "fseek() don't support 64-bit offset");
      if (fseek(fp_, sizeof(T) * subset_first_row_ * ndims_, SEEK_CUR) == -1) {
        throw std::runtime_error(file_ + ": fseek failed");
      }
      nrows_ -= subset_first_row_;
    }
    if (subset_size_) { nrows_ = subset_size_; }
  }
}
// Verify that the file name's suffix matches the element type T
// (float -> .fbin, half -> .f16bin, int -> .ibin, uint8_t -> .u8bin,
// int8_t -> .i8bin); throws std::runtime_error on any mismatch or when T is
// not one of the supported types.
template <typename T>
void BinFile<T>::check_suffix_()
{
  const auto dot = file_.rfind('.');
  if (dot == std::string::npos) {
    throw std::runtime_error("name of BinFile doesn't have a suffix: " + file_);
  }
  const std::string suffix = file_.substr(dot + 1);

  // Resolve the expected suffix and human-readable type name from T.
  const char* expected  = nullptr;
  const char* type_name = nullptr;
  if constexpr (std::is_same_v<T, float>) {
    expected  = "fbin";
    type_name = "float";
  } else if constexpr (std::is_same_v<T, half>) {
    expected  = "f16bin";
    type_name = "half";
  } else if constexpr (std::is_same_v<T, int>) {
    expected  = "ibin";
    type_name = "int";
  } else if constexpr (std::is_same_v<T, uint8_t>) {
    expected  = "u8bin";
    type_name = "uint8_t";
  } else if constexpr (std::is_same_v<T, int8_t>) {
    expected  = "i8bin";
    type_name = "int8_t";
  } else {
    throw std::runtime_error(
      "T of BinFile<T> should be one of float, half, int, uint8_t, or int8_t");
  }

  if (suffix != expected) {
    throw std::runtime_error("BinFile<" + std::string(type_name) + "> should has ." +
                             std::string(expected) + " suffix: " + file_);
  }
}
/**
 * Abstract dataset: base vectors, query vectors and (optional) ground-truth
 * neighbors, each loaded lazily on first access. Subclasses implement the
 * load_* hooks; buffers are owned by this class and released in ~Dataset
 * (host buffers via delete[], device buffers via cudaFree).
 */
template <typename T>
class Dataset {
 public:
  Dataset(const std::string& name) : name_(name) {}
  Dataset(const std::string& name, const std::string& distance) : name_(name), distance_(distance)
  {
  }
  Dataset(const Dataset&) = delete;
  Dataset& operator=(const Dataset&) = delete;
  virtual ~Dataset();

  std::string name() const { return name_; }
  std::string distance() const { return distance_; }
  virtual int dim() const = 0;
  // Number of neighbors per query stored in the ground-truth set.
  virtual uint32_t max_k() const = 0;
  virtual size_t base_set_size() const = 0;
  virtual size_t query_set_size() const = 0;

  // load data lazily, so don't pay the overhead of reading unneeded set
  // e.g. don't load base set when searching
  const T* base_set() const
  {
    if (!base_set_) { load_base_set_(); }
    return base_set_;
  }

  const T* query_set() const
  {
    if (!query_set_) { load_query_set_(); }
    return query_set_;
  }

  const int32_t* gt_set() const
  {
    if (!gt_set_) { load_gt_set_(); }
    return gt_set_;
  }

  const T* base_set_on_gpu() const;
  const T* query_set_on_gpu() const;
  const T* mapped_base_set() const;

  // Convenience accessor picking the right buffer for the memory space the
  // algorithm asked for (see AlgoProperty).
  auto query_set(MemoryType memory_type) const -> const T*
  {
    switch (memory_type) {
      case MemoryType::Device: return query_set_on_gpu();
      default: return query_set();
    }
  }

  auto base_set(MemoryType memory_type) const -> const T*
  {
    switch (memory_type) {
      case MemoryType::Device: return base_set_on_gpu();
      case MemoryType::Host: return base_set();
      case MemoryType::HostMmap: return mapped_base_set();
      default: return nullptr;
    }
  }

 protected:
  virtual void load_base_set_() const  = 0;
  virtual void load_gt_set_() const    = 0;
  virtual void load_query_set_() const = 0;
  virtual void map_base_set_() const   = 0;

  std::string name_;
  std::string distance_;

  // Lazily populated caches (mutable so const accessors can fill them).
  mutable T* base_set_        = nullptr;
  mutable T* query_set_       = nullptr;
  mutable T* d_base_set_      = nullptr;
  mutable T* d_query_set_     = nullptr;
  mutable T* mapped_base_set_ = nullptr;
  mutable int32_t* gt_set_    = nullptr;
};
// Free all lazily-created buffers. Host buffers were allocated with new[]
// (see BinDataset::load_*_), device buffers with cudaMalloc.
// NOTE(review): mapped_base_set_ is intentionally not freed here — it points
// into the BinFile's mmap, which the BinFile destructor unmaps; confirm.
template <typename T>
Dataset<T>::~Dataset()
{
  delete[] base_set_;
  delete[] query_set_;
  delete[] gt_set_;
#ifndef BUILD_CPU_ONLY
  if (d_base_set_) { cudaFree(d_base_set_); }
  if (d_query_set_) { cudaFree(d_query_set_); }
#endif
}
// Lazily copy the (host) base set to device memory and cache the pointer.
// Returns nullptr in BUILD_CPU_ONLY builds.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked — an
// allocation failure would surface later as a crash; confirm acceptable.
template <typename T>
const T* Dataset<T>::base_set_on_gpu() const
{
#ifndef BUILD_CPU_ONLY
  if (!d_base_set_) {
    base_set();  // ensure the host copy is loaded first
    cudaMalloc((void**)&d_base_set_, base_set_size() * dim() * sizeof(T));
    cudaMemcpy(d_base_set_, base_set_, base_set_size() * dim() * sizeof(T), cudaMemcpyHostToDevice);
  }
#endif
  return d_base_set_;
}
// Lazily copy the (host) query set to device memory and cache the pointer.
// Returns nullptr in BUILD_CPU_ONLY builds. Same unchecked-CUDA-call caveat
// as base_set_on_gpu().
template <typename T>
const T* Dataset<T>::query_set_on_gpu() const
{
#ifndef BUILD_CPU_ONLY
  if (!d_query_set_) {
    query_set();  // ensure the host copy is loaded first
    cudaMalloc((void**)&d_query_set_, query_set_size() * dim() * sizeof(T));
    cudaMemcpy(
      d_query_set_, query_set_, query_set_size() * dim() * sizeof(T), cudaMemcpyHostToDevice);
  }
#endif
  return d_query_set_;
}
// Lazily mmap the base set file and return a pointer to its first subset row.
template <typename T>
const T* Dataset<T>::mapped_base_set() const
{
  if (!mapped_base_set_) { map_base_set_(); }
  return mapped_base_set_;
}
/**
 * Dataset backed by big-ann-benchmarks binary files: one base file (with an
 * optional row subset), one query file and an optional ground-truth neighbors
 * file. Shapes are read lazily from the file headers and cached.
 */
template <typename T>
class BinDataset : public Dataset<T> {
 public:
  BinDataset(const std::string& name,
             const std::string& base_file,
             size_t subset_first_row,
             size_t subset_size,
             const std::string& query_file,
             const std::string& distance,
             const std::optional<std::string>& groundtruth_neighbors_file);

  int dim() const override;
  uint32_t max_k() const override;
  size_t base_set_size() const override;
  size_t query_set_size() const override;

 private:
  void load_base_set_() const override;
  void load_query_set_() const override;
  void load_gt_set_() const override;
  void map_base_set_() const override;

  // Caches filled on first use; dim_ is shared between base and query files
  // and cross-checked when both have been read.
  mutable int dim_             = 0;
  mutable uint32_t max_k_      = 0;
  mutable size_t base_set_size_  = 0;
  mutable size_t query_set_size_ = 0;

  BinFile<T> base_file_;
  BinFile<T> query_file_;
  std::optional<BinFile<std::int32_t>> gt_file_{std::nullopt};
};
// Wire up the three BinFiles; the actual files are opened lazily on first
// shape/data access, so construction is cheap.
template <typename T>
BinDataset<T>::BinDataset(const std::string& name,
                          const std::string& base_file,
                          size_t subset_first_row,
                          size_t subset_size,
                          const std::string& query_file,
                          const std::string& distance,
                          const std::optional<std::string>& groundtruth_neighbors_file)
  : Dataset<T>(name, distance),
    base_file_(base_file, "r", subset_first_row, subset_size),
    query_file_(query_file, "r")
{
  if (groundtruth_neighbors_file.has_value()) {
    gt_file_.emplace(groundtruth_neighbors_file.value(), "r");
  }
}
// Return the vector dimensionality, populating dim_ as a side effect of
// reading the base (preferred) or query file header. The seemingly redundant
// ifs exist because base_set_size()/query_set_size() set dim_ lazily.
template <typename T>
int BinDataset<T>::dim() const
{
  if (dim_ > 0) { return dim_; }
  if (base_set_size() > 0) { return dim_; }
  if (query_set_size() > 0) { return dim_; }
  return dim_;
}
// Number of neighbors per query in the ground-truth file (loads it lazily).
// NOTE(review): returns 0 when no ground-truth file was configured, since
// load_gt_set_() is then a no-op — callers must tolerate that.
template <typename T>
uint32_t BinDataset<T>::max_k() const
{
  if (!this->gt_set_) { load_gt_set_(); }
  return max_k_;
}
// Lazily read (and cache) the query set's row count from the file header,
// validating that its dimensionality agrees with the base set when that was
// read first.
template <typename T>
size_t BinDataset<T>::query_set_size() const
{
  if (query_set_size_ > 0) { return query_set_size_; }
  int dim;
  query_file_.get_shape(&query_set_size_, &dim);
  if (query_set_size_ == 0) { throw std::runtime_error("Zero query set size"); }
  if (dim == 0) { throw std::runtime_error("Zero query set dim"); }
  if (dim_ == 0) {
    dim_ = dim;
  } else if (dim_ != dim) {
    // dim_ was already populated from the base file; the files must agree.
    // (Fixed: the message previously lacked the closing parenthesis.)
    throw std::runtime_error("base set dim (" + std::to_string(dim_) + ") != query set dim (" +
                             std::to_string(dim) + ")");
  }
  return query_set_size_;
}
/**
 * Lazily read (and cache) the number of base vectors from the base file
 * header; also populates/validates the shared dimensionality dim_.
 * Throws std::runtime_error on an empty file or on a dim mismatch.
 */
template <typename T>
size_t BinDataset<T>::base_set_size() const
{
  if (base_set_size_ > 0) { return base_set_size_; }
  int dim;
  base_file_.get_shape(&base_set_size_, &dim);
  if (base_set_size_ == 0) { throw std::runtime_error("Zero base set size"); }
  if (dim == 0) { throw std::runtime_error("Zero base set dim"); }
  if (dim_ == 0) {
    dim_ = dim;
  } else if (dim_ != dim) {
    // dim_ was previously populated from the query set; the two files must agree.
    // (Fixed: the message previously lacked the closing parenthesis.)
    throw std::runtime_error("base set dim (" + std::to_string(dim) + ") != query set dim (" +
                             std::to_string(dim_) + ")");
  }
  return base_set_size_;
}
template <typename T>
void BinDataset<T>::load_base_set_() const
{
  // Read the whole base set into a heap buffer. base_set_size()/dim() trigger
  // the lazy header reads on first use.
  // NOTE(review): raw `new` — ownership presumably released by the Dataset<T>
  // base class destructor; confirm against dataset.hpp.
  this->base_set_ = new T[base_set_size() * dim()];
  base_file_.read(this->base_set_);
}
template <typename T>
void BinDataset<T>::load_query_set_() const
{
  // Read the whole query set into a heap buffer (see load_base_set_ for the
  // ownership note on the raw allocation).
  this->query_set_ = new T[query_set_size() * dim()];
  query_file_.read(this->query_set_);
}
template <typename T>
void BinDataset<T>::load_gt_set_() const
{
  // Ground truth is optional: a no-op when no file was configured.
  // The file stores one row of `k` int32 neighbor ids per query; `max_k_`
  // records that row width, which caps the `k` recall can be computed for.
  if (gt_file_.has_value()) {
    size_t queries;
    int k;
    gt_file_->get_shape(&queries, &k);
    this->gt_set_ = new std::int32_t[queries * k];
    gt_file_->read(this->gt_set_);
    max_k_ = k;
  }
}
template <typename T>
void BinDataset<T>::map_base_set_() const
{
  // Memory-map the base file instead of copying it into heap memory.
  this->mapped_base_set_ = base_file_.map();
}
} // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/cuda_stub.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
/*
The content of this header is governed by two preprocessor definitions:
- BUILD_CPU_ONLY - whether none of the CUDA functions are used.
- CUVS_BENCH_LINK_CUDART - dynamically link against this string if defined.
______________________________________________________________________________
|BUILD_CPU_ONLY | CUVS_BENCH_LINK_CUDART | cudart | cuda_runtime_api.h |
| | | found | needed | included |
|---------|-----------------------|-----------|---------|--------------------|
| ON | <not defined> | false | false | NO |
| ON | "cudart.so.xx.xx" | false | false | NO |
| OFF | <nod defined> | true | true | YES |
| OFF | "cudart.so.xx.xx" | <runtime> | true | YES |
------------------------------------------------------------------------------
*/
#ifndef BUILD_CPU_ONLY
#include <cuda_runtime_api.h>
#ifdef CUVS_BENCH_LINK_CUDART
#include <cstring>
#include <dlfcn.h>
#endif
#else
typedef void* cudaStream_t;
typedef void* cudaEvent_t;
#endif
namespace cuvs::bench {
// Holds an (optionally) dynamically-loaded CUDA runtime library.
// With CUVS_BENCH_LINK_CUDART defined, cudart is looked up via dlopen with a
// progressively relaxed version suffix; otherwise cudart is either linked at
// build time or (BUILD_CPU_ONLY) not used at all.
struct cuda_lib_handle {
  void* handle{nullptr};
  explicit cuda_lib_handle()
  {
#ifdef CUVS_BENCH_LINK_CUDART
    constexpr int kFlags = RTLD_NOW | RTLD_GLOBAL | RTLD_DEEPBIND | RTLD_NODELETE;
    // The full name of the linked cudart library 'cudart.so.MAJOR.MINOR.PATCH'
    char libname[] = CUVS_BENCH_LINK_CUDART; // NOLINT
    handle = dlopen(CUVS_BENCH_LINK_CUDART, kFlags);
    if (handle != nullptr) { return; }
    // NOTE(review): the fallbacks below assume the name contains at least three
    // '.'-separated version components; strrchr returning nullptr is unhandled.
    // try strip the PATCH
    auto p = strrchr(libname, '.');
    p[0] = 0;
    handle = dlopen(libname, kFlags);
    if (handle != nullptr) { return; }
    // try set the MINOR version to 0
    p = strrchr(libname, '.');
    p[1] = '0';
    p[2] = 0;
    handle = dlopen(libname, kFlags);
    if (handle != nullptr) { return; }
    // try strip the MINOR
    p[0] = 0;
    handle = dlopen(libname, kFlags);
    if (handle != nullptr) { return; }
    // try strip the MAJOR
    p = strrchr(libname, '.');
    p[0] = 0;
    handle = dlopen(libname, kFlags);
#endif
  }
  ~cuda_lib_handle() noexcept
  {
#ifdef CUVS_BENCH_LINK_CUDART
    if (handle != nullptr) { dlclose(handle); }
#endif
  }
  // Resolve a cudart symbol from the loaded library; returns nullptr when not
  // linked dynamically (callers fall back to the stub implementations below).
  template <typename Symbol>
  auto sym(const char* name) -> Symbol
  {
#ifdef CUVS_BENCH_LINK_CUDART
    return reinterpret_cast<Symbol>(dlsym(handle, name));
#else
    return nullptr;
#endif
  }
  /** Whether this is NOT a cpu-only package (i.e. CUDA symbols are needed). */
  [[nodiscard]] constexpr inline auto needed() const -> bool
  {
#if defined(BUILD_CPU_ONLY)
    return false;
#else
    return true;
#endif
  }
  /** CUDA found, either at compile time or at runtime. */
  [[nodiscard]] inline auto found() const -> bool
  {
#if defined(BUILD_CPU_ONLY)
    return false;
#elif defined(CUVS_BENCH_LINK_CUDART)
    return handle != nullptr;
#else
    return true;
#endif
  }
};
static inline cuda_lib_handle cudart{};
#ifdef CUVS_BENCH_LINK_CUDART
// Weak no-op stand-ins for the cudart entry points used by the benchmark.
// When the real cudart cannot be dlopen'ed at runtime, the RAFT_DECLARE_CUDART
// aliases below point at these stubs so CPU-only runs still link and execute;
// every stub reports cudaSuccess and returns neutral output values.
namespace stub {

[[gnu::weak, gnu::noinline]] cudaError_t cudaMemcpy(void* dst,
                                                    const void* src,
                                                    size_t count,
                                                    enum cudaMemcpyKind kind)
{
  return cudaSuccess;
}

[[gnu::weak, gnu::noinline]] cudaError_t cudaMalloc(void** ptr, size_t size)
{
  *ptr = nullptr;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaMemset(void* devPtr, int value, size_t count)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaFree(void* devPtr) { return cudaSuccess; }
[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamCreate(cudaStream_t* pStream)
{
  *pStream = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamCreateWithFlags(cudaStream_t* pStream,
                                                                   unsigned int flags)
{
  *pStream = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamDestroy(cudaStream_t pStream)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaDeviceSynchronize() { return cudaSuccess; }

[[gnu::weak, gnu::noinline]] cudaError_t cudaStreamSynchronize(cudaStream_t pStream)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventCreate(cudaEvent_t* event)
{
  *event = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventRecord(cudaEvent_t event, cudaStream_t stream)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventSynchronize(cudaEvent_t event)
{
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventElapsedTime(float* ms,
                                                              cudaEvent_t start,
                                                              cudaEvent_t end)
{
  *ms = 0;
  return cudaSuccess;
}
[[gnu::weak, gnu::noinline]] cudaError_t cudaEventDestroy(cudaEvent_t event) { return cudaSuccess; }
[[gnu::weak, gnu::noinline]] cudaError_t cudaGetDevice(int* device)
{
  *device = 0;
  return cudaSuccess;
};
[[gnu::weak, gnu::noinline]] cudaError_t cudaDriverGetVersion(int* driver)
{
  *driver = 0;
  return cudaSuccess;
};
[[gnu::weak, gnu::noinline]] cudaError_t cudaRuntimeGetVersion(int* runtime)
{
  *runtime = 0;
  return cudaSuccess;
};
[[gnu::weak, gnu::noinline]] cudaError_t cudaGetDeviceProperties(struct cudaDeviceProp* prop,
                                                                 int device)
{
  *prop = cudaDeviceProp{};
  return cudaSuccess;
}
}  // namespace stub

// For each cudart function: bind to the real symbol if the library was found
// at runtime, otherwise to the no-op stub above.
#define RAFT_DECLARE_CUDART(fun)                \
  static inline decltype(&stub::fun) fun =      \
    cudart.found() ? cudart.sym<decltype(&stub::fun)>(#fun) : &stub::fun

RAFT_DECLARE_CUDART(cudaMemcpy);
RAFT_DECLARE_CUDART(cudaMalloc);
RAFT_DECLARE_CUDART(cudaMemset);
RAFT_DECLARE_CUDART(cudaFree);
RAFT_DECLARE_CUDART(cudaStreamCreate);
RAFT_DECLARE_CUDART(cudaStreamCreateWithFlags);
RAFT_DECLARE_CUDART(cudaStreamDestroy);
RAFT_DECLARE_CUDART(cudaDeviceSynchronize);
RAFT_DECLARE_CUDART(cudaStreamSynchronize);
RAFT_DECLARE_CUDART(cudaEventCreate);
RAFT_DECLARE_CUDART(cudaEventRecord);
RAFT_DECLARE_CUDART(cudaEventSynchronize);
RAFT_DECLARE_CUDART(cudaEventElapsedTime);
RAFT_DECLARE_CUDART(cudaEventDestroy);
RAFT_DECLARE_CUDART(cudaGetDevice);
RAFT_DECLARE_CUDART(cudaDriverGetVersion);
RAFT_DECLARE_CUDART(cudaRuntimeGetVersion);
RAFT_DECLARE_CUDART(cudaGetDeviceProperties);

#undef RAFT_DECLARE_CUDART
#endif
}; // namespace cuvs::bench
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/benchmark.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "ann_types.hpp"
#include "conf.hpp"
#include "dataset.hpp"
#include "util.hpp"
#include <benchmark/benchmark.h>
#include <raft/core/logger.hpp>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstdint>
#include <cstring>
#include <fstream>
#include <limits>
#include <memory>
#include <mutex>
#include <numeric>
#include <sstream>
#include <string>
#include <thread>
#include <unistd.h>
#include <vector>
namespace cuvs::bench {
std::mutex init_mutex;
std::condition_variable cond_var;
std::atomic_int processed_threads{0};
static inline std::unique_ptr<AnnBase> current_algo{nullptr};
static inline std::shared_ptr<AlgoProperty> current_algo_props{nullptr};
using kv_series = std::vector<std::tuple<std::string, std::vector<nlohmann::json>>>;
/**
 * Expand a list of JSON configs by the Cartesian product of all override
 * key/value series, tagging every produced config with an "override_suffix"
 * path that uniquely identifies its combination of values.
 */
inline auto apply_overrides(const std::vector<nlohmann::json>& configs,
                            const kv_series& overrides,
                            std::size_t override_idx = 0) -> std::vector<nlohmann::json>
{
  std::vector<nlohmann::json> out{};
  // Base case: no more overrides left; tag each config with its index suffix.
  if (override_idx >= overrides.size()) {
    std::size_t total = configs.size();
    for (std::size_t idx = 0; idx < total; idx++) {
      auto cfg               = configs[idx];
      cfg["override_suffix"] = total > 1 ? "/" + std::to_string(idx) : "";
      out.push_back(cfg);
    }
    return out;
  }
  // Recursive case: expand the remaining overrides first, then multiply the
  // result by every value of the current key.
  auto expanded             = apply_overrides(configs, overrides, override_idx + 1);
  const auto& [key, values] = overrides[override_idx];
  std::size_t n_vals        = values.size();
  for (std::size_t v = 0; v < n_vals; v++) {
    for (auto cfg : expanded) {
      if (n_vals > 1) {
        cfg["override_suffix"] =
          static_cast<std::string>(cfg["override_suffix"]) + "/" + std::to_string(v);
      }
      cfg[key] = values[v];
      out.push_back(cfg);
    }
  }
  return out;
}
/**
 * Convenience overload for a single config: wraps it in a vector and forwards
 * to the main apply_overrides.
 */
inline auto apply_overrides(const nlohmann::json& config,
                            const kv_series& overrides,
                            std::size_t override_idx = 0)
{
  // Fixed: previously the starting index was hard-coded to 0, silently
  // ignoring a caller-supplied non-zero override_idx.
  return apply_overrides(std::vector{config}, overrides, override_idx);
}
/**
 * Publish benchmark parameters: numeric and boolean values become gbench
 * counters; all other values are folded into one '#'-separated label string.
 */
inline void dump_parameters(::benchmark::State& state, nlohmann::json params)
{
  std::string label;
  bool first = true;
  for (auto& [key, val] : params.items()) {
    if (val.is_number()) {
      state.counters.insert({{key, val}});
      continue;
    }
    if (val.is_boolean()) {
      state.counters.insert({{key, val ? 1.0 : 0.0}});
      continue;
    }
    auto pair = key + "=" + val.dump();
    label     = first ? pair : label + "#" + pair;
    first     = false;
  }
  if (!first) { state.SetLabel(label); }
}
/**
 * Overlay optional JSON keys ("dataset_memory_type", "query_memory_type")
 * on top of the algorithm-provided defaults and return the merged property.
 */
inline auto parse_algo_property(AlgoProperty prop, const nlohmann::json& conf) -> AlgoProperty
{
  auto it = conf.find("dataset_memory_type");
  if (it != conf.end()) { prop.dataset_memory_type = parse_memory_type(*it); }
  it = conf.find("query_memory_type");
  if (it != conf.end()) { prop.query_memory_type = parse_memory_type(*it); }
  return prop;
}
/**
 * Benchmark index building: instantiate the algorithm from the JSON build
 * parameters, run build() once per gbench iteration, and save the index to
 * index.file unless the run was skipped.
 */
template <typename T>
void bench_build(::benchmark::State& state,
                 std::shared_ptr<const Dataset<T>> dataset,
                 Configuration::Index index,
                 bool force_overwrite)
{
  dump_parameters(state, index.build_param);
  if (file_exists(index.file)) {
    if (force_overwrite) {
      log_info("Overwriting file: %s", index.file.c_str());
    } else {
      return state.SkipWithMessage(
        "Index file already exists (use --force to overwrite the index).");
    }
  }
  std::unique_ptr<ANN<T>> algo;
  try {
    algo = ann::create_algo<T>(
      index.algo, dataset->distance(), dataset->dim(), index.build_param, index.dev_list);
  } catch (const std::exception& e) {
    return state.SkipWithError("Failed to create an algo: " + std::string(e.what()));
  }
  // Load the base set with the memory type (host/device/mmap) the algo prefers,
  // possibly overridden by the build parameters.
  const auto algo_property = parse_algo_property(algo->get_preference(), index.build_param);
  const T* base_set = dataset->base_set(algo_property.dataset_memory_type);
  std::size_t index_size = dataset->base_set_size();
  cuda_timer gpu_timer;
  {
    nvtx_case nvtx{state.name()};
    for (auto _ : state) {
      [[maybe_unused]] auto ntx_lap = nvtx.lap();
      [[maybe_unused]] auto gpu_lap = gpu_timer.lap();
      try {
        algo->build(base_set, index_size, gpu_timer.stream());
      } catch (const std::exception& e) {
        state.SkipWithError(std::string(e.what()));
      }
    }
  }
  state.counters.insert(
    {{"GPU", gpu_timer.total_time() / state.iterations()}, {"index_size", index_size}});
  if (state.skipped()) { return; }
  make_sure_parent_dir_exists(index.file);
  algo->save(index.file);
}
/**
 * Benchmark index search. Thread 0 loads/caches the algorithm and search
 * parameters under a condition-variable handshake; all threads then search
 * disjoint query batches, and recall is computed per thread-partition when a
 * ground truth set is available.
 */
template <typename T>
void bench_search(::benchmark::State& state,
                  Configuration::Index index,
                  std::size_t search_param_ix,
                  std::shared_ptr<const Dataset<T>> dataset,
                  Objective metric_objective)
{
  std::size_t queries_processed = 0;
  const auto& sp_json = index.search_params[search_param_ix];
  if (state.thread_index() == 0) { dump_parameters(state, sp_json); }
  // NB: `k` and `n_queries` are guaranteed to be populated in conf.cpp
  const std::uint32_t k = sp_json["k"];
  // Amount of data processed in one go
  const std::size_t n_queries = sp_json["n_queries"];
  // Round down the query data to a multiple of the batch size to loop over full batches of data
  const std::size_t query_set_size = (dataset->query_set_size() / n_queries) * n_queries;
  if (dataset->query_set_size() < n_queries) {
    std::stringstream msg;
    msg << "Not enough queries in benchmark set. Expected " << n_queries << ", actual "
        << dataset->query_set_size();
    return state.SkipWithError(msg.str());
  }
  // Each thread start from a different offset, so that the queries that they process do not
  // overlap.
  std::ptrdiff_t batch_offset = (state.thread_index() * n_queries) % query_set_size;
  std::ptrdiff_t queries_stride = state.threads() * n_queries;
  // Output is saved into a contiguous buffer (separate buffers for each thread).
  std::ptrdiff_t out_offset = 0;
  const T* query_set = nullptr;
  if (!file_exists(index.file)) {
    state.SkipWithError("Index file is missing. Run the benchmark in the build mode first.");
    return;
  }
  /**
   * Make sure the first thread loads the algo and dataset
   */
  if (state.thread_index() == 0) {
    std::unique_lock lk(init_mutex);
    // Wait until the previous benchmark's threads have all counted down.
    cond_var.wait(lk, [] { return processed_threads.load(std::memory_order_acquire) == 0; });
    // algo is static to cache it between close search runs to save time on index loading
    static std::string index_file = "";
    if (index.file != index_file) {
      current_algo.reset();
      index_file = index.file;
    }
    std::unique_ptr<typename ANN<T>::AnnSearchParam> search_param;
    ANN<T>* algo;
    try {
      if (!current_algo || (algo = dynamic_cast<ANN<T>*>(current_algo.get())) == nullptr) {
        auto ualgo = ann::create_algo<T>(
          index.algo, dataset->distance(), dataset->dim(), index.build_param, index.dev_list);
        algo = ualgo.get();
        algo->load(index_file);
        current_algo = std::move(ualgo);
      }
      search_param = ann::create_search_param<T>(index.algo, sp_json);
      search_param->metric_objective = metric_objective;
    } catch (const std::exception& e) {
      state.SkipWithError("Failed to create an algo: " + std::string(e.what()));
      return;
    }
    auto algo_property = parse_algo_property(algo->get_preference(), sp_json);
    current_algo_props = std::make_shared<AlgoProperty>(algo_property.dataset_memory_type,
                                                        algo_property.query_memory_type);
    if (search_param->needs_dataset()) {
      try {
        algo->set_search_dataset(dataset->base_set(current_algo_props->dataset_memory_type),
                                 dataset->base_set_size());
      } catch (const std::exception& ex) {
        state.SkipWithError("The algorithm '" + index.name +
                            "' requires the base set, but it's not available. " +
                            "Exception: " + std::string(ex.what()));
        return;
      }
    }
    try {
      algo->set_search_param(*search_param);
    } catch (const std::exception& ex) {
      state.SkipWithError("An error occurred setting search parameters: " + std::string(ex.what()));
      return;
    }
    query_set = dataset->query_set(current_algo_props->query_memory_type);
    // Release the other threads: the shared state is fully initialized now.
    processed_threads.store(state.threads(), std::memory_order_acq_rel);
    cond_var.notify_all();
  } else {
    std::unique_lock lk(init_mutex);
    // All other threads will wait for the first thread to initialize the algo.
    cond_var.wait(lk, [&state] {
      return processed_threads.load(std::memory_order_acquire) == state.threads();
    });
    // gbench ensures that all threads are synchronized at the start of the benchmark loop.
    // We are accessing shared variables (like current_algo, current_algo_probs) before the
    // benchmark loop, therefore the synchronization here is necessary.
  }
  const auto algo_property = *current_algo_props;
  query_set = dataset->query_set(algo_property.query_memory_type);
  /**
   * Each thread will manage its own outputs
   */
  std::shared_ptr<buf<float>> distances =
    std::make_shared<buf<float>>(algo_property.query_memory_type, k * query_set_size);
  std::shared_ptr<buf<std::size_t>> neighbors =
    std::make_shared<buf<std::size_t>>(algo_property.query_memory_type, k * query_set_size);
  cuda_timer gpu_timer;
  auto start = std::chrono::high_resolution_clock::now();
  {
    nvtx_case nvtx{state.name()};
    ANN<T>* algo = dynamic_cast<ANN<T>*>(current_algo.get());
    for (auto _ : state) {
      [[maybe_unused]] auto ntx_lap = nvtx.lap();
      [[maybe_unused]] auto gpu_lap = gpu_timer.lap();
      // run the search
      try {
        algo->search(query_set + batch_offset * dataset->dim(),
                     n_queries,
                     k,
                     neighbors->data + out_offset * k,
                     distances->data + out_offset * k,
                     gpu_timer.stream());
      } catch (const std::exception& e) {
        state.SkipWithError(std::string(e.what()));
      }
      // advance to the next batch
      batch_offset = (batch_offset + queries_stride) % query_set_size;
      out_offset = (out_offset + n_queries) % query_set_size;
      queries_processed += n_queries;
    }
  }
  auto end = std::chrono::high_resolution_clock::now();
  auto duration = std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count();
  if (state.thread_index() == 0) { state.counters.insert({{"end_to_end", duration}}); }
  state.counters.insert(
    {"Latency", {duration / double(state.iterations()), benchmark::Counter::kAvgThreads}});
  state.SetItemsProcessed(queries_processed);
  if (cudart.found()) {
    double gpu_time_per_iteration = gpu_timer.total_time() / (double)state.iterations();
    state.counters.insert({"GPU", {gpu_time_per_iteration, benchmark::Counter::kAvgThreads}});
  }
  // This will be the total number of queries across all threads
  state.counters.insert({{"total_queries", queries_processed}});
  if (state.skipped()) { return; }
  // assume thread has finished processing successfully at this point
  // last thread to finish processing notifies all
  // NOTE(review): the PRE-decrement value is compared here, so this condition
  // is never true while threads are still counting down and the notification
  // never fires for the last finishing thread; `--processed_threads == 0` looks
  // intended. Benign today only because the waiters above re-check the
  // predicate before blocking — confirm before relying on this notify.
  if (processed_threads-- == 0) { cond_var.notify_all(); }
  // Each thread calculates recall on their partition of queries.
  // evaluate recall
  if (dataset->max_k() >= k) {
    const std::int32_t* gt = dataset->gt_set();
    const std::uint32_t max_k = dataset->max_k();
    buf<std::size_t> neighbors_host = neighbors->move(MemoryType::Host);
    std::size_t rows = std::min(queries_processed, query_set_size);
    std::size_t match_count = 0;
    std::size_t total_count = rows * static_cast<size_t>(k);
    // We go through the groundtruth with same stride as the benchmark loop.
    size_t out_offset = 0;
    size_t batch_offset = (state.thread_index() * n_queries) % query_set_size;
    while (out_offset < rows) {
      for (std::size_t i = 0; i < n_queries; i++) {
        size_t i_orig_idx = batch_offset + i;
        size_t i_out_idx = out_offset + i;
        if (i_out_idx < rows) {
          for (std::uint32_t j = 0; j < k; j++) {
            auto act_idx = std::int32_t(neighbors_host.data[i_out_idx * k + j]);
            for (std::uint32_t l = 0; l < k; l++) {
              auto exp_idx = gt[i_orig_idx * max_k + l];
              if (act_idx == exp_idx) {
                match_count++;
                break;
              }
            }
          }
        }
      }
      out_offset += n_queries;
      batch_offset = (batch_offset + queries_stride) % query_set_size;
    }
    double actual_recall = static_cast<double>(match_count) / static_cast<double>(total_count);
    state.counters.insert({"Recall", {actual_recall, benchmark::Counter::kAvgThreads}});
  }
}
/**
 * Print the gbench default help followed by the benchmark-specific flags.
 */
inline void printf_usage()
{
  ::benchmark::PrintDefaultHelp();
  // Fixed: the "--mode" usage line was missing its closing ']'.
  fprintf(stdout,
          " [--build|--search] \n"
          " [--force]\n"
          " [--data_prefix=<prefix>]\n"
          " [--index_prefix=<prefix>]\n"
          " [--override_kv=<key:value1:value2:...:valueN>]\n"
          " [--mode=<latency|throughput>]\n"
          " [--threads=min[:max]]\n"
          " <conf>.json\n"
          "\n"
          "Note the non-standard benchmark parameters:\n"
          "  --build: build mode, will build index\n"
          "  --search: search mode, will search using the built index\n"
          "     one and only one of --build and --search should be specified\n"
          "  --force: force overwriting existing index files\n"
          "  --data_prefix=<prefix>:"
          " prepend <prefix> to dataset file paths specified in the <conf>.json (default = "
          "'data/').\n"
          "  --index_prefix=<prefix>:"
          " prepend <prefix> to index file paths specified in the <conf>.json (default = "
          "'index/').\n"
          "  --override_kv=<key:value1:value2:...:valueN>:"
          " override a build/search key one or more times multiplying the number of configurations;"
          " you can use this parameter multiple times to get the Cartesian product of benchmark"
          " configs.\n"
          "  --mode=<latency|throughput>"
          " run the benchmarks in latency (accumulate times spent in each batch) or "
          " throughput (pipeline batches and measure end-to-end) mode\n"
          "  --threads=min[:max] specify the number of threads to use for throughput benchmark."
          " Power of 2 values between 'min' and 'max' will be used. If only 'min' is specified,"
          " then a single test is run with 'min' threads. By default min=1, max=<num hyper"
          " threads>.\n");
}
/**
 * Register one gbench build benchmark per index configuration. The override
 * suffix is appended to the benchmark name and (with '/' mapped to '-') to the
 * index file name, so each configuration writes a distinct index file.
 */
template <typename T>
void register_build(std::shared_ptr<const Dataset<T>> dataset,
                    std::vector<Configuration::Index> indices,
                    bool force_overwrite)
{
  for (auto index : indices) {
    auto suffix = static_cast<std::string>(index.build_param["override_suffix"]);
    index.build_param.erase("override_suffix");
    auto file_suffix = suffix;
    std::replace(file_suffix.begin(), file_suffix.end(), '/', '-');
    index.file += file_suffix;
    ::benchmark::RegisterBenchmark(
      index.name + suffix, bench_build<T>, dataset, index, force_overwrite)
      ->Unit(benchmark::kSecond)
      ->MeasureProcessCPUTime()
      ->UseRealTime();
  }
}
/**
 * Register one gbench search benchmark per (index, search-param) pair.
 * In THROUGHPUT mode each benchmark additionally runs over a range of thread
 * counts.
 */
template <typename T>
void register_search(std::shared_ptr<const Dataset<T>> dataset,
                     std::vector<Configuration::Index> indices,
                     Objective metric_objective,
                     const std::vector<int>& threads)
{
  for (auto index : indices) {
    for (std::size_t i = 0; i < index.search_params.size(); i++) {
      auto suf = static_cast<std::string>(index.search_params[i]["override_suffix"]);
      index.search_params[i].erase("override_suffix");
      auto* b = ::benchmark::RegisterBenchmark(
                  index.name + suf, bench_search<T>, index, i, dataset, metric_objective)
                  ->Unit(benchmark::kMillisecond)
                  /**
                   * The following are important for getting accurate QPS measurements on both CPU
                   * and GPU. These make sure that
                   * - `end_to_end` ~ (`Time` * `Iterations`)
                   * - `items_per_second` ~ (`total_queries` / `end_to_end`)
                   * - Throughput = `items_per_second`
                   */
                  ->MeasureProcessCPUTime()
                  ->UseRealTime();
      if (metric_objective == Objective::THROUGHPUT) { b->ThreadRange(threads[0], threads[1]); }
    }
  }
}
/**
 * Build the dataset object from the configuration, attach metadata to the
 * gbench context, and register either the build or the search benchmarks
 * (exactly one of build_mode/search_mode is set by run_main).
 */
template <typename T>
void dispatch_benchmark(const Configuration& conf,
                        bool force_overwrite,
                        bool build_mode,
                        bool search_mode,
                        std::string data_prefix,
                        std::string index_prefix,
                        kv_series override_kv,
                        Objective metric_objective,
                        const std::vector<int>& threads)
{
  if (cudart.found()) {
    for (auto [key, value] : cuda_info()) {
      ::benchmark::AddCustomContext(key, value);
    }
  }
  const auto dataset_conf = conf.get_dataset_conf();
  auto base_file = combine_path(data_prefix, dataset_conf.base_file);
  auto query_file = combine_path(data_prefix, dataset_conf.query_file);
  auto gt_file = dataset_conf.groundtruth_neighbors_file;
  if (gt_file.has_value()) { gt_file.emplace(combine_path(data_prefix, gt_file.value())); }
  auto dataset = std::make_shared<BinDataset<T>>(dataset_conf.name,
                                                 base_file,
                                                 dataset_conf.subset_first_row,
                                                 dataset_conf.subset_size,
                                                 query_file,
                                                 dataset_conf.distance,
                                                 gt_file);
  ::benchmark::AddCustomContext("dataset", dataset_conf.name);
  ::benchmark::AddCustomContext("distance", dataset_conf.distance);
  std::vector<Configuration::Index> indices = conf.get_indices();
  if (build_mode) {
    if (file_exists(base_file)) {
      log_info("Using the dataset file '%s'", base_file.c_str());
      ::benchmark::AddCustomContext("n_records", std::to_string(dataset->base_set_size()));
      ::benchmark::AddCustomContext("dim", std::to_string(dataset->dim()));
    } else {
      log_warn("Dataset file '%s' does not exist; benchmarking index building is impossible.",
               base_file.c_str());
    }
    // Expand each index by the Cartesian product of the --override_kv values.
    std::vector<Configuration::Index> more_indices{};
    for (auto& index : indices) {
      for (auto param : apply_overrides(index.build_param, override_kv)) {
        auto modified_index = index;
        modified_index.build_param = param;
        modified_index.file = combine_path(index_prefix, modified_index.file);
        more_indices.push_back(modified_index);
      }
    }
    register_build<T>(dataset, more_indices, force_overwrite);
  } else if (search_mode) {
    if (file_exists(query_file)) {
      log_info("Using the query file '%s'", query_file.c_str());
      ::benchmark::AddCustomContext("max_n_queries", std::to_string(dataset->query_set_size()));
      ::benchmark::AddCustomContext("dim", std::to_string(dataset->dim()));
      if (gt_file.has_value()) {
        if (file_exists(*gt_file)) {
          log_info("Using the ground truth file '%s'", gt_file->c_str());
          ::benchmark::AddCustomContext("max_k", std::to_string(dataset->max_k()));
        } else {
          log_warn("Ground truth file '%s' does not exist; the recall won't be reported.",
                   gt_file->c_str());
        }
      } else {
        log_warn(
          "Ground truth file is not provided; the recall won't be reported. NB: use "
          "the 'groundtruth_neighbors_file' alongside the 'query_file' key to specify the "
          "path to "
          "the ground truth in your conf.json.");
      }
    } else {
      log_warn("Query file '%s' does not exist; benchmarking search is impossible.",
               query_file.c_str());
    }
    for (auto& index : indices) {
      index.search_params = apply_overrides(index.search_params, override_kv);
      index.file = combine_path(index_prefix, index.file);
    }
    register_search<T>(dataset, indices, metric_objective, threads);
  }
}
/**
 * If `arg` exactly equals `pat`, set `result` to true and report that the
 * argument was consumed; otherwise leave `result` untouched.
 */
inline auto parse_bool_flag(const char* arg, const char* pat, bool& result) -> bool
{
  if (strcmp(arg, pat) != 0) { return false; }
  result = true;
  return true;
}
/**
 * Parse a "--key=value" style argument: if `arg` has the exact form
 * "<pat>=<value>", store <value> in `result` and return true.
 *
 * Fixed: the previous version matched any argument merely *prefixed* by `pat`
 * and unconditionally skipped one extra character, so an argument exactly
 * equal to `pat` (no '=') read past the terminating NUL (undefined behavior),
 * and "--keyX=1" wrongly matched "--key". Requiring '=' right after the
 * pattern makes the match exact and safe.
 */
inline auto parse_string_flag(const char* arg, const char* pat, std::string& result) -> bool
{
  auto n = strlen(pat);
  if (strncmp(pat, arg, n) != 0 || arg[n] != '=') { return false; }
  result = arg + n + 1;
  return true;
}
/**
 * Entry point for the benchmark executable: parse the non-standard flags,
 * strip them from argv, load the JSON configuration (last positional arg),
 * dispatch to the dtype-specific benchmark registration, and run gbench.
 * Returns 0 on success, -1 on usage/configuration errors.
 */
inline auto run_main(int argc, char** argv) -> int
{
  bool force_overwrite = false;
  bool build_mode = false;
  bool search_mode = false;
  std::string data_prefix = "data";
  std::string index_prefix = "index";
  std::string new_override_kv = "";
  std::string mode = "latency";
  std::string threads_arg_txt = "";
  std::vector<int> threads = {1, -1};  // min_thread, max_thread
  std::string log_level_str = "";
  int raft_log_level = raft::logger::get(RAFT_NAME).get_level();
  kv_series override_kv{};
  char arg0_default[] = "benchmark";  // NOLINT
  char* args_default = arg0_default;
  if (!argv) {
    argc = 1;
    argv = &args_default;
  }
  if (argc == 1) {
    printf_usage();
    return -1;
  }
  // The configuration file is the last positional argument; pop it off so the
  // remaining args can be forwarded to gbench.
  char* conf_path = argv[--argc];
  std::ifstream conf_stream(conf_path);
  for (int i = 1; i < argc; i++) {
    if (parse_bool_flag(argv[i], "--force", force_overwrite) ||
        parse_bool_flag(argv[i], "--build", build_mode) ||
        parse_bool_flag(argv[i], "--search", search_mode) ||
        parse_string_flag(argv[i], "--data_prefix", data_prefix) ||
        parse_string_flag(argv[i], "--index_prefix", index_prefix) ||
        parse_string_flag(argv[i], "--mode", mode) ||
        parse_string_flag(argv[i], "--override_kv", new_override_kv) ||
        parse_string_flag(argv[i], "--threads", threads_arg_txt) ||
        parse_string_flag(argv[i], "--raft_log_level", log_level_str)) {
      if (!log_level_str.empty()) {
        raft_log_level = std::stoi(log_level_str);
        log_level_str = "";
      }
      // --threads=min[:max]
      if (!threads_arg_txt.empty()) {
        auto threads_arg = split(threads_arg_txt, ':');
        threads[0] = std::stoi(threads_arg[0]);
        if (threads_arg.size() > 1) {
          threads[1] = std::stoi(threads_arg[1]);
        } else {
          threads[1] = threads[0];
        }
        threads_arg_txt = "";
      }
      // --override_kv=key:v1:v2:...; accumulated across repeated occurrences.
      if (!new_override_kv.empty()) {
        auto kvv = split(new_override_kv, ':');
        auto key = kvv[0];
        std::vector<nlohmann::json> vals{};
        for (std::size_t j = 1; j < kvv.size(); j++) {
          vals.push_back(nlohmann::json::parse(kvv[j]));
        }
        override_kv.emplace_back(key, vals);
        new_override_kv = "";
      }
      // Remove the consumed argument so gbench doesn't see it.
      for (int j = i; j < argc - 1; j++) {
        argv[j] = argv[j + 1];
      }
      argc--;
      i--;
    }
  }
  raft::logger::get(RAFT_NAME).set_level(raft_log_level);
  Objective metric_objective = Objective::LATENCY;
  if (mode == "throughput") { metric_objective = Objective::THROUGHPUT; }
  int max_threads =
    (metric_objective == Objective::THROUGHPUT) ? std::thread::hardware_concurrency() : 1;
  if (threads[1] == -1) threads[1] = max_threads;
  if (metric_objective == Objective::LATENCY) {
    if (threads[0] != 1 || threads[1] != 1) {
      log_warn("Latency mode enabled. Overriding threads arg, running with single thread.");
      threads = {1, 1};
    }
  }
  if (build_mode == search_mode) {
    log_error("One and only one of --build and --search should be specified");
    printf_usage();
    return -1;
  }
  if (!conf_stream) {
    log_error("Can't open configuration file: %s", conf_path);
    return -1;
  }
  if (cudart.needed() && !cudart.found()) {
    log_warn("cudart library is not found, GPU-based indices won't work.");
  }
  Configuration conf(conf_stream);
  std::string dtype = conf.get_dataset_conf().dtype;
  if (dtype == "float") {
    dispatch_benchmark<float>(conf,
                              force_overwrite,
                              build_mode,
                              search_mode,
                              data_prefix,
                              index_prefix,
                              override_kv,
                              metric_objective,
                              threads);
  } else if (dtype == "uint8") {
    dispatch_benchmark<std::uint8_t>(conf,
                                     force_overwrite,
                                     build_mode,
                                     search_mode,
                                     data_prefix,
                                     index_prefix,
                                     override_kv,
                                     metric_objective,
                                     threads);
  } else if (dtype == "int8") {
    dispatch_benchmark<std::int8_t>(conf,
                                    force_overwrite,
                                    build_mode,
                                    search_mode,
                                    data_prefix,
                                    index_prefix,
                                    override_kv,
                                    metric_objective,
                                    threads);
  } else {
    log_error("datatype '%s' is not supported", dtype.c_str());
    return -1;
  }
  ::benchmark::Initialize(&argc, argv, printf_usage);
  if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return -1;
  ::benchmark::RunSpecifiedBenchmarks();
  ::benchmark::Shutdown();
  // Release a possibly cached ANN object, so that it cannot be alive longer than the handle
  // to a shared library it depends on (dynamic benchmark executable).
  current_algo.reset();
  return 0;
}
| 0 |
rapidsai_public_repos/cuvs/cpp/bench/ann/src | rapidsai_public_repos/cuvs/cpp/bench/ann/src/common/cuda_pinned_resource.hpp | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <cstddef>
namespace raft::mr {
/**
* @brief `device_memory_resource` derived class that uses cudaMallocHost/Free for
* allocation/deallocation.
*
* This is almost the same as rmm::mr::host::pinned_memory_resource, but it has
* device_memory_resource as base class. Pinned memory can be accessed from device,
* and using this allocator we can create device_mdarray backed by pinned allocator.
*
* TODO(tfeher): it would be preferred to just rely on the existing allocator from rmm
* (pinned_memory_resource), but that is incompatible with the container_policy class
* for device matrix, because the latter expects a device_memory_resource. We shall
* revise this once we progress with Issue https://github.com/rapidsai/raft/issues/1819
*/
class cuda_pinned_resource final : public rmm::mr::device_memory_resource {
 public:
  cuda_pinned_resource()           = default;
  ~cuda_pinned_resource() override = default;
  // The resource is stateless, so default copy/move semantics are correct.
  cuda_pinned_resource(cuda_pinned_resource const&)            = default;
  cuda_pinned_resource(cuda_pinned_resource&&)                 = default;
  cuda_pinned_resource& operator=(cuda_pinned_resource const&) = default;
  cuda_pinned_resource& operator=(cuda_pinned_resource&&)      = default;

  /**
   * @brief Query whether the resource supports use of non-null CUDA streams for
   * allocation/deallocation. `cuda_pinned_resource` does not support streams.
   *
   * @returns bool false
   */
  [[nodiscard]] bool supports_streams() const noexcept override { return false; }

  /**
   * @brief Query whether the resource supports the get_mem_info API.
   *
   * @return true
   */
  [[nodiscard]] bool supports_get_mem_info() const noexcept override { return true; }

 private:
  /**
   * @brief Allocates pinned (page-locked) host memory of size at least `bytes`
   * using cudaMallocHost.
   *
   * The returned pointer has at least 256B alignment.
   *
   * @note Stream argument is ignored; cudaMallocHost is synchronous.
   *
   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
   *
   * @param bytes The size, in bytes, of the allocation
   * @return void* Pointer to the newly allocated memory
   */
  void* do_allocate(std::size_t bytes, rmm::cuda_stream_view) override
  {
    void* ptr{nullptr};
    RMM_CUDA_TRY_ALLOC(cudaMallocHost(&ptr, bytes));
    return ptr;
  }

  /**
   * @brief Deallocate pinned host memory pointed to by \p ptr, previously
   * obtained from do_allocate, via cudaFreeHost.
   *
   * @note Stream argument is ignored.
   *
   * @throws Nothing.
   *
   * @param ptr Pointer to be deallocated
   */
  void do_deallocate(void* ptr, std::size_t, rmm::cuda_stream_view) override
  {
    RMM_ASSERT_CUDA_SUCCESS(cudaFreeHost(ptr));
  }

  /**
   * @brief Compare this resource to another.
   *
   * Two cuda_pinned_resources always compare equal, because they can each
   * deallocate memory allocated by the other.
   *
   * @throws Nothing.
   *
   * @param other The other resource to compare to
   * @return true If the two resources are equivalent
   * @return false If the two resources are not equal
   */
  [[nodiscard]] bool do_is_equal(device_memory_resource const& other) const noexcept override
  {
    return dynamic_cast<cuda_pinned_resource const*>(&other) != nullptr;
  }

  /**
   * @brief Get free and available memory for memory resource.
   *
   * @note cudaMemGetInfo reports the current *device*'s free/total memory, not
   * host memory, even though this resource allocates pinned host memory —
   * NOTE(review): confirm this is the intended semantic for callers.
   *
   * @throws `rmm::cuda_error` if unable to retrieve memory info.
   *
   * @return std::pair containing free_size and total_size of memory
   */
  [[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
    rmm::cuda_stream_view) const override
  {
    std::size_t free_size{};
    std::size_t total_size{};
    RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
    return std::make_pair(free_size, total_size);
  }
};
} // namespace raft::mr | 0 |
rapidsai_public_repos/cuvs/cpp/bench | rapidsai_public_repos/cuvs/cpp/bench/micro/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# ##################################################################################################
# * compiler function -----------------------------------------------------------------------------
# ConfigureBench(NAME <target> PATH <sources...> [OPTIONAL] [LIB] [EXPLICIT_INSTANTIATE_ONLY])
#
# Defines, configures, and installs one micro-benchmark executable target:
# links it against raft/cuvs/google-benchmark, applies the project's C++/CUDA
# standards and compile flags, and registers it for the `testing` install
# component under bin/gbench/micro/libcuvs.
function(ConfigureBench)

  set(options OPTIONAL LIB EXPLICIT_INSTANTIATE_ONLY)
  set(oneValueArgs NAME)
  set(multiValueArgs PATH TARGETS CONFIGURATIONS)
  cmake_parse_arguments(ConfigureBench "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

  set(BENCH_NAME ${ConfigureBench_NAME})

  add_executable(${BENCH_NAME} ${ConfigureBench_PATH})

  target_link_libraries(
    ${BENCH_NAME}
    PRIVATE raft::raft cuvs_internal benchmark::benchmark Threads::Threads
            $<TARGET_NAME_IF_EXISTS:OpenMP::OpenMP_CXX> $<TARGET_NAME_IF_EXISTS:conda_env>
  )

  set_target_properties(
    ${BENCH_NAME}
    PROPERTIES # set target compile options
               INSTALL_RPATH "\$ORIGIN/../../../lib"
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
  )

  target_compile_options(
    ${BENCH_NAME} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${CUVS_CXX_FLAGS}>"
                          "$<$<COMPILE_LANGUAGE:CUDA>:${CUVS_CUDA_FLAGS}>"
  )

  # Bug fix: this previously tested ConfigureTest_EXPLICIT_INSTANTIATE_ONLY (a
  # copy-paste from the test helper). cmake_parse_arguments above uses the
  # "ConfigureBench" prefix, so the old variable was never set and the
  # EXPLICIT_INSTANTIATE_ONLY option silently had no effect.
  if(ConfigureBench_EXPLICIT_INSTANTIATE_ONLY)
    target_compile_definitions(${BENCH_NAME} PRIVATE "CUVS_EXPLICIT_INSTANTIATE_ONLY")
  endif()

  target_include_directories(
    ${BENCH_NAME} PUBLIC "$<BUILD_INTERFACE:${CUVS_SOURCE_DIR}/bench/micro>"
  )

  install(
    TARGETS ${BENCH_NAME}
    COMPONENT testing
    DESTINATION bin/gbench/micro/libcuvs
    EXCLUDE_FROM_ALL
  )
endfunction()
# Register the micro-benchmark executables. Each call below defines one
# benchmark target via ConfigureBench; all targets share bench/micro/main.cpp
# as the google-benchmark entry point. CMake treats newlines and spaces as
# equivalent argument separators, so the uniform one-source-per-line layout
# here is purely cosmetic.
if(BUILD_MICRO_BENCH)
  ConfigureBench(
    NAME
    CLUSTER_BENCH
    PATH
    bench/micro/cluster/kmeans_balanced.cu
    bench/micro/cluster/kmeans.cu
    bench/micro/main.cpp
    OPTIONAL
    LIB
    EXPLICIT_INSTANTIATE_ONLY
  )

  ConfigureBench(
    NAME
    TUNE_DISTANCE
    PATH
    bench/micro/distance/tune_pairwise/kernel.cu
    bench/micro/distance/tune_pairwise/bench.cu
    bench/micro/main.cpp
  )

  ConfigureBench(
    NAME
    DISTANCE_BENCH
    PATH
    bench/micro/distance/distance_cosine.cu
    bench/micro/distance/distance_exp_l2.cu
    bench/micro/distance/distance_l1.cu
    bench/micro/distance/distance_unexp_l2.cu
    bench/micro/distance/fused_l2_nn.cu
    bench/micro/distance/masked_nn.cu
    bench/micro/distance/kernels.cu
    bench/micro/main.cpp
    OPTIONAL
    LIB
    EXPLICIT_INSTANTIATE_ONLY
  )

  ConfigureBench(
    NAME
    NEIGHBORS_BENCH
    PATH
    bench/micro/neighbors/knn/brute_force_float_int64_t.cu
    bench/micro/neighbors/knn/brute_force_float_uint32_t.cu
    bench/micro/neighbors/knn/cagra_float_uint32_t.cu
    bench/micro/neighbors/knn/ivf_flat_filter_float_int64_t.cu
    bench/micro/neighbors/knn/ivf_flat_float_int64_t.cu
    bench/micro/neighbors/knn/ivf_flat_int8_t_int64_t.cu
    bench/micro/neighbors/knn/ivf_flat_uint8_t_int64_t.cu
    bench/micro/neighbors/knn/ivf_pq_float_int64_t.cu
    bench/micro/neighbors/knn/ivf_pq_filter_float_int64_t.cu
    bench/micro/neighbors/knn/ivf_pq_int8_t_int64_t.cu
    bench/micro/neighbors/knn/ivf_pq_uint8_t_int64_t.cu
    bench/micro/neighbors/refine_float_int64_t.cu
    bench/micro/neighbors/refine_uint8_t_int64_t.cu
    bench/micro/main.cpp
    OPTIONAL
    LIB
    EXPLICIT_INSTANTIATE_ONLY
  )
endif()
| 0 |
rapidsai_public_repos/cuvs/cpp/bench | rapidsai_public_repos/cuvs/cpp/bench/micro/main.cpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Shared entry point for all micro-benchmark executables: BENCHMARK_MAIN()
// expands to a main() that parses google-benchmark command-line flags and
// runs every benchmark registered in the linked translation units.
#include <benchmark/benchmark.h>  // NOLINT

BENCHMARK_MAIN();
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.