repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/slice.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
#include <raft/util/input_validation.hpp>
namespace raft::matrix {
/**
* @defgroup matrix_slice Matrix slicing
* @{
*/
/**
 * Describes a rectangular sub-matrix selection: the half-open region
 * covering rows [row1, row2) and columns [col1, col2).
 */
template <typename idx_t>
struct slice_coordinates {
  idx_t row1;  ///< first row of the selection (inclusive, 0-based)
  idx_t col1;  ///< first column of the selection (inclusive, 0-based)
  idx_t row2;  ///< one past the last selected row (exclusive bound)
  idx_t col2;  ///< one past the last selected column (exclusive bound)

  slice_coordinates(idx_t first_row, idx_t first_col, idx_t last_row, idx_t last_col)
    : row1{first_row}, col1{first_col}, row2{last_row}, col2{last_col}
  {
  }
};
/**
 * @brief Slice a matrix: copy the region described by `coords` from `in` into `out`
 * @tparam m_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @tparam layout_t layout of the matrix data (must be row- or column-major)
 * @param[in] handle: raft handle
 * @param[in] in: input matrix
 * @param[out] out: output matrix
 * @param[in] coords: coordinates of the wanted slice
 * example: Slice the 2nd and 3rd columns of a 4x3 matrix: slice(handle, in, out, {0, 1, 4, 3});
 */
template <typename m_t, typename idx_t, typename layout_t>
void slice(raft::resources const& handle,
           raft::device_matrix_view<const m_t, idx_t, layout_t> in,
           raft::device_matrix_view<m_t, idx_t, layout_t> out,
           slice_coordinates<idx_t> coords)
{
  // Validate layout and that the requested region is non-empty and within bounds
  // before dispatching to the detail kernel.
  RAFT_EXPECTS(raft::is_row_or_column_major(in), "Matrix layout must be row- or column-major");
  RAFT_EXPECTS(coords.row2 > coords.row1, "row2 must be > row1");
  RAFT_EXPECTS(coords.col2 > coords.col1, "col2 must be > col1");
  RAFT_EXPECTS(coords.row1 >= 0, "row1 must be >= 0");
  RAFT_EXPECTS(coords.row2 <= in.extent(0), "row2 must be <= number of rows in the input matrix");
  RAFT_EXPECTS(coords.col1 >= 0, "col1 must be >= 0");
  RAFT_EXPECTS(coords.col2 <= in.extent(1),
               "col2 must be <= number of columns in the input matrix");
  detail::sliceMatrix(in.data_handle(),
                      in.extent(0),
                      in.extent(1),
                      out.data_handle(),
                      coords.row1,
                      coords.col1,
                      coords.row2,
                      coords.col2,
                      raft::is_row_major(in),
                      resource::get_cuda_stream(handle));
}
/** @} */ // end group matrix_slice
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/argmax.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/math.cuh>
namespace raft::matrix {
/**
* @defgroup argmax Argmax operation
* @{
*/
/**
 * @brief Argmax: find the col idx with maximum value for each row
 * @tparam math_t type of the matrix elements
 * @tparam idx_t type of the output indices
 * @tparam matrix_idx_t integer type used for addressing the matrix
 * @param[in] handle: raft handle
 * @param[in] in: input matrix of size (n_rows, n_cols)
 * @param[out] out: output vector of size n_rows
 */
template <typename math_t, typename idx_t, typename matrix_idx_t>
void argmax(raft::resources const& handle,
            raft::device_matrix_view<const math_t, matrix_idx_t, row_major> in,
            raft::device_vector_view<idx_t, matrix_idx_t> out)
{
  RAFT_EXPECTS(out.extent(0) == in.extent(0),
               "Size of output vector must equal number of rows in input matrix.");
  // NOTE: extents are passed as (extent(1), extent(0)); see detail::argmax for
  // the expected argument order on row-major data.
  detail::argmax(in.data_handle(),
                 in.extent(1),
                 in.extent(0),
                 out.data_handle(),
                 resource::get_cuda_stream(handle));
}
/** @} */ // end of group argmax
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/matrix_types.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
namespace raft::matrix {
/** Separator characters used when formatting a matrix for printing. */
struct print_separators {
  char horizontal{' '};  ///< printed between values within a row
  char vertical{'\n'};   ///< printed between rows
};
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/matrix.hpp | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
* DISCLAIMER: this file is deprecated: use matrix.cuh instead
*/
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the cuh version instead.")
#include "matrix.cuh"
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/reciprocal.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/math.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_reciprocal Matrix Reciprocal Operations
* @{
*/
/**
 * @brief Reciprocal of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[in] in: input matrix
 * @param[out] out: output matrix. The result is stored in the out matrix
 * @param[in] scalar: every element is multiplied with scalar
 * @param[in] setzero round down to zero if the input is less the threshold
 * @param[in] thres the threshold used to forcibly set inputs to zero
 */
template <typename math_t, typename idx_t, typename layout>
void reciprocal(raft::resources const& handle,
                raft::device_matrix_view<const math_t, idx_t, layout> in,
                raft::device_matrix_view<math_t, idx_t, layout> out,
                raft::host_scalar_view<math_t> scalar,
                bool setzero = false,
                math_t thres = 1e-15)
{
  RAFT_EXPECTS(in.size() == out.size(), "Input and output matrices must have the same size.");
  detail::reciprocal<math_t>(in.data_handle(),
                             out.data_handle(),
                             *(scalar.data_handle()),
                             in.size(),
                             resource::get_cuda_stream(handle),
                             setzero,
                             thres);
}
/**
 * @brief Reciprocal of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle to manage resources
 * @param[inout] inout: input matrix with in-place results
 * @param[in] scalar: every element is multiplied with scalar
 * @param[in] setzero round down to zero if the input is less the threshold
 * @param[in] thres the threshold used to forcibly set inputs to zero
 */
template <typename math_t, typename idx_t, typename layout>
void reciprocal(raft::resources const& handle,
                raft::device_matrix_view<math_t, idx_t, layout> inout,
                raft::host_scalar_view<math_t> scalar,
                bool setzero = false,
                math_t thres = 1e-15)
{
  detail::reciprocal<math_t>(inout.data_handle(),
                             *(scalar.data_handle()),
                             inout.size(),
                             resource::get_cuda_stream(handle),
                             setzero,
                             thres);
}
/** @} */ // end group matrix_reciprocal
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/copy.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
#include <raft/util/input_validation.hpp>
namespace raft::matrix {
/**
* @defgroup matrix_copy Matrix copy operations
* @{
*/
/**
 * @brief Copy selected rows of the input matrix into contiguous space.
 *
 * For a column-major layout, on exit out[i + k*n_rows] = in[indices[i] + k*n_rows],
 * where i = 0..n_rows_indices-1, and k = 0..n_cols-1. Row-major inputs gather the
 * same logical rows (the layout flag is forwarded to the detail implementation).
 *
 * @tparam m_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle raft handle
 * @param[in] in input matrix
 * @param[out] out output matrix
 * @param[in] indices of the rows to be copied
 */
template <typename m_t, typename idx_t, typename layout>
void copy_rows(raft::resources const& handle,
               raft::device_matrix_view<const m_t, idx_t, layout> in,
               raft::device_matrix_view<m_t, idx_t, layout> out,
               raft::device_vector_view<const idx_t, idx_t> indices)
{
  RAFT_EXPECTS(in.extent(1) == out.extent(1),
               "Input and output matrices must have same number of columns");
  RAFT_EXPECTS(indices.extent(0) == out.extent(0),
               "Number of rows in output matrix must equal number of indices");
  detail::copyRows(in.data_handle(),
                   in.extent(0),
                   in.extent(1),
                   out.data_handle(),
                   indices.data_handle(),
                   indices.extent(0),
                   resource::get_cuda_stream(handle),
                   raft::is_row_major(in));
}
/**
 * @brief Copy a row-major matrix into another row-major matrix of the same shape.
 * @param[in] handle: raft handle (provides the CUDA stream for the async copy)
 * @param[in] in: source matrix
 * @param[out] out: destination matrix; must match the shape of `in`
 */
template <typename m_t, typename matrix_idx_t>
void copy(raft::resources const& handle,
          raft::device_matrix_view<const m_t, matrix_idx_t, row_major> in,
          raft::device_matrix_view<m_t, matrix_idx_t, row_major> out)
{
  RAFT_EXPECTS(in.extent(0) == out.extent(0) && in.extent(1) == out.extent(1),
               "Input and output matrix shapes must match.");
  // Both views are dense with identical shapes, so a flat element-wise async copy suffices.
  auto const n_elements = in.extent(0) * in.extent(1);
  raft::copy_async(
    out.data_handle(), in.data_handle(), n_elements, resource::get_cuda_stream(handle));
}
/**
 * @brief Copy a column-major matrix into another column-major matrix of the same shape.
 * @param[in] handle: raft handle (provides the CUDA stream for the async copy)
 * @param[in] in: source matrix
 * @param[out] out: destination matrix; must match the shape of `in`
 */
template <typename m_t, typename matrix_idx_t>
void copy(raft::resources const& handle,
          raft::device_matrix_view<const m_t, matrix_idx_t, col_major> in,
          raft::device_matrix_view<m_t, matrix_idx_t, col_major> out)
{
  RAFT_EXPECTS(in.extent(0) == out.extent(0) && in.extent(1) == out.extent(1),
               "Input and output matrix shapes must match.");
  // Both views are dense with identical shapes, so a flat element-wise async copy suffices.
  auto const n_elements = in.extent(0) * in.extent(1);
  raft::copy_async(
    out.data_handle(), in.data_handle(), n_elements, resource::get_cuda_stream(handle));
}
/**
 * @brief copy matrix operation for column major matrices. The top-left
 * out.extent(0) x out.extent(1) sub-matrix of "in" is copied to "out".
 * @tparam m_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @param[in] handle: raft handle for managing resources
 * @param[in] in: input matrix
 * @param[out] out: output matrix; its extents select how much of "in" is copied
 */
template <typename m_t, typename idx_t>
void trunc_zero_origin(raft::resources const& handle,
                       raft::device_matrix_view<const m_t, idx_t, col_major> in,
                       raft::device_matrix_view<m_t, idx_t, col_major> out)
{
  RAFT_EXPECTS(out.extent(0) <= in.extent(0) && out.extent(1) <= in.extent(1),
               "Output matrix must have less or equal number of rows and columns");
  detail::truncZeroOrigin<m_t, idx_t>(in.data_handle(),
                                      in.extent(0),
                                      out.data_handle(),
                                      out.extent(0),
                                      out.extent(1),
                                      resource::get_cuda_stream(handle));
}
/** @} */ // end of group matrix_copy
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/math.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use versions in individual header files instead.
*/
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use versions in individual header files instead.")
#ifndef __MATH_H
#define __MATH_H
#pragma once
#include "detail/math.cuh"
namespace raft {
namespace matrix {
/**
* @defgroup MatrixMathOp math operation on the input matrix
* @{
*/
/**
 * @brief Power of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @param in: input matrix
 * @param out: output matrix. The result is stored in the out matrix
 * @param scalar: every element is multiplied with scalar.
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t>
void power(math_t* in, math_t* out, math_t scalar, int len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::power(in, out, scalar, len, stream);
}
/**
 * @brief Power of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @param inout: input matrix and also the result is stored
 * @param scalar: every element is multiplied with scalar.
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t>
void power(math_t* inout, math_t scalar, int len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::power(inout, scalar, len, stream);
}
/**
 * @brief Power of every element in the input matrix (in place, no scalar)
 * @tparam math_t data-type upon which the math operation will be performed
 * @param inout: input matrix and also the result is stored
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t>
void power(math_t* inout, int len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::power(inout, len, stream);
}
/**
 * @brief Power of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @param in: input matrix
 * @param out: output matrix. The result is stored in the out matrix
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t>
void power(math_t* in, math_t* out, int len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::power(in, out, len, stream);
}
/**
 * @brief Square root of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param in: input matrix
 * @param out: output matrix. The result is stored in the out matrix
 * @param scalar: every element is multiplied with scalar
 * @param len: number elements of input matrix
 * @param stream cuda stream
 * @param set_neg_zero whether to set negative numbers to zero
 */
template <typename math_t, typename IdxType = int>
void seqRoot(math_t* in,
             math_t* out,
             math_t scalar,
             IdxType len,
             cudaStream_t stream,
             bool set_neg_zero = false)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::seqRoot(in, out, scalar, len, stream, set_neg_zero);
}
/**
 * @brief Square root of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param inout: input matrix and also the result is stored
 * @param scalar: every element is multiplied with scalar
 * @param len: number elements of input matrix
 * @param stream cuda stream
 * @param set_neg_zero whether to set negative numbers to zero
 */
template <typename math_t, typename IdxType = int>
void seqRoot(
  math_t* inout, math_t scalar, IdxType len, cudaStream_t stream, bool set_neg_zero = false)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::seqRoot(inout, scalar, len, stream, set_neg_zero);
}
/**
 * @brief Square root of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param in: input matrix
 * @param out: output matrix. The result is stored in the out matrix
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t, typename IdxType = int>
void seqRoot(math_t* in, math_t* out, IdxType len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::seqRoot(in, out, len, stream);
}
/**
 * @brief Square root of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param inout: input matrix with in-place results
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t, typename IdxType = int>
void seqRoot(math_t* inout, IdxType len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::seqRoot(inout, len, stream);
}
/**
 * @brief sets the small values to zero based on a defined threshold
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param out: output matrix. The result is stored in the out matrix
 * @param in: input matrix
 * @param len: number elements of input matrix
 * @param stream cuda stream
 * @param thres threshold to set values to zero
 */
template <typename math_t, typename IdxType = int>
void setSmallValuesZero(
  math_t* out, const math_t* in, IdxType len, cudaStream_t stream, math_t thres = 1e-15)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::setSmallValuesZero(out, in, len, stream, thres);
}
/**
 * @brief sets the small values to zero based on a defined threshold (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param inout: input matrix and also the result is stored
 * @param len: number elements of input matrix
 * @param stream cuda stream
 * @param thres: threshold
 */
template <typename math_t, typename IdxType = int>
void setSmallValuesZero(math_t* inout, IdxType len, cudaStream_t stream, math_t thres = 1e-15)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::setSmallValuesZero(inout, len, stream, thres);
}
/**
 * @brief Reciprocal of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param in: input matrix
 * @param out: output matrix. The result is stored in the out matrix
 * @param scalar: every element is multiplied with scalar
 * @param len: number elements of input matrix
 * @param stream cuda stream
 * @param setzero round down to zero if the input is less the threshold
 * @param thres the threshold used to forcibly set inputs to zero
 */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* in,
                math_t* out,
                math_t scalar,
                IdxType len,  // was hard-coded `int`; use IdxType like the sibling overloads
                cudaStream_t stream,
                bool setzero = false,
                math_t thres = 1e-15)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::reciprocal(in, out, scalar, len, stream, setzero, thres);
}
/**
 * @brief Reciprocal of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param inout: input matrix with in-place results
 * @param scalar: every element is multiplied with scalar
 * @param len: number elements of input matrix
 * @param stream cuda stream
 * @param setzero round down to zero if the input is less the threshold
 * @param thres the threshold used to forcibly set inputs to zero
 */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* inout,
                math_t scalar,
                IdxType len,
                cudaStream_t stream,
                bool setzero = false,
                math_t thres = 1e-15)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::reciprocal(inout, scalar, len, stream, setzero, thres);
}
/**
 * @brief Reciprocal of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param inout: input matrix and also the result is stored
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* inout, IdxType len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::reciprocal(inout, len, stream);
}
/**
 * @brief Reciprocal of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param in: input matrix
 * @param out: output matrix. The result is stored in the out matrix
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* in, math_t* out, IdxType len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::reciprocal(in, out, len, stream);
}
/**
 * @brief set values to scalar in matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @param out output matrix. The result is stored in the out matrix
 * @param in input matrix
 * @param scalar scalar value
 * @param len number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t>
void setValue(math_t* out, const math_t* in, math_t scalar, int len, cudaStream_t stream = 0)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::setValue(out, in, scalar, len, stream);
}
/**
 * @brief ratio of every element over sum of input vector is calculated
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @param handle raft handle managing the resources used by the operation
 * @param src: input matrix
 * @param dest: output matrix. The result is stored in the dest matrix
 * @param len: number elements of input matrix
 * @param stream cuda stream
 */
template <typename math_t, typename IdxType = int>
void ratio(
  raft::resources const& handle, math_t* src, math_t* dest, IdxType len, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::ratio(handle, src, dest, len, stream);
}
/** @} */
/**
 * @brief Argmin: find the row idx with minimum value for each column
 * @tparam math_t data-type of the matrix elements
 * @tparam out_t data-type of the output indices
 * @tparam idx_t Integer type used for addressing
 * @param in: input matrix (column-major)
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param out: output vector of size n_cols
 * @param stream: cuda stream
 */
template <typename math_t, typename out_t, typename idx_t = int>
void argmin(const math_t* in, idx_t n_rows, idx_t n_cols, out_t* out, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::argmin(in, n_rows, n_cols, out, stream);
}
/**
 * @brief Argmax: find the row idx with maximum value for each column
 * @tparam math_t data-type of the matrix elements
 * @tparam out_t data-type of the output indices
 * @tparam idx_t Integer type used for addressing
 * @param in: input matrix (column-major)
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param out: output vector of size n_cols
 * @param stream: cuda stream
 */
template <typename math_t, typename out_t, typename idx_t = int>
void argmax(const math_t* in, idx_t n_rows, idx_t n_cols, out_t* out, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::argmax(in, n_rows, n_cols, out, stream);
}
/**
 * @brief sign flip for PCA. This is used to stabilize the sign of column
 * major eigen vectors. Flips the sign if the column has negative |max|.
 * @tparam math_t data-type upon which the math operation will be performed
 * @param inout: input matrix. Result also stored in this parameter
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param stream cuda stream
 */
template <typename math_t>
void signFlip(math_t* inout, int n_rows, int n_cols, cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::signFlip(inout, n_rows, n_cols, stream);
}
/**
 * @brief multiply each row or column of matrix with vector
 * @tparam Type data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block used by the underlying kernel
 * @param data input matrix, results are in-place
 * @param vec input vector
 * @param n_row number of rows of input matrix
 * @param n_col number of columns of input matrix
 * @param rowMajor whether matrix is row major
 * @param bcastAlongRows whether to broadcast vector along rows of matrix or columns
 * @param stream cuda stream
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryMult(Type* data,
                            const Type* vec,
                            IdxType n_row,
                            IdxType n_col,
                            bool rowMajor,
                            bool bcastAlongRows,
                            cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::matrixVectorBinaryMult<Type, IdxType, TPB>(
    data, vec, n_row, n_col, rowMajor, bcastAlongRows, stream);
}
/**
 * @brief multiply each row or column of matrix with vector, skipping zeros in vector
 * @tparam Type data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block used by the underlying kernel
 * @param data input matrix, results are in-place
 * @param vec input vector
 * @param n_row number of rows of input matrix
 * @param n_col number of columns of input matrix
 * @param rowMajor whether matrix is row major
 * @param bcastAlongRows whether to broadcast vector along rows of matrix or columns
 * @param stream cuda stream
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryMultSkipZero(Type* data,
                                    const Type* vec,
                                    IdxType n_row,
                                    IdxType n_col,
                                    bool rowMajor,
                                    bool bcastAlongRows,
                                    cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::matrixVectorBinaryMultSkipZero<Type, IdxType, TPB>(
    data, vec, n_row, n_col, rowMajor, bcastAlongRows, stream);
}
/**
 * @brief divide each row or column of matrix with vector
 * @tparam Type data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block used by the underlying kernel
 * @param data input matrix, results are in-place
 * @param vec input vector
 * @param n_row number of rows of input matrix
 * @param n_col number of columns of input matrix
 * @param rowMajor whether matrix is row major
 * @param bcastAlongRows whether to broadcast vector along rows of matrix or columns
 * @param stream cuda stream
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryDiv(Type* data,
                           const Type* vec,
                           IdxType n_row,
                           IdxType n_col,
                           bool rowMajor,
                           bool bcastAlongRows,
                           cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::matrixVectorBinaryDiv<Type, IdxType, TPB>(
    data, vec, n_row, n_col, rowMajor, bcastAlongRows, stream);
}
/**
 * @brief divide each row or column of matrix with vector, skipping zeros in vector
 * @tparam Type data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block used by the underlying kernel
 * @param data input matrix, results are in-place
 * @param vec input vector
 * @param n_row number of rows of input matrix
 * @param n_col number of columns of input matrix
 * @param rowMajor whether matrix is row major
 * @param bcastAlongRows whether to broadcast vector along rows of matrix or columns
 * @param stream cuda stream
 * @param return_zero result is zero if true and vector value is below threshold, original value if
 * false
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryDivSkipZero(Type* data,
                                   const Type* vec,
                                   IdxType n_row,
                                   IdxType n_col,
                                   bool rowMajor,
                                   bool bcastAlongRows,
                                   cudaStream_t stream,
                                   bool return_zero = false)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::matrixVectorBinaryDivSkipZero<Type, IdxType, TPB>(
    data, vec, n_row, n_col, rowMajor, bcastAlongRows, stream, return_zero);
}
/**
 * @brief add each row or column of matrix with vector
 * @tparam Type data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block used by the underlying kernel
 * @param data input matrix, results are in-place
 * @param vec input vector
 * @param n_row number of rows of input matrix
 * @param n_col number of columns of input matrix
 * @param rowMajor whether matrix is row major
 * @param bcastAlongRows whether to broadcast vector along rows of matrix or columns
 * @param stream cuda stream
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryAdd(Type* data,
                           const Type* vec,
                           IdxType n_row,
                           IdxType n_col,
                           bool rowMajor,
                           bool bcastAlongRows,
                           cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::matrixVectorBinaryAdd<Type, IdxType, TPB>(
    data, vec, n_row, n_col, rowMajor, bcastAlongRows, stream);
}
/**
 * @brief subtract each row or column of matrix with vector
 * @tparam Type data-type upon which the math operation will be performed
 * @tparam IdxType Integer type used to for addressing
 * @tparam TPB threads per block used by the underlying kernel
 * @param data input matrix, results are in-place
 * @param vec input vector
 * @param n_row number of rows of input matrix
 * @param n_col number of columns of input matrix
 * @param rowMajor whether matrix is row major
 * @param bcastAlongRows whether to broadcast vector along rows of matrix or columns
 * @param stream cuda stream
 */
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinarySub(Type* data,
                           const Type* vec,
                           IdxType n_row,
                           IdxType n_col,
                           bool rowMajor,
                           bool bcastAlongRows,
                           cudaStream_t stream)
{
  // Deprecated shim: forwards directly to the detail implementation.
  detail::matrixVectorBinarySub<Type, IdxType, TPB>(
    data, vec, n_row, n_col, rowMajor, bcastAlongRows, stream);
}
}; // end namespace matrix
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/triangular.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_triangular Extract Matrix Triangles
* @{
*/
/**
 * @brief Copy the upper triangular part of a matrix to another
 * @tparam m_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @param[in] handle: raft handle
 * @param[in] src: input matrix with a size of n_rows x n_cols
 * @param[out] dst: output matrix with a size of kxk, k = min(n_rows, n_cols)
 */
template <typename m_t, typename idx_t>
void upper_triangular(raft::resources const& handle,
                      raft::device_matrix_view<const m_t, idx_t, col_major> src,
                      raft::device_matrix_view<m_t, idx_t, col_major> dst)
{
  // dst must be the square k x k leading block, k = min(rows, cols) of src.
  auto k = std::min(src.extent(0), src.extent(1));
  RAFT_EXPECTS(k == dst.extent(0) && k == dst.extent(1),
               "dst should be of size kxk, k = min(n_rows, n_cols)");
  detail::copyUpperTriangular(src.data_handle(),
                              dst.data_handle(),
                              src.extent(0),
                              src.extent(1),
                              resource::get_cuda_stream(handle));
}
/** @} */ // end group matrix_triangular
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/sqrt.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_sqrt Matrix Square Root
* @{
*/
/**
 * @brief Square root of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[in] in: input matrix (read-only; the result is written to `out`)
 * @param[out] out: output matrix. The result is stored in the out matrix
 */
template <typename math_t, typename idx_t, typename layout>
void sqrt(raft::resources const& handle,
          raft::device_matrix_view<const math_t, idx_t, layout> in,
          raft::device_matrix_view<math_t, idx_t, layout> out)
{
  RAFT_EXPECTS(in.size() == out.size(), "Input and output matrices must have same size.");
  // Element-wise over the flattened buffer, so the op is layout-agnostic.
  detail::seqRoot(
    in.data_handle(), out.data_handle(), in.size(), resource::get_cuda_stream(handle));
}
/**
 * @brief Square root of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[inout] inout: input matrix with in-place results
 */
template <typename math_t, typename idx_t, typename layout>
void sqrt(raft::resources const& handle, raft::device_matrix_view<math_t, idx_t, layout> inout)
{
  // Operates on all inout.size() elements of the flattened buffer.
  detail::seqRoot(inout.data_handle(), inout.size(), resource::get_cuda_stream(handle));
}
/**
 * @brief Weighted square root of every element in the input matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[in] in: input matrix (read-only; the result is written to `out`)
 * @param[out] out: output matrix. The result is stored in the out matrix
 * @param[in] scalar: weight applied to every element (dereferenced without a null check)
 * @param[in] set_neg_zero whether to set negative numbers to zero
 */
template <typename math_t, typename idx_t, typename layout>
void weighted_sqrt(raft::resources const& handle,
                   raft::device_matrix_view<const math_t, idx_t, layout> in,
                   raft::device_matrix_view<math_t, idx_t, layout> out,
                   raft::host_scalar_view<math_t> scalar,
                   bool set_neg_zero = false)
{
  RAFT_EXPECTS(in.size() == out.size(), "Input and output matrices must have same size.");
  detail::seqRoot(in.data_handle(),
                  out.data_handle(),
                  *(scalar.data_handle()),
                  in.size(),
                  resource::get_cuda_stream(handle),
                  set_neg_zero);
}
/**
 * @brief Weighted square root of every element in the input matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[inout] inout: input matrix and also the result is stored
 * @param[in] scalar: weight applied to every element (dereferenced without a null check)
 * @param[in] set_neg_zero whether to set negative numbers to zero
 */
template <typename math_t, typename idx_t, typename layout>
void weighted_sqrt(raft::resources const& handle,
                   raft::device_matrix_view<math_t, idx_t, layout> inout,
                   raft::host_scalar_view<math_t> scalar,
                   bool set_neg_zero = false)
{
  detail::seqRoot(inout.data_handle(),
                  *(scalar.data_handle()),
                  inout.size(),
                  resource::get_cuda_stream(handle),
                  set_neg_zero);
}
/** @} */ // end group matrix_sqrt
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/specializations.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/sign_flip.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_sign_flip Matrix sign flip operations
* @{
*/
/**
 * @brief sign flip stabilizes the sign of col major eigen vectors.
 * The sign of a column is flipped if its largest-magnitude element is negative.
 * @tparam math_t floating point type used for matrix elements
 * @tparam idx_t integer type used for indexing
 * @param[in] handle: raft handle
 * @param[inout] inout: input matrix. Result also stored in this parameter
 */
template <typename math_t, typename idx_t>
void sign_flip(raft::resources const& handle,
               raft::device_matrix_view<math_t, idx_t, col_major> inout)
{
  // Column-major only: extent(0) = n_rows, extent(1) = n_cols.
  detail::signFlip(
    inout.data_handle(), inout.extent(0), inout.extent(1), resource::get_cuda_stream(handle));
}
/** @} */ // end group matrix_sign_flip
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/ratio.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_ratio Matrix ratio operations
* @{
*/
/**
 * @brief Computes the ratio of every element over the sum of all input elements.
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle raft handle
 * @param[in] src input matrix
 * @param[out] dest output matrix holding the result
 */
template <typename math_t, typename idx_t, typename layout>
void ratio(raft::resources const& handle,
           raft::device_matrix_view<const math_t, idx_t, layout> src,
           raft::device_matrix_view<math_t, idx_t, layout> dest)
{
  RAFT_EXPECTS(src.size() == dest.size(), "Input and output matrices must be the same size.");
  auto stream = resource::get_cuda_stream(handle);
  auto n      = src.size();
  detail::ratio(handle, src.data_handle(), dest.data_handle(), n, stream);
}
/**
 * @brief ratio of every element over sum of input vector is calculated (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle
 * @param[inout] inout: input matrix; the result overwrites it
 */
template <typename math_t, typename idx_t, typename layout>
void ratio(raft::resources const& handle, raft::device_matrix_view<math_t, idx_t, layout> inout)
{
  // In-place: the same buffer is passed as both source and destination.
  detail::ratio(handle,
                inout.data_handle(),
                inout.data_handle(),
                inout.size(),
                resource::get_cuda_stream(handle));
}
/** @} */ // end group matrix_ratio
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/init.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/linalg/map.cuh>
#include <raft/matrix/detail/math.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_init Matrix initialization operations
* @{
*/
/**
 * @brief set values to scalar in matrix
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam extents dimension and indexing type used for the input
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[in] in input matrix
 * @param[out] out output matrix. The result is stored in the out matrix
 * @param[in] scalar scalar value to fill matrix elements (must be non-null)
 */
template <typename math_t, typename extents, typename layout>
void fill(raft::resources const& handle,
          raft::device_mdspan<const math_t, extents, layout> in,
          raft::device_mdspan<math_t, extents, layout> out,
          raft::host_scalar_view<math_t> scalar)
{
  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Data layout not supported");
  RAFT_EXPECTS(in.size() == out.size(), "Input and output matrices must be the same size.");
  RAFT_EXPECTS(scalar.data_handle() != nullptr, "Empty scalar");
  auto stream     = resource::get_cuda_stream(handle);
  auto fill_value = *(scalar.data_handle());
  detail::setValue(out.data_handle(), in.data_handle(), fill_value, in.size(), stream);
}
/**
 * @brief set values to scalar in matrix (in place)
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam extents dimension and indexing type used for the input
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[inout] inout matrix whose every element is overwritten with `scalar`
 * @param[in] scalar scalar value to fill matrix elements
 */
template <typename math_t, typename extents, typename layout>
void fill(raft::resources const& handle,
          raft::device_mdspan<math_t, extents, layout> inout,
          math_t scalar)
{
  // const_op ignores its inputs and always yields `scalar`.
  linalg::map(handle, inout, raft::const_op{scalar});
}
/** @} */ // end of group matrix_init
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/scatter.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resources.hpp>
#include <raft/matrix/detail/scatter_inplace.cuh>
namespace raft::matrix {
/**
 * @brief In-place scatter elements in a row-major matrix according to a
 * map. The map specifies the new order in which rows of the input matrix are
 * rearranged, i.e. read the destination index from the map, and copy the row. For example,
 * the matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]] with the map [2, 0, 1] will
 * be transformed to [[4, 5, 6], [7, 8, 9], [1, 2, 3]]. Batching is done on
 * columns and an additional scratch space of shape n_rows * cols_batch_size
 * is created. For each batch, chunks of columns from each row are copied
 * into the appropriate location in the scratch space and copied back to
 * the corresponding locations in the input matrix.
 * Note: in-place scatter is not thread safe if the values in the map are not unique.
 * Users must ensure that the map indices are unique and in the range [0, n_rows).
 *
 * @tparam matrix_t Matrix element type
 * @tparam idx_t Integer type used for indexing
 *
 * @param[in] handle raft handle
 * @param[inout] inout input matrix (n_rows * n_cols)
 * @param[in] map Pointer to the input sequence of scatter locations. The length of the map should
 * be equal to the number of rows in the input matrix. Map indices should be unique and in the range
 * [0, n_rows). The map represents a complete permutation of indices.
 * @param[in] col_batch_size (optional) column batch size. Determines the shape of the scratch space
 * (n_rows, col_batch_size). When set to zero (default), no batching is done and an additional
 * scratch space of shape (n_rows, n_cols) is created.
 */
template <typename matrix_t, typename idx_t>
void scatter(raft::resources const& handle,
             raft::device_matrix_view<matrix_t, idx_t, raft::layout_c_contiguous> inout,
             raft::device_vector_view<const idx_t, idx_t, raft::layout_c_contiguous> map,
             idx_t col_batch_size = 0)
{
  // Delegates to the batched in-place implementation; all preconditions on
  // `map` (uniqueness, range) documented above are checked/assumed there.
  detail::scatter(handle, inout, map, col_batch_size);
}
} // namespace raft::matrix | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/norm.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_norm Matrix Norm Operations
* @{
*/
/**
 * @brief Get the L2/F-norm of a matrix
 * @tparam m_t type of the matrix/vector elements
 * @tparam idx_t integer type used for indexing
 * @param[in] handle: raft handle
 * @param[in] in: input matrix/vector; the norm is computed over all in.size() elements
 * @returns matrix l2 norm
 */
template <typename m_t, typename idx_t>
m_t l2_norm(raft::resources const& handle, raft::device_mdspan<const m_t, idx_t> in)
{
  return detail::getL2Norm(handle, in.data_handle(), in.size(), resource::get_cuda_stream(handle));
}
/** @} */ // end of group matrix_norm
} // namespace raft::matrix | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/threshold.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/matrix.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_threshold Matrix thesholding
* @{
*/
/**
 * @brief sets the small values to zero based on a defined threshold
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param handle: raft handle
 * @param[in] in: input matrix
 * @param[out] out: output matrix. The result is stored in the out matrix
 * @param[in] thres threshold to set values to zero
 */
template <typename math_t, typename idx_t, typename layout>
void zero_small_values(raft::resources const& handle,
                       raft::device_matrix_view<const math_t, idx_t, layout> in,
                       raft::device_matrix_view<math_t, idx_t, layout> out,
                       math_t thres = 1e-15)
{
  RAFT_EXPECTS(in.size() == out.size(), "Input and output matrices must have same size");
  auto stream = resource::get_cuda_stream(handle);
  auto n      = in.size();
  detail::setSmallValuesZero(out.data_handle(), in.data_handle(), n, stream, thres);
}
/**
 * @brief sets the small values to zero in-place based on a defined threshold
 * @tparam math_t data-type upon which the math operation will be performed
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[inout] inout: input matrix and also the result is stored
 * @param[in] thres: threshold below which values are zeroed (defaults to 1e-15)
 */
template <typename math_t, typename idx_t, typename layout>
void zero_small_values(raft::resources const& handle,
                       raft::device_matrix_view<math_t, idx_t, layout> inout,
                       math_t thres = 1e-15)
{
  detail::setSmallValuesZero(
    inout.data_handle(), inout.size(), resource::get_cuda_stream(handle), thres);
}
/** @} */ // end group matrix_threshold
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/power.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/matrix/detail/math.cuh>
namespace raft::matrix {
/**
* @defgroup matrix_power Matrix Power Operations
* @{
*/
/**
 * @brief Power of every element in the input matrix, scaled by a weight.
 * @tparam math_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[in] in: input matrix
 * @param[out] out: output matrix. The result is stored in the out matrix
 * @param[in] scalar: every element is multiplied with scalar.
 */
template <typename math_t, typename idx_t, typename layout>
void weighted_power(raft::resources const& handle,
                    raft::device_matrix_view<const math_t, idx_t, layout> in,
                    raft::device_matrix_view<math_t, idx_t, layout> out,
                    math_t scalar)
{
  RAFT_EXPECTS(in.size() == out.size(), "Size of input and output matrices must be equal");
  auto stream = resource::get_cuda_stream(handle);
  detail::power(in.data_handle(), out.data_handle(), scalar, in.size(), stream);
}
/**
 * @brief Power of every element in the input matrix, scaled by a weight (inplace)
 * @tparam math_t matrix element type
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[inout] inout: input matrix and also the result is stored
 * @param[in] scalar: every element is multiplied with scalar.
 */
template <typename math_t, typename idx_t, typename layout>
void weighted_power(raft::resources const& handle,
                    raft::device_matrix_view<math_t, idx_t, layout> inout,
                    math_t scalar)
{
  detail::power(inout.data_handle(), scalar, inout.size(), resource::get_cuda_stream(handle));
}
/**
 * @brief Power of every element in the input matrix (inplace)
 * @tparam math_t matrix element type
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix data (must be row or col major)
 * @param[in] handle: raft handle
 * @param[inout] inout: input matrix and also the result is stored
 */
template <typename math_t, typename idx_t, typename layout>
void power(raft::resources const& handle, raft::device_matrix_view<math_t, idx_t, layout> inout)
{
  // Element-wise over the flattened buffer; see detail::power for the exact op.
  detail::power<math_t>(inout.data_handle(), inout.size(), resource::get_cuda_stream(handle));
}
/**
 * @brief Power of every element in the input matrix
 * @tparam math_t type used for matrix elements
 * @tparam idx_t integer type used for indexing
 * @tparam layout layout of the matrix (row or column major)
 * @param[in] handle: raft handle
 * @param[in] in: input matrix
 * @param[out] out: output matrix. The result is stored in the out matrix
 */
template <typename math_t, typename idx_t, typename layout>
void power(raft::resources const& handle,
           raft::device_matrix_view<const math_t, idx_t, layout> in,
           raft::device_matrix_view<math_t, idx_t, layout> out)
{
  RAFT_EXPECTS(in.size() == out.size(), "Input and output matrices must be same size.");
  detail::power<math_t>(
    in.data_handle(), out.data_handle(), in.size(), resource::get_cuda_stream(handle));
}
/** @} */ // end group matrix_power
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/print.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/host_mdspan.hpp>
#include <raft/matrix/detail/print.hpp>
#include <raft/matrix/matrix_types.hpp>
namespace raft::matrix {
/**
 * @brief Prints the data stored in CPU memory
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for indexing
 * @param[in] in: input matrix with column-major layout
 * @param[in] separators: horizontal and vertical separator characters
 */
template <typename m_t, typename idx_t>
void print(raft::host_matrix_view<const m_t, idx_t, col_major> in,
           const print_separators& separators)
{
  // Take `separators` by const reference: it is read-only here, and this also
  // allows callers to pass a temporary (e.g. print(view, print_separators{})).
  detail::printHost(
    in.data_handle(), in.extent(0), in.extent(1), separators.horizontal, separators.vertical);
}
} // namespace raft::matrix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/math.hpp | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
#pragma once
#include "math.cuh" | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/matrix/matrix.cuh | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in a future release.
* Please use versions in individual header files instead.
*/
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use versions in individual header files instead.")
#ifndef __MATRIX_H
#define __MATRIX_H
#pragma once
#include "detail/linewise_op.cuh"
#include "detail/matrix.cuh"
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/common/nvtx.hpp>
namespace raft {
namespace matrix {

// NOTE(review): `using namespace std;` at namespace scope in a public header
// injects the whole std namespace into raft::matrix for every includer —
// candidate for removal once downstream code is confirmed not to rely on it.
using namespace std;
/**
 * @brief Copy selected rows of the input matrix into contiguous space.
 *
 * On exit out[i + k*n_rows] = in[indices[i] + k*n_rows],
 * where i = 0..n_rows_indices-1, and k = 0..n_cols-1.
 *
 * @tparam m_t matrix element type
 * @tparam idx_array_t integer type of the indices array
 * @tparam idx_t integer type used for sizes
 * @param in input matrix
 * @param n_rows number of rows of output matrix
 * @param n_cols number of columns of output matrix
 * @param out output matrix
 * @param indices of the rows to be copied
 * @param n_rows_indices number of rows to copy
 * @param stream cuda stream
 * @param rowMajor whether the matrix has row major layout (column major by default)
 */
template <typename m_t, typename idx_array_t = int, typename idx_t = size_t>
void copyRows(const m_t* in,
              idx_t n_rows,
              idx_t n_cols,
              m_t* out,
              const idx_array_t* indices,
              idx_t n_rows_indices,
              cudaStream_t stream,
              bool rowMajor = false)
{
  detail::copyRows(in, n_rows, n_cols, out, indices, n_rows_indices, stream, rowMajor);
}
/**
 * @brief copy matrix operation for column major matrices.
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param in: input matrix
 * @param out: output matrix
 * @param n_rows: number of rows of output matrix
 * @param n_cols: number of columns of output matrix
 * @param stream: cuda stream
 * @note asynchronous: copies n_rows * n_cols elements on `stream` without syncing
 */
template <typename m_t, typename idx_t = int>
void copy(const m_t* in, m_t* out, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  raft::copy_async(out, in, n_rows * n_cols, stream);
}
/**
 * @brief copy matrix operation for column major matrices.
 * @tparam m_t matrix element type
 * @tparam idx_t unused; kept for backward API compatibility
 * @tparam matrix_idx_t integer type of the mdspan extents
 * @param[in] handle: raft handle
 * @param[in] in: input matrix
 * @param[out] out: output matrix
 */
template <typename m_t, typename idx_t = int, typename matrix_idx_t>
void copy(raft::resources const& handle,
          raft::device_matrix_view<const m_t, matrix_idx_t, col_major> in,
          raft::device_matrix_view<m_t, matrix_idx_t, col_major> out)
{
  RAFT_EXPECTS(in.extent(0) == out.extent(0) && in.extent(1) == out.extent(1),
               "Input and output matrix shapes must match.");
  // Use in.size() rather than mixing extents of `in` and `out`
  // (previously in.extent(0) * out.extent(1)); equivalent once the
  // shape check above has passed, but clearer and self-consistent.
  raft::copy_async(
    out.data_handle(), in.data_handle(), in.size(), resource::get_cuda_stream(handle));
}
/**
 * @brief copy matrix operation for column major matrices. First n_rows and
 * n_cols of input matrix "in" is copied to "out" matrix.
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param in: input matrix
 * @param in_n_rows: number of rows of input matrix
 * @param out: output matrix
 * @param out_n_rows: number of rows of output matrix
 * @param out_n_cols: number of columns of output matrix
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void truncZeroOrigin(
  m_t* in, idx_t in_n_rows, m_t* out, idx_t out_n_rows, idx_t out_n_cols, cudaStream_t stream)
{
  detail::truncZeroOrigin(in, in_n_rows, out, out_n_rows, out_n_cols, stream);
}
/**
 * @brief Columns of a column major matrix is reversed (i.e. first column and
 * last column are swapped), in place.
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param inout: input and output matrix
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void colReverse(m_t* inout, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  detail::colReverse(inout, n_rows, n_cols, stream);
}
/**
 * @brief Rows of a column major matrix is reversed (i.e. first row and last
 * row are swapped), in place.
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param inout: input and output matrix
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void rowReverse(m_t* inout, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  detail::rowReverse(inout, n_rows, n_cols, stream);
}
/**
 * @brief Prints the data stored in GPU memory
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param in: input matrix (device memory)
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param h_separator: horizontal separator character
 * @param v_separator: vertical separator character
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void print(const m_t* in,
           idx_t n_rows,
           idx_t n_cols,
           char h_separator = ' ',
           char v_separator = '\n',
           cudaStream_t stream = rmm::cuda_stream_default)
{
  detail::print(in, n_rows, n_cols, h_separator, v_separator, stream);
}
/**
 * @brief Prints the data stored in CPU memory
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param in: input matrix (host memory)
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 */
template <typename m_t, typename idx_t = int>
void printHost(const m_t* in, idx_t n_rows, idx_t n_cols)
{
  detail::printHost(in, n_rows, n_cols);
}
/**
 * @brief Slice a matrix (in-place)
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param in: input matrix
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param out: output matrix
 * @param x1, y1: coordinate of the top-left point of the wanted area (0-based)
 * @param x2, y2: coordinate of the bottom-right point of the wanted area
 * (1-based)
 * example: Slice the 2nd and 3rd columns of a 4x3 matrix: slice_matrix(M_d, 4,
 * 3, 0, 1, 4, 3);
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void sliceMatrix(m_t* in,
                 idx_t n_rows,
                 idx_t n_cols,
                 m_t* out,
                 idx_t x1,
                 idx_t y1,
                 idx_t x2,
                 idx_t y2,
                 cudaStream_t stream)
{
  // The hard-coded `false` selects the column-major code path in detail.
  detail::sliceMatrix(in, n_rows, n_cols, out, x1, y1, x2, y2, false, stream);
}
/**
 * @brief Copy the upper triangular part of a matrix to another
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param src: input matrix with a size of n_rows x n_cols
 * @param dst: output matrix with a size of kxk, k = min(n_rows, n_cols)
 * @param n_rows: number of rows of input matrix
 * @param n_cols: number of columns of input matrix
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void copyUpperTriangular(m_t* src, m_t* dst, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  detail::copyUpperTriangular(src, dst, n_rows, n_cols, stream);
}
/**
 * @brief Initialize a diagonal col-major matrix with a vector
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param vec: vector of length k = min(n_rows, n_cols)
 * @param matrix: matrix of size n_rows x n_cols (col-major)
 * @param n_rows: number of rows of the matrix
 * @param n_cols: number of columns of the matrix
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void initializeDiagonalMatrix(
  m_t* vec, m_t* matrix, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  // The hard-coded `false` selects the column-major code path in detail.
  detail::initializeDiagonalMatrix(vec, matrix, n_rows, n_cols, false, stream);
}
/**
 * @brief Get a square matrix with elements on diagonal reversed (in-place)
 * @tparam m_t matrix element type
 * @tparam idx_t integer type used for sizes
 * @param in: square input matrix with size len x len
 * @param len: size of one side of the matrix
 * @param stream: cuda stream
 */
template <typename m_t, typename idx_t = int>
void getDiagonalInverseMatrix(m_t* in, idx_t len, cudaStream_t stream)
{
  detail::getDiagonalInverseMatrix(in, len, stream);
}
/**
 * @brief Get the L2/F-norm of a matrix/vector
 * @tparam m_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @param handle: raft handle (resources used by the detail implementation)
 * @param in: input matrix/vector with totally size elements
 * @param size: size of the matrix/vector
 * @param stream: cuda stream
 * @return the L2 (Frobenius) norm of the `size` elements in `in`
 */
template <typename m_t, typename idx_t = int>
m_t getL2Norm(raft::resources const& handle, m_t* in, idx_t size, cudaStream_t stream)
{
  return detail::getL2Norm(handle, in, size, stream);
}
/**
 * Run a function over matrix lines (rows or columns) with a variable number
 * row-vectors or column-vectors.
 * The term `line` here signifies that the lines can be either columns or rows,
 * depending on the matrix layout.
 * What matters is if the vectors are applied along lines (indices of vectors correspond to
 * indices within lines), or across lines (indices of vectors correspond to line numbers).
 *
 * @tparam m_t type of matrix elements
 * @tparam idx_t integer type used for indexing
 * @tparam Lambda the element-wise operation type
 * @tparam Vecs types of the vector arguments
 *
 * @param [out] out result of the operation; can be same as `in`; should be aligned the same
 * as `in` to allow faster vectorized memory transfers.
 * @param [in] in input matrix consisting of `nLines` lines, each `lineLen`-long.
 * @param [in] lineLen length of matrix line in elements (`=nCols` in row-major or `=nRows` in
 * col-major)
 * @param [in] nLines number of matrix lines (`=nRows` in row-major or `=nCols` in col-major)
 * @param [in] alongLines whether vectors are indices along or across lines.
 * @param [in] op the operation applied on each line:
 *    for i in [0..lineLen) and j in [0..nLines):
 *      out[i, j] = op(in[i, j], vec1[i], vec2[i], ... veck[i])   if alongLines = true
 *      out[i, j] = op(in[i, j], vec1[j], vec2[j], ... veck[j])   if alongLines = false
 *    where matrix indexing is row-major ([i, j] = [i + lineLen * j]).
 * @param [in] stream a cuda stream for the kernels
 * @param [in] vecs zero or more vectors to be passed as arguments,
 *    size of each vector is `alongLines ? lineLen : nLines`.
 */
template <typename m_t, typename idx_t = int, typename Lambda, typename... Vecs>
void linewiseOp(m_t* out,
                const m_t* in,
                const idx_t lineLen,
                const idx_t nLines,
                const bool alongLines,
                Lambda op,
                cudaStream_t stream,
                const Vecs*... vecs)
{
  // NVTX range for profilers: encodes direction ('l' = along lines, 'x' = across),
  // the number of vector arguments and the matrix dimensions.
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("linewiseOp-%c-%zu (%zu, %zu)",
                                                            alongLines ? 'l' : 'x',
                                                            sizeof...(Vecs),
                                                            size_t(lineLen),
                                                            size_t(nLines));
  // <16, 256>: implementation tuning constants forwarded to the detail kernel launcher.
  detail::MatrixLinewiseOp<16, 256>::run<m_t, idx_t, Lambda, Vecs...>(
    out, in, lineLen, nLines, alongLines, op, stream, vecs...);
}
}; // end namespace matrix
}; // end namespace raft
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/select_radix.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/operators.hpp>
#include <raft/linalg/map.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>
#include <raft/util/integer_utils.hpp>
#include <raft/util/pow2_utils.cuh>
#include <raft/util/vectorized.cuh>
#include <cub/block/block_load.cuh>
#include <cub/block/block_scan.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/radix_rank_sort_operations.cuh>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
namespace raft::matrix::detail::select::radix {
namespace impl {
constexpr int VECTORIZED_READ_SIZE = 16;
// Number of histogram buckets per pass: each pass inspects BitsPerPass bits,
// giving 2^BitsPerPass possible digit values.
template <int BitsPerPass>
_RAFT_HOST_DEVICE constexpr int calc_num_buckets()
{
  return 1 << BitsPerPass;
}
// Number of radix passes needed to cover every bit of T, processing BitsPerPass
// bits per pass (rounded up, so the last pass may cover fewer bits).
template <typename T, int BitsPerPass>
_RAFT_HOST_DEVICE constexpr int calc_num_passes()
{
  return ceildiv<int>(sizeof(T) * 8, BitsPerPass);
}
/**
 * Bit 0 is the least significant (rightmost);
 * this implementation processes input from the most to the least significant bit.
 * This way, we can skip some passes in the end at the cost of having an unsorted output.
 *
 * NB: Use pass=-1 for calc_mask().
 */
template <typename T, int BitsPerPass>
_RAFT_DEVICE constexpr int calc_start_bit(int pass)
{
  // Position of the lowest bit inspected by `pass`, clamped at zero because the
  // final pass may cover fewer than BitsPerPass bits.
  const int start_bit = static_cast<int>(sizeof(T) * 8) - (pass + 1) * BitsPerPass;
  return start_bit < 0 ? 0 : start_bit;
}
// Mask selecting the digit bits of `pass` (after shifting right by calc_start_bit(pass)).
// The width is the distance between the start bits of consecutive passes, so the last
// (possibly narrower) pass gets a correspondingly narrower mask.
template <typename T, int BitsPerPass>
_RAFT_DEVICE constexpr unsigned calc_mask(int pass)
{
  static_assert(BitsPerPass <= 31);
  int num_bits = calc_start_bit<T, BitsPerPass>(pass - 1) - calc_start_bit<T, BitsPerPass>(pass);
  return (1 << num_bits) - 1;
}
/**
 * Use CUB to twiddle bits - so that we can correctly compare bits of floating-point values as well
 * as of integers.
 */
template <typename T>
_RAFT_DEVICE typename cub::Traits<T>::UnsignedBits twiddle_in(T key, bool select_min)
{
  // Reinterpret (not convert) the key as its unsigned bit pattern, then apply CUB's
  // order-preserving transform so unsigned comparison matches T's natural ordering.
  auto bits = reinterpret_cast<typename cub::Traits<T>::UnsignedBits&>(key);
  bits = cub::Traits<T>::TwiddleIn(bits);
  // For top-k (max) selection, invert the bits so "smaller twiddled" means "preferred".
  if (!select_min) { bits = ~bits; }
  return bits;
}
// Exact inverse of twiddle_in(): undo the optional inversion, then CUB's transform,
// and reinterpret the bit pattern back as T.
template <typename T>
_RAFT_DEVICE T twiddle_out(typename cub::Traits<T>::UnsignedBits bits, bool select_min)
{
  if (!select_min) { bits = ~bits; }
  bits = cub::Traits<T>::TwiddleOut(bits);
  return reinterpret_cast<T&>(bits);
}
// Extract the histogram bucket (digit) of `x` for the current pass: twiddle to an
// order-preserving unsigned value, shift the pass's bits down, and mask.
template <typename T, int BitsPerPass>
_RAFT_DEVICE int calc_bucket(T x, int start_bit, unsigned mask, bool select_min)
{
  static_assert(BitsPerPass <= sizeof(int) * 8 - 1,
                "BitsPerPass is too large that the result type could not be int");
  return (twiddle_in(x, select_min) >> start_bit) & mask;
}
// Strangely, RATIO_T has a strong impact on register usage and occupancy for sm80, e.g.
// using RATIO_T=unsigned for radix_kernel decreases occupancy (with CUDA 12).
// In the meanwhile, RATIO_T has no impact for sm90.
template <typename T, typename IdxT, typename RATIO_T = float>
_RAFT_HOST_DEVICE IdxT calc_buf_len(IdxT len)
{
  // When writing is skipped, only read `in`(type T).
  // When writing is not skipped, read `in_buf`(T) and `in_idx_buf`(IdxT), and write `out_buf`(T)
  // and `out_idx_buf`(IdxT).
  // The ratio between these cases determines whether to skip writing and hence the buffer size.
  // NOTE(review): sizeof(IdxT) * 2 / sizeof(T) is integer division, evaluated before the
  // conversion to RATIO_T; exact whenever 2*sizeof(IdxT) is a multiple of sizeof(T).
  constexpr RATIO_T ratio = 2 + sizeof(IdxT) * 2 / sizeof(T);
  // Even such estimation is too conservative, so further decrease buf_len by 1/8
  IdxT buf_len = len / (ratio * 8);
  // one-block kernel splits one large buffer into smaller ones, so round buf size to 256 bytes to
  // avoid alignment issues
  static_assert(is_a_power_of_two(sizeof(T)));
  static_assert(is_a_power_of_two(sizeof(IdxT)));
  constexpr IdxT aligned = 256 / std::min(sizeof(T), sizeof(IdxT));
  buf_len = Pow2<aligned>::roundDown(buf_len);
  return buf_len;
}
/**
 * Map a Func over the input data, using vectorized load instructions if possible.
 *
 * NB: in future, we should move this to cpp/include/raft/linalg/detail/unary_op.cuh, which
 * currently does not support the second lambda argument (index of an element)
 *
 * @tparam T element type
 * @tparam IdxT indexing type
 * @tparam Func void (T x, IdxT idx)
 *
 * @param thread_rank rank of the calling thread among all participating threads
 * @param num_threads number of the threads that participate in processing
 * @param in the input data
 * @param len the number of elements to read
 * @param f the lambda taking two arguments (T x, IdxT idx)
 */
template <typename T, typename IdxT, typename Func>
_RAFT_DEVICE void vectorized_process(
  size_t thread_rank, size_t num_threads, const T* in, IdxT len, Func f)
{
  // If T cannot be packed into a VECTORIZED_READ_SIZE-byte load, fall back to a plain
  // strided loop over single elements.
  if constexpr (sizeof(T) >= VECTORIZED_READ_SIZE || VECTORIZED_READ_SIZE % sizeof(T) != 0) {
    for (IdxT i = thread_rank; i < len; i += num_threads) {
      f(in[i], i);
    }
  } else {
    using wide_t = TxN_t<T, VECTORIZED_READ_SIZE / sizeof(T)>;
    using align_bytes = Pow2<(size_t)VECTORIZED_READ_SIZE>;
    using align_elems = Pow2<wide_t::Ratio>;
    wide_t wide;
    // how many elements to skip in order to do aligned vectorized load
    const IdxT skip_cnt_left = std::min<IdxT>((IdxT)(align_bytes::roundUp(in) - in), len);
    // The main loop: process all aligned data
    for (IdxT i = thread_rank * wide_t::Ratio + skip_cnt_left; i + wide_t::Ratio <= len;
         i += num_threads * wide_t::Ratio) {
      wide.load(in, i);
#pragma unroll
      for (int j = 0; j < wide_t::Ratio; ++j) {
        f(wide.val.data[j], i + j);
      }
    }
    // A single warp can always cover both unaligned edges (at most Ratio-1 elements each).
    static_assert(WarpSize >= wide_t::Ratio);
    // Processes the skipped elements on the left
    if (thread_rank < skip_cnt_left) { f(in[thread_rank], thread_rank); }
    // Processes the skipped elements on the right
    const IdxT skip_cnt_right = align_elems::mod(len - skip_cnt_left);
    const IdxT remain_i = len - skip_cnt_right + thread_rank;
    if (remain_i < len) { f(in[remain_i], remain_i); }
  }
}
/**
 * Per-row state shared by all blocks processing one row of the batch.
 * The alignas(128) on the struct and on each contended field places every
 * atomically-updated counter on its own 128-byte boundary — presumably to avoid
 * false sharing between the atomics (NOTE(review): confirm intent).
 */
template <typename T, typename IdxT>
struct alignas(128) Counter {
  // We are processing the values in multiple passes, from most significant to least significant. In
  // each pass, we keep the length of input (`len`) and the `k` of current pass, and update them at
  // the end of the pass.
  IdxT k;
  IdxT len;
  // `previous_len` is the length of input in previous pass. Note that `previous_len` rather
  // than `len` is used for the filtering step because filtering is indeed for previous pass (see
  // comments before `radix_kernel`).
  IdxT previous_len;
  // We determine the bits of the k_th value inside the mask processed by the pass. The
  // already known bits are stored in `kth_value_bits`. It's used to discriminate a element is a
  // result (written to `out`), a candidate for next pass (written to `out_buf`), or not useful
  // (discarded). The bits that are not yet processed do not matter for this purpose.
  typename cub::Traits<T>::UnsignedBits kth_value_bits;
  // Record how many elements have passed filtering. It's used to determine the position in the
  // `out_buf` where an element should be written.
  alignas(128) IdxT filter_cnt;
  // For a row inside a batch, we may launch multiple thread blocks. This counter is used to
  // determine if the current block is the last running block. If so, this block will execute scan()
  // and choose_bucket().
  alignas(128) unsigned int finished_block_cnt;
  // Record how many elements have been written to the front of `out`. Elements less (if
  // select_min==true) than the k-th value are written from front to back.
  alignas(128) IdxT out_cnt;
  // Record how many elements have been written to the back of `out`. Elements equal to the k-th
  // value are written from back to front. We need to keep count of them separately because the
  // number of elements that <= the k-th value might exceed k.
  alignas(128) IdxT out_back_cnt;
};
/**
 * Fused filtering of the current pass and building histogram for the next pass
 * (see steps 4 & 1 in `radix_kernel` description).
 *
 * This function is more complicated than the one-block counterpart because this function handles
 * the case of early stopping. When early stopping is triggered, it's desirable to do the final
 * filtering in this function rather than in last_filter(), because this function is run by multiple
 * blocks while last_filter is run by a single block.
 */
template <typename T, typename IdxT, int BitsPerPass>
_RAFT_DEVICE void filter_and_histogram(const T* in_buf,
                                       const IdxT* in_idx_buf,
                                       T* out_buf,
                                       IdxT* out_idx_buf,
                                       T* out,
                                       IdxT* out_idx,
                                       IdxT previous_len,
                                       Counter<T, IdxT>* counter,
                                       IdxT* histogram,
                                       bool select_min,
                                       int pass,
                                       bool early_stop)
{
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  // Per-block histogram in shared memory; merged into the global one at the end.
  __shared__ IdxT histogram_smem[num_buckets];
  for (IdxT i = threadIdx.x; i < num_buckets; i += blockDim.x) {
    histogram_smem[i] = 0;
  }
  // Barrier: all zeroing must be visible before any thread starts counting.
  __syncthreads();
  const int start_bit = calc_start_bit<T, BitsPerPass>(pass);
  const unsigned mask = calc_mask<T, BitsPerPass>(pass);
  if (pass == 0) {
    // Passed to vectorized_process, this function executes in all blocks in parallel,
    // i.e. the work is split along the input (both, in batches and chunks of a single row).
    // Later, the histograms are merged using atomicAdd.
    auto f = [select_min, start_bit, mask](T value, IdxT) {
      int bucket = calc_bucket<T, BitsPerPass>(value, start_bit, mask, select_min);
      atomicAdd(histogram_smem + bucket, static_cast<IdxT>(1));
    };
    vectorized_process(static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x,
                       static_cast<size_t>(blockDim.x) * gridDim.x,
                       in_buf,
                       previous_len,
                       f);
  } else {
    IdxT* p_filter_cnt = &counter->filter_cnt;
    IdxT* p_out_cnt = &counter->out_cnt;
    const auto kth_value_bits = counter->kth_value_bits;
    const int previous_start_bit = calc_start_bit<T, BitsPerPass>(pass - 1);
    // See the remark above on the distributed execution of `f` using vectorized_process.
    auto f = [in_idx_buf,
              out_buf,
              out_idx_buf,
              out,
              out_idx,
              select_min,
              start_bit,
              mask,
              previous_start_bit,
              kth_value_bits,
              p_filter_cnt,
              p_out_cnt,
              early_stop](T value, IdxT i) {
      // Bits of `value` already decided by earlier passes (lower bits zeroed out).
      const auto previous_bits = (twiddle_in(value, select_min) >> previous_start_bit)
                                 << previous_start_bit;
      if (previous_bits == kth_value_bits) {
        // Candidate: shares all decided bits with the k-th value.
        if (early_stop) {
          IdxT pos = atomicAdd(p_out_cnt, static_cast<IdxT>(1));
          out[pos] = value;
          out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
        } else {
          if (out_buf) {
            IdxT pos = atomicAdd(p_filter_cnt, static_cast<IdxT>(1));
            out_buf[pos] = value;
            out_idx_buf[pos] = in_idx_buf ? in_idx_buf[i] : i;
          }
          int bucket = calc_bucket<T, BitsPerPass>(value, start_bit, mask, select_min);
          atomicAdd(histogram_smem + bucket, static_cast<IdxT>(1));
        }
      }
      // the condition `(out_buf || early_stop)` is a little tricky:
      // If we skip writing to `out_buf` (when `out_buf` is nullptr), we should skip writing to
      // `out` too. So we won't write the same value to `out` multiple times in different passes.
      // And if we keep skipping the writing, values will be written in `last_filter_kernel()` at
      // last. But when `early_stop` is true, we need to write to `out` since it's the last chance.
      else if ((out_buf || early_stop) && previous_bits < kth_value_bits) {
        IdxT pos = atomicAdd(p_out_cnt, static_cast<IdxT>(1));
        out[pos] = value;
        out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
      }
    };
    vectorized_process(static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x,
                       static_cast<size_t>(blockDim.x) * gridDim.x,
                       in_buf,
                       previous_len,
                       f);
  }
  // When early-stopping, the histogram is not needed: skip the merge.
  if (early_stop) { return; }
  __syncthreads();
  // merge histograms produced by individual blocks
  for (int i = threadIdx.x; i < num_buckets; i += blockDim.x) {
    if (histogram_smem[i] != 0) { atomicAdd(histogram + i, histogram_smem[i]); }
  }
}
/**
 * Replace histogram with its own prefix sum
 * (step 2 in `radix_kernel` description)
 */
template <typename IdxT, int BitsPerPass, int BlockSize>
_RAFT_DEVICE void scan(volatile IdxT* histogram)
{
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  if constexpr (num_buckets >= BlockSize) {
    // More buckets than threads: each thread handles num_buckets/BlockSize items
    // via CUB's block-wide load/scan/store collectives.
    static_assert(num_buckets % BlockSize == 0);
    constexpr int items_per_thread = num_buckets / BlockSize;
    typedef cub::BlockLoad<IdxT, BlockSize, items_per_thread, cub::BLOCK_LOAD_TRANSPOSE> BlockLoad;
    typedef cub::BlockStore<IdxT, BlockSize, items_per_thread, cub::BLOCK_STORE_TRANSPOSE>
      BlockStore;
    typedef cub::BlockScan<IdxT, BlockSize> BlockScan;
    // The three collectives run sequentially, so their temp storage can share memory;
    // the __syncthreads() between phases make the reuse safe.
    __shared__ union {
      typename BlockLoad::TempStorage load;
      typename BlockScan::TempStorage scan;
      typename BlockStore::TempStorage store;
    } temp_storage;
    IdxT thread_data[items_per_thread];
    BlockLoad(temp_storage.load).Load(histogram, thread_data);
    __syncthreads();
    BlockScan(temp_storage.scan).InclusiveSum(thread_data, thread_data);
    __syncthreads();
    BlockStore(temp_storage.store).Store(histogram, thread_data);
  } else {
    // Fewer buckets than threads: one item per thread, excess threads contribute 0.
    typedef cub::BlockScan<IdxT, BlockSize> BlockScan;
    __shared__ typename BlockScan::TempStorage temp_storage;
    IdxT thread_data = 0;
    if (threadIdx.x < num_buckets) { thread_data = histogram[threadIdx.x]; }
    BlockScan(temp_storage).InclusiveSum(thread_data, thread_data);
    __syncthreads();
    if (threadIdx.x < num_buckets) { histogram[threadIdx.x] = thread_data; }
  }
}
/**
 * Calculate in which bucket the k-th value will fall
 * (steps 3 in `radix_kernel` description)
 *
 * `histogram` must already contain the inclusive prefix sum (see scan()).
 */
template <typename T, typename IdxT, int BitsPerPass>
_RAFT_DEVICE void choose_bucket(Counter<T, IdxT>* counter,
                                const IdxT* histogram,
                                const IdxT k,
                                const int pass)
{
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  for (int i = threadIdx.x; i < num_buckets; i += blockDim.x) {
    IdxT prev = (i == 0) ? 0 : histogram[i - 1];
    IdxT cur = histogram[i];
    // one and only one thread will satisfy this condition, so counter is written by only one thread
    if (prev < k && cur >= k) {
      counter->k = k - prev;     // how many values still are there to find
      counter->len = cur - prev; // number of values in next pass
      // Append this pass's digit to the already-known bits of the k-th value.
      typename cub::Traits<T>::UnsignedBits bucket = i;
      int start_bit = calc_start_bit<T, BitsPerPass>(pass);
      counter->kth_value_bits |= bucket << start_bit;
    }
  }
}
// For one-block version, last_filter() could be called when pass < num_passes - 1.
// So `pass` could not be constexpr
//
// Final filtering step, executed by a single block per row: values strictly below the
// k-th value's bits go to the front of `out`; values equal to it fill the back of `out`
// until exactly `counter->k` of them have been placed.
template <typename T, typename IdxT, int BitsPerPass>
_RAFT_DEVICE void last_filter(const T* in_buf,
                              const IdxT* in_idx_buf,
                              T* out,
                              IdxT* out_idx,
                              IdxT current_len,
                              IdxT k,
                              Counter<T, IdxT>* counter,
                              const bool select_min,
                              const int pass)
{
  const auto kth_value_bits = counter->kth_value_bits;
  const int start_bit = calc_start_bit<T, BitsPerPass>(pass);
  // changed in choose_bucket(); need to reload
  const IdxT num_of_kth_needed = counter->k;
  IdxT* p_out_cnt = &counter->out_cnt;
  IdxT* p_out_back_cnt = &counter->out_back_cnt;
  for (IdxT i = threadIdx.x; i < current_len; i += blockDim.x) {
    const T value = in_buf[i];
    // Zero out the not-yet-decided low bits before comparing against kth_value_bits.
    const auto bits = (twiddle_in(value, select_min) >> start_bit) << start_bit;
    if (bits < kth_value_bits) {
      IdxT pos = atomicAdd(p_out_cnt, static_cast<IdxT>(1));
      out[pos] = value;
      // For one-block version, `in_idx_buf` could be nullptr at pass 0.
      // For non one-block version, if writing has been skipped, `in_idx_buf` could be nullptr if
      // `in_buf` is `in`
      out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
    } else if (bits == kth_value_bits) {
      // Ties with the k-th value are written from the back; surplus ties are discarded.
      IdxT back_pos = atomicAdd(p_out_back_cnt, static_cast<IdxT>(1));
      if (back_pos < num_of_kth_needed) {
        IdxT pos = k - 1 - back_pos;
        out[pos] = value;
        out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
      }
    }
  }
}
// Standalone (non-fused) version of the final filtering step, launched once after all
// radix passes with grid (gridDim.x blocks per row, gridDim.y = batch rows).
template <typename T, typename IdxT, int BitsPerPass>
RAFT_KERNEL last_filter_kernel(const T* in,
                               const IdxT* in_idx,
                               const T* in_buf,
                               const IdxT* in_idx_buf,
                               T* out,
                               IdxT* out_idx,
                               const IdxT len,
                               const IdxT k,
                               Counter<T, IdxT>* counters,
                               const bool select_min)
{
  const size_t batch_id = blockIdx.y; // size_t to avoid multiplication overflow
  Counter<T, IdxT>* counter = counters + batch_id;
  IdxT previous_len = counter->previous_len;
  if (previous_len == 0) { return; }
  const IdxT buf_len = calc_buf_len<T>(len);
  // If the last pass skipped writing its buffer (too many candidates), read from the
  // original input for this row instead of the ping-pong buffer.
  if (previous_len > buf_len || in_buf == in) {
    in_buf = in + batch_id * len;
    in_idx_buf = in_idx ? (in_idx + batch_id * len) : nullptr;
    previous_len = len;
  } else {
    in_buf += batch_id * buf_len;
    in_idx_buf += batch_id * buf_len;
  }
  out += batch_id * k;
  out_idx += batch_id * k;
  constexpr int pass = calc_num_passes<T, BitsPerPass>() - 1;
  constexpr int start_bit = calc_start_bit<T, BitsPerPass>(pass);
  const auto kth_value_bits = counter->kth_value_bits;
  const IdxT num_of_kth_needed = counter->k;
  IdxT* p_out_cnt = &counter->out_cnt;
  IdxT* p_out_back_cnt = &counter->out_back_cnt;
  // `start_bit` is constexpr, so the lambda reads it without capturing.
  auto f = [k,
            select_min,
            kth_value_bits,
            num_of_kth_needed,
            p_out_cnt,
            p_out_back_cnt,
            in_idx_buf,
            out,
            out_idx](T value, IdxT i) {
    const auto bits = (twiddle_in(value, select_min) >> start_bit) << start_bit;
    if (bits < kth_value_bits) {
      IdxT pos = atomicAdd(p_out_cnt, static_cast<IdxT>(1));
      out[pos] = value;
      out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
    } else if (bits == kth_value_bits) {
      // Ties with the k-th value fill `out` from the back until counter->k are placed.
      IdxT back_pos = atomicAdd(p_out_back_cnt, static_cast<IdxT>(1));
      if (back_pos < num_of_kth_needed) {
        IdxT pos = k - 1 - back_pos;
        out[pos] = value;
        out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
      }
    }
  };
  vectorized_process(static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x,
                     static_cast<size_t>(blockDim.x) * gridDim.x,
                     in_buf,
                     previous_len,
                     f);
}
/**
 *
 * It is expected to call this kernel multiple times (passes), in each pass we process a radix,
 * going from the most significant towards the least significant bits (MSD).
 *
 * Conceptually, each pass consists of 4 steps:
 *
 * 1. Calculate histogram
 *      First, transform bits into a digit, the value of which is in the range
 *      [0, 2^{BITS_PER_PASS}-1]. Then count the frequency of each digit value and the result is a
 *      histogram. That is, histogram[i] contains the count of inputs having value i.
 *
 * 2. Scan the histogram
 *      Inclusive prefix sum is computed for the histogram. After this step, histogram[i] contains
 *      the count of inputs having value <= i.
 *
 * 3. Find the bucket j of the histogram that the k-th value falls into
 *
 * 4. Filtering
 *      Input elements whose digit value <j are the top-k elements. We put them into the result
 *      array out. The number of such elements is histogram[j-1]. Since the k-th value must be in
 *      the bucket j, we write all elements in bucket j into a intermediate buffer out_buf. For the
 *      next pass, these elements are used as input, and we would like to find the
 *      (k - histogram[j-1])-th value among them. That is, the k in the next pass is set to
 *      (k - histogram[j-1]).
 *
 * In the implementation, the filtering step is delayed to the next pass so the filtering and
 * histogram computation are fused. In this way, inputs are read once rather than twice.
 *
 * During the filtering step, we won't write candidates (elements in bucket j) to `out_buf` if the
 * number of candidates is larger than the length of `out_buf` (this could happen when the leading
 * bits of input values are almost the same). And then in the next pass, inputs are read from `in`
 * rather than from `in_buf`. The benefit is that we can save the cost of writing candidates and
 * their indices.
 */
template <typename T, typename IdxT, int BitsPerPass, int BlockSize, bool fused_last_filter>
RAFT_KERNEL radix_kernel(const T* in,
                         const IdxT* in_idx,
                         const T* in_buf,
                         const IdxT* in_idx_buf,
                         T* out_buf,
                         IdxT* out_idx_buf,
                         T* out,
                         IdxT* out_idx,
                         Counter<T, IdxT>* counters,
                         IdxT* histograms,
                         const IdxT len,
                         const IdxT k,
                         const bool select_min,
                         const int pass)
{
  const size_t batch_id = blockIdx.y;
  auto counter = counters + batch_id;
  IdxT current_k;
  IdxT previous_len;
  IdxT current_len;
  if (pass == 0) {
    current_k = k;
    previous_len = len;
    // Need to do this so setting counter->previous_len for the next pass is correct.
    // This value is meaningless for pass 0, but it's fine because pass 0 won't be the
    // last pass in this implementation so pass 0 won't hit the "if (pass ==
    // num_passes - 1)" branch.
    // Maybe it's better to reload counter->previous_len and use it rather than
    // current_len in last_filter()
    current_len = len;
  } else {
    current_k = counter->k;
    current_len = counter->len;
    previous_len = counter->previous_len;
  }
  if (current_len == 0) { return; }
  // When k=len, early_stop will be true at pass 0. It means filter_and_histogram() should handle
  // correctly the case that pass=0 and early_stop=true. However, this special case of k=len is
  // handled in other way in select_k() so such case is not possible here.
  const bool early_stop = (current_len == current_k);
  const IdxT buf_len = calc_buf_len<T>(len);
  // "previous_len > buf_len" means previous pass skips writing buffer
  if (pass == 0 || pass == 1 || previous_len > buf_len) {
    in_buf = in + batch_id * len;
    in_idx_buf = in_idx ? (in_idx + batch_id * len) : nullptr;
    previous_len = len;
  } else {
    in_buf += batch_id * buf_len;
    in_idx_buf += batch_id * buf_len;
  }
  // "current_len > buf_len" means current pass will skip writing buffer
  if (pass == 0 || current_len > buf_len) {
    out_buf = nullptr;
    out_idx_buf = nullptr;
  } else {
    out_buf += batch_id * buf_len;
    out_idx_buf += batch_id * buf_len;
  }
  out += batch_id * k;
  out_idx += batch_id * k;
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  auto histogram = histograms + batch_id * num_buckets;
  filter_and_histogram<T, IdxT, BitsPerPass>(in_buf,
                                             in_idx_buf,
                                             out_buf,
                                             out_idx_buf,
                                             out,
                                             out_idx,
                                             previous_len,
                                             counter,
                                             histogram,
                                             select_min,
                                             pass,
                                             early_stop);
  // Make this block's global-memory writes (histogram, buffers) visible to the
  // last-arriving block before it reads them below.
  __threadfence();
  bool isLastBlock = false;
  if (threadIdx.x == 0) {
    unsigned int finished = atomicInc(&counter->finished_block_cnt, gridDim.x - 1);
    isLastBlock = (finished == (gridDim.x - 1));
  }
  // __syncthreads_or broadcasts thread 0's result to the whole block; only the last
  // block to finish executes the scan/choose_bucket epilogue for this row.
  if (__syncthreads_or(isLastBlock)) {
    if (early_stop) {
      if (threadIdx.x == 0) {
        // `last_filter_kernel()` requires setting previous_len
        counter->previous_len = 0;
        counter->len = 0;
      }
      return;
    }
    scan<IdxT, BitsPerPass, BlockSize>(histogram);
    __syncthreads();
    choose_bucket<T, IdxT, BitsPerPass>(counter, histogram, current_k, pass);
    __syncthreads();
    constexpr int num_passes = calc_num_passes<T, BitsPerPass>();
    // reset for next pass
    if (pass != num_passes - 1) {
      for (int i = threadIdx.x; i < num_buckets; i += blockDim.x) {
        histogram[i] = 0;
      }
    }
    if (threadIdx.x == 0) {
      // `last_filter_kernel()` requires setting previous_len even in the last pass
      counter->previous_len = current_len;
      // not necessary for the last pass, but put it here anyway
      counter->filter_cnt = 0;
    }
    if constexpr (fused_last_filter) {
      if (pass == num_passes - 1) {
        last_filter<T, IdxT, BitsPerPass>(out_buf ? out_buf : in_buf,
                                          out_idx_buf ? out_idx_buf : in_idx_buf,
                                          out,
                                          out_idx,
                                          out_buf ? current_len : len,
                                          k,
                                          counter,
                                          select_min,
                                          pass);
      }
    }
  }
}
// Decide how many rows of the batch to process per launch, based on the kernel's
// occupancy, so the GPU stays saturated without over-allocating temporary buffers.
// Returns min(computed chunk size, batch_size).
template <typename T, typename IdxT, int BlockSize, typename Kernel>
int calc_chunk_size(int batch_size, IdxT len, int sm_cnt, Kernel kernel, bool one_block)
{
  int active_blocks;
  RAFT_CUDA_TRY(
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&active_blocks, kernel, BlockSize, 0));
  // The chunk size is chosen so that there is enough workload to fully utilize GPU.
  // One full wave contains (sm_cnt * active_blocks) blocks, and 10 waves is an empirically safe
  // estimation of enough workload. It also counteracts imbalance if some blocks run slower
  // than others.
  constexpr int num_waves = 10;
  int chunk_size;
  if (one_block) {
    // For one-block version, one block processes one instance in the chunk. Just ensure that there
    // are enough blocks.
    chunk_size = num_waves * sm_cnt * active_blocks;
  } else {
    // One instance in the chunk contains len items and is processed by multiple blocks.
    // The total number of items in a chunk (chunk_size * len) should be large enough that every
    // thread has enough items to processes. So set it to num_waves * "max num of active threads"
    // (sm_cnt * active_blocks * BlockSize) * items_per_thread.
    //
    // Also, the upper bound of the total number of items in a chunk is:
    // 10 (num_waves) * ~100 (sm_cnt) * 2048 (active_blocks*BlockSize) * 32 (items_per_thread) =64M.
    // So temporary buffer size required for one chunk won't be too large.
    constexpr int items_per_thread = 32;
    chunk_size =
      std::max<int>(1, num_waves * sm_cnt * active_blocks * BlockSize * items_per_thread / len);
  }
  return std::min(chunk_size, batch_size);
}
// Choose the number of blocks per batch row (gridDim.x) for radix_kernel: try increasing
// wave counts and pick the smallest block count whose tail-wave waste is acceptable.
template <typename T, typename IdxT, int BitsPerPass, int BlockSize>
unsigned calc_grid_dim(int batch_size, IdxT len, int sm_cnt)
{
  static_assert(VECTORIZED_READ_SIZE / sizeof(T) >= 1);
  int active_blocks;
  RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &active_blocks, radix_kernel<T, IdxT, BitsPerPass, BlockSize, false>, BlockSize, 0));
  active_blocks *= sm_cnt;
  IdxT best_num_blocks = 0;
  float best_tail_wave_penalty = 1.0f;
  // Upper bound: each thread reads at least one full vectorized load.
  const IdxT max_num_blocks = ceildiv<IdxT>(len, VECTORIZED_READ_SIZE / sizeof(T) * BlockSize);
  for (int num_waves = 1;; ++num_waves) {
    IdxT num_blocks = std::min(
      max_num_blocks, static_cast<IdxT>(std::max(num_waves * active_blocks / batch_size, 1)));
    IdxT items_per_thread = ceildiv<IdxT>(len, num_blocks * BlockSize);
    items_per_thread = alignTo<IdxT>(items_per_thread, VECTORIZED_READ_SIZE / sizeof(T));
    num_blocks = ceildiv<IdxT>(len, items_per_thread * BlockSize);
    // Penalty = fraction of the last wave that would sit idle.
    float actual_num_waves = static_cast<float>(num_blocks) * batch_size / active_blocks;
    float tail_wave_penalty =
      (ceilf(actual_num_waves) - actual_num_waves) / ceilf(actual_num_waves);
    // 0.15 is determined experimentally. It also ensures breaking the loop early,
    // e.g. when num_waves > 7, tail_wave_penalty will always <0.15
    if (tail_wave_penalty < 0.15) {
      best_num_blocks = num_blocks;
      break;
    } else if (tail_wave_penalty < best_tail_wave_penalty) {
      best_num_blocks = num_blocks;
      best_tail_wave_penalty = tail_wave_penalty;
    }
    if (num_blocks == max_num_blocks) { break; }
  }
  return best_num_blocks;
}
/**
 * Select the ping-pong buffers for a given pass (host-side version).
 *
 * Pass 0 reads directly from `in` and skips writing (output pointers are null);
 * pass 1 still reads from `in`/`in_idx` but starts writing to the first buffer pair;
 * subsequent passes alternate between (buf1, idx_buf1) and (buf2, idx_buf2).
 */
template <typename T, typename IdxT>
_RAFT_HOST void set_buf_pointers(const T* in,
                                 const IdxT* in_idx,
                                 T* buf1,
                                 IdxT* idx_buf1,
                                 T* buf2,
                                 IdxT* idx_buf2,
                                 int pass,
                                 const T*& in_buf,
                                 const IdxT*& in_idx_buf,
                                 T*& out_buf,
                                 IdxT*& out_idx_buf)
{
  switch (pass) {
    case 0:
      // First pass: histogram only, no filtered output is produced yet.
      in_buf      = in;
      in_idx_buf  = nullptr;
      out_buf     = nullptr;
      out_idx_buf = nullptr;
      break;
    case 1:
      // Second pass: still reading the original input, first write to buffer pair 1.
      in_buf      = in;
      in_idx_buf  = in_idx;
      out_buf     = buf1;
      out_idx_buf = idx_buf1;
      break;
    default:
      // Later passes ping-pong between the two buffer pairs by parity.
      if (pass % 2 == 0) {
        in_buf      = buf1;
        in_idx_buf  = idx_buf1;
        out_buf     = buf2;
        out_idx_buf = idx_buf2;
      } else {
        in_buf      = buf2;
        in_idx_buf  = idx_buf2;
        out_buf     = buf1;
        out_idx_buf = idx_buf1;
      }
      break;
  }
}
// Device-side overload used by the one-block kernel: all four buffers live inside a
// single `bufs` allocation instead of separate rmm vectors.
template <typename T, typename IdxT>
_RAFT_DEVICE void set_buf_pointers(const T* in,
                                   const IdxT* in_idx,
                                   char* bufs,
                                   IdxT buf_len,
                                   int pass,
                                   const T*& in_buf,
                                   const IdxT*& in_idx_buf,
                                   T*& out_buf,
                                   IdxT*& out_idx_buf)
{
  // bufs consists of 4 pieces in order: buf1, buf2, idx_buf1, idx_buf2
  if (pass == 0) {
    in_buf = in;
    in_idx_buf = nullptr;
    out_buf = nullptr;
    out_idx_buf = nullptr;
  } else if (pass == 1) {
    in_buf = in;
    in_idx_buf = in_idx;
    // Value buffers occupy the first sizeof(T)*2*buf_len bytes; index buffers follow.
    out_buf = reinterpret_cast<T*>(bufs);
    out_idx_buf = reinterpret_cast<IdxT*>(bufs + sizeof(T) * 2 * buf_len);
  } else if (pass % 2 == 0) {
    // Even pass: read pair 1, write pair 2 (pair 2 starts buf_len elements after pair 1).
    in_buf = reinterpret_cast<T*>(bufs);
    in_idx_buf = reinterpret_cast<IdxT*>(bufs + sizeof(T) * 2 * buf_len);
    out_buf = const_cast<T*>(in_buf + buf_len);
    out_idx_buf = const_cast<IdxT*>(in_idx_buf + buf_len);
  } else {
    // Odd pass: read pair 2, write pair 1.
    out_buf = reinterpret_cast<T*>(bufs);
    out_idx_buf = reinterpret_cast<IdxT*>(bufs + sizeof(T) * 2 * buf_len);
    in_buf = out_buf + buf_len;
    in_idx_buf = out_idx_buf + buf_len;
  }
}
// Host driver of the multi-block radix top-k: allocates per-chunk counters, histograms
// and ping-pong buffers, then for each chunk of batch rows runs all radix passes and
// (if not fused) the standalone last-filter kernel.
template <typename T, typename IdxT, int BitsPerPass, int BlockSize>
void radix_topk(const T* in,
                const IdxT* in_idx,
                int batch_size,
                IdxT len,
                IdxT k,
                T* out,
                IdxT* out_idx,
                bool select_min,
                bool fused_last_filter,
                unsigned grid_dim,
                int sm_cnt,
                rmm::cuda_stream_view stream,
                rmm::mr::device_memory_resource* mr)
{
  // TODO: is it possible to relax this restriction?
  static_assert(calc_num_passes<T, BitsPerPass>() > 1);
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  auto kernel = radix_kernel<T, IdxT, BitsPerPass, BlockSize, false>;
  const size_t max_chunk_size =
    calc_chunk_size<T, IdxT, BlockSize>(batch_size, len, sm_cnt, kernel, false);
  if (max_chunk_size != static_cast<size_t>(batch_size)) {
    grid_dim = calc_grid_dim<T, IdxT, BitsPerPass, BlockSize>(max_chunk_size, len, sm_cnt);
  }
  const IdxT buf_len = calc_buf_len<T>(len);
  size_t req_aux = max_chunk_size * (sizeof(Counter<T, IdxT>) + num_buckets * sizeof(IdxT));
  size_t req_buf = max_chunk_size * buf_len * 2 * (sizeof(T) + sizeof(IdxT));
  size_t mem_req = req_aux + req_buf + 256 * 6; // might need extra memory for alignment
  auto pool_guard = raft::get_pool_memory_resource(mr, mem_req);
  if (pool_guard) {
    RAFT_LOG_DEBUG("radix::select_k: using pool memory resource with initial size %zu bytes",
                   mem_req);
  }
  rmm::device_uvector<Counter<T, IdxT>> counters(max_chunk_size, stream, mr);
  rmm::device_uvector<IdxT> histograms(max_chunk_size * num_buckets, stream, mr);
  rmm::device_uvector<T> buf1(max_chunk_size * buf_len, stream, mr);
  rmm::device_uvector<IdxT> idx_buf1(max_chunk_size * buf_len, stream, mr);
  rmm::device_uvector<T> buf2(max_chunk_size * buf_len, stream, mr);
  rmm::device_uvector<IdxT> idx_buf2(max_chunk_size * buf_len, stream, mr);
  for (size_t offset = 0; offset < static_cast<size_t>(batch_size); offset += max_chunk_size) {
    int chunk_size = std::min(max_chunk_size, batch_size - offset);
    // Counters and histograms must start zeroed for every chunk (pass 0 assumes it).
    RAFT_CUDA_TRY(
      cudaMemsetAsync(counters.data(), 0, counters.size() * sizeof(Counter<T, IdxT>), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(histograms.data(), 0, histograms.size() * sizeof(IdxT), stream));
    // NOTE(review): intentionally shadows the outer `kernel` so the fused variant chosen
    // on a previous chunk's last pass is reset to non-fused at the start of each chunk.
    auto kernel = radix_kernel<T, IdxT, BitsPerPass, BlockSize, false>;
    const T* chunk_in = in + offset * len;
    const IdxT* chunk_in_idx = in_idx ? (in_idx + offset * len) : nullptr;
    T* chunk_out = out + offset * k;
    IdxT* chunk_out_idx = out_idx + offset * k;
    const T* in_buf = nullptr;
    const IdxT* in_idx_buf = nullptr;
    T* out_buf = nullptr;
    IdxT* out_idx_buf = nullptr;
    // grid: x = blocks per row, y = rows in this chunk.
    dim3 blocks(grid_dim, chunk_size);
    constexpr int num_passes = calc_num_passes<T, BitsPerPass>();
    for (int pass = 0; pass < num_passes; ++pass) {
      set_buf_pointers(chunk_in,
                       chunk_in_idx,
                       buf1.data(),
                       idx_buf1.data(),
                       buf2.data(),
                       idx_buf2.data(),
                       pass,
                       in_buf,
                       in_idx_buf,
                       out_buf,
                       out_idx_buf);
      if (fused_last_filter && pass == num_passes - 1) {
        kernel = radix_kernel<T, IdxT, BitsPerPass, BlockSize, true>;
      }
      kernel<<<blocks, BlockSize, 0, stream>>>(chunk_in,
                                               chunk_in_idx,
                                               in_buf,
                                               in_idx_buf,
                                               out_buf,
                                               out_idx_buf,
                                               chunk_out,
                                               chunk_out_idx,
                                               counters.data(),
                                               histograms.data(),
                                               len,
                                               k,
                                               select_min,
                                               pass);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
    if (!fused_last_filter) {
      last_filter_kernel<T, IdxT, BitsPerPass><<<blocks, BlockSize, 0, stream>>>(chunk_in,
                                                                                 chunk_in_idx,
                                                                                 out_buf,
                                                                                 out_idx_buf,
                                                                                 chunk_out,
                                                                                 chunk_out_idx,
                                                                                 len,
                                                                                 k,
                                                                                 counters.data(),
                                                                                 select_min);
      RAFT_CUDA_TRY(cudaPeekAtLastError());
    }
  }
}
// The following few functions are for the one-block version, which uses a single thread block
// for each row of a batch.
// Single-block variant of filter-and-histogram: filters candidates that survived the
// previous pass and builds the histogram for the current pass, all within one block.
// Three cases:
//  - pass == 0: histogram over all `previous_len` input elements, nothing filtered yet;
//  - out_buf == nullptr: the candidate set is still too large for the buffer, so only
//    the histogram is built (buffer writing is skipped for this pass);
//  - otherwise: elements matching the k-th value bits so far are compacted into
//    out_buf/out_idx_buf, and elements already known to be in the top-k go straight
//    to out/out_idx.
// `histogram` is zeroed here; callers must __syncthreads() before reading it.
template <typename T, typename IdxT, int BitsPerPass>
_RAFT_DEVICE void filter_and_histogram_for_one_block(const T* in_buf,
                                                     const IdxT* in_idx_buf,
                                                     T* out_buf,
                                                     IdxT* out_idx_buf,
                                                     T* out,
                                                     IdxT* out_idx,
                                                     const IdxT previous_len,
                                                     Counter<T, IdxT>* counter,
                                                     IdxT* histogram,
                                                     bool select_min,
                                                     int pass)
{
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  // Zero the histogram and the filter counter before counting.
  for (int i = threadIdx.x; i < num_buckets; i += blockDim.x) {
    histogram[i] = 0;
  }
  IdxT* p_filter_cnt = &counter->filter_cnt;
  if (threadIdx.x == 0) { *p_filter_cnt = 0; }
  __syncthreads();

  const int start_bit = calc_start_bit<T, BitsPerPass>(pass);
  const unsigned mask = calc_mask<T, BitsPerPass>(pass);

  if (pass == 0) {
    auto f = [histogram, select_min, start_bit, mask](T value, IdxT) {
      int bucket = calc_bucket<T, BitsPerPass>(value, start_bit, mask, select_min);
      atomicAdd(histogram + bucket, static_cast<IdxT>(1));
    };
    vectorized_process(threadIdx.x, blockDim.x, in_buf, previous_len, f);
  } else if (!out_buf) {
    // not use vectorized_process here because it increases #registers a lot
    const auto kth_value_bits    = counter->kth_value_bits;
    const int previous_start_bit = calc_start_bit<T, BitsPerPass>(pass - 1);

    for (IdxT i = threadIdx.x; i < previous_len; i += blockDim.x) {
      const T value            = in_buf[i];
      // Only elements whose higher bits match the k-th value so far are still
      // candidates and contribute to this pass's histogram.
      const auto previous_bits = (twiddle_in(value, select_min) >> previous_start_bit)
                                 << previous_start_bit;
      if (previous_bits == kth_value_bits) {
        int bucket = calc_bucket<T, BitsPerPass>(value, start_bit, mask, select_min);
        atomicAdd(histogram + bucket, static_cast<IdxT>(1));
      }
    }
  } else {
    // not use vectorized_process here because it increases #registers a lot
    IdxT* p_out_cnt              = &counter->out_cnt;
    const auto kth_value_bits    = counter->kth_value_bits;
    const int previous_start_bit = calc_start_bit<T, BitsPerPass>(pass - 1);

    for (IdxT i = threadIdx.x; i < previous_len; i += blockDim.x) {
      const T value            = in_buf[i];
      const auto previous_bits = (twiddle_in(value, select_min) >> previous_start_bit)
                                 << previous_start_bit;
      if (previous_bits == kth_value_bits) {
#if CUDART_VERSION < 12000
        // Avoiding potential compiler bug in CUDA 11
        volatile
#endif
          IdxT pos       = atomicAdd(p_filter_cnt, static_cast<IdxT>(1));
        out_buf[pos]     = value;
        // Without input indices, the position itself serves as the index.
        out_idx_buf[pos] = in_idx_buf ? in_idx_buf[i] : i;

        int bucket = calc_bucket<T, BitsPerPass>(value, start_bit, mask, select_min);
        atomicAdd(histogram + bucket, static_cast<IdxT>(1));
      } else if (previous_bits < kth_value_bits) {
        // Strictly better than the k-th candidate: it is definitely in the top-k.
        IdxT pos     = atomicAdd(p_out_cnt, static_cast<IdxT>(1));
        out[pos]     = value;
        out_idx[pos] = in_idx_buf ? in_idx_buf[i] : i;
      }
    }
  }
}
// One-block top-k kernel: block x handles row x of the batch. All per-row state
// (counter, histogram) lives in shared memory; `bufs` provides the per-row global
// double buffers (two (key, index) pairs of length buf_len each).
template <typename T, typename IdxT, int BitsPerPass, int BlockSize>
RAFT_KERNEL radix_topk_one_block_kernel(const T* in,
                                        const IdxT* in_idx,
                                        const IdxT len,
                                        const IdxT k,
                                        T* out,
                                        IdxT* out_idx,
                                        const bool select_min,
                                        char* bufs)
{
  constexpr int num_buckets = calc_num_buckets<BitsPerPass>();
  __shared__ Counter<T, IdxT> counter;
  __shared__ IdxT histogram[num_buckets];

  // Thread 0 initializes the per-row selection state; all threads wait before use.
  if (threadIdx.x == 0) {
    counter.k              = k;
    counter.len            = len;
    counter.previous_len   = len;
    counter.kth_value_bits = 0;
    counter.out_cnt        = 0;
    counter.out_back_cnt   = 0;
  }
  __syncthreads();

  // Advance all pointers to this block's row.
  const size_t batch_id = blockIdx.x;  // size_t to avoid multiplication overflow
  in += batch_id * len;
  if (in_idx) { in_idx += batch_id * len; }
  out += batch_id * k;
  out_idx += batch_id * k;
  const IdxT buf_len = calc_buf_len<T, IdxT, unsigned>(len);
  bufs += batch_id * buf_len * 2 * (sizeof(T) + sizeof(IdxT));

  constexpr int num_passes = calc_num_passes<T, BitsPerPass>();
  for (int pass = 0; pass < num_passes; ++pass) {
    const T* in_buf;
    const IdxT* in_idx_buf;
    T* out_buf;
    IdxT* out_idx_buf;
    set_buf_pointers(in, in_idx, bufs, buf_len, pass, in_buf, in_idx_buf, out_buf, out_idx_buf);

    const IdxT current_len = counter.len;
    const IdxT current_k   = counter.k;
    IdxT previous_len      = counter.previous_len;
    if (previous_len > buf_len) {
      // The previous pass couldn't fit its candidates into the buffer, so it didn't
      // write them; fall back to re-reading the original input.
      in_buf       = in;
      in_idx_buf   = in_idx;
      previous_len = len;
    }
    if (current_len > buf_len) {
      // so "out_buf==nullptr" denotes skipping writing buffer in current pass
      out_buf     = nullptr;
      out_idx_buf = nullptr;
    }

    filter_and_histogram_for_one_block<T, IdxT, BitsPerPass>(in_buf,
                                                             in_idx_buf,
                                                             out_buf,
                                                             out_idx_buf,
                                                             out,
                                                             out_idx,
                                                             previous_len,
                                                             &counter,
                                                             histogram,
                                                             select_min,
                                                             pass);
    __syncthreads();

    // Prefix-sum the histogram, then pick the bucket containing the k-th value.
    scan<IdxT, BitsPerPass, BlockSize>(histogram);
    __syncthreads();

    choose_bucket<T, IdxT, BitsPerPass>(&counter, histogram, current_k, pass);
    if (threadIdx.x == 0) { counter.previous_len = current_len; }
    __syncthreads();

    // Done when the candidate set shrank to exactly k, or after the final pass.
    if (counter.len == counter.k || pass == num_passes - 1) {
      last_filter<T, IdxT, BitsPerPass>(out_buf ? out_buf : in,
                                        out_buf ? out_idx_buf : in_idx,
                                        out,
                                        out_idx,
                                        out_buf ? current_len : len,
                                        k,
                                        &counter,
                                        select_min,
                                        pass);
      break;
    }
  }
}
// radix_topk() might use multiple thread blocks for one row of a batch. In contrast, the following
// one-block version uses single thread block for one row of a batch, so intermediate data, like
// counters and global histograms, can be kept in shared memory and cheap sync operations can be
// used. It's used when len is relatively small or when the number of blocks per row calculated by
// `calc_grid_dim()` is 1.
// Host launcher for the one-block variant: one thread block per row (see
// radix_topk_one_block_kernel). The batch is split into chunks so the global
// scratch buffers stay bounded by the device capacity.
template <typename T, typename IdxT, int BitsPerPass, int BlockSize>
void radix_topk_one_block(const T* in,
                          const IdxT* in_idx,
                          int batch_size,
                          IdxT len,
                          IdxT k,
                          T* out,
                          IdxT* out_idx,
                          bool select_min,
                          int sm_cnt,
                          rmm::cuda_stream_view stream,
                          rmm::mr::device_memory_resource* mr)
{
  static_assert(calc_num_passes<T, BitsPerPass>() > 1);

  auto one_block_kernel   = radix_topk_one_block_kernel<T, IdxT, BitsPerPass, BlockSize>;
  const IdxT row_buf_len  = calc_buf_len<T, IdxT, unsigned>(len);
  const size_t chunk_rows =
    calc_chunk_size<T, IdxT, BlockSize>(batch_size, len, sm_cnt, one_block_kernel, true);

  // Scratch holds two (key, index) buffer pairs per row of a chunk.
  const size_t scratch_bytes = chunk_rows * row_buf_len * 2 * (sizeof(T) + sizeof(IdxT));
  auto pool_guard            = raft::get_pool_memory_resource(mr, scratch_bytes);
  if (pool_guard) { RAFT_LOG_DEBUG("radix::select_k: using pool memory resource"); }
  rmm::device_uvector<char> scratch(scratch_bytes, stream, mr);

  // One launch per chunk; each block of a launch processes one row.
  for (size_t done = 0; done < static_cast<size_t>(batch_size); done += chunk_rows) {
    const int rows_now = std::min(chunk_rows, batch_size - done);
    one_block_kernel<<<rows_now, BlockSize, 0, stream>>>(in + done * len,
                                                         in_idx ? (in_idx + done * len) : nullptr,
                                                         len,
                                                         k,
                                                         out + done * k,
                                                         out_idx + done * k,
                                                         select_min,
                                                         scratch.data());
  }
}
} // namespace impl
/**
* Select k smallest or largest key/values from each row in the input data.
*
* If you think of the input data `in_keys` as a row-major matrix with len columns and
* batch_size rows, then this function selects k smallest/largest values in each row and fills
* in the row-major matrix `out` of size (batch_size, k).
*
* Note, the output is NOT sorted within the groups of `k` selected elements.
*
* @tparam T
* the type of the keys (what is being compared).
* @tparam IdxT
* the index type (what is being selected together with the keys).
* @tparam BitsPerPass
* The size of the radix;
* it affects the number of passes and number of buckets.
* @tparam BlockSize
* Number of threads in a kernel thread block.
*
* @param[in] in
* contiguous device array of inputs of size (len * batch_size);
* these are compared and selected.
* @param[in] in_idx
* contiguous device array of inputs of size (len * batch_size);
* typically, these are indices of the corresponding in_keys.
* @param batch_size
* number of input rows, i.e. the batch size.
* @param len
* length of a single input array (row); also sometimes referred as n_cols.
* Invariant: len >= k.
* @param k
* the number of outputs to select in each input row.
* @param[out] out
* contiguous device array of outputs of size (k * batch_size);
* the k smallest/largest values from each row of the `in_keys`.
* @param[out] out_idx
* contiguous device array of outputs of size (k * batch_size);
* the payload selected together with `out`.
* @param select_min
* whether to select k smallest (true) or largest (false) keys.
* @param fused_last_filter
* when it's true, the last filter is fused into the kernel in the last pass and only one thread
* block will do the filtering; when false, a standalone filter kernel with multiple thread
* blocks is called. The latter case is preferable when leading bits of input data are almost the
* same. That is, when the value range of input data is narrow. In such case, there could be a
* large number of inputs for the last filter, hence using multiple thread blocks is beneficial.
* @param stream
* @param mr an optional memory resource to use across the calls (you can provide a large enough
* memory pool here to avoid memory allocations within the call).
*/
template <typename T, typename IdxT, int BitsPerPass, int BlockSize>
void select_k(const T* in,
              const IdxT* in_idx,
              int batch_size,
              IdxT len,
              IdxT k,
              T* out,
              IdxT* out_idx,
              bool select_min,
              bool fused_last_filter,
              rmm::cuda_stream_view stream,
              rmm::mr::device_memory_resource* mr = nullptr)
{
  // Shortcut: selecting every element of a row is a plain copy (output is unsorted
  // anyway). Indices default to 0..len-1 per row when in_idx is not provided.
  if (k == len) {
    RAFT_CUDA_TRY(
      cudaMemcpyAsync(out, in, sizeof(T) * batch_size * len, cudaMemcpyDeviceToDevice, stream));
    if (in_idx) {
      RAFT_CUDA_TRY(cudaMemcpyAsync(
        out_idx, in_idx, sizeof(IdxT) * batch_size * len, cudaMemcpyDeviceToDevice, stream));
    } else {
      // Fill out_idx with i % len, i.e. 0..len-1 within each row.
      auto out_idx_view =
        raft::make_device_vector_view(out_idx, static_cast<size_t>(len) * batch_size);
      raft::resources handle;
      resource::set_cuda_stream(handle, stream);
      raft::linalg::map_offset(handle, out_idx_view, raft::mod_const_op<IdxT>(len));
    }
    return;
  }

  // TODO: use device_resources::get_device_properties() instead; should change it when we refactor
  // resource management
  int sm_cnt;
  {
    int dev;
    RAFT_CUDA_TRY(cudaGetDevice(&dev));
    RAFT_CUDA_TRY(cudaDeviceGetAttribute(&sm_cnt, cudaDevAttrMultiProcessorCount, dev));
  }

  constexpr int items_per_thread = 32;

  // Heuristic dispatch: short rows — or rows for which only one block would be used
  // anyway — go to the one-block variant, which keeps all intermediate state in
  // shared memory; otherwise use the multi-block path.
  if (len <= BlockSize * items_per_thread) {
    impl::radix_topk_one_block<T, IdxT, BitsPerPass, BlockSize>(
      in, in_idx, batch_size, len, k, out, out_idx, select_min, sm_cnt, stream, mr);
  } else {
    unsigned grid_dim =
      impl::calc_grid_dim<T, IdxT, BitsPerPass, BlockSize>(batch_size, len, sm_cnt);
    if (grid_dim == 1) {
      impl::radix_topk_one_block<T, IdxT, BitsPerPass, BlockSize>(
        in, in_idx, batch_size, len, k, out, out_idx, select_min, sm_cnt, stream, mr);
    } else {
      impl::radix_topk<T, IdxT, BitsPerPass, BlockSize>(in,
                                                        in_idx,
                                                        batch_size,
                                                        len,
                                                        k,
                                                        out,
                                                        out_idx,
                                                        select_min,
                                                        fused_last_filter,
                                                        grid_dim,
                                                        sm_cnt,
                                                        stream,
                                                        mr);
    }
  }
}
} // namespace raft::matrix::detail::select::radix
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/linewise_op.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
#include <raft/core/mdspan.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/pow2_utils.cuh>
#include <raft/util/vectorized.cuh>
#include <algorithm>
#include <thrust/tuple.h>
namespace raft {
namespace matrix {
namespace detail {
/** This type simplifies returning arrays and passing them as arguments */
template <typename Type, int VecElems>
struct VecArg {
  // One cuda-vector-sized chunk of `VecElems` scalar values, passable by value.
  Type val[VecElems];
};
/** Executes the operation with the given matrix element and an arbitrary number of vector elements
* contained in the given tuple. The index_sequence is used here for compile-time indexing of the
* tuple in the fold expression. */
template <typename MatT, typename Lambda, class Tuple, size_t... Is>
__device__ __forceinline__ MatT
RunMatVecOp(Lambda op, MatT mat, Tuple&& args, std::index_sequence<Is...>)
{
  // Expand the tuple of per-row vector elements into the trailing arguments of `op`.
  return op(mat, (thrust::get<Is>(args))...);
}
// Collection of device helpers implementing row-wise / column-wise matrix-vector
// operations with cuda-vector-sized (VecBytes) loads and stores.
template <typename Type, typename IdxType, std::size_t VecBytes, int BlockSize>
struct Linewise {
  // Number of scalar elements of `Type` packed into one cuda-vector-sized load/store.
  static constexpr IdxType VecElems = VecBytes / sizeof(Type);

  typedef raft::TxN_t<Type, VecElems> Vec;
  typedef raft::Pow2<VecBytes> AlignBytes;
  typedef raft::Pow2<VecElems> AlignElems;
  typedef raft::Pow2<raft::WarpSize> AlignWarp;

  /**
   * Compute op(matrix_in, vec_1, vec_2, ...) where vectors are applied across the
   * matrix rows (one vector element per matrix row).
   *
   * It's assumed that `in` and `out` are aligned to the cuda-vector-size,
   * and their length is multiple of that.
   *
   * Block work arrangement: blocked;
   * one warp works on a contiguous chunk of a matrix. Since the matrix is represented
   * as a flat array, such an arrangement minimizes the number of times when a single
   * thread needs to reload the vector value at an index corresponding to the current
   * matrix row. Ideally, a thread would load a value from a vector only once, but that
   * is not possible if the vector size (= number of matrix rows) is too small or not
   * aligned with the cuda-vector-size.
   *
   * Note about rowDiv/rowMod:
   * these two represent the row/column indices in the original input matrices, before
   * it was converted to (Vec::io_t*) type (which possibly involves shifting a pointer
   * a bit to align to the cuda-vector-size). Thus, they are used to track the index for
   * the argument vectors only (the vector pointers are not altered in any way).
   *
   * @tparam Vecs a pack of pointers to vectors (Type*)
   * @param [out] out (aligned part of) the output matrix
   * @param [in] in (aligned part of) the input matrix
   * @param [in] in_end end of the (aligned part of the) input matrix
   * @param [in] rowLen number of elements in a row (NOT the vector size)
   * @param [in] rowDiv the index in the vectors (= row num in the original unaligned input matrix)
   * @param [in] rowMod the index within a row in the original unaligned input matrix.
   * @param [in] op the function to apply
   * @param [in] vecs pointers to the argument vectors.
   *
   */
  template <typename Lambda, typename... Vecs>
  static __device__ __forceinline__ void vectorCols(typename Vec::io_t* out,
                                                    const typename Vec::io_t* in,
                                                    const typename Vec::io_t* in_end,
                                                    const IdxType rowLen,
                                                    IdxType rowDiv,
                                                    IdxType rowMod,
                                                    Lambda op,
                                                    const Vecs*... vecs) noexcept
  {
    // Each loop iteration advances one warp-width of cuda-vectors; adding warpPad to
    // rowMod accounts for the elements covered by the other lanes of the warp.
    constexpr IdxType warpPad = (AlignWarp::Value - 1) * VecElems;
    constexpr auto index      = std::index_sequence_for<Vecs...>();
    // todo(lsugy): switch to cuda::std::tuple from libcudacxx if we add it as a required
    // dependency. Note that thrust::tuple is limited to 10 elements.
    thrust::tuple<Vecs...> args;
    Vec v, w;
    bool update = true;
    for (; in < in_end; in += AlignWarp::Value, out += AlignWarp::Value, rowMod += warpPad) {
      *v.vectorized_data() = __ldcv(in);
      while (rowMod >= rowLen) {
        rowMod -= rowLen;
        rowDiv++;
        update = true;
      }
      if (update) {
        // Reload the vector arguments only when the row index has changed.
        args   = thrust::make_tuple((vecs[rowDiv])...);
        update = false;
      }
#pragma unroll VecElems
      for (int k = 0; k < VecElems; k++, rowMod++) {
        if (rowMod == rowLen) {
          // Row boundary crossed mid-vector: move to the next vector element.
          rowMod = 0;
          rowDiv++;
          args = thrust::make_tuple((vecs[rowDiv])...);
        }
        w.val.data[k] = RunMatVecOp(op, v.val.data[k], args, index);
      }
      *out = *w.vectorized_data();
    }
  }

  /**
   * Compute op(matrix_in, vec_1, vec_2, ...) where vectors are applied along
   * matrix rows (vector and matrix indices are 1-1).
   *
   * It's assumed that `in` and `out` are aligned to the cuda-vector-size,
   * and their length is multiple of that.
   *
   * Block work arrangement: striped;
   * the grid size is chosen in such a way, that one thread always processes
   * the same vector elements. That's why there is no need to read the
   * vector arguments multiple times.
   *
   * @tparam Args a pack of raft::TxN_t<Type, VecElems>
   * @param [out] out (aligned part of) the output matrix
   * @param [in] in (aligned part of) the input matrix
   * @param [in] len total length of (the aligned part of) the input/output matrices
   * @param [in] op the function to apply
   * @param [in] args the cuda-vector-sized chunks on input vectors (raft::TxN_t<Type, VecElems>)
   */
  template <typename Lambda, typename... Args>
  static __device__ __forceinline__ void vectorRows(typename Vec::io_t* out,
                                                    const typename Vec::io_t* in,
                                                    const IdxType len,
                                                    Lambda op,
                                                    Args... args) noexcept
  {
    Vec v;
    // Grid-stride loop over cuda-vector-sized chunks.
    const IdxType d = BlockSize * gridDim.x;
    for (IdxType i = threadIdx.x + blockIdx.x * BlockSize; i < len; i += d) {
      *v.vectorized_data() = __ldcv(in + i);
#pragma unroll VecElems
      for (int k = 0; k < VecElems; k++)
        v.val.data[k] = op(v.val.data[k], args.val[k]...);
      __stwt(out + i, *v.vectorized_data());
    }
  }

  /**
   * The helper for `vectorRows`. Loads the `raft::TxN_t<Type, VecElems>` chunk
   * of a vector. Most of the time this is not aligned, so we load it thread-striped
   * within a block and then use the shared memory to get a contiguous chunk.
   *
   * @tparam VecT Type of the vector to load
   * @param [in] shm a shared memory region for rearranging the data among threads
   * @param [in] p pointer to a vector
   * @param [in] blockOffset the offset of the current block into a vector.
   * @param [in] rowLen the length of a vector.
   * @return a contiguous chunk of a vector, suitable for `vectorRows`.
   */
  template <typename VecT>
  static __device__ __forceinline__ VecArg<VecT, VecElems> loadVec(VecT* shm,
                                                                   const VecT* p,
                                                                   const IdxType blockOffset,
                                                                   const IdxType rowLen) noexcept
  {
    // Stage the (possibly wrapped-around) vector slice into shared memory,
    // thread-striped for coalesced global reads.
    IdxType j = blockOffset + threadIdx.x;
#pragma unroll VecElems
    for (int k = threadIdx.x; k < VecElems * BlockSize; k += BlockSize, j += BlockSize) {
      while (j >= rowLen)
        j -= rowLen;
      shm[k] = p[j];
    }
    __syncthreads();
    {
      // Each thread now reads back its own contiguous VecElems-sized chunk.
      VecArg<VecT, VecElems> out;
#pragma unroll VecElems
      for (int i = 0; i < VecElems; i++)
        out.val[i] = shm[threadIdx.x * VecElems + i];
      return out;
    }
  }

  /**
   * @brief Same as loadVec, but pads data with Ones
   *
   * @tparam VecT Type of the vector to load
   * @param shm a shared memory region for rearranging the data among threads
   * @param p pointer to a vector
   * @param blockOffset the offset of the current block into a vector
   * @param rowLen the length of a vector
   * @param rowLenPadded the length of a vector including padding
   * @return a contiguous chunk of a vector, suitable for `vectorRows`.
   */
  template <typename VecT>
  static __device__ __forceinline__ VecArg<VecT, VecElems> loadVecPadded(
    VecT* shm,
    const VecT* p,
    const IdxType blockOffset,
    const IdxType rowLen,
    const IdxType rowLenPadded) noexcept
  {
    IdxType j = blockOffset + threadIdx.x;
#pragma unroll VecElems
    for (int k = threadIdx.x; k < VecElems * BlockSize; k += BlockSize, j += BlockSize) {
      while (j >= rowLenPadded)
        j -= rowLenPadded;
      // Positions beyond the real row length are filled with ones (multiplicative
      // identity), so ops over the padding leave the padded output well-defined.
      shm[k] = j < rowLen ? p[j] : VecT(1);
    }
    __syncthreads();
    {
      VecArg<VecT, VecElems> out;
#pragma unroll VecElems
      for (int i = 0; i < VecElems; i++)
        out.val[i] = shm[threadIdx.x * VecElems + i];
      return out;
    }
  }
};
/**
* This kernel prepares the inputs for the `vectorCols` function where the most of the
* work happens; see `vectorCols` for details.
*
* The work arrangement is blocked; a single block works on a contiguous chunk of flattened
* matrix data and does not care about the gridDim.
*
* @param [out] out the output matrix
* @param [in] in the input matrix
* @param [in] arrOffset such an offset into the matrices that makes them aligned to the
* cuda-vector-size
* @param [in] rowLen number of elements in a row (NOT the vector size)
* @param [in] len the total length of the aligned part of the matrices
* @param [in] elemsPerThread how many elements are processed by a single thread in total
* @param [in] op the function to apply
* @param [in] vecs pointers to the argument vectors
*/
template <typename Type,
          typename IdxType,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
RAFT_KERNEL __launch_bounds__(BlockSize)
  matrixLinewiseVecColsMainKernel(Type* out,
                                  const Type* in,
                                  const IdxType arrOffset,
                                  const IdxType rowLen,
                                  const IdxType len,
                                  const IdxType elemsPerThread,
                                  Lambda op,
                                  const Vecs*... vecs)
{
  typedef Linewise<Type, IdxType, VecBytes, BlockSize> L;

  // Blocked arrangement: each warp owns a contiguous chunk of
  // elemsPerThread * WarpSize elements; lane `t` starts t * VecElems elements in.
  IdxType t = L::AlignWarp::mod(threadIdx.x);
  t = arrOffset + elemsPerThread * (blockIdx.x * BlockSize + threadIdx.x - t) + t * L::VecElems;

  return L::vectorCols(reinterpret_cast<typename L::Vec::io_t*>(out + t),
                       reinterpret_cast<const typename L::Vec::io_t*>(in + t),
                       // clamp the end of this thread's range to the aligned length
                       reinterpret_cast<const typename L::Vec::io_t*>(
                         in + min(t + elemsPerThread * L::AlignWarp::Value, len)),
                       rowLen,
                       t / rowLen,
                       t % rowLen,
                       op,
                       vecs...);
}
/**
* This kernel is similar to `matrixLinewiseVecColsMainKernel`, but processes only the unaligned
* head and tail parts of the matrix.
* This kernel is always launched in just two blocks; the first block processes the head of the
* matrix, the second block processes the tail. It uses the same `vectorCols` function, but
* sets `VecElems = 1`
*
* @param [out] out the output matrix
* @param [in] in the input matrix
* @param [in] arrOffset the length of the unaligned head - such an offset into the matrices that
* makes them aligned to the `VecBytes`
* @param [in] arrTail the offset to the unaligned tail
* @param [in] rowLen number of elements in a row (NOT the vector size)
* @param [in] len the total length of the matrices (rowLen * nRows)
* @param [in] op the function to apply
* @param [in] vecs pointers to the argument vectors
*/
template <typename Type, typename IdxType, std::size_t MaxOffset, typename Lambda, typename... Vecs>
RAFT_KERNEL __launch_bounds__(MaxOffset, 2) matrixLinewiseVecColsTailKernel(Type* out,
                                                                            const Type* in,
                                                                            const IdxType arrOffset,
                                                                            const IdxType arrTail,
                                                                            const IdxType rowLen,
                                                                            const IdxType len,
                                                                            Lambda op,
                                                                            const Vecs*... vecs)
{
  // Note, L::VecElems == 1
  typedef Linewise<Type, IdxType, sizeof(Type), MaxOffset> L;
  // Each thread handles at most one scalar element; elemsPerWarp is 0 or 1 depending
  // on whether this thread's offset falls inside the head/tail range.
  IdxType threadOffset, elemsPerWarp;
  if (blockIdx.x == 0) {
    // first block: offset = 0, length = arrOffset
    threadOffset = threadIdx.x;
    elemsPerWarp = threadOffset < arrOffset;
  } else {
    // second block: offset = arrTail, length = len - arrTail
    threadOffset = arrTail + threadIdx.x;
    elemsPerWarp = threadOffset < len;
  }
  const IdxType rowDiv = threadOffset / rowLen;
  const IdxType rowMod = threadOffset % rowLen;
  return L::vectorCols(
    reinterpret_cast<typename L::Vec::io_t*>(out + threadOffset),
    reinterpret_cast<const typename L::Vec::io_t*>(in + threadOffset),
    reinterpret_cast<const typename L::Vec::io_t*>(in + threadOffset + elemsPerWarp),
    rowLen,
    rowDiv,
    rowMod,
    op,
    vecs...);
}
/** Helper function to get the largest type from a variadic list of types */
/** Helper function to get the largest type from a variadic list of types */
template <typename... Types>
constexpr size_t maxSizeOf()
{
  // Fold over the pack keeping the running maximum; an empty pack yields 0.
  size_t largest = 0;
  ((largest = sizeof(Types) > largest ? sizeof(Types) : largest), ...);
  return largest;
}
/**
* This kernel prepares the inputs for the `vectorRows` function where the most of the
* work happens; see `vectorRows` for details.
*
* The work arrangement is striped; the gridDim should be selected in such a way, that
* on each iteration a thread processes the same indices along rows:
* `(gridDim.x * BlockSize * VecElems) % rowLen == 0`.
*
* @param [out] out the start of the *aligned* part of the output matrix
* @param [in] in the start of the *aligned* part of the input matrix
* @param [in] arrOffset such an offset into the matrices that makes them aligned to `VecBytes`
* @param [in] rowLen number of elements in a row (= the vector size)
* @param [in] len the total length of the aligned part of the matrices
* @param [in] op the function to apply
* @param [in] vecs pointers to the argument vectors
*/
template <typename Type,
          typename IdxType,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
RAFT_KERNEL __launch_bounds__(BlockSize) matrixLinewiseVecRowsMainKernel(Type* out,
                                                                         const Type* in,
                                                                         const IdxType arrOffset,
                                                                         const IdxType rowLen,
                                                                         const IdxType len,
                                                                         Lambda op,
                                                                         const Vecs*... vecs)
{
  typedef Linewise<Type, IdxType, VecBytes, BlockSize> L;
  constexpr uint workSize         = L::VecElems * BlockSize;
  constexpr size_t maxVecItemSize = maxSizeOf<Vecs...>();
  uint workOffset                 = workSize * maxVecItemSize;
  // With more than one vector, shm has two halves; the XOR in the pack expansion
  // below toggles workOffset so consecutive loadVec calls use distinct regions.
  __shared__ __align__(
    maxVecItemSize *
    L::VecElems) char shm[workSize * maxVecItemSize * ((sizeof...(Vecs)) > 1 ? 2 : 1)];
  // Position of this block within a row; striped arrangement keeps it fixed per thread.
  const IdxType blockOffset = (arrOffset + BlockSize * L::VecElems * blockIdx.x) % rowLen;
  return L::vectorRows(reinterpret_cast<typename L::Vec::io_t*>(out),
                       reinterpret_cast<const typename L::Vec::io_t*>(in),
                       L::AlignElems::div(len),
                       op,
                       (workOffset ^= workSize * maxVecItemSize,
                        L::loadVec((Vecs*)(shm + workOffset), vecs, blockOffset, rowLen))...);
}
/**
* Simplified version of `matrixLinewiseVecRowsMainKernel` for use with padded data.
* Data is required to be aligned and padded.
*
* @param [out] out the start of the *aligned* part of the output matrix
* @param [in] in the start of the *aligned* part of the input matrix
* @param [in] arrOffset such an offset into the matrices that makes them aligned to `VecBytes`
* @param [in] rowLen number of elements in a row (= the vector size)
* @param [in] len the total length of the aligned part of the matrices
* @param [in] op the function to apply
* @param [in] vecs pointers to the argument vectors
*/
template <typename Type,
          typename IdxType,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
RAFT_KERNEL __launch_bounds__(BlockSize) matrixLinewiseVecRowsSpanKernel(Type* out,
                                                                         const Type* in,
                                                                         const IdxType rowLen,
                                                                         const IdxType rowLenPadded,
                                                                         const IdxType lenPadded,
                                                                         Lambda op,
                                                                         const Vecs*... vecs)
{
  typedef Linewise<Type, IdxType, VecBytes, BlockSize> L;
  constexpr uint workSize         = L::VecElems * BlockSize;
  constexpr size_t maxVecItemSize = maxSizeOf<Vecs...>();
  uint workOffset                 = workSize * maxVecItemSize;
  // Double shared-memory region when there are multiple vectors; workOffset is
  // XOR-toggled between the halves in the pack expansion below.
  __shared__ __align__(
    maxVecItemSize *
    L::VecElems) char shm[workSize * maxVecItemSize * ((sizeof...(Vecs)) > 1 ? 2 : 1)];
  // Input is padded and aligned, so no arrOffset is needed; index in padded units.
  const IdxType blockOffset = (BlockSize * L::VecElems * blockIdx.x) % rowLenPadded;
  return L::vectorRows(
    reinterpret_cast<typename L::Vec::io_t*>(out),
    reinterpret_cast<const typename L::Vec::io_t*>(in),
    L::AlignElems::div(lenPadded),
    op,
    (workOffset ^= workSize * maxVecItemSize,
     L::loadVecPadded((Vecs*)(shm + workOffset), vecs, blockOffset, rowLen, rowLenPadded))...);
}
/**
* This kernel is similar to `matrixLinewiseVecRowsMainKernel`, but processes only the unaligned
* head and tail parts of the matrix.
* This kernel is always launched in just two blocks; the first block processes the head of the
* matrix, the second block processes the tail. It uses the same `vectorRows` function, but
* sets `VecElems = 1`
*
* @param [out] out the output matrix
* @param [in] in the input matrix
* @param [in] arrOffset the length of the unaligned head - such an offset into the matrices that
* makes them aligned to the `VecBytes`
* @param [in] arrTail the offset to the unaligned tail
* @param [in] rowLen number of elements in a row (= the vector size)
* @param [in] len the total length of the matrices (rowLen * nRows)
* @param [in] op the function to apply
* @param [in] vecs pointers to the argument vectors
*/
template <typename Type, typename IdxType, std::size_t MaxOffset, typename Lambda, typename... Vecs>
RAFT_KERNEL __launch_bounds__(MaxOffset, 2) matrixLinewiseVecRowsTailKernel(Type* out,
                                                                            const Type* in,
                                                                            const IdxType arrOffset,
                                                                            const IdxType arrTail,
                                                                            const IdxType rowLen,
                                                                            const IdxType len,
                                                                            Lambda op,
                                                                            const Vecs*... vecs)
{
  // Note, L::VecElems == 1
  constexpr uint workSize         = MaxOffset;
  constexpr size_t maxVecItemSize = maxSizeOf<Vecs...>();
  uint workOffset                 = workSize * maxVecItemSize;
  // Two shared-memory halves when there are multiple vectors (XOR-toggled below).
  __shared__ char shm[workSize * maxVecItemSize * ((sizeof...(Vecs)) > 1 ? 2 : 1)];
  typedef Linewise<Type, IdxType, sizeof(Type), MaxOffset> L;
  if (blockIdx.x == 0) {
    // first block: offset = 0, length = arrOffset
    L::vectorRows(reinterpret_cast<typename L::Vec::io_t*>(out),
                  reinterpret_cast<const typename L::Vec::io_t*>(in),
                  arrOffset,
                  op,
                  (workOffset ^= workSize * maxVecItemSize,
                   L::loadVec((Vecs*)(shm + workOffset), vecs, 0, rowLen))...);
  } else {
    // second block: offset = arrTail, length = len - arrTail
    // NB: I subtract MaxOffset (= blockDim.x) to get the correct indexing for block 1
    L::vectorRows(reinterpret_cast<typename L::Vec::io_t*>(out + arrTail - MaxOffset),
                  reinterpret_cast<const typename L::Vec::io_t*>(in + arrTail - MaxOffset),
                  len - arrTail + MaxOffset,
                  op,
                  (workOffset ^= workSize * maxVecItemSize,
                   L::loadVec((Vecs*)(shm + workOffset), vecs, arrTail % rowLen, rowLen))...);
  }
}
/** Fully occupy GPU this many times for better work balancing. */
static inline constexpr uint OptimalSmOccupancy = 16;
/**
* Calculate the grid size to be `OptimalSmOccupancy * FullyOccupiedGPU`, where `FullyOccupiedGPU`
* is the maximum number of blocks fitting in all available SMs.
*
* @tparam BlockSize blockDim of the kernel.
* @return OptimalSmOccupancy * FullyOccupiedGPU
*/
template <int BlockSize>
inline uint getOptimalGridSize()
{
  int device = 0;
  RAFT_CUDA_TRY(cudaGetDevice(&device));
  int numSms = 0;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&numSms, cudaDevAttrMultiProcessorCount, device));
  int maxThreadsPerBlock = 0;
  RAFT_CUDA_TRY(cudaDeviceGetAttribute(&maxThreadsPerBlock, cudaDevAttrMaxThreadsPerBlock, device));
  // Blocks that fit on the whole GPU at once, times the desired over-subscription factor.
  return OptimalSmOccupancy * static_cast<uint>(numSms * maxThreadsPerBlock / BlockSize);
}
// Host launcher for the column-wise (one vector element per matrix row) case:
// splits the flattened matrix into an aligned middle part handled by the vectorized
// main kernel and small unaligned head/tail parts handled by a scalar tail kernel.
template <typename Type,
          typename IdxType,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
void matrixLinewiseVecCols(Type* out,
                           const Type* in,
                           const IdxType rowLen,
                           const IdxType nRows,
                           Lambda op,
                           cudaStream_t stream,
                           const Vecs*... vecs)
{
  typedef raft::Pow2<VecBytes> AlignBytes;
  constexpr std::size_t VecElems = VecBytes / sizeof(Type);

  const IdxType totalLen = rowLen * nRows;
  // [alignedOff, alignedEnd) is the VecBytes-aligned part of the matrix.
  const Type* alignedStart = AlignBytes::roundUp(in);
  const IdxType alignedOff = IdxType(alignedStart - in);
  const IdxType alignedEnd = IdxType(AlignBytes::roundDown(in + totalLen) - in);
  const IdxType alignedLen = alignedEnd - alignedOff;
  if (alignedLen > 0) {
    constexpr dim3 bs(BlockSize, 1, 1);
    // Minimum size of the grid to make the device well occupied
    const uint occupy = getOptimalGridSize<BlockSize>();
    // does not make sense to have more blocks than this
    const uint maxBlocks = raft::ceildiv<uint>(uint(alignedLen), bs.x * VecElems);
    const dim3 gs(std::min(maxBlocks, occupy), 1, 1);
    // The work arrangement is blocked on the block and warp levels;
    // see more details at Linewise::vectorCols.
    // The value below determines how many scalar elements are processed by one thread in total.
    const IdxType elemsPerThread =
      raft::ceildiv<IdxType>(alignedLen, gs.x * VecElems * BlockSize) * VecElems;
    matrixLinewiseVecColsMainKernel<Type, IdxType, VecBytes, BlockSize, Lambda, Vecs...>
      <<<gs, bs, 0, stream>>>(out, in, alignedOff, rowLen, alignedLen, elemsPerThread, op, vecs...);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
  if (alignedLen < totalLen) {
    // should be not smaller than the warp size for better branching
    constexpr std::size_t MaxOffset = std::max(std::size_t(raft::WarpSize), VecBytes);
    // Two blocks only: block 0 handles the unaligned head, block 1 the unaligned tail.
    matrixLinewiseVecColsTailKernel<Type, IdxType, MaxOffset, Lambda, Vecs...>
      <<<dim3(2, 1, 1), dim3(MaxOffset, 1, 1), 0, stream>>>(
        out, in, alignedOff, alignedEnd, rowLen, totalLen, op, vecs...);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}
/**
 * Same as matrixLinewiseVecCols, but for padded (device_aligned_matrix_view)
 * input/output. The data is expected to be aligned and padded, so the op is
 * simply extended over the padded elements and no unaligned tail pass is needed.
 *
 * Fix: removed the unused local typedef `AlignBytes` — the pointer-alignment
 * bookkeeping of the non-padded path is not needed here, and the dead typedef
 * triggered -Wunused-local-typedefs.
 *
 * @param[out] out padded output view
 * @param[in] in padded input view
 * @param[in] rowLen number of valid (non-padding) elements per row
 * @param[in] nRows number of rows
 * @param[in] op the element-wise operation
 * @param[in] stream cuda stream to launch on
 * @param[in] vecs device pointers to the vector arguments of `op`
 */
template <typename Type,
          typename IdxType,
          typename LayoutPolicy,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
void matrixLinewiseVecColsSpan(
  raft::device_aligned_matrix_view<Type, IdxType, LayoutPolicy> out,
  raft::device_aligned_matrix_view<const Type, IdxType, LayoutPolicy> in,
  const IdxType rowLen,
  const IdxType nRows,
  Lambda op,
  cudaStream_t stream,
  const Vecs*... vecs)
{
  constexpr std::size_t VecElems = VecBytes / sizeof(Type);
  typedef raft::Pow2<raft::layout_left_padded<Type>::padding> AlignPadding;
  // Rows are stored padded up to the layout's alignment boundary.
  const uint paddedRowLen  = AlignPadding::roundUp(rowLen);
  const IdxType alignedLen = paddedRowLen * nRows;
  if (rowLen * nRows > 0) {
    constexpr dim3 bs(BlockSize, 1, 1);
    // Minimum size of the grid to make the device well occupied
    const uint occupy = getOptimalGridSize<BlockSize>();
    // does not make sense to have more blocks than this
    const uint maxBlocks = raft::ceildiv<uint>(uint(alignedLen), bs.x * VecElems);
    const dim3 gs(std::min(maxBlocks, occupy), 1, 1);
    // The work arrangement is blocked on the block and warp levels;
    // see more details at Linewise::vectorCols.
    // The value below determines how many scalar elements are processed by one thread in total.
    const IdxType elemsPerThread =
      raft::ceildiv<IdxType>(alignedLen, gs.x * VecElems * BlockSize) * VecElems;
    matrixLinewiseVecColsMainKernel<Type, IdxType, VecBytes, BlockSize, Lambda, Vecs...>
      <<<gs, bs, 0, stream>>>(out.data_handle(),
                              in.data_handle(),
                              0,
                              paddedRowLen,
                              alignedLen,
                              elemsPerThread,
                              op,
                              vecs...);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}
/**
 * Launch the "vectors indexed along rows" flavor of the line-wise op on raw
 * device pointers. Like matrixLinewiseVecCols, the buffer is split into an
 * aligned middle section (vectorized main kernel) and unaligned head/tail
 * sections (tail kernel, two blocks).
 *
 * @param[out] out output matrix data (same extents as `in`)
 * @param[in] in input matrix data (`nRows` rows of `rowLen` elements, contiguous)
 * @param[in] rowLen number of elements per row
 * @param[in] nRows number of rows
 * @param[in] op the element-wise operation
 * @param[in] stream cuda stream to launch on
 * @param[in] vecs device pointers to the vector arguments of `op`
 *            (see Linewise::vectorRows for the exact indexing)
 */
template <typename Type,
          typename IdxType,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
void matrixLinewiseVecRows(Type* out,
                           const Type* in,
                           const IdxType rowLen,
                           const IdxType nRows,
                           Lambda op,
                           cudaStream_t stream,
                           const Vecs*... vecs)
{
  typedef raft::Pow2<VecBytes> AlignBytes;
  constexpr std::size_t VecElems = VecBytes / sizeof(Type);
  const IdxType totalLen = rowLen * nRows;
  // Bounds of the maximally VecBytes-aligned middle section of the buffer.
  const Type* alignedStart = AlignBytes::roundUp(in);
  const IdxType alignedOff = IdxType(alignedStart - in);
  const IdxType alignedEnd = IdxType(AlignBytes::roundDown(in + totalLen) - in);
  const IdxType alignedLen = alignedEnd - alignedOff;
  if (alignedLen > 0) {
    constexpr dim3 bs(BlockSize, 1, 1);
    // The work arrangement is striped;
    // see more details at Linewise::vectorRows.
    // Below is the work amount performed by one block in one iteration.
    constexpr uint block_work_size = bs.x * uint(VecElems);
    /* Here I would define `grid_work_size = lcm(block_work_size, rowLen)` (Least Common Multiple)
       This way, the grid spans a set of one or more rows each iteration, and, most importantly,
       on every iteration each row processes the same set of indices within a row (= the same set
       of vector indices).
       This means, each block needs to load the values from the vector arguments only once.
       Sadly, sometimes `grid_work_size > rowLen*nRows`, and sometimes grid_work_size > UINT_MAX.
       That's why I don't declare it here explicitly.
       Instead, I straightaway compute the
         expected_grid_size = lcm(block_work_size, rowLen) / block_work_size
     */
    const uint expected_grid_size = rowLen / raft::gcd(block_work_size, uint(rowLen));
    // Minimum size of the grid to make the device well occupied
    const uint occupy = getOptimalGridSize<BlockSize>();
    const dim3 gs(std::min(
                    // does not make sense to have more blocks than this
                    raft::ceildiv<uint>(uint(alignedLen), block_work_size),
                    // increase the grid size to be not less than `occupy` while
                    // still being the multiple of `expected_grid_size`
                    raft::ceildiv<uint>(occupy, expected_grid_size) * expected_grid_size),
                  1,
                  1);
    matrixLinewiseVecRowsMainKernel<Type, IdxType, VecBytes, BlockSize, Lambda, Vecs...>
      <<<gs, bs, 0, stream>>>(
        out + alignedOff, alignedStart, alignedOff, rowLen, alignedLen, op, vecs...);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
  if (alignedLen < totalLen) {
    // Process the unaligned head and tail (one block each).
    // should be not smaller than the warp size for better branching
    constexpr std::size_t MaxOffset = std::max(std::size_t(raft::WarpSize), VecBytes);
    matrixLinewiseVecRowsTailKernel<Type, IdxType, MaxOffset, Lambda, Vecs...>
      <<<dim3(2, 1, 1), dim3(MaxOffset, 1, 1), 0, stream>>>(
        out, in, alignedOff, alignedEnd, rowLen, totalLen, op, vecs...);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}
/**
 * Same as matrixLinewiseVecRows, but for padded (device_aligned_matrix_view)
 * input/output. The data is expected to be aligned and padded, so the op is
 * simply extended over the padded elements to stay fully aligned. Special
 * treatment for 'Vecs' is needed in the kernel, as no vector elements exist
 * for the padded region.
 *
 * Fix: removed the unused local typedef `AlignBytes` — the pointer-alignment
 * bookkeeping of the non-padded path is not needed here, and the dead typedef
 * triggered -Wunused-local-typedefs.
 *
 * @param[out] out padded output view
 * @param[in] in padded input view
 * @param[in] rowLen number of valid (non-padding) elements per row
 * @param[in] nRows number of rows
 * @param[in] op the element-wise operation
 * @param[in] stream cuda stream to launch on
 * @param[in] vecs device pointers to the vector arguments of `op`
 */
template <typename Type,
          typename IdxType,
          typename LayoutPolicy,
          std::size_t VecBytes,
          int BlockSize,
          typename Lambda,
          typename... Vecs>
void matrixLinewiseVecRowsSpan(
  raft::device_aligned_matrix_view<Type, IdxType, LayoutPolicy> out,
  raft::device_aligned_matrix_view<const Type, IdxType, LayoutPolicy> in,
  const IdxType rowLen,
  const IdxType nRows,
  Lambda op,
  cudaStream_t stream,
  const Vecs*... vecs)
{
  constexpr std::size_t VecElems = VecBytes / sizeof(Type);
  typedef raft::Pow2<raft::layout_right_padded<Type>::padding> AlignPadding;
  // Rows are stored padded up to the layout's alignment boundary.
  const uint paddedRowLen  = AlignPadding::roundUp(rowLen);
  const IdxType alignedLen = paddedRowLen * nRows;
  if (rowLen * nRows > 0) {
    constexpr dim3 bs(BlockSize, 1, 1);
    // The work arrangement is striped;
    // see more details at Linewise::vectorRows.
    // Below is the work amount performed by one block in one iteration.
    constexpr uint block_work_size = bs.x * uint(VecElems);
    /* Here I would define `grid_work_size = lcm(block_work_size, rowLen)` (Least Common Multiple)
       This way, the grid spans a set of one or more rows each iteration, and, most importantly,
       on every iteration each row processes the same set of indices within a row (= the same set
       of vector indices).
       This means, each block needs to load the values from the vector arguments only once.
       Sadly, sometimes `grid_work_size > rowLen*nRows`, and sometimes grid_work_size > UINT_MAX.
       That's why I don't declare it here explicitly.
       Instead, I straightaway compute the
         expected_grid_size = lcm(block_work_size, rowLen) / block_work_size
     */
    const uint expected_grid_size = paddedRowLen / raft::gcd(block_work_size, uint(paddedRowLen));
    // Minimum size of the grid to make the device well occupied
    const uint occupy = getOptimalGridSize<BlockSize>();
    const dim3 gs(std::min(
                    // does not make sense to have more blocks than this
                    raft::ceildiv<uint>(uint(alignedLen), block_work_size),
                    // increase the grid size to be not less than `occupy` while
                    // still being the multiple of `expected_grid_size`
                    raft::ceildiv<uint>(occupy, expected_grid_size) * expected_grid_size),
                  1,
                  1);
    matrixLinewiseVecRowsSpanKernel<Type, IdxType, VecBytes, BlockSize, Lambda, Vecs...>
      <<<gs, bs, 0, stream>>>(
        out.data_handle(), in.data_handle(), rowLen, paddedRowLen, alignedLen, op, vecs...);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
  }
}
/**
* Select one of the implementations:
* a. vectors applied along/across lines
* b. recursively try different VecBytes, such that alignments of `in` and `out`
* are the same.
*
* @tparam VecBytes - size of the load/store ops in bytes.
* @tparam BlockSize - is fixed and should not affect the performance.
*/
template <std::size_t VecBytes = 16, int BlockSize = 256>
struct MatrixLinewiseOp {
  /**
   * Apply the line-wise op on a contiguous (non-padded) matrix.
   *
   * If `in` and `out` sit at different offsets relative to the VecBytes
   * alignment boundary, the call recursively retries with half the vector
   * width (down to sizeof(Type)) until the offsets match, so that vectorized
   * loads and stores remain legal on both buffers.
   */
  template <typename Type, typename IdxType, typename Lambda, typename... Vecs>
  static void run(Type* out,
                  const Type* in,
                  const IdxType lineLen,
                  const IdxType nLines,
                  const bool alongLines,
                  Lambda op,
                  cudaStream_t stream,
                  const Vecs*... vecs)
  {
    // Compile-time recursion: halve VecBytes until the two pointers share the
    // same alignment offset (guaranteed to terminate at VecBytes == sizeof(Type)).
    if constexpr (VecBytes > sizeof(Type)) {
      if (!raft::Pow2<VecBytes>::areSameAlignOffsets(in, out))
        return MatrixLinewiseOp<std::max((VecBytes >> 1), sizeof(Type)), BlockSize>::run(
          out, in, lineLen, nLines, alongLines, op, stream, vecs...);
    }
    // Dispatch on the vector orientation relative to the lines.
    if (alongLines)
      return matrixLinewiseVecRows<Type, IdxType, VecBytes, BlockSize, Lambda, Vecs...>(
        out, in, lineLen, nLines, op, stream, vecs...);
    else
      return matrixLinewiseVecCols<Type, IdxType, VecBytes, BlockSize, Lambda, Vecs...>(
        out, in, lineLen, nLines, op, stream, vecs...);
  }
  /**
   * Apply the line-wise op on padded (aligned) matrix views.
   * Both views must use the same padded layout (row- or col-major) and share
   * the same alignment offset; otherwise this raises via RAFT_EXPECTS.
   */
  template <typename Type,
            typename IdxType,
            typename LayoutPolicy,
            typename Lambda,
            typename... Vecs>
  static void runPadded(raft::device_aligned_matrix_view<Type, IdxType, LayoutPolicy> out,
                        raft::device_aligned_matrix_view<const Type, IdxType, LayoutPolicy> in,
                        const IdxType lineLen,
                        const IdxType nLines,
                        const bool alongLines,
                        Lambda op,
                        cudaStream_t stream,
                        const Vecs*... vecs)
  {
    constexpr auto is_rowmajor = std::is_same_v<LayoutPolicy, raft::layout_right_padded<Type>>;
    constexpr auto is_colmajor = std::is_same_v<LayoutPolicy, raft::layout_left_padded<Type>>;
    static_assert(is_rowmajor || is_colmajor,
                  "layout for in and out must be either padded row or col major");
    // also statically assert padded matrix alignment == 2^i*VecBytes
    RAFT_EXPECTS(raft::Pow2<VecBytes>::areSameAlignOffsets(in.data_handle(), out.data_handle()),
                 "The matrix views in and out does not have correct alignment");
    if (alongLines)
      return matrixLinewiseVecRowsSpan<Type,
                                       IdxType,
                                       LayoutPolicy,
                                       VecBytes,
                                       BlockSize,
                                       Lambda,
                                       Vecs...>(out, in, lineLen, nLines, op, stream, vecs...);
    else
      return matrixLinewiseVecColsSpan<Type,
                                       IdxType,
                                       LayoutPolicy,
                                       VecBytes,
                                       BlockSize,
                                       Lambda,
                                       Vecs...>(out, in, lineLen, nLines, op, stream, vecs...);
  }
};
} // end namespace detail
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/columnWiseSort.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstddef>
#include <cub/cub.cuh>
#include <limits>
#include <map>
#include <raft/util/cuda_utils.cuh>
// Instantiate and launch the block-level key-value sort kernel: one thread
// block per row, `elemPT` items per thread; out-of-range key slots are padded
// with the key type's max value so they sort to the end of the row.
// Relies on `InType`/`OutType` template parameters being in scope at the call site.
#define INST_BLOCK_SORT(keyIn, keyOut, valueInOut, rows, columns, blockSize, elemPT, stream) \
  devKeyValSortColumnPerRow<InType, OutType, blockSize, elemPT><<<rows, blockSize, 0, stream>>>( \
    keyIn, keyOut, valueInOut, rows, columns, std::numeric_limits<InType>::max())
namespace raft {
namespace matrix {
namespace detail {
// Compile-time guard for the block-sort kernel: tells whether the
// (key type, block size) combination may be instantiated. `double` keys are
// capped at 512 threads per block; the smaller key types go up to 1024.
template <typename InType, int BLOCK_SIZE>
struct TemplateChecker {
  static constexpr bool isSmallKey = std::is_same<InType, short>::value ||
                                     std::is_same<InType, int>::value ||
                                     std::is_same<InType, float>::value;

  enum {
    IsValid = (isSmallKey && BLOCK_SIZE <= 1024) ||
              (std::is_same<InType, double>::value && BLOCK_SIZE <= 512)
  };
};
/**
 * Shared-memory footprint of one block of devKeyValSortColumnPerRow.
 * The key-loading stage and the radix-sort stage are separated by a
 * __syncthreads() in the kernel, so their temporary storage never coexists
 * and is overlapped in a union to halve the shared-memory requirement.
 */
template <typename InType, typename OutType, int BLOCK_SIZE, int ITEMS_PER_THREAD>
struct SmemPerBlock {
  typedef cub::BlockLoad<InType, BLOCK_SIZE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>
    BlockLoadTypeKey;
  typedef cub::BlockRadixSort<InType, BLOCK_SIZE, ITEMS_PER_THREAD, OutType> BlockRadixSortType;
  union TempStorage {
    typename BlockLoadTypeKey::TempStorage keyLoad;
    typename BlockRadixSortType::TempStorage sort;
  } tempStorage;
};
// Writes the within-row column index into every element of a flattened
// row-major buffer: element k receives k % n_cols. One thread per element,
// with a guard for the grid tail.
template <typename InType>
RAFT_KERNEL devLayoutIdx(InType* in, int n_cols, int totalElements)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < totalElements) { in[tid] = tid % n_cols; }
}
// Fills in[i] = i * value for i in [0, n_times); used to lay out the evenly
// spaced segment offsets consumed by the segmented radix sort.
template <typename T>
RAFT_KERNEL devOffsetKernel(T* in, T value, int n_times)
{
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < n_times) { in[tid] = tid * value; }
}
// block level radix sort - can only sort as much data we can fit within shared memory
//
// One thread block sorts one row: keys are loaded (padded with MAX_VALUE beyond
// n_cols so that out-of-range slots sort to the end), values are initialized to
// the column indices, then both are sorted together. Afterwards `inputVals`
// holds the sorted permutation of column indices and, if `outputKeys` is
// non-null, it receives the sorted keys. __launch_bounds__(1024, 1) caps the
// block size at 1024 threads.
template <
  typename InType,
  typename OutType,
  int BLOCK_SIZE,
  int ITEMS_PER_THREAD,
  typename std::enable_if<TemplateChecker<InType, BLOCK_SIZE>::IsValid, InType>::type* = nullptr>
RAFT_KERNEL __launch_bounds__(1024, 1) devKeyValSortColumnPerRow(const InType* inputKeys,
                                                                 InType* outputKeys,
                                                                 OutType* inputVals,
                                                                 int n_rows,
                                                                 int n_cols,
                                                                 InType MAX_VALUE)
{
  typedef cub::BlockLoad<InType, BLOCK_SIZE, ITEMS_PER_THREAD, cub::BLOCK_LOAD_WARP_TRANSPOSE>
    BlockLoadTypeKey;
  typedef cub::BlockRadixSort<InType, BLOCK_SIZE, ITEMS_PER_THREAD, OutType> BlockRadixSortType;
  // Shared storage for load and sort stages (overlapped in a union, see SmemPerBlock).
  __shared__ SmemPerBlock<InType, OutType, BLOCK_SIZE, ITEMS_PER_THREAD> tmpSmem;
  InType threadKeys[ITEMS_PER_THREAD];
  OutType threadValues[ITEMS_PER_THREAD];
  // blockIdx.x is the row index; rows are contiguous (row-major).
  int blockOffset = blockIdx.x * n_cols;
  BlockLoadTypeKey(tmpSmem.tempStorage.keyLoad)
    .Load(inputKeys + blockOffset, threadKeys, n_cols, MAX_VALUE);
  // Values start as column indices in the same blocked arrangement as the keys.
  OutType idxBase = threadIdx.x * ITEMS_PER_THREAD;
  for (int i = 0; i < ITEMS_PER_THREAD; i++) {
    OutType eId = idxBase + (OutType)i;
    if (eId < n_cols)
      threadValues[i] = eId;
    else
      // NOTE(review): MAX_VALUE is an InType sentinel converted to OutType here;
      // padded slots sort to the end anyway via their padded keys — confirm the
      // conversion is intended for all InType/OutType combinations.
      threadValues[i] = MAX_VALUE;
  }
  // The load stage's shared storage is reused by the sort stage below.
  __syncthreads();
  BlockRadixSortType(tmpSmem.tempStorage.sort).SortBlockedToStriped(threadKeys, threadValues);
  // storing index values back (not keys)
  cub::StoreDirectStriped<BLOCK_SIZE>(threadIdx.x, inputVals + blockOffset, threadValues, n_cols);
  if (outputKeys) {
    cub::StoreDirectStriped<BLOCK_SIZE>(threadIdx.x, outputKeys + blockOffset, threadKeys, n_cols);
  }
}
// SFINAE counterpart of the kernel above: selected for (InType, BLOCK_SIZE)
// combinations rejected by TemplateChecker, so that every template
// instantiation emitted by INST_BLOCK_SORT compiles (this overload is never
// actually launched for invalid combinations).
template <
  typename InType,
  typename OutType,
  int BLOCK_SIZE,
  int ITEMS_PER_THREAD,
  typename std::enable_if<!(TemplateChecker<InType, BLOCK_SIZE>::IsValid), InType>::type* = nullptr>
RAFT_KERNEL devKeyValSortColumnPerRow(const InType* inputKeys,
                                      InType* outputKeys,
                                      OutType* inputVals,
                                      int n_rows,
                                      int n_cols,
                                      InType MAX_VALUE)
{
  // place holder function
  // so that compiler unrolls for all template types successfully
}
// Helper: fill `in` with per-row column indices (0 .. n_columns-1 repeated for
// each row) — these become the "values" side of the key-value sort.
template <typename OutType>
cudaError_t layoutIdx(OutType* in, int n_rows, int n_columns, cudaStream_t stream)
{
  const int totalElements = n_rows * n_columns;
  constexpr int blockSize = 256;
  const int gridSize      = (totalElements + blockSize - 1) / blockSize;
  devLayoutIdx<OutType><<<gridSize, blockSize, 0, stream>>>(in, n_columns, totalElements);
  return cudaGetLastError();
}
// Helper: write the evenly spaced segment offsets (i * value for each i) that
// cub::DeviceSegmentedRadixSort uses as begin/end boundaries of the rows.
template <typename T>
cudaError_t layoutSortOffset(T* in, T value, int n_times, cudaStream_t stream)
{
  constexpr int blockSize = 128;
  const int gridSize      = (n_times + blockSize - 1) / blockSize;
  devOffsetKernel<T><<<gridSize, blockSize, 0, stream>>>(in, value, n_times);
  return cudaGetLastError();
}
/**
* @brief sort columns within each row of row-major input matrix and return sorted indexes
* modelled as key-value sort with key being input matrix and value being index of values
* @param in: input matrix
* @param out: output value(index) matrix
* @param n_rows: number rows of input matrix
* @param n_cols: number columns of input matrix
* @param bAllocWorkspace: check returned value, if true allocate workspace passed in workspaceSize
* @param workspacePtr: pointer to workspace memory
* @param workspaceSize: Size of workspace to be allocated
* @param stream: cuda stream to execute prim on
* @param sortedKeys: Optional, output matrix for sorted keys (input)
*/
// Two-phase workspace protocol: the first call (workspaceSize == 0 or null
// workspacePtr) only computes `workspaceSize` and sets `bAllocWorkspace`; the
// caller then allocates and calls again with the buffer to run the sort.
template <typename InType, typename OutType>
void sortColumnsPerRow(const InType* in,
                       OutType* out,
                       int n_rows,
                       int n_columns,
                       bool& bAllocWorkspace,
                       void* workspacePtr,
                       size_t& workspaceSize,
                       cudaStream_t stream,
                       InType* sortedKeys = nullptr)
{
  // assume non-square row-major matrices
  // current use-case: KNN, trustworthiness scores
  // output : either sorted indices or sorted indices and input values
  // future : this prim can be modified to be more generic and serve as a way to sort column entries
  // per row
  //          i.e. another output format: sorted values only
  // NOTE(review): totalElements is int; n_rows * n_columns near 2^31 would
  // overflow — callers appear to keep sizes below this, verify.
  int totalElements = n_rows * n_columns;
  size_t perElementSmemUsage = sizeof(InType) + sizeof(OutType);
  size_t memAlignWidth = 256;
  // @ToDo: Figure out dynamic shared memory for block sort kernel - better for volta and beyond
  // int currDevice = 0, smemLimit = 0;
  // RAFT_CUDA_TRY(cudaGetDevice(&currDevice));
  // RAFT_CUDA_TRY(cudaDeviceGetAttribute(&smemLimit, cudaDevAttrMaxSharedMemoryPerBlock,
  // currDevice)); size_t maxElementsForBlockSort = smemLimit / perElementSmemUsage;
  // for 48KB smem/block, can fit in 6144 4byte key-value pair
  // assuming key-value sort for now - smem computation will change for value only sort
  // dtype being size of key-value pair
  std::map<size_t, int> dtypeToColumnMap = {{4, 12288},  // short + short
                                            {8, 12288},  // float/int + int/float
                                            {12, 6144},  // double + int/float
                                            {16, 6144}}; // double + double
  // Strategy 1: a whole row fits in shared memory -> one block-level radix
  // sort per row, no workspace required.
  if (dtypeToColumnMap.count(perElementSmemUsage) != 0 &&
      n_columns <= dtypeToColumnMap[perElementSmemUsage]) {
    // more elements per thread --> more register pressure
    // 512(blockSize) * 8 elements per thread = 71 register / thread
    // instantiate some kernel combinations
    if (n_columns <= 512)
      INST_BLOCK_SORT(in, sortedKeys, out, n_rows, n_columns, 128, 4, stream);
    else if (n_columns > 512 && n_columns <= 1024)
      INST_BLOCK_SORT(in, sortedKeys, out, n_rows, n_columns, 128, 8, stream);
    else if (n_columns > 1024 && n_columns <= 3072)
      INST_BLOCK_SORT(in, sortedKeys, out, n_rows, n_columns, 512, 6, stream);
    else if (n_columns > 3072 && n_columns <= 4096)
      INST_BLOCK_SORT(in, sortedKeys, out, n_rows, n_columns, 512, 8, stream);
    else if (n_columns > 4096 && n_columns <= 6144)
      INST_BLOCK_SORT(in, sortedKeys, out, n_rows, n_columns, 512, 12, stream);
    else
      INST_BLOCK_SORT(in, sortedKeys, out, n_rows, n_columns, 1024, 12, stream);
  } else if (n_columns <= (1 << 18) && n_rows > 1) {
    // Strategy 2:
    // device Segmented radix sort
    // 2^18 column cap to restrict size of workspace ~512 MB
    // will give better perf than below deviceWide Sort for even larger dims
    int numSegments = n_rows + 1;
    // need auxiliary storage: cub sorting + keys (if user not passing) +
    // staging for values out + segment partition
    if (workspaceSize == 0 || !workspacePtr) {
      OutType* tmpValIn = nullptr;
      int* tmpOffsetBuffer = nullptr;
      // first call is to get size of workspace
      RAFT_CUDA_TRY(cub::DeviceSegmentedRadixSort::SortPairs(workspacePtr,
                                                             workspaceSize,
                                                             in,
                                                             sortedKeys,
                                                             tmpValIn,
                                                             out,
                                                             totalElements,
                                                             numSegments,
                                                             tmpOffsetBuffer,
                                                             tmpOffsetBuffer + 1));
      bAllocWorkspace = true;
      // more staging space for temp output of keys
      if (!sortedKeys)
        workspaceSize += raft::alignTo(sizeof(InType) * (size_t)totalElements, memAlignWidth);
      // value in KV pair need to be passed in, out buffer is separate
      workspaceSize += raft::alignTo(sizeof(OutType) * (size_t)totalElements, memAlignWidth);
      // for segment offsets
      workspaceSize += raft::alignTo(sizeof(int) * (size_t)numSegments, memAlignWidth);
    } else {
      // Second call: carve the staging buffers out of the front of the
      // workspace, then hand the remainder to cub.
      size_t workspaceOffset = 0;
      if (!sortedKeys) {
        sortedKeys = reinterpret_cast<InType*>(workspacePtr);
        workspaceOffset = raft::alignTo(sizeof(InType) * (size_t)totalElements, memAlignWidth);
        workspacePtr = (void*)((size_t)workspacePtr + workspaceOffset);
      }
      OutType* dValuesIn = reinterpret_cast<OutType*>(workspacePtr);
      workspaceOffset = raft::alignTo(sizeof(OutType) * (size_t)totalElements, memAlignWidth);
      workspacePtr = (void*)((size_t)workspacePtr + workspaceOffset);
      int* dSegmentOffsets = reinterpret_cast<int*>(workspacePtr);
      workspaceOffset = raft::alignTo(sizeof(int) * (size_t)numSegments, memAlignWidth);
      workspacePtr = (void*)((size_t)workspacePtr + workspaceOffset);
      // layout idx
      RAFT_CUDA_TRY(layoutIdx(dValuesIn, n_rows, n_columns, stream));
      // layout segment lengths - spread out column length
      RAFT_CUDA_TRY(layoutSortOffset(dSegmentOffsets, n_columns, numSegments, stream));
      RAFT_CUDA_TRY(cub::DeviceSegmentedRadixSort::SortPairs(workspacePtr,
                                                             workspaceSize,
                                                             in,
                                                             sortedKeys,
                                                             dValuesIn,
                                                             out,
                                                             totalElements,
                                                             numSegments,
                                                             dSegmentOffsets,
                                                             dSegmentOffsets + 1,
                                                             0,
                                                             sizeof(InType) * 8,
                                                             stream));
    }
  } else {
    // Strategy 3:
    // batched per row device wide sort
    if (workspaceSize == 0 || !workspacePtr) {
      OutType* tmpValIn = nullptr;
      // first call is to get size of workspace
      RAFT_CUDA_TRY(cub::DeviceRadixSort::SortPairs(
        workspacePtr, workspaceSize, in, sortedKeys, tmpValIn, out, n_columns));
      bAllocWorkspace = true;
      if (!sortedKeys)
        workspaceSize += raft::alignTo(sizeof(InType) * (size_t)n_columns, memAlignWidth);
      workspaceSize += raft::alignTo(sizeof(OutType) * (size_t)n_columns, memAlignWidth);
    } else {
      size_t workspaceOffset = 0;
      bool userKeyOutputBuffer = true;
      if (!sortedKeys) {
        userKeyOutputBuffer = false;
        sortedKeys = reinterpret_cast<InType*>(workspacePtr);
        workspaceOffset = raft::alignTo(sizeof(InType) * (size_t)n_columns, memAlignWidth);
        workspacePtr = (void*)((size_t)workspacePtr + workspaceOffset);
      }
      OutType* dValuesIn = reinterpret_cast<OutType*>(workspacePtr);
      workspaceOffset = raft::alignTo(sizeof(OutType) * (size_t)n_columns, memAlignWidth);
      workspacePtr = (void*)((size_t)workspacePtr + workspaceOffset);
      // layout idx
      RAFT_CUDA_TRY(layoutIdx(dValuesIn, 1, n_columns, stream));
      // One device-wide sort per row; only advance the key output pointer when
      // the caller supplied their own buffer (otherwise the staging row is reused).
      for (int i = 0; i < n_rows; i++) {
        InType* rowIn =
          reinterpret_cast<InType*>((size_t)in + (i * sizeof(InType) * (size_t)n_columns));
        OutType* rowOut =
          reinterpret_cast<OutType*>((size_t)out + (i * sizeof(OutType) * (size_t)n_columns));
        RAFT_CUDA_TRY(cub::DeviceRadixSort::SortPairs(
          workspacePtr, workspaceSize, rowIn, sortedKeys, dValuesIn, rowOut, n_columns));
        if (userKeyOutputBuffer)
          sortedKeys =
            reinterpret_cast<InType*>((size_t)sortedKeys + sizeof(InType) * (size_t)n_columns);
      }
    }
  }
}
}; // end namespace detail
}; // end namespace matrix
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/gather.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <functional>
#include <raft/core/operators.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
namespace detail {
/** Tiling policy for the gather kernel.
*
* The output matrix is considered as a flattened array, an approach that provides much better
* performance than 1 row per block when D is small. Additionally, each thread works on multiple
* output elements using an unrolled loop (approx. 30% faster than working on a single element)
*/
template <int tpb, int wpt>
struct gather_policy {
  static constexpr int n_threads       = tpb;        // threads per block
  static constexpr int work_per_thread = wpt;        // output elements per thread
  static constexpr int stride          = tpb * wpt;  // elements covered by one block
};
/** Conditionally copies rows from the source matrix 'in' into the destination
 * matrix 'out' according to a map (or a transformed map).
 *
 * The output is treated as a flat array of `len` elements; each thread handles
 * Policy::work_per_thread of them (one per unrolled iteration), blocks covering
 * Policy::stride elements each. When the predicate rejects a destination row,
 * the corresponding output elements are left untouched (not zeroed). */
template <typename Policy,
          typename InputIteratorT,
          typename MapIteratorT,
          typename StencilIteratorT,
          typename PredicateOp,
          typename MapTransformOp,
          typename OutputIteratorT,
          typename IndexT>
RAFT_KERNEL gather_kernel(const InputIteratorT in,
                          IndexT D,
                          IndexT len,
                          const MapIteratorT map,
                          StencilIteratorT stencil,
                          OutputIteratorT out,
                          PredicateOp pred_op,
                          MapTransformOp transform_op)
{
  typedef typename std::iterator_traits<MapIteratorT>::value_type MapValueT;
  typedef typename std::iterator_traits<StencilIteratorT>::value_type StencilValueT;
#pragma unroll
  for (IndexT wid = 0; wid < Policy::work_per_thread; wid++) {
    IndexT tid = threadIdx.x + (Policy::work_per_thread * static_cast<IndexT>(blockIdx.x) + wid) *
                                 Policy::n_threads;
    if (tid < len) {
      // Decompose the flat output index into (destination row, column).
      IndexT i_dst = tid / D;
      IndexT j = tid % D;
      MapValueT map_val = map[i_dst];
      StencilValueT stencil_val = stencil[i_dst];
      bool predicate = pred_op(stencil_val);
      if (predicate) {
        // Transformed map value selects the source row.
        IndexT i_src = transform_op(map_val);
        out[tid] = in[i_src * D + j];
      }
    }
  }
}
/**
* @brief gather conditionally copies rows from a source matrix into a destination matrix according
* to a transformed map.
*
* @tparam InputIteratorT Random-access iterator type, for reading input matrix (may be a
* simple pointer type).
* @tparam MapIteratorT Random-access iterator type, for reading input map (may be a simple
* pointer type).
* @tparam StencilIteratorT Random-access iterator type, for reading input stencil (may be a
* simple pointer type).
* @tparam UnaryPredicateOp Unary lambda expression or operator type, UnaryPredicateOp's result
* type must be convertible to bool type.
* @tparam MapTransformOp Unary lambda expression or operator type, MapTransformOp's result
* type must be convertible to IndexT.
* @tparam OutputIteratorT Random-access iterator type, for writing output matrix (may be a
* simple pointer type).
* @tparam IndexT Index type.
*
* @param in Pointer to the input matrix (assumed to be row-major)
* @param D Leading dimension of the input matrix 'in', which in-case of row-major
* storage is the number of columns
* @param N Second dimension
* @param map Pointer to the input sequence of gather locations
* @param stencil Pointer to the input sequence of stencil or predicate values
* @param map_length The length of 'map' and 'stencil'
* @param out Pointer to the output matrix (assumed to be row-major)
* @param pred_op Predicate to apply to the stencil values
* @param transform_op The transformation operation, transforms the map values to IndexT
* @param stream CUDA stream to launch kernels within
*/
template <typename InputIteratorT,
          typename MapIteratorT,
          typename StencilIteratorT,
          typename UnaryPredicateOp,
          typename MapTransformOp,
          typename OutputIteratorT,
          typename IndexT>
void gatherImpl(const InputIteratorT in,
                IndexT D,
                IndexT N,
                const MapIteratorT map,
                StencilIteratorT stencil,
                IndexT map_length,
                OutputIteratorT out,
                UnaryPredicateOp pred_op,
                MapTransformOp transform_op,
                cudaStream_t stream)
{
  // Nothing to do for degenerate inputs.
  if (map_length <= 0 || N <= 0 || D <= 0) return;

  // Total number of output elements (the output is treated as a flat array).
  const IndexT len = map_length * D;

  constexpr int TPB = 128;
  const int n_sm    = raft::getMultiProcessorCount();

  // Launch the kernel under the given tiling policy (passed as a tag object).
  const auto launch = [&](auto policy_tag) {
    using Policy          = decltype(policy_tag);
    const IndexT n_blocks = raft::ceildiv(len, static_cast<IndexT>(Policy::stride));
    gather_kernel<Policy><<<n_blocks, Policy::n_threads, 0, stream>>>(
      in, D, len, map, stencil, out, pred_op, transform_op);
  };

  // The following empirical heuristics enforce that we keep a good balance between having enough
  // blocks and enough work per thread.
  if (len < static_cast<IndexT>(32 * TPB * n_sm)) {
    launch(gather_policy<TPB, 1>{});
  } else if (len < static_cast<IndexT>(32 * 4 * TPB * n_sm)) {
    launch(gather_policy<TPB, 4>{});
  } else {
    launch(gather_policy<TPB, 8>{});
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
* @brief gather copies rows from a source matrix into a destination matrix according to a map.
*
* @tparam InputIteratorT Random-access iterator type, for reading input matrix (may be a
* simple pointer type).
* @tparam MapIteratorT Random-access iterator type, for reading input map (may be a simple
* pointer type).
* @tparam OutputIteratorT Random-access iterator type, for writing output matrix (may be a
* simple pointer type).
* @tparam IndexT Index type.
*
* @param in Pointer to the input matrix (assumed to be row-major)
* @param D Leading dimension of the input matrix 'in', which in-case of row-major
* storage is the number of columns
* @param N Second dimension
* @param map Pointer to the input sequence of gather locations
* @param map_length The length of 'map' and 'stencil'
* @param out Pointer to the output matrix (assumed to be row-major)
* @param stream CUDA stream to launch kernels within
*/
template <typename InputIteratorT, typename MapIteratorT, typename OutputIteratorT, typename IndexT>
void gather(const InputIteratorT in,
            IndexT D,
            IndexT N,
            const MapIteratorT map,
            IndexT map_length,
            OutputIteratorT out,
            cudaStream_t stream)
{
  // Unconditional gather: the map doubles as the (ignored) stencil, the
  // predicate is always true, and map values are used as row indices directly.
  // Fix: removed an unused local typedef of the map value type, which only
  // triggered -Wunused-local-typedefs.
  gatherImpl(
    in, D, N, map, map, map_length, out, raft::const_op(true), raft::identity_op(), stream);
}
/**
* @brief gather copies rows from a source matrix into a destination matrix according to a
* transformed map.
*
* @tparam InputIteratorT Random-access iterator type, for reading input matrix (may be a
* simple pointer type).
* @tparam MapIteratorT Random-access iterator type, for reading input map (may be a simple
* pointer type).
* @tparam MapTransformOp Unary lambda expression or operator type, MapTransformOp's result
* type must be convertible to IndexT.
* @tparam OutputIteratorT Random-access iterator type, for writing output matrix (may be a
* simple pointer type).
* @tparam IndexT Index type.
*
* @param in Pointer to the input matrix (assumed to be row-major)
* @param D Leading dimension of the input matrix 'in', which in-case of row-major
* storage is the number of columns
* @param N Second dimension
* @param map Pointer to the input sequence of gather locations
* @param map_length The length of 'map' and 'stencil'
* @param out Pointer to the output matrix (assumed to be row-major)
* @param transform_op The transformation operation, transforms the map values to IndexT
* @param stream CUDA stream to launch kernels within
*/
template <typename InputIteratorT,
          typename MapIteratorT,
          typename MapTransformOp,
          typename OutputIteratorT,
          typename IndexT>
void gather(const InputIteratorT in,
            IndexT D,
            IndexT N,
            const MapIteratorT map,
            IndexT map_length,
            OutputIteratorT out,
            MapTransformOp transform_op,
            cudaStream_t stream)
{
  // Unconditional gather with a user-supplied transform on the map values;
  // the map doubles as the (ignored) stencil and the predicate is always true.
  // Fix: removed an unused local typedef of the map value type, which only
  // triggered -Wunused-local-typedefs.
  gatherImpl(in, D, N, map, map, map_length, out, raft::const_op(true), transform_op, stream);
}
/**
* @brief gather_if conditionally copies rows from a source matrix into a destination matrix
* according to a map.
*
* @tparam InputIteratorT Random-access iterator type, for reading input matrix (may be a
* simple pointer type).
* @tparam MapIteratorT Random-access iterator type, for reading input map (may be a simple
* pointer type).
* @tparam StencilIteratorT Random-access iterator type, for reading input stencil (may be a
* simple pointer type).
* @tparam UnaryPredicateOp Unary lambda expression or operator type, UnaryPredicateOp's result
* type must be convertible to bool type.
* @tparam OutputIteratorT Random-access iterator type, for writing output matrix (may be a
* simple pointer type).
* @tparam IndexT Index type.
*
* @param in Pointer to the input matrix (assumed to be row-major)
* @param D Leading dimension of the input matrix 'in', which in-case of row-major
* storage is the number of columns
* @param N Second dimension
* @param map Pointer to the input sequence of gather locations
* @param stencil Pointer to the input sequence of stencil or predicate values
* @param map_length The length of 'map' and 'stencil'
* @param out Pointer to the output matrix (assumed to be row-major)
* @param pred_op Predicate to apply to the stencil values
* @param stream CUDA stream to launch kernels within
*/
template <typename InputIteratorT,
          typename MapIteratorT,
          typename StencilIteratorT,
          typename UnaryPredicateOp,
          typename OutputIteratorT,
          typename IndexT>
void gather_if(const InputIteratorT in,
               IndexT D,
               IndexT N,
               const MapIteratorT map,
               StencilIteratorT stencil,
               IndexT map_length,
               OutputIteratorT out,
               UnaryPredicateOp pred_op,
               cudaStream_t stream)
{
  // No map transformation requested: forward with the identity transform so the raw
  // map values are used as row indices. (Removed the unused local typedef `MapValueT`.)
  gatherImpl(in, D, N, map, stencil, map_length, out, pred_op, raft::identity_op(), stream);
}
/**
* @brief gather_if conditionally copies rows from a source matrix into a destination matrix
* according to a transformed map.
*
* @tparam InputIteratorT Random-access iterator type, for reading input matrix (may be a
* simple pointer type).
* @tparam MapIteratorT Random-access iterator type, for reading input map (may be a simple
* pointer type).
* @tparam StencilIteratorT Random-access iterator type, for reading input stencil (may be a
* simple pointer type).
* @tparam UnaryPredicateOp Unary lambda expression or operator type, UnaryPredicateOp's result
* type must be convertible to bool type.
* @tparam MapTransformOp Unary lambda expression or operator type, MapTransformOp's result
* type must be convertible to IndexT type.
* @tparam OutputIteratorT Random-access iterator type, for writing output matrix (may be a
* simple pointer type).
* @tparam IndexT Index type.
*
* @param in Pointer to the input matrix (assumed to be row-major)
* @param D Leading dimension of the input matrix 'in', which in-case of row-major
* storage is the number of columns
* @param N Second dimension
* @param map Pointer to the input sequence of gather locations
* @param stencil Pointer to the input sequence of stencil or predicate values
* @param map_length The length of 'map' and 'stencil'
* @param out Pointer to the output matrix (assumed to be row-major)
* @param pred_op Predicate to apply to the stencil values
* @param transform_op The transformation operation, transforms the map values to IndexT
* @param stream CUDA stream to launch kernels within
*/
template <typename InputIteratorT,
          typename MapIteratorT,
          typename StencilIteratorT,
          typename UnaryPredicateOp,
          typename MapTransformOp,
          typename OutputIteratorT,
          typename IndexT>
void gather_if(const InputIteratorT in,
               IndexT D,
               IndexT N,
               const MapIteratorT map,
               StencilIteratorT stencil,
               IndexT map_length,
               OutputIteratorT out,
               UnaryPredicateOp pred_op,
               MapTransformOp transform_op,
               cudaStream_t stream)
{
  // Fully general entry point: both a stencil predicate and a map transform are
  // supplied by the caller; simply forward everything to the implementation.
  // (Removed the unused local typedef `MapValueT`.)
  gatherImpl(in, D, N, map, stencil, map_length, out, pred_op, transform_op, stream);
}
} // namespace detail
} // namespace matrix
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/select_k.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef RAFT_EXPLICIT_INSTANTIATE_ONLY
#include "select_k-inl.cuh"
#endif
#ifdef RAFT_COMPILED
#include "select_k-ext.cuh"
#endif
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/select_k-ext.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint> // uint32_t
#include <cuda_fp16.h> // __half
#include <raft/core/device_resources.hpp>
#include <raft/util/raft_explicit.hpp> // RAFT_EXPLICIT
#include <rmm/cuda_stream_view.hpp> // rmm:cuda_stream_view
#include <rmm/mr/device/device_memory_resource.hpp> // rmm::mr::device_memory_resource
#ifdef RAFT_EXPLICIT_INSTANTIATE_ONLY
namespace raft::matrix::detail {
// Declaration-only stub for the explicit-instantiation (precompiled) build path:
// selects the k smallest (select_min == true) or largest entries per row of a
// batch_size x len row-major matrix, writing results to out_val / out_idx
// (each of size batch_size x k). The definition lives in select_k-inl.cuh;
// RAFT_EXPLICIT turns an accidental implicit instantiation into a build error.
// NOTE(review): in_idx may presumably be nullptr (positions used as indices) —
// confirm against the -inl implementation.
template <typename T, typename IdxT>
void select_k(raft::resources const& handle,
              const T* in_val,
              const IdxT* in_idx,
              size_t batch_size,
              size_t len,
              int k,
              T* out_val,
              IdxT* out_idx,
              bool select_min,
              rmm::mr::device_memory_resource* mr = nullptr,
              bool sorted = false) RAFT_EXPLICIT;
} // namespace raft::matrix::detail
#endif // RAFT_EXPLICIT_INSTANTIATE_ONLY
#define instantiate_raft_matrix_detail_select_k(T, IdxT) \
extern template void raft::matrix::detail::select_k(raft::resources const& handle, \
const T* in_val, \
const IdxT* in_idx, \
size_t batch_size, \
size_t len, \
int k, \
T* out_val, \
IdxT* out_idx, \
bool select_min, \
rmm::mr::device_memory_resource* mr, \
bool sorted)
instantiate_raft_matrix_detail_select_k(__half, uint32_t);
instantiate_raft_matrix_detail_select_k(__half, int64_t);
instantiate_raft_matrix_detail_select_k(float, int64_t);
instantiate_raft_matrix_detail_select_k(float, uint32_t);
// needed for brute force knn
instantiate_raft_matrix_detail_select_k(float, int);
// We did not have these two for double before, but there are tests for them. We
// therefore include them here.
instantiate_raft_matrix_detail_select_k(double, int64_t);
instantiate_raft_matrix_detail_select_k(double, uint32_t);
#undef instantiate_raft_matrix_detail_select_k
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/select_warpsort.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/detail/macros.hpp>
#include <raft/core/logger.hpp>
#include <raft/util/bitonic_sort.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/integer_utils.hpp>
#include <raft/util/pow2_utils.cuh>
#include <algorithm>
#include <functional>
#include <type_traits>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
/*
Three APIs of different scopes are provided:
1. host function: select_k()
2. block-wide API: class block_sort
3. warp-wide API: several implementations of warp_sort_*
1. select_k()
(see the docstring)
2. class block_sort
It can be regarded as a fixed size priority queue for a thread block,
although the API is not typical.
one of the classes `warp_sort_*` can be used to instantiate block_sort.
It uses dynamic shared memory as an intermediate buffer.
So the required shared memory size should be calculated using
calc_smem_size_for_block_wide() and passed as the 3rd kernel launch parameter.
To add elements to the queue, use add(T val, IdxT idx) with unique values per-thread.
Use WarpSortClass<...>::kDummy constant for the threads outside of input bounds.
After adding is finished, function done() should be called. And finally, store() is used to get
the top-k result.
Example:
RAFT_KERNEL kernel() {
block_sort<warp_sort_immediate, ...> queue(...);
for (IdxT i = threadIdx.x; i < len, i += blockDim.x) {
queue.add(in[i], in_idx[i]);
}
queue.done();
queue.store(out, out_idx);
}
int smem_size = calc_smem_size_for_block_wide<T>(...);
kernel<<<grid_dim, block_dim, smem_size>>>();
3. class warp_sort_*
These two classes can be regarded as fixed size priority queue for a warp.
Usage is similar to class block_sort. No shared memory is needed.
The host function (select_k) uses a heuristic to choose between these two classes for
sorting, warp_sort_immediate being chosen when the number of inputs per warp is somewhat small
(see the usage of LaunchThreshold<warp_sort_immediate>::len_factor_for_choosing).
Example:
RAFT_KERNEL kernel() {
warp_sort_immediate<...> queue(...);
int warp_id = threadIdx.x / WarpSize;
int lane_id = threadIdx.x % WarpSize;
for (IdxT i = lane_id; i < len, i += WarpSize) {
queue.add(in[i], idx[i]);
}
queue.done();
// each warp outputs to a different offset
queue.store(out + warp_id * k, out_idx + warp_id * k);
}
*/
namespace raft::matrix::detail::select::warpsort {
static constexpr int kMaxCapacity = 256;
namespace {
/** Whether 'left` should indeed be on the left w.r.t. `right`. */
template <bool Ascending, typename T>
_RAFT_DEVICE _RAFT_FORCEINLINE auto is_ordered(T lhs, T rhs) -> bool
{
  // Direction-aware comparison: `<` when collecting the smallest elements,
  // `>` when collecting the greatest ones. Resolved at compile time.
  if constexpr (Ascending) {
    return lhs < rhs;
  } else {
    return lhs > rhs;
  }
}
} // namespace
/**
* A fixed-size warp-level priority queue.
* By feeding the data through this queue, you get the `k <= Capacity`
* smallest/greatest values in the data.
*
* @tparam Capacity
* maximum number of elements in the queue.
* @tparam Ascending
* which comparison to use: `true` means `<`, collect the smallest elements,
* `false` means `>`, collect the greatest elements.
* @tparam T
* the type of keys (what is being compared)
* @tparam IdxT
* the type of payload (normally, indices of elements), i.e.
* the content sorted alongside the keys.
*/
template <int Capacity, bool Ascending, typename T, typename IdxT>
class warp_sort {
  // The bitonic merge below relies on power-of-two lengths; IdxT must be
  // default-constructible because empty slots are filled with IdxT{}.
  static_assert(is_a_power_of_two(Capacity));
  static_assert(std::is_default_constructible_v<IdxT>);

 public:
  /**
   * The `empty` value for the chosen binary operation,
   * i.e. `Ascending ? upper_bound<T>() : lower_bound<T>()`.
   */
  static constexpr T kDummy = Ascending ? upper_bound<T>() : lower_bound<T>();
  /** Width of the subwarp. */
  static constexpr int kWarpWidth = std::min<int>(Capacity, WarpSize);
  /** The number of elements to select. */
  const int k;

  /** Extra memory required per-block for keeping the state (shared or global). */
  constexpr static auto mem_required(uint32_t block_size) -> size_t { return 0; }

  /**
   * Construct the warp_sort empty queue.
   *
   * @param k
   *   number of elements to select.
   */
  _RAFT_DEVICE warp_sort(int k) : k(k)
  {
    // Fill the per-thread state with "empty" sentinels so merges are no-ops
    // until real data arrives.
#pragma unroll
    for (int i = 0; i < kMaxArrLen; i++) {
      val_arr_[i] = kDummy;
      idx_arr_[i] = IdxT{};
    }
  }

  /**
   * Load k values from the pointers at the given position, and merge them in the storage.
   *
   * When it actually loads the values, it always performs some collective warp operations in the
   * end, thus enforcing warp sync. This means, it's safe to call `store` with the same arguments
   * after `load_sorted` without extra sync. Note, however, that this is not necessarily true for
   * the reverse order, because the access patterns of `store` and `load_sorted` are different.
   *
   * @param[in] in
   *    a device pointer to a contiguous array, unique per-subwarp
   *    (length: k <= kWarpWidth * kMaxArrLen).
   * @param[in] in_idx
   *    a device pointer to a contiguous array, unique per-subwarp
   *    (length: k <= kWarpWidth * kMaxArrLen).
   * @param[in] do_merge
   *    must be the same for all threads within a subwarp of size `kWarpWidth`.
   *    It serves as a conditional; when `false` the function does nothing.
   *    We need it to ensure threads within a full warp don't diverge calling `bitonic::merge()`.
   */
  _RAFT_DEVICE void load_sorted(const T* in, const IdxT* in_idx, bool do_merge = true)
  {
    if (do_merge) {
      // Read the input in reverse lane order (lane id XOR-ed with the subwarp mask),
      // so the loaded sequence is sorted in the direction opposite to val_arr_ —
      // the precondition for a single bitonic merge.
      int idx = Pow2<kWarpWidth>::mod(laneId()) ^ Pow2<kWarpWidth>::Mask;
#pragma unroll
      for (int i = kMaxArrLen - 1; i >= 0; --i, idx += kWarpWidth) {
        if (idx < k) {
          T t = in[idx];
          if (is_ordered<Ascending>(t, val_arr_[i])) {
            val_arr_[i] = t;
            idx_arr_[i] = in_idx[idx];
          }
        }
      }
    }
    // The merge must be entered by all lanes of a full warp together; `do_merge`
    // is warp-uniform by contract when kWarpWidth == WarpSize.
    if (kWarpWidth < WarpSize || do_merge) {
      util::bitonic<kMaxArrLen>(Ascending, kWarpWidth).merge(val_arr_, idx_arr_);
    }
  }

  /**
   *  Save the content by the pointer location.
   *
   * @param[out] out
   *   device pointer to a contiguous array, unique per-subwarp of size `kWarpWidth`
   *    (length: k <= kWarpWidth * kMaxArrLen).
   * @param[out] out_idx
   *   device pointer to a contiguous array, unique per-subwarp of size `kWarpWidth`
   *    (length: k <= kWarpWidth * kMaxArrLen).
   * @param valF (optional) postprocess values (T -> OutT)
   * @param idxF (optional) postprocess indices (IdxT -> OutIdxT)
   */
  template <typename OutT,
            typename OutIdxT,
            typename ValF = identity_op,
            typename IdxF = identity_op>
  _RAFT_DEVICE void store(OutT* out,
                          OutIdxT* out_idx,
                          ValF valF = raft::identity_op{},
                          IdxF idxF = raft::identity_op{}) const
  {
    // Strided store: element i of each lane lands at position lane + i * kWarpWidth,
    // writing only the first k positions.
    int idx = Pow2<kWarpWidth>::mod(laneId());
#pragma unroll kMaxArrLen
    for (int i = 0; i < kMaxArrLen && idx < k; i++, idx += kWarpWidth) {
      out[idx]     = valF(val_arr_[i]);
      out_idx[idx] = idxF(idx_arr_[i]);
    }
  }

 protected:
  // Number of queue slots kept in registers by each thread of the subwarp.
  static constexpr int kMaxArrLen = Capacity / kWarpWidth;

  T val_arr_[kMaxArrLen];
  IdxT idx_arr_[kMaxArrLen];

  /**
   * Merge another array (sorted in the opposite direction) in the queue.
   * Thanks to the other array being sorted in the opposite direction,
   * it's enough to call bitonic.merge once to maintain the valid state
   * of the queue.
   *
   * @tparam PerThreadSizeIn
   *   the size of the other array per-thread (compared to `kMaxArrLen`).
   *
   * @param keys_in
   *   the values to be merged in. Pointers are unique per-thread. The values
   *   must already be sorted in the opposite direction.
   *   The layout of `keys_in` must be the same as the layout of `val_arr_`.
   * @param ids_in
   *   the associated indices of the elements in the same format as `keys_in`.
   */
  template <int PerThreadSizeIn>
  _RAFT_DEVICE _RAFT_FORCEINLINE void merge_in(const T* __restrict__ keys_in,
                                               const IdxT* __restrict__ ids_in)
  {
    // Pairwise select the better element of the two opposite-sorted tails,
    // then restore the sorted invariant with a single bitonic merge.
#pragma unroll
    for (int i = std::min(kMaxArrLen, PerThreadSizeIn); i > 0; i--) {
      T& key  = val_arr_[kMaxArrLen - i];
      T other = keys_in[PerThreadSizeIn - i];
      if (is_ordered<Ascending>(other, key)) {
        key                      = other;
        idx_arr_[kMaxArrLen - i] = ids_in[PerThreadSizeIn - i];
      }
    }
    util::bitonic<kMaxArrLen>(Ascending, kWarpWidth).merge(val_arr_, idx_arr_);
  }
};
/**
* This version of warp_sort compares each input element against the current
* estimate of k-th value before adding it to the intermediate sorting buffer.
* This makes the algorithm do less sorting steps for long input sequences
* at the cost of extra checks on each step.
*
* This implementation is preferred for large len values.
*/
template <int Capacity, bool Ascending, typename T, typename IdxT>
class warp_sort_filtered : public warp_sort<Capacity, Ascending, T, IdxT> {
 public:
  using warp_sort<Capacity, Ascending, T, IdxT>::kDummy;
  using warp_sort<Capacity, Ascending, T, IdxT>::kWarpWidth;
  using warp_sort<Capacity, Ascending, T, IdxT>::k;
  using warp_sort<Capacity, Ascending, T, IdxT>::mem_required;

  // `limit` seeds the running estimate of the k-th value (k_th_); by default it is
  // the kDummy sentinel, i.e. everything passes the filter until the first merge.
  explicit _RAFT_DEVICE warp_sort_filtered(int k, T limit = kDummy)
    : warp_sort<Capacity, Ascending, T, IdxT>(k), buf_len_(0), k_th_(limit)
  {
#pragma unroll
    for (int i = 0; i < kMaxBufLen; i++) {
      val_buf_[i] = kDummy;
      idx_buf_[i] = IdxT{};
    }
  }

  // No shared memory needed: the (unused) uint8_t* keeps the signature uniform
  // across the warp_sort_* variants for block_sort's generic construction.
  _RAFT_DEVICE _RAFT_FORCEINLINE static auto init_blockwide(int k,
                                                            uint8_t* = nullptr,
                                                            T limit = kDummy)
  {
    return warp_sort_filtered<Capacity, Ascending, T, IdxT>{k, limit};
  }

  _RAFT_DEVICE void add(T val, IdxT idx)
  {
    // comparing for k_th should reduce the total amount of updates:
    // `false` means the input value is surely not in the top-k values.
    bool do_add = is_ordered<Ascending>(val, k_th_);
    // merge the buf if it's full and we cannot add an element anymore.
    // (`any` is a warp-collective vote, so all lanes flush together.)
    if (any(buf_len_ + do_add > kMaxBufLen)) {
      // still, add an element before merging if possible for this thread
      if (do_add && buf_len_ < kMaxBufLen) {
        add_to_buf_(val, idx);
        do_add = false;
      }
      merge_buf_();
    }
    // add an element if necessary and haven't already.
    if (do_add) { add_to_buf_(val, idx); }
  }

  _RAFT_DEVICE void done()
  {
    // Flush remaining buffered elements; the warp-wide vote keeps all lanes
    // converged when entering the collective merge.
    if (any(buf_len_ != 0)) { merge_buf_(); }
  }

 private:
  _RAFT_DEVICE _RAFT_FORCEINLINE void set_k_th_()
  {
    // NB on using srcLane: it's ok if it is outside the warp size / width;
    //     the modulo op will be done inside the __shfl_sync.
    k_th_ = shfl(val_arr_[kMaxArrLen - 1], k - 1, kWarpWidth);
  }

  _RAFT_DEVICE _RAFT_FORCEINLINE void merge_buf_()
  {
    // Sort the buffer in the opposite direction so a single merge_in suffices.
    util::bitonic<kMaxBufLen>(!Ascending, kWarpWidth).sort(val_buf_, idx_buf_);
    this->merge_in<kMaxBufLen>(val_buf_, idx_buf_);
    buf_len_ = 0;
    set_k_th_();  // contains warp sync
#pragma unroll
    for (int i = 0; i < kMaxBufLen; i++) {
      val_buf_[i] = kDummy;
    }
  }

  _RAFT_DEVICE _RAFT_FORCEINLINE void add_to_buf_(T val, IdxT idx)
  {
    // NB: the loop is used here to ensure the constant indexing,
    //     to not force the buffers spill into the local memory.
#pragma unroll
    for (int i = 0; i < kMaxBufLen; i++) {
      if (i == buf_len_) {
        val_buf_[i] = val;
        idx_buf_[i] = idx;
      }
    }
    buf_len_++;
  }

  using warp_sort<Capacity, Ascending, T, IdxT>::kMaxArrLen;
  using warp_sort<Capacity, Ascending, T, IdxT>::val_arr_;
  using warp_sort<Capacity, Ascending, T, IdxT>::idx_arr_;

  // Small per-thread staging buffer; flushed into the sorted queue when full.
  static constexpr int kMaxBufLen = (Capacity <= 64) ? 2 : 4;

  T val_buf_[kMaxBufLen];
  IdxT idx_buf_[kMaxBufLen];
  int buf_len_;

  // Current estimate of the k-th value; inputs failing is_ordered(val, k_th_)
  // are filtered out without touching the queue.
  T k_th_;
};
/**
* This version of warp_sort compares each input element against the current
* estimate of k-th value before adding it to the intermediate sorting buffer.
* In contrast to `warp_sort_filtered`, it keeps one distributed buffer for
* all threads in a warp (independently of the subwarp size), which makes its flushing less often.
*/
template <int Capacity, bool Ascending, typename T, typename IdxT>
class warp_sort_distributed : public warp_sort<Capacity, Ascending, T, IdxT> {
 public:
  using warp_sort<Capacity, Ascending, T, IdxT>::kDummy;
  using warp_sort<Capacity, Ascending, T, IdxT>::kWarpWidth;
  using warp_sort<Capacity, Ascending, T, IdxT>::k;
  using warp_sort<Capacity, Ascending, T, IdxT>::mem_required;

  // `limit` seeds k_th_, the running filter threshold (kDummy = accept everything).
  explicit _RAFT_DEVICE warp_sort_distributed(int k, T limit = kDummy)
    : warp_sort<Capacity, Ascending, T, IdxT>(k),
      buf_val_(kDummy),
      buf_idx_(IdxT{}),
      buf_len_(0),
      k_th_(limit)
  {
  }

  // No shared memory required; the unused uint8_t* keeps the signature uniform
  // with warp_sort_distributed_ext for generic block_sort construction.
  _RAFT_DEVICE _RAFT_FORCEINLINE static auto init_blockwide(int k,
                                                            uint8_t* = nullptr,
                                                            T limit = kDummy)
  {
    return warp_sort_distributed<Capacity, Ascending, T, IdxT>{k, limit};
  }

  _RAFT_DEVICE void add(T val, IdxT idx)
  {
    // mask tells which lanes in the warp have valid items to be added
    uint32_t mask = ballot(is_ordered<Ascending>(val, k_th_));
    if (mask == 0) { return; }
    // how many elements to be added
    uint32_t n_valid = __popc(mask);
    // index of the source lane containing the value to put into the current lane.
    uint32_t src_ix = 0;
    // remove a few smallest set bits from the mask.
    for (uint32_t i = std::min(n_valid, Pow2<WarpSize>::mod(uint32_t(laneId()) - buf_len_)); i > 0;
         i--) {
      src_ix = __ffs(mask) - 1;
      mask ^= (0x1u << src_ix);
    }
    // now the least significant bit of the mask corresponds to the lane id we want to get.
    // for not-added (invalid) indices, the mask is zeroed by now.
    src_ix = __ffs(mask) - 1;
    // rearrange the inputs to be ready to put them into the tmp buffer
    val = shfl(val, src_ix);
    idx = shfl(idx, src_ix);
    // for non-valid lanes, src_ix should be uint(-1)
    if (mask == 0) { val = kDummy; }
    // save the values into the free slots of the warp tmp buffer
    if (laneId() >= buf_len_) {
      buf_val_ = val;
      buf_idx_ = idx;
    }
    buf_len_ += n_valid;
    if (buf_len_ < WarpSize) { return; }
    // merge the warp tmp buffer into the queue
    merge_buf_();
    buf_len_ -= WarpSize;
    // save the inputs that couldn't fit before the merge
    if (laneId() < buf_len_) {
      buf_val_ = val;
      buf_idx_ = idx;
    }
  }

  _RAFT_DEVICE void done()
  {
    // Flush the partially-filled warp buffer; kDummy padding in unused lanes
    // makes the merge harmless for them.
    if (buf_len_ != 0) {
      merge_buf_();
      buf_len_ = 0;
    }
  }

 private:
  _RAFT_DEVICE _RAFT_FORCEINLINE void set_k_th_()
  {
    // NB on using srcLane: it's ok if it is outside the warp size / width;
    //     the modulo op will be done inside the __shfl_sync.
    k_th_ = shfl(val_arr_[kMaxArrLen - 1], k - 1, kWarpWidth);
  }

  _RAFT_DEVICE _RAFT_FORCEINLINE void merge_buf_()
  {
    // Sort the one-element-per-lane buffer opposite to the queue direction,
    // merge it in, refresh the k-th estimate, and reset the buffer sentinel.
    util::bitonic<1>(!Ascending, kWarpWidth).sort(buf_val_, buf_idx_);
    this->merge_in<1>(&buf_val_, &buf_idx_);
    set_k_th_();  // contains warp sync
    buf_val_ = kDummy;
  }

  using warp_sort<Capacity, Ascending, T, IdxT>::kMaxArrLen;
  using warp_sort<Capacity, Ascending, T, IdxT>::val_arr_;
  using warp_sort<Capacity, Ascending, T, IdxT>::idx_arr_;

  // One buffered element per lane; across the warp these registers form a
  // distributed temporary buffer of up to WarpSize elements.
  T buf_val_;
  IdxT buf_idx_;
  uint32_t buf_len_;  // 0 <= buf_len_ <= WarpSize

  // Current estimate of the k-th value used for pre-filtering inputs.
  T k_th_;
};
/**
* The same as `warp_sort_distributed`, but keeps the temporary value and index buffers
* in the given external pointers (normally, a shared memory pointer should be passed in).
*/
template <int Capacity, bool Ascending, typename T, typename IdxT>
class warp_sort_distributed_ext : public warp_sort<Capacity, Ascending, T, IdxT> {
 public:
  using warp_sort<Capacity, Ascending, T, IdxT>::kDummy;
  using warp_sort<Capacity, Ascending, T, IdxT>::kWarpWidth;
  using warp_sort<Capacity, Ascending, T, IdxT>::k;

  // Unlike the other variants, this one needs one value + one index slot per
  // thread of external (normally shared) memory for its temporary buffer.
  constexpr static auto mem_required(uint32_t block_size) -> size_t
  {
    return (sizeof(T) + sizeof(IdxT)) * block_size;
  }

  // val_buf / idx_buf: externally provided per-warp buffers (WarpSize entries each).
  _RAFT_DEVICE warp_sort_distributed_ext(int k, T* val_buf, IdxT* idx_buf, T limit = kDummy)
    : warp_sort<Capacity, Ascending, T, IdxT>(k),
      val_buf_(val_buf),
      idx_buf_(idx_buf),
      buf_len_(0),
      k_th_(limit)
  {
    val_buf_[laneId()] = kDummy;
  }

  _RAFT_DEVICE static auto init_blockwide(int k, uint8_t* shmem, T limit = kDummy)
  {
    // Carve the value and index arrays out of the raw shared-memory blob,
    // placing the more strictly aligned type first to avoid misalignment.
    T* val_buf    = nullptr;
    IdxT* idx_buf = nullptr;
    if constexpr (alignof(T) >= alignof(IdxT)) {
      val_buf = reinterpret_cast<T*>(shmem);
      idx_buf = reinterpret_cast<IdxT*>(val_buf + blockDim.x);
    } else {
      idx_buf = reinterpret_cast<IdxT*>(shmem);
      val_buf = reinterpret_cast<T*>(idx_buf + blockDim.x);
    }
    // Each warp uses its own WarpSize-sized slice of the block-wide buffers.
    auto warp_offset = Pow2<WarpSize>::roundDown(threadIdx.x);
    val_buf += warp_offset;
    idx_buf += warp_offset;
    return warp_sort_distributed_ext<Capacity, Ascending, T, IdxT>{k, val_buf, idx_buf, limit};
  }

  _RAFT_DEVICE void add(T val, IdxT idx)
  {
    bool do_add = is_ordered<Ascending>(val, k_th_);
    // mask tells which lanes in the warp have valid items to be added
    uint32_t mask = ballot(do_add);
    if (mask == 0) { return; }
    // where to put the element in the tmp buffer
    // (rank of this lane among the passing lanes, appended after buf_len_)
    int dst_ix = buf_len_ + __popc(mask & ((1u << laneId()) - 1u));
    // put all elements, which fit into the current tmp buffer
    if (do_add && dst_ix < WarpSize) {
      val_buf_[dst_ix] = val;
      idx_buf_[dst_ix] = idx;
      do_add           = false;
    }
    // Total number of elements to be added
    buf_len_ += __popc(mask);
    // If the buffer is still not full, we can return
    if (buf_len_ < WarpSize) { return; }
    // Otherwise, merge the warp tmp buffer into the queue
    merge_buf_();  // implies warp sync
    buf_len_ -= WarpSize;
    // save the inputs that couldn't fit before the merge
    if (do_add) {
      dst_ix -= WarpSize;
      val_buf_[dst_ix] = val;
      idx_buf_[dst_ix] = idx;
    }
  }

  _RAFT_DEVICE void done()
  {
    if (buf_len_ != 0) {
      merge_buf_();
      buf_len_ = 0;
    }
    // Block-wide barrier: the shared-memory buffers may be reused right after
    // this call (e.g. by block_sort's tree merge).
    __syncthreads();
  }

 private:
  _RAFT_DEVICE _RAFT_FORCEINLINE void set_k_th_()
  {
    // NB on using srcLane: it's ok if it is outside the warp size / width;
    //     the modulo op will be done inside the __shfl_sync.
    k_th_ = shfl(val_arr_[kMaxArrLen - 1], k - 1, kWarpWidth);
  }

  _RAFT_DEVICE _RAFT_FORCEINLINE void merge_buf_()
  {
    __syncwarp();  // make sure the threads are aware of the data written by others
    T buf_val          = val_buf_[laneId()];
    IdxT buf_idx       = idx_buf_[laneId()];
    val_buf_[laneId()] = kDummy;
    util::bitonic<1>(!Ascending, kWarpWidth).sort(buf_val, buf_idx);
    this->merge_in<1>(&buf_val, &buf_idx);
    set_k_th_();  // contains warp sync
  }

  using warp_sort<Capacity, Ascending, T, IdxT>::kMaxArrLen;
  using warp_sort<Capacity, Ascending, T, IdxT>::val_arr_;
  using warp_sort<Capacity, Ascending, T, IdxT>::idx_arr_;

  // External (shared-memory) per-warp buffers, one slot per lane.
  T* val_buf_;
  IdxT* idx_buf_;
  uint32_t buf_len_;  // 0 <= buf_len_ < WarpSize

  // Current estimate of the k-th value used for pre-filtering inputs.
  T k_th_;
};
/**
* This version of warp_sort adds every input element into the intermediate sorting
* buffer, and thus does the sorting step every `Capacity` input elements.
*
* This implementation is preferred for very small len values.
*/
template <int Capacity, bool Ascending, typename T, typename IdxT>
class warp_sort_immediate : public warp_sort<Capacity, Ascending, T, IdxT> {
 public:
  using warp_sort<Capacity, Ascending, T, IdxT>::kDummy;
  using warp_sort<Capacity, Ascending, T, IdxT>::kWarpWidth;
  using warp_sort<Capacity, Ascending, T, IdxT>::k;
  using warp_sort<Capacity, Ascending, T, IdxT>::mem_required;

  explicit _RAFT_DEVICE warp_sort_immediate(int k)
    : warp_sort<Capacity, Ascending, T, IdxT>(k), buf_len_(0)
  {
    // Initialize the staging buffer with sentinels so a partial flush in done()
    // is harmless.
#pragma unroll
    for (int i = 0; i < kMaxArrLen; i++) {
      val_buf_[i] = kDummy;
      idx_buf_[i] = IdxT{};
    }
  }

  // No shared memory needed; the unused uint8_t* keeps the signature uniform
  // across the warp_sort_* variants.
  _RAFT_DEVICE _RAFT_FORCEINLINE static auto init_blockwide(int k, uint8_t* = nullptr)
  {
    return warp_sort_immediate<Capacity, Ascending, T, IdxT>{k};
  }

  _RAFT_DEVICE void add(T val, IdxT idx)
  {
    // NB: the loop is used here to ensure the constant indexing,
    //     to not force the buffers spill into the local memory.
#pragma unroll
    for (int i = 0; i < kMaxArrLen; ++i) {
      if (i == buf_len_) {
        val_buf_[i] = val;
        idx_buf_[i] = idx;
      }
    }

    // Every input is buffered unconditionally; sort + merge once the buffer fills.
    ++buf_len_;
    if (buf_len_ == kMaxArrLen) {
      util::bitonic<kMaxArrLen>(!Ascending, kWarpWidth).sort(val_buf_, idx_buf_);
      this->merge_in<kMaxArrLen>(val_buf_, idx_buf_);
#pragma unroll
      for (int i = 0; i < kMaxArrLen; i++) {
        val_buf_[i] = kDummy;
      }
      buf_len_ = 0;
    }
  }

  _RAFT_DEVICE void done()
  {
    // Flush whatever remains in the buffer (sentinel-padded to full length).
    if (buf_len_ != 0) {
      util::bitonic<kMaxArrLen>(!Ascending, kWarpWidth).sort(val_buf_, idx_buf_);
      this->merge_in<kMaxArrLen>(val_buf_, idx_buf_);
    }
  }

 private:
  using warp_sort<Capacity, Ascending, T, IdxT>::kMaxArrLen;
  using warp_sort<Capacity, Ascending, T, IdxT>::val_arr_;
  using warp_sort<Capacity, Ascending, T, IdxT>::idx_arr_;

  // Per-thread staging buffer and its fill count (0 <= buf_len_ <= kMaxArrLen).
  T val_buf_[kMaxArrLen];
  IdxT idx_buf_[kMaxArrLen];
  int buf_len_;
};
/**
 * Size (bytes) of dynamic shared memory used by block_sort's tree merge:
 * at most half of the warps stage their k keys and k indices at a time;
 * the key region is padded up to a 256-byte boundary before the index region.
 */
template <typename T, typename IdxT>
auto calc_smem_size_for_block_wide(int num_of_warp, int k) -> int
{
  const auto storing_warps = ceildiv(num_of_warp, 2);
  const auto key_bytes     = Pow2<256>::roundUp(storing_warps * sizeof(T) * k);
  const auto idx_bytes     = storing_warps * sizeof(IdxT) * k;
  return key_bytes + idx_bytes;
}
// Block-wide top-k queue: each (sub)warp maintains its own WarpSortWarpWide queue
// over a disjoint part of the input; done() tree-merges them via shared memory.
template <template <int, bool, typename, typename> class WarpSortWarpWide,
          int Capacity,
          bool Ascending,
          typename T,
          typename IdxT>
class block_sort {
 public:
  using queue_t = WarpSortWarpWide<Capacity, Ascending, T, IdxT>;

  // Extra args (e.g. a shared-memory pointer) are forwarded to the queue's
  // init_blockwide factory.
  template <typename... Args>
  _RAFT_DEVICE block_sort(int k, Args... args) : queue_(queue_t::init_blockwide(k, args...))
  {
  }

  _RAFT_DEVICE void add(T val, IdxT idx) { queue_.add(val, idx); }

  /**
   * At the point of calling this function, the warp-level queues consumed all input
   * independently. The remaining work to be done is to merge them together.
   *
   * Here we tree-merge the results using the shared memory and block sync.
   */
  _RAFT_DEVICE void done(uint8_t* smem_buf)
  {
    queue_.done();

    int nwarps    = subwarp_align::div(blockDim.x);
    auto val_smem = reinterpret_cast<T*>(smem_buf);
    auto idx_smem = reinterpret_cast<IdxT*>(
      smem_buf + Pow2<256>::roundUp(ceildiv(nwarps, 2) * sizeof(T) * queue_.k));

    const int warp_id = subwarp_align::div(threadIdx.x);
    // NB: there is no need for the second __synchthreads between .load_sorted and .store:
    //     we shift the pointers every iteration, such that individual warps either access the same
    //     locations or do not overlap with any of the other warps. The access patterns within warps
    //     are different for the two functions, but .load_sorted implies warp sync at the end, so
    //     there is no need for __syncwarp either.
    // Each round halves the number of live warps: the upper half stores its queue
    // into shared memory, the lower half merges it in. `shift_mask` flips every
    // round, ping-ponging the shared-memory offsets between rounds.
    for (int shift_mask = ~0, split = (nwarps + 1) >> 1; nwarps > 1;
         nwarps = split, split = (nwarps + 1) >> 1) {
      if (warp_id < nwarps && warp_id >= split) {
        int dst_warp_shift = (warp_id - (split & shift_mask)) * queue_.k;
        queue_.store(val_smem + dst_warp_shift, idx_smem + dst_warp_shift);
      }
      __syncthreads();

      shift_mask = ~shift_mask;  // invert the mask
      {
        int src_warp_shift = (warp_id + (split & shift_mask)) * queue_.k;
        // The last argument serves as a condition for loading
        //  -- to make sure threads within a full warp do not diverge on `bitonic::merge()`
        queue_.load_sorted(
          val_smem + src_warp_shift, idx_smem + src_warp_shift, warp_id < nwarps - split);
      }
    }
  }

  /** Save the content by the pointer location. */
  template <typename OutT,
            typename OutIdxT,
            typename ValF = identity_op,
            typename IdxF = identity_op>
  _RAFT_DEVICE void store(OutT* out,
                          OutIdxT* out_idx,
                          ValF valF = raft::identity_op{},
                          IdxF idxF = raft::identity_op{}) const
  {
    // After done(), the merged result lives in warp 0's queue; only its threads write.
    if (threadIdx.x < subwarp_align::Value) { queue_.store(out, out_idx, valF, idxF); }
  }

 private:
  using subwarp_align = Pow2<queue_t::kWarpWidth>;
  queue_t queue_;
};
/**
* Uses the `WarpSortClass` to sort chunks of data within one block with no interblock
* communication. It can be arranged so, that multiple blocks process one row of input; in this
* case, they output multiple results of length k each. Then, a second pass is needed to merge
* those into one final output.
*/
// One block per (grid-x chunk, batch row); blockIdx.y selects the input row.
// Each block writes its own k results, so with gridDim.x > 1 a second merge
// pass over the per-block outputs is required (see the struct docstring above).
template <template <int, bool, typename, typename> class WarpSortClass,
          int Capacity,
          bool Ascending,
          typename T,
          typename IdxT>
__launch_bounds__(256) RAFT_KERNEL
  block_kernel(const T* in, const IdxT* in_idx, IdxT len, int k, T* out, IdxT* out_idx)
{
  extern __shared__ __align__(256) uint8_t smem_buf_bytes[];
  using bq_t = block_sort<WarpSortClass, Capacity, Ascending, T, IdxT>;
  // Pass the shared buffer to the queue only for variants that need per-warp
  // scratch (mem_required > 0); the same buffer is later reused by done().
  uint8_t* warp_smem = bq_t::queue_t::mem_required(blockDim.x) > 0 ? smem_buf_bytes : nullptr;
  bq_t queue(k, warp_smem);
  in += blockIdx.y * len;
  if (in_idx != nullptr) { in_idx += blockIdx.y * len; }
  const IdxT stride = gridDim.x * blockDim.x;
  // Extend the loop bound by laneId() so all lanes of a warp execute the same
  // number of iterations; out-of-range lanes feed kDummy sentinels, keeping the
  // warp-collective `add` calls convergent.
  const IdxT per_thread_lim = len + laneId();
  for (IdxT i = threadIdx.x + blockIdx.x * blockDim.x; i < per_thread_lim; i += stride) {
    queue.add(i < len ? __ldcs(in + i) : WarpSortClass<Capacity, Ascending, T, IdxT>::kDummy,
              (i < len && in_idx != nullptr) ? __ldcs(in_idx + i) : i);
  }
  queue.done(smem_buf_bytes);
  const int block_id = blockIdx.x + gridDim.x * blockIdx.y;
  queue.store(out + block_id * k, out_idx + block_id * k);
}
// Host-side helper: picks launch parameters and launches block_kernel for the
// smallest power-of-two Capacity that still fits the requested k (recursing
// down from kMaxCapacity so the kernel template does not over-allocate registers).
template <template <int, bool, typename, typename> class WarpSortClass,
          typename T,
          typename IdxT,
          int Capacity = kMaxCapacity>
struct launch_setup {
  /**
   * @brief Calculate the best block size and minimum grid size for the given `k`.
   *
   * @param[in] k
   *   The select-top-k parameter
   * @param[out] block_size
   *   Returned block size
   * @param[out] min_grid_size
   *   Returned minimum grid size needed to achieve the best potential occupancy
   * @param[in] block_size_limit
   *   Forcefully limit the block size (optional)
   */
  static void calc_optimal_params(int k,
                                  int* block_size,
                                  int* min_grid_size,
                                  int block_size_limit = 0)
  {
    const int capacity = bound_by_power_of_two(k);
    if constexpr (Capacity > 1) {
      // Delegate to the smaller instantiation when k fits in half the capacity.
      if (capacity < Capacity) {
        return launch_setup<WarpSortClass, T, IdxT, Capacity / 2>::calc_optimal_params(
          capacity, block_size, min_grid_size, block_size_limit);
      }
    }
    ASSERT(capacity <= Capacity, "Requested k is too big (%d)", k);
    // Shared-memory usage depends on the block size, so use the variable-smem
    // occupancy calculator with a per-block-size callback.
    auto calc_smem = [k](int block_size) {
      int num_of_warp = block_size / std::min<int>(WarpSize, Capacity);
      return calc_smem_size_for_block_wide<T, IdxT>(num_of_warp, k);
    };
    RAFT_CUDA_TRY(cudaOccupancyMaxPotentialBlockSizeVariableSMem(
      min_grid_size,
      block_size,
      block_kernel<WarpSortClass, Capacity, true, T, IdxT>,
      calc_smem,
      block_size_limit));
  }

  // Launch block_kernel over the whole batch; `select_min` picks the Ascending
  // template variant at runtime.
  static void kernel(int k,
                     bool select_min,
                     size_t batch_size,
                     size_t len,
                     int num_blocks,
                     int block_dim,
                     int smem_size,
                     const T* in_key,
                     const IdxT* in_idx,
                     T* out_key,
                     IdxT* out_idx,
                     rmm::cuda_stream_view stream)
  {
    const int capacity = bound_by_power_of_two(k);
    if constexpr (Capacity > 1) {
      // Delegate to the smaller instantiation when k fits in half the capacity.
      if (capacity < Capacity) {
        return launch_setup<WarpSortClass, T, IdxT, Capacity / 2>::kernel(k,
                                                                          select_min,
                                                                          batch_size,
                                                                          len,
                                                                          num_blocks,
                                                                          block_dim,
                                                                          smem_size,
                                                                          in_key,
                                                                          in_idx,
                                                                          out_key,
                                                                          out_idx,
                                                                          stream);
      }
    }
    ASSERT(capacity <= Capacity, "Requested k is too big (%d)", k);

    // This is less than cuda's max block dim along Y axis (65535), but it's a
    // power-of-two, which ensures the alignment of batches in memory.
    constexpr size_t kMaxGridDimY = 32768;
    // Process the batch in grid-Y-sized chunks, advancing all pointers between launches.
    for (size_t offset = 0; offset < batch_size; offset += kMaxGridDimY) {
      size_t batch_chunk = std::min<size_t>(kMaxGridDimY, batch_size - offset);
      dim3 gs(num_blocks, batch_chunk, 1);
      if (select_min) {
        block_kernel<WarpSortClass, Capacity, true, T, IdxT>
          <<<gs, block_dim, smem_size, stream>>>(in_key, in_idx, IdxT(len), k, out_key, out_idx);
      } else {
        block_kernel<WarpSortClass, Capacity, false, T, IdxT>
          <<<gs, block_dim, smem_size, stream>>>(in_key, in_idx, IdxT(len), k, out_key, out_idx);
      }
      RAFT_CUDA_TRY(cudaPeekAtLastError());
      out_key += batch_chunk * num_blocks * k;
      out_idx += batch_chunk * num_blocks * k;
      in_key += batch_chunk * len;
      if (in_idx != nullptr) { in_idx += batch_chunk * len; }
    }
  }
};
// Per-implementation tuning constants: how many times longer than the queue
// capacity the per-warp workload should be before a given launch strategy
// (multi-block vs single-block) pays off.
template <template <int, bool, typename, typename> class WarpSortClass>
struct LaunchThreshold {};

template <>
struct LaunchThreshold<warp_sort_filtered> {
  static constexpr int len_factor_for_multi_block  = 2;
  static constexpr int len_factor_for_single_block = 32;
};

template <>
struct LaunchThreshold<warp_sort_distributed> {
  static constexpr int len_factor_for_multi_block  = 2;
  static constexpr int len_factor_for_single_block = 32;
};

template <>
struct LaunchThreshold<warp_sort_distributed_ext> {
  static constexpr int len_factor_for_multi_block  = 2;
  static constexpr int len_factor_for_single_block = 32;
};

template <>
struct LaunchThreshold<warp_sort_immediate> {
  // Extra factor used by select_k when choosing between 'immediate' and 'filtered'.
  static constexpr int len_factor_for_choosing     = 4;
  static constexpr int len_factor_for_multi_block  = 2;
  static constexpr int len_factor_for_single_block = 4;
};
// Choose the number of blocks per batch row and (logical) warps per block for
// the given problem shape, balancing occupancy against per-warp workload.
template <template <int, bool, typename, typename> class WarpSortClass, typename T, typename IdxT>
void calc_launch_parameter(
  size_t batch_size, size_t len, int k, int* p_num_of_block, int* p_num_of_warp)
{
  const int capacity               = bound_by_power_of_two(k);
  const int capacity_per_full_warp = std::max(capacity, WarpSize);
  int block_size                   = 0;
  int min_grid_size                = 0;
  launch_setup<WarpSortClass, T, IdxT>::calc_optimal_params(k, &block_size, &min_grid_size);
  block_size = Pow2<WarpSize>::roundDown(block_size);

  int num_of_warp;
  int num_of_block;
  if (batch_size < size_t(min_grid_size)) {  // may use multiple blocks
    num_of_warp  = block_size / WarpSize;
    num_of_block = min_grid_size / int(batch_size);
    // Split the row across blocks/warps in WarpSize-aligned chunks.
    int len_per_block = int(ceildiv<size_t>(len, num_of_block));
    int len_per_warp  = ceildiv(len_per_block, num_of_warp);

    len_per_warp  = Pow2<WarpSize>::roundUp(len_per_warp);
    len_per_block = len_per_warp * num_of_warp;
    num_of_block  = int(ceildiv<size_t>(len, len_per_block));

    // Make sure each warp has enough work to amortize the queue-maintenance cost.
    constexpr int kLenFactor = LaunchThreshold<WarpSortClass>::len_factor_for_multi_block;
    if (len_per_warp < capacity_per_full_warp * kLenFactor) {
      len_per_warp  = capacity_per_full_warp * kLenFactor;
      len_per_block = num_of_warp * len_per_warp;
      if (size_t(len_per_block) > len) { len_per_block = len; }
      num_of_block = int(ceildiv<size_t>(len, len_per_block));
      num_of_warp  = ceildiv(len_per_block, len_per_warp);
    }
  } else {  // use only single block
    num_of_block = 1;

    // Shrink a candidate block size so that no warp is left with less than the
    // threshold amount of work.
    auto adjust_block_size = [len, capacity_per_full_warp](int bs) {
      int warps_per_block = bs / WarpSize;
      int len_per_warp    = int(ceildiv<size_t>(len, warps_per_block));
      len_per_warp        = Pow2<WarpSize>::roundUp(len_per_warp);
      warps_per_block     = int(ceildiv<size_t>(len, len_per_warp));

      constexpr int kLenFactor = LaunchThreshold<WarpSortClass>::len_factor_for_single_block;
      if (len_per_warp < capacity_per_full_warp * kLenFactor) {
        len_per_warp    = capacity_per_full_warp * kLenFactor;
        warps_per_block = int(ceildiv<size_t>(len, len_per_warp));
      }

      return warps_per_block * WarpSize;
    };

    // gradually reduce the block size while the batch size allows and the len is not big enough
    // to occupy a single block well.
    block_size = adjust_block_size(block_size);
    do {
      num_of_warp               = block_size / WarpSize;
      int another_block_size    = 0;
      int another_min_grid_size = 0;
      launch_setup<WarpSortClass, T, IdxT>::calc_optimal_params(
        k, &another_block_size, &another_min_grid_size, block_size);
      another_block_size = adjust_block_size(another_block_size);
      if (batch_size >= size_t(another_min_grid_size)  // still have enough work
          && another_block_size < block_size           // protect against an infinite loop
          && another_min_grid_size * another_block_size >
               min_grid_size * block_size  // improve occupancy
      ) {
        block_size    = another_block_size;
        min_grid_size = another_min_grid_size;
      } else {
        break;
      }
    } while (block_size > WarpSize);

    num_of_warp = std::max(1, num_of_warp);
  }
  *p_num_of_block = num_of_block;
  // Sub-warp queues (capacity < WarpSize) pack several logical warps into one
  // physical warp, hence the scaling of the reported warp count.
  *p_num_of_warp = num_of_warp * capacity_per_full_warp / capacity;
}
// Run the warp-sort selection: a first pass selects per-block top-k candidates
// and, when more than one block per row was used, a second single-block pass
// merges the candidates into the final result.
template <template <int, bool, typename, typename> class WarpSortClass, typename T, typename IdxT>
void select_k_(int num_of_block,
               int num_of_warp,
               const T* in,
               const IdxT* in_idx,
               size_t batch_size,
               size_t len,
               int k,
               T* out,
               IdxT* out_idx,
               bool select_min,
               rmm::cuda_stream_view stream,
               rmm::mr::device_memory_resource* mr = nullptr)
{
  // Temporary buffers hold the per-block candidates between the two passes.
  auto pool_guard = raft::get_pool_memory_resource(
    mr, num_of_block * k * batch_size * 2 * std::max(sizeof(T), sizeof(IdxT)));
  if (pool_guard) { RAFT_LOG_DEBUG("warpsort::select_k: using pool memory resource"); }

  rmm::device_uvector<T> tmp_val(num_of_block * k * batch_size, stream, mr);
  rmm::device_uvector<IdxT> tmp_idx(num_of_block * k * batch_size, stream, mr);

  int capacity   = bound_by_power_of_two(k);
  int warp_width = std::min(capacity, WarpSize);

  // With a single block per row, the first pass already writes the final output.
  T* result_val    = (num_of_block == 1) ? out : tmp_val.data();
  IdxT* result_idx = (num_of_block == 1) ? out_idx : tmp_idx.data();
  int block_dim    = num_of_warp * warp_width;
  int smem_size    = calc_smem_size_for_block_wide<T, IdxT>(num_of_warp, k);
  smem_size = std::max<int>(smem_size, WarpSortClass<1, true, T, IdxT>::mem_required(block_dim));

  launch_setup<WarpSortClass, T, IdxT>::kernel(k,
                                               select_min,
                                               batch_size,
                                               len,
                                               num_of_block,
                                               block_dim,
                                               smem_size,
                                               in,
                                               in_idx,
                                               result_val,
                                               result_idx,
                                               stream);

  if (num_of_block > 1) {
    // a second pass to merge the results if necessary
    // (input row length is now k * num_of_block candidates, one block per row)
    launch_setup<WarpSortClass, T, IdxT>::kernel(k,
                                                 select_min,
                                                 batch_size,
                                                 k * num_of_block,
                                                 1,
                                                 block_dim,
                                                 smem_size,
                                                 tmp_val.data(),
                                                 tmp_idx.data(),
                                                 out,
                                                 out_idx,
                                                 stream);
  }
}
// Convenience entry point for a fixed WarpSortClass: derives the launch
// configuration and forwards everything to select_k_.
template <typename T, typename IdxT, template <int, bool, typename, typename> class WarpSortClass>
void select_k_impl(const T* in,
                   const IdxT* in_idx,
                   size_t batch_size,
                   size_t len,
                   int k,
                   T* out,
                   IdxT* out_idx,
                   bool select_min,
                   rmm::cuda_stream_view stream,
                   rmm::mr::device_memory_resource* mr = nullptr)
{
  int n_blocks = 0;
  int n_warps  = 0;
  calc_launch_parameter<WarpSortClass, T, IdxT>(batch_size, len, k, &n_blocks, &n_warps);
  select_k_<WarpSortClass, T, IdxT>(
    n_blocks, n_warps, in, in_idx, batch_size, len, k, out, out_idx, select_min, stream, mr);
}
/**
* Select k smallest or largest key/values from each row in the input data.
*
* If you think of the input data `in_keys` as a row-major matrix with len columns and
* batch_size rows, then this function selects k smallest/largest values in each row and fills
* in the row-major matrix `out` of size (batch_size, k).
*
* @tparam T
* the type of the keys (what is being compared).
* @tparam IdxT
* the index type (what is being selected together with the keys).
*
* @param[in] in
* contiguous device array of inputs of size (len * batch_size);
* these are compared and selected.
* @param[in] in_idx
* contiguous device array of inputs of size (len * batch_size);
* typically, these are indices of the corresponding in_keys.
* @param batch_size
* number of input rows, i.e. the batch size.
* @param len
* length of a single input array (row); also sometimes referred as n_cols.
* Invariant: len >= k.
* @param k
* the number of outputs to select in each input row.
* @param[out] out
* contiguous device array of outputs of size (k * batch_size);
* the k smallest/largest values from each row of the `in_keys`.
* @param[out] out_idx
* contiguous device array of outputs of size (k * batch_size);
* the payload selected together with `out`.
* @param select_min
* whether to select k smallest (true) or largest (false) keys.
* @param stream
* @param mr an optional memory resource to use across the calls (you can provide a large enough
* memory pool here to avoid memory allocations within the call).
*/
template <typename T, typename IdxT>
void select_k(const T* in,
              const IdxT* in_idx,
              size_t batch_size,
              size_t len,
              int k,
              T* out,
              IdxT* out_idx,
              bool select_min,
              rmm::cuda_stream_view stream,
              rmm::mr::device_memory_resource* mr = nullptr)
{
  ASSERT(k <= kMaxCapacity, "Current max k is %d (requested %d)", kMaxCapacity, k);
  ASSERT(len <= size_t(std::numeric_limits<IdxT>::max()),
         "The `len` (%zu) does not fit the indexing type",
         len);

  int capacity     = bound_by_power_of_two(k);
  int num_of_block = 0;
  int num_of_warp  = 0;
  calc_launch_parameter<warp_sort_immediate, T, IdxT>(
    batch_size, len, k, &num_of_block, &num_of_warp);
  // Average number of elements each thread would process with warp_sort_immediate:
  // short per-thread workloads favor the 'immediate' variant, long ones 'filtered'.
  int len_per_thread = len / (num_of_block * num_of_warp * std::min(capacity, WarpSize));

  if (len_per_thread <= LaunchThreshold<warp_sort_immediate>::len_factor_for_choosing) {
    select_k_<warp_sort_immediate, T, IdxT>(num_of_block,
                                            num_of_warp,
                                            in,
                                            in_idx,
                                            batch_size,
                                            len,
                                            k,
                                            out,
                                            out_idx,
                                            select_min,
                                            stream,
                                            mr);
  } else {
    // Re-derive launch parameters for the filtered variant (its thresholds differ).
    calc_launch_parameter<warp_sort_filtered, T, IdxT>(
      batch_size, len, k, &num_of_block, &num_of_warp);
    select_k_<warp_sort_filtered, T, IdxT>(num_of_block,
                                           num_of_warp,
                                           in,
                                           in_idx,
                                           batch_size,
                                           len,
                                           k,
                                           out,
                                           out_idx,
                                           select_min,
                                           stream,
                                           mr);
  }
}
} // namespace raft::matrix::detail::select::warpsort
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/linalg/map.cuh>
#include <raft/util/cuda_dev_essentials.cuh>
#include <raft/util/fast_int_div.cuh>
#include <thrust/iterator/counting_iterator.h>
namespace raft {
namespace matrix {
namespace detail {
/**
* @brief In-place scatter elements in a row-major matrix according to a
* map. The length of the map is equal to the number of rows. The
* map specifies the destination index for each row, i.e. in the
* resulting matrix, row map[i] is assigned to row i. For example,
* the matrix [[1, 2, 3], [4, 5, 6], [7, 8, 9]] with the map [2, 0, 1] will
* be transformed to [[4, 5, 6], [7, 8, 9], [1, 2, 3]]. Batching is done on
* columns and an additional scratch space of shape n_rows * cols_batch_size
* is created. For each batch, chunks of columns from each row are copied
* into the appropriate location in the scratch space and copied back to
* the corresponding locations in the input matrix.
*
 * @tparam MatrixT matrix element type
 * @tparam IndexT integer type used for indexing
 *
 * @param[in] handle raft handle
 * @param[inout] inout input matrix (n_rows * n_cols), permuted in place
 * @param[in] map map containing the destination index for each row (n_rows)
 * @param[in] batch_size column batch size
*/
/**
 * @brief Implementation of in-place row scatter: row i of `inout` is moved to
 * row map[i]. Columns are processed in batches of `batch_size` using an
 * m x batch_size scratch buffer: each batch of columns is first staged
 * row-contiguously into scratch, then written back to the mapped row positions.
 *
 * @tparam MatrixT matrix element type
 * @tparam IndexT integer type used for indexing
 *
 * @param[in] handle raft handle
 * @param[inout] inout row-major matrix (m * n), permuted in place
 * @param[in] map destination row index for each row (length m)
 * @param[in] batch_size number of columns per batch; 0 (or > n) selects all columns
 */
template <typename MatrixT, typename IndexT>
void scatterInplaceImpl(
  raft::resources const& handle,
  raft::device_matrix_view<MatrixT, IndexT, raft::layout_c_contiguous> inout,
  raft::device_vector_view<const IndexT, IndexT, raft::layout_c_contiguous> map,
  IndexT batch_size)
{
  IndexT m          = inout.extent(0);
  IndexT n          = inout.extent(1);
  IndexT map_length = map.extent(0);

  // Skip empty inputs; a negative batch_size is also silently ignored here
  // (kept as-is: the original RAFT_EXPECTS(batch_size >= 0) after this return
  // was unreachable and has been removed).
  if (map_length <= 0 || m <= 0 || n <= 0 || batch_size < 0) return;

  RAFT_EXPECTS(map_length == m,
               "Length of map should be equal to number of rows for inplace scatter");

  // re-assign batch_size for default case
  if (batch_size == 0 || batch_size > n) batch_size = n;

  auto exec_policy = resource::get_thrust_policy(handle);

  IndexT n_batches = raft::ceildiv(n, batch_size);

  auto scratch_space = raft::make_device_vector<MatrixT, IndexT>(handle, m * batch_size);

  for (IndexT bid = 0; bid < n_batches; bid++) {
    IndexT batch_offset   = bid * batch_size;
    IndexT cols_per_batch = min(batch_size, n - batch_offset);

    // Stage this batch of columns into scratch (row i of scratch is the
    // batch-slice of row i of inout). The map is not needed for this step;
    // the unused `map` capture of the original lambda has been dropped.
    auto copy_op = [inout = inout.data_handle(),
                    batch_offset,
                    cols_per_batch = raft::util::FastIntDiv(cols_per_batch),
                    n] __device__(auto idx) {
      IndexT row = idx / cols_per_batch;
      IndexT col = idx % cols_per_batch;
      return inout[row * n + batch_offset + col];
    };
    raft::linalg::map_offset(
      handle,
      raft::make_device_vector_view(scratch_space.data_handle(), m * cols_per_batch),
      copy_op);

    // Write each staged row back to its destination row map[row].
    auto scatter_op = [inout          = inout.data_handle(),
                       map            = map.data_handle(),
                       scratch_space  = scratch_space.data_handle(),
                       batch_offset,
                       cols_per_batch = raft::util::FastIntDiv(cols_per_batch),
                       n] __device__(auto idx) {
      IndexT row     = idx / cols_per_batch;
      IndexT col     = idx % cols_per_batch;
      IndexT map_val = map[row];
      inout[map_val * n + batch_offset + col] = scratch_space[idx];
      return;
    };
    auto counting = thrust::make_counting_iterator<IndexT>(0);
    thrust::for_each(exec_policy, counting, counting + m * cols_per_batch, scatter_op);
  }
}
// Public in-place scatter entry point; all validation and column batching
// happens in scatterInplaceImpl.
template <typename MatrixT, typename IndexT>
void scatter(raft::resources const& handle,
             raft::device_matrix_view<MatrixT, IndexT, raft::layout_c_contiguous> inout,
             raft::device_vector_view<const IndexT, IndexT, raft::layout_c_contiguous> map,
             IndexT batch_size)
{
  scatterInplaceImpl<MatrixT, IndexT>(handle, inout, map, batch_size);
}
} // end namespace detail
} // end namespace matrix
} // end namespace raft
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resources.hpp>
#include <cub/cub.cuh>
#include <raft/core/operators.hpp>
#include <raft/linalg/binary_op.cuh>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace matrix {
namespace detail {
// out[i] = scalar * in[i] * in[i]
template <typename math_t>
void power(math_t* in, math_t* out, math_t scalar, int len, cudaStream_t stream)
{
  auto scaled_square = [=] __device__(math_t a, math_t b) { return scalar * a * b; };
  raft::linalg::binaryOp(out, in, in, len, scaled_square, stream);
}

// In-place variant of the scaled element-wise square.
template <typename math_t>
void power(math_t* inout, math_t scalar, int len, cudaStream_t stream)
{
  power(inout, inout, scalar, len, stream);
}

// In-place element-wise square (unit scale).
template <typename math_t>
void power(math_t* inout, int len, cudaStream_t stream)
{
  const math_t unit = 1.0;
  power(inout, unit, len, stream);
}

// Out-of-place element-wise square (unit scale).
template <typename math_t>
void power(math_t* in, math_t* out, int len, cudaStream_t stream)
{
  const math_t unit = 1.0;
  power(in, out, unit, len, stream);
}
// out[i] = sqrt(in[i] * scalar); with set_neg_zero, negative inputs map to 0
// instead of producing NaN.
template <typename math_t, typename IdxType = int>
void seqRoot(math_t* in,
             math_t* out,
             math_t scalar,
             IdxType len,
             cudaStream_t stream,
             bool set_neg_zero = false)
{
  raft::linalg::unaryOp(
    out,
    in,
    len,
    [=] __device__(math_t a) {
      return (set_neg_zero && a < math_t(0)) ? math_t(0) : raft::sqrt(a * scalar);
    },
    stream);
}

// In-place variant.
template <typename math_t, typename IdxType = int>
void seqRoot(
  math_t* inout, math_t scalar, IdxType len, cudaStream_t stream, bool set_neg_zero = false)
{
  seqRoot(inout, inout, scalar, len, stream, set_neg_zero);
}

// Out-of-place unit-scale square root.
template <typename math_t, typename IdxType = int>
void seqRoot(math_t* in, math_t* out, IdxType len, cudaStream_t stream)
{
  const math_t unit = 1.0;
  seqRoot(in, out, unit, len, stream);
}

// In-place unit-scale square root.
template <typename math_t, typename IdxType = int>
void seqRoot(math_t* inout, IdxType len, cudaStream_t stream)
{
  const math_t unit = 1.0;
  seqRoot(inout, inout, unit, len, stream);
}
// Zero out entries whose value lies within [-thres, thres]; everything else is
// copied through unchanged (NaNs fail both comparisons and are preserved).
template <typename math_t, typename IdxType = int>
void setSmallValuesZero(
  math_t* out, const math_t* in, IdxType len, cudaStream_t stream, math_t thres = 1e-15)
{
  auto zero_small = [=] __device__(math_t a) {
    return (a <= thres && -a <= thres) ? math_t(0) : a;
  };
  raft::linalg::unaryOp(out, in, len, zero_small, stream);
}

// In-place variant.
template <typename math_t, typename IdxType = int>
void setSmallValuesZero(math_t* inout, IdxType len, cudaStream_t stream, math_t thres = 1e-15)
{
  setSmallValuesZero(inout, inout, len, stream, thres);
}
/**
 * @brief Element-wise scaled reciprocal: out[i] = scalar / in[i].
 *
 * @param[in] in input array
 * @param[out] out output array
 * @param[in] scalar numerator
 * @param[in] len number of elements
 * @param[in] stream CUDA stream
 * @param[in] setzero if true, entries with |in[i]| <= thres yield 0 instead of dividing
 * @param[in] thres magnitude threshold used when setzero is true
 */
template <typename math_t, typename IdxType = int>
void reciprocal(const math_t* in,
                math_t* out,
                math_t scalar,
                IdxType len,  // fix: was `int len`, narrowing lengths forwarded from the IdxType overloads
                cudaStream_t stream,
                bool setzero = false,
                math_t thres = 1e-15)
{
  raft::linalg::unaryOp(
    out,
    in,
    len,
    [=] __device__(math_t a) { return setzero && (abs(a) <= thres) ? math_t{0} : scalar / a; },
    stream);
}

/** @brief In-place variant of the scaled reciprocal. */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* inout,
                math_t scalar,
                IdxType len,
                cudaStream_t stream,
                bool setzero = false,
                math_t thres = 1e-15)
{
  reciprocal(inout, inout, scalar, len, stream, setzero, thres);
}

/** @brief In-place reciprocal with unit numerator. */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* inout, IdxType len, cudaStream_t stream)
{
  math_t scalar = 1.0;
  reciprocal(inout, scalar, len, stream);
}

/** @brief Out-of-place reciprocal with unit numerator. */
template <typename math_t, typename IdxType = int>
void reciprocal(math_t* in, math_t* out, IdxType len, cudaStream_t stream)
{
  math_t scalar = 1.0;
  reciprocal(in, out, scalar, len, stream);
}
// Fill `out` with `scalar`; the values read from `in` are discarded by
// raft::const_op, which returns `scalar` for every element.
template <typename math_t>
void setValue(math_t* out, const math_t* in, math_t scalar, int len, cudaStream_t stream = 0)
{
  auto fill_op = raft::const_op(scalar);
  raft::linalg::unaryOp(out, in, len, fill_op, stream);
}
// dest[i] = src[i] / sum(src). The sum stays on the device: the division
// kernel dereferences the device-side pointer, so no host synchronization is
// needed between the two launches (both are ordered on `stream`).
// NOTE(review): `handle` is unused in this body — presumably kept for API
// symmetry with the other detail functions; confirm before removing.
// NOTE(review): correctness of the async read of *d_sum_ptr after this
// function returns relies on rmm's stream-ordered deallocation of d_sum —
// confirm against the rmm::device_scalar documentation.
template <typename math_t, typename IdxType = int>
void ratio(
  raft::resources const& handle, math_t* src, math_t* dest, IdxType len, cudaStream_t stream)
{
  auto d_src  = src;
  auto d_dest = dest;

  rmm::device_scalar<math_t> d_sum(stream);
  auto* d_sum_ptr = d_sum.data();
  // Reduce src into the device scalar, then divide every element by it.
  raft::linalg::mapThenSumReduce(d_sum_ptr, len, raft::identity_op{}, stream, src);
  raft::linalg::unaryOp(
    d_dest, d_src, len, [=] __device__(math_t a) { return a / (*d_sum_ptr); }, stream);
}
// In-place broadcast multiply: data[i] *= vec[...] along rows or columns.
// All helpers below forward to raft::linalg::matrixVectorOp with `data` as
// both input and output.
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryMult(Type* data,
                            const Type* vec,
                            IdxType n_row,
                            IdxType n_col,
                            bool rowMajor,
                            bool bcastAlongRows,
                            cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    data, data, vec, n_col, n_row, rowMajor, bcastAlongRows, raft::mul_op(), stream);
}

// Like matrixVectorBinaryMult, but elements are left unchanged where the
// corresponding vector entry is exactly 0.
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryMultSkipZero(Type* data,
                                    const Type* vec,
                                    IdxType n_row,
                                    IdxType n_col,
                                    bool rowMajor,
                                    bool bcastAlongRows,
                                    cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    data,
    data,
    vec,
    n_col,
    n_row,
    rowMajor,
    bcastAlongRows,
    [] __device__(Type a, Type b) {
      if (b == Type(0))
        return a;
      else
        return a * b;
    },
    stream);
}

// In-place broadcast divide: data[i] /= vec[...].
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryDiv(Type* data,
                           const Type* vec,
                           IdxType n_row,
                           IdxType n_col,
                           bool rowMajor,
                           bool bcastAlongRows,
                           cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    data, data, vec, n_col, n_row, rowMajor, bcastAlongRows, raft::div_op(), stream);
}

// In-place broadcast divide guarding against tiny divisors (|b| < 1e-10):
// such elements become 0 when return_zero is true, or are left unchanged
// when return_zero is false.
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryDivSkipZero(Type* data,
                                   const Type* vec,
                                   IdxType n_row,
                                   IdxType n_col,
                                   bool rowMajor,
                                   bool bcastAlongRows,
                                   cudaStream_t stream,
                                   bool return_zero = false)
{
  if (return_zero) {
    raft::linalg::matrixVectorOp(
      data,
      data,
      vec,
      n_col,
      n_row,
      rowMajor,
      bcastAlongRows,
      [] __device__(Type a, Type b) {
        if (raft::abs(b) < Type(1e-10))
          return Type(0);
        else
          return a / b;
      },
      stream);
  } else {
    raft::linalg::matrixVectorOp(
      data,
      data,
      vec,
      n_col,
      n_row,
      rowMajor,
      bcastAlongRows,
      [] __device__(Type a, Type b) {
        if (raft::abs(b) < Type(1e-10))
          return a;
        else
          return a / b;
      },
      stream);
  }
}

// In-place broadcast add: data[i] += vec[...].
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinaryAdd(Type* data,
                           const Type* vec,
                           IdxType n_row,
                           IdxType n_col,
                           bool rowMajor,
                           bool bcastAlongRows,
                           cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    data, data, vec, n_col, n_row, rowMajor, bcastAlongRows, raft::add_op(), stream);
}

// In-place broadcast subtract: data[i] -= vec[...].
template <typename Type, typename IdxType = int, int TPB = 256>
void matrixVectorBinarySub(Type* data,
                           const Type* vec,
                           IdxType n_row,
                           IdxType n_col,
                           bool rowMajor,
                           bool bcastAlongRows,
                           cudaStream_t stream)
{
  raft::linalg::matrixVectorOp(
    data, data, vec, n_col, n_row, rowMajor, bcastAlongRows, raft::sub_op(), stream);
}
// Computes an argmin/argmax column-wise in a DxN matrix
// (one block per length-D slice; TPB threads stride over the slice).
template <typename RedOp, int TPB, typename T, typename OutT, typename IdxT>
RAFT_KERNEL argReduceKernel(const T* d_in, IdxT D, IdxT N, OutT* out)
{
  typedef cub::
    BlockReduce<cub::KeyValuePair<IdxT, T>, TPB, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY>
      BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  using KVP     = cub::KeyValuePair<IdxT, T>;
  IdxT rowStart = static_cast<IdxT>(blockIdx.x) * D;
  // Seed with +/- infinity so any real element wins the first comparison.
  KVP thread_data(0, std::is_same_v<RedOp, cub::ArgMax> ? -raft::myInf<T>() : raft::myInf<T>());

  // Strided loop: thread t covers slice indices t, t+TPB, t+2*TPB, ...
  for (IdxT i = threadIdx.x; i < D; i += TPB) {
    IdxT idx    = rowStart + i;
    thread_data = RedOp()(thread_data, KVP(i, d_in[idx]));
  }

  auto maxKV = BlockReduce(temp_storage).Reduce(thread_data, RedOp());
  // Only thread 0 holds the valid aggregate after a CUB block reduction.
  if (threadIdx.x == 0) { out[blockIdx.x] = maxKV.key; }
}
/**
 * @brief Computes an argmin/argmax coalesced reduction
 *
 * @tparam RedOp Reduction operation (cub::ArgMin or cub::ArgMax)
 * @tparam math_t Value type
 * @tparam out_t Output key type
 * @tparam idx_t Matrix index type
 * @param[in] in Input matrix (DxN column-major or NxD row-major)
 * @param[in] D Dimension of the axis to reduce along
 * @param[in] N Number of reductions
 * @param[out] out Output keys (N)
 * @param[in] stream CUDA stream
 */
template <typename RedOp, typename math_t, typename out_t, typename idx_t>
inline void argReduce(const math_t* in, idx_t D, idx_t N, out_t* out, cudaStream_t stream)
{
  // Pick the smallest power-of-two block size covering D (capped at 256);
  // one block per reduction.
  if (D <= 32) {
    argReduceKernel<RedOp, 32><<<N, 32, 0, stream>>>(in, D, N, out);
  } else if (D <= 64) {
    argReduceKernel<RedOp, 64><<<N, 64, 0, stream>>>(in, D, N, out);
  } else if (D <= 128) {
    argReduceKernel<RedOp, 128><<<N, 128, 0, stream>>>(in, D, N, out);
  } else {
    argReduceKernel<RedOp, 256><<<N, 256, 0, stream>>>(in, D, N, out);
  }
  // Surface launch-configuration errors without clearing the error state.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}

// Per-slice argmin of a DxN matrix; indices written to out (N).
template <typename math_t, typename out_t, typename idx_t>
void argmin(const math_t* in, idx_t D, idx_t N, out_t* out, cudaStream_t stream)
{
  argReduce<cub::ArgMin>(in, D, N, out, stream);
}

// Per-slice argmax of a DxN matrix; indices written to out (N).
template <typename math_t, typename out_t, typename idx_t>
void argmax(const math_t* in, idx_t D, idx_t N, out_t* out, cudaStream_t stream)
{
  argReduce<cub::ArgMax>(in, D, N, out, stream);
}
// Utility kernel needed for signFlip.
// Computes the argmax(abs(d_in)) column-wise in a DxN matrix followed by
// flipping the sign if the |max| value for each column is negative.
// One block per column; TPB threads stride over the column's D entries.
template <typename T, int TPB>
RAFT_KERNEL signFlipKernel(T* d_in, int D, int N)
{
  typedef cub::BlockReduce<cub::KeyValuePair<int, T>, TPB> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  // compute maxIndex=argMax (with abs()) index for column
  // Note: unlike argReduceKernel, the stored key is the absolute offset into d_in.
  using KVP    = cub::KeyValuePair<int, T>;
  int rowStart = blockIdx.x * D;
  KVP thread_data(0, 0);
  for (int i = threadIdx.x; i < D; i += TPB) {
    int idx     = rowStart + i;
    thread_data = cub::ArgMax()(thread_data, KVP(idx, abs(d_in[idx])));
  }
  auto maxKV = BlockReduce(temp_storage).Reduce(thread_data, cub::ArgMax());

  // flip column sign if d_in[maxIndex] < 0
  // Broadcast the decision via shared memory: only thread 0 holds the valid
  // reduction result; the barrier makes it visible to the whole block.
  __shared__ bool need_sign_flip;
  if (threadIdx.x == 0) { need_sign_flip = d_in[maxKV.key] < T(0); }
  __syncthreads();

  if (need_sign_flip) {
    for (int i = threadIdx.x; i < D; i += TPB) {
      int idx   = rowStart + i;
      d_in[idx] = -d_in[idx];
    }
  }
}
// For each length-n_rows slice of `inout`, flip the slice's sign so that its
// largest-magnitude entry becomes positive (in place).
template <typename math_t>
void signFlip(math_t* inout, int n_rows, int n_cols, cudaStream_t stream)
{
  int D     = n_rows;
  int N     = n_cols;
  auto data = inout;
  // Smallest power-of-two block size covering D (capped at 256); one block per slice.
  if (D <= 32) {
    signFlipKernel<math_t, 32><<<N, 32, 0, stream>>>(data, D, N);
  } else if (D <= 64) {
    signFlipKernel<math_t, 64><<<N, 64, 0, stream>>>(data, D, N);
  } else if (D <= 128) {
    signFlipKernel<math_t, 128><<<N, 128, 0, stream>>>(data, D, N);
  } else {
    signFlipKernel<math_t, 256><<<N, 256, 0, stream>>>(data, D, N);
  }
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // end namespace detail
} // end namespace matrix
} // end namespace raft
| 0 |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdarray.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/linalg/map.cuh>
#include <raft/util/fast_int_div.cuh>
#include <thrust/iterator/counting_iterator.h>
namespace raft {
namespace matrix {
namespace detail {
/**
 * @brief Implementation of in-place row gather: row i of `inout` is replaced
 * by row transform_op(map[i]). Columns are processed in batches of
 * `batch_size` using a map_length x batch_size scratch buffer: the gathered
 * rows for the current batch of columns are staged into scratch, then copied
 * back into the first map_length rows of `inout`.
 *
 * @tparam MatrixT matrix element type
 * @tparam MapT type of the map elements
 * @tparam MapTransformOp unary device op converting a map value to a row index
 * @tparam IndexT integer type used for indexing
 *
 * @param[in] handle raft handle
 * @param[inout] inout row-major matrix (m * n), modified in place
 * @param[in] map source row selector for each output row (length <= m)
 * @param[in] transform_op transformation applied to each map value
 * @param[in] batch_size number of columns per batch; 0 (or > n) selects all columns
 */
template <typename MatrixT, typename MapT, typename MapTransformOp, typename IndexT>
void gatherInplaceImpl(raft::resources const& handle,
                       raft::device_matrix_view<MatrixT, IndexT, raft::layout_c_contiguous> inout,
                       raft::device_vector_view<const MapT, IndexT, raft::layout_c_contiguous> map,
                       MapTransformOp transform_op,
                       IndexT batch_size)
{
  IndexT m          = inout.extent(0);
  IndexT n          = inout.extent(1);
  IndexT map_length = map.extent(0);

  // Skip empty inputs; a negative batch_size is also silently ignored here
  // (kept as-is: the original RAFT_EXPECTS(batch_size >= 0) after this return
  // was unreachable and has been removed).
  if (map_length <= 0 || m <= 0 || n <= 0 || batch_size < 0) return;

  RAFT_EXPECTS(map_length <= m, "Length of map should be <= number of rows for inplace gather");

  // re-assign batch_size for default case
  if (batch_size == 0 || batch_size > n) batch_size = n;

  auto exec_policy = resource::get_thrust_policy(handle);

  IndexT n_batches = raft::ceildiv(n, batch_size);

  auto scratch_space = raft::make_device_vector<MatrixT, IndexT>(handle, map_length * batch_size);

  for (IndexT bid = 0; bid < n_batches; bid++) {
    IndexT batch_offset   = bid * batch_size;
    IndexT cols_per_batch = min(batch_size, n - batch_offset);

    // Stage the gathered rows for this batch of columns into scratch.
    // (The unused `map_length` capture of the original lambda has been dropped.)
    auto gather_op = [inout          = inout.data_handle(),
                      map            = map.data_handle(),
                      transform_op,
                      batch_offset,
                      cols_per_batch = raft::util::FastIntDiv(cols_per_batch),
                      n] __device__(auto idx) {
      IndexT row   = idx / cols_per_batch;
      IndexT col   = idx % cols_per_batch;
      MapT map_val = map[row];
      IndexT i_src = transform_op(map_val);
      return inout[i_src * n + batch_offset + col];
    };
    raft::linalg::map_offset(
      handle,
      raft::make_device_vector_view(scratch_space.data_handle(), map_length * cols_per_batch),
      gather_op);

    // Copy the staged batch back: destination row == scratch row, so the map
    // is not needed here (unused `map`/`map_length` captures removed).
    auto copy_op = [inout          = inout.data_handle(),
                    scratch_space  = scratch_space.data_handle(),
                    batch_offset,
                    cols_per_batch = raft::util::FastIntDiv(cols_per_batch),
                    n] __device__(auto idx) {
      IndexT row = idx / cols_per_batch;
      IndexT col = idx % cols_per_batch;
      inout[row * n + batch_offset + col] = scratch_space[idx];
      return;
    };
    auto counting = thrust::make_counting_iterator<IndexT>(0);
    thrust::for_each(exec_policy, counting, counting + map_length * cols_per_batch, copy_op);
  }
}
// In-place gather with a user-supplied transform applied to each map value.
template <typename MatrixT, typename MapT, typename MapTransformOp, typename IndexT>
void gather(raft::resources const& handle,
            raft::device_matrix_view<MatrixT, IndexT, raft::layout_c_contiguous> inout,
            raft::device_vector_view<const MapT, IndexT, raft::layout_c_contiguous> map,
            MapTransformOp transform_op,
            IndexT batch_size)
{
  gatherInplaceImpl<MatrixT, MapT, MapTransformOp, IndexT>(
    handle, inout, map, transform_op, batch_size);
}
// In-place gather without a transform: map values are used directly as source
// row indices.
template <typename MatrixT, typename MapT, typename IndexT>
void gather(raft::resources const& handle,
            raft::device_matrix_view<MatrixT, IndexT, raft::layout_c_contiguous> inout,
            raft::device_vector_view<const MapT, IndexT, raft::layout_c_contiguous> map,
            IndexT batch_size)
{
  auto no_transform = raft::identity_op();
  gatherInplaceImpl(handle, inout, map, no_transform, batch_size);
}
} // namespace detail
} // namespace matrix
} // namespace raft
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "select_radix.cuh"
#include "select_warpsort.cuh"
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/nvtx.hpp>
#include <raft/matrix/init.cuh>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/neighbors/detail/selection_faiss.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <thrust/scan.h>
namespace raft::matrix::detail {
// this is a subset of algorithms, chosen by running the algorithm_selection
// notebook in cpp/scripts/heuristics/select_k
enum class Algo { kRadix11bits, kWarpDistributedShm, kWarpImmediate, kRadix11bitsExtraPass };
/**
* Predict the fastest select_k algorithm based on the number of rows/cols/k
*
* The body of this method is automatically generated, using a DecisionTree
* to predict the fastest algorithm based off of thousands of trial runs
* on different values of rows/cols/k. The decision tree is converted to c++
* code, which is cut and paste below.
*
* NOTE: The code to generate is in cpp/scripts/heuristics/select_k, running the
* 'generate_heuristic' notebook there will replace the body of this function
* with the latest learned heuristic
*/
inline Algo choose_select_k_algorithm(size_t rows, size_t cols, int k)
{
  // The thresholds below are machine-generated from benchmark data (see the
  // note above); do not hand-tune them — regenerate instead.
  if (k > 256) {
    if (cols > 16862) {
      if (rows > 1020) {
        return Algo::kRadix11bitsExtraPass;
      } else {
        return Algo::kRadix11bits;
      }
    } else {
      return Algo::kRadix11bitsExtraPass;
    }
  } else {
    // Small k: warp-sort based algorithms win.
    if (k > 2) {
      if (cols > 22061) {
        return Algo::kWarpDistributedShm;
      } else {
        if (rows > 198) {
          return Algo::kWarpDistributedShm;
        } else {
          return Algo::kWarpImmediate;
        }
      }
    } else {
      return Algo::kWarpImmediate;
    }
  }
}
/**
* Performs a segmented sorting of a keys array with respect to
* the segments of a values array.
* @tparam KeyT
* @tparam ValT
 * @param handle raft handle
 * @param keys keys to sort (n_elements), sorted in place
 * @param values payload permuted alongside the keys (n_elements), in place
 * @param n_segments number of segments
 * @param n_elements total number of elements across all segments
 * @param offsets segment boundaries (n_segments + 1)
 * @param asc sort ascending (true) or descending (false)
*/
/**
 * @brief Segmented in-place sort of `keys` with `values` permuted alongside.
 *
 * @tparam KeyT key type (compared)
 * @tparam ValT payload/offset type
 * @param[in] handle raft handle
 * @param[inout] keys keys to sort (n_elements)
 * @param[inout] values payload permuted alongside the keys (n_elements)
 * @param[in] n_segments number of segments
 * @param[in] n_elements total number of elements across all segments
 * @param[in] offsets segment boundaries (n_segments + 1)
 * @param[in] asc sort ascending (true) or descending (false)
 */
template <typename KeyT, typename ValT>
void segmented_sort_by_key(raft::resources const& handle,
                           KeyT* keys,
                           ValT* values,
                           size_t n_segments,
                           size_t n_elements,
                           const ValT* offsets,
                           bool asc)
{
  auto stream    = raft::resource::get_cuda_stream(handle);
  auto out_inds  = raft::make_device_vector<ValT, ValT>(handle, n_elements);
  auto out_dists = raft::make_device_vector<KeyT, ValT>(handle, n_elements);

  // The radix sort operates on the bits of the *key* type. Fix: the original
  // passed sizeof(ValT) * 8, which is wrong whenever sizeof(ValT) != sizeof(KeyT).
  constexpr int kEndBit = int(sizeof(KeyT)) * 8;

  // Single helper used both for the temp-storage size query and the sort run.
  auto run = [&](void* storage, size_t& storage_bytes) {
    if (asc) {
      cub::DeviceSegmentedRadixSort::SortPairs(storage,
                                               storage_bytes,
                                               keys,
                                               out_dists.data_handle(),
                                               values,
                                               out_inds.data_handle(),
                                               n_elements,
                                               n_segments,
                                               offsets,
                                               offsets + 1,
                                               0,
                                               kEndBit,
                                               stream);
    } else {
      cub::DeviceSegmentedRadixSort::SortPairsDescending(storage,
                                                         storage_bytes,
                                                         keys,
                                                         out_dists.data_handle(),
                                                         values,
                                                         out_inds.data_handle(),
                                                         n_elements,
                                                         n_segments,
                                                         offsets,
                                                         offsets + 1,
                                                         0,
                                                         kEndBit,
                                                         stream);
    }
  };

  // Determine temporary device storage requirements: cub performs the size
  // query only when the storage pointer is null, so pass nullptr explicitly
  // rather than the handle of a zero-sized vector.
  size_t temp_storage_bytes = 0;
  run(nullptr, temp_storage_bytes);

  auto d_temp_storage = raft::make_device_vector<char, int>(handle, temp_storage_bytes);

  // Run the sorting operation.
  run((void*)d_temp_storage.data_handle(), temp_storage_bytes);

  // The sort writes out-of-place; copy results back into the caller's buffers.
  raft::copy(values, out_inds.data_handle(), out_inds.size(), stream);
  raft::copy(keys, out_dists.data_handle(), out_dists.size(), stream);
}
template <typename KeyT, typename ValT>
void segmented_sort_by_key(raft::resources const& handle,
                           raft::device_vector_view<const ValT, ValT> offsets,
                           raft::device_vector_view<KeyT, ValT> keys,
                           raft::device_vector_view<ValT, ValT> values,
                           bool asc)
{
  // mdspan convenience overload: validate the views and forward to the
  // raw-pointer implementation above.
  RAFT_EXPECTS(keys.size() == values.size(),
               "Keys and values must contain the same number of elements.");
  // `offsets` holds n_segments + 1 boundaries.
  const auto n_segments  = offsets.size() - 1;
  const auto n_elements  = keys.size();
  segmented_sort_by_key<KeyT, ValT>(handle,
                                    keys.data_handle(),
                                    values.data_handle(),
                                    n_segments,
                                    n_elements,
                                    offsets.data_handle(),
                                    asc);
}
/**
* Select k smallest or largest key/values from each row in the input data.
*
* If you think of the input data `in_val` as a row-major matrix with `len` columns and
* `batch_size` rows, then this function selects `k` smallest/largest values in each row and fills
* in the row-major matrix `out_val` of size (batch_size, k).
*
* @tparam T
* the type of the keys (what is being compared).
* @tparam IdxT
* the index type (what is being selected together with the keys).
*
* @param[in] in_val
* contiguous device array of inputs of size (len * batch_size);
* these are compared and selected.
* @param[in] in_idx
* contiguous device array of inputs of size (len * batch_size);
* typically, these are indices of the corresponding in_val.
* @param batch_size
* number of input rows, i.e. the batch size.
* @param len
* length of a single input array (row); also sometimes referred as n_cols.
* Invariant: len >= k.
* @param k
* the number of outputs to select in each input row.
* @param[out] out_val
* contiguous device array of outputs of size (k * batch_size);
* the k smallest/largest values from each row of the `in_val`.
* @param[out] out_idx
* contiguous device array of outputs of size (k * batch_size);
* the payload selected together with `out_val`.
* @param select_min
* whether to select k smallest (true) or largest (false) keys.
* @param stream
* @param mr an optional memory resource to use across the calls (you can provide a large enough
* memory pool here to avoid memory allocations within the call).
*/
template <typename T, typename IdxT>
void select_k(raft::resources const& handle,
              const T* in_val,
              const IdxT* in_idx,
              size_t batch_size,
              size_t len,
              int k,
              T* out_val,
              IdxT* out_idx,
              bool select_min,
              rmm::mr::device_memory_resource* mr = nullptr,
              bool sorted = false)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "matrix::select_k(batch_size = %zu, len = %zu, k = %d)", batch_size, len, k);
  auto stream = raft::resource::get_cuda_stream(handle);
  // Dispatch to the implementation predicted fastest for this
  // (rows, cols, k) shape by the learned heuristic above.
  auto algo = choose_select_k_algorithm(batch_size, len, k);
  switch (algo) {
    case Algo::kRadix11bits:
    case Algo::kRadix11bitsExtraPass: {
      // kRadix11bits fuses the last filtering step into the main kernel;
      // the "extra pass" variant runs it as a separate pass.
      bool fused_last_filter = algo == Algo::kRadix11bits;
      detail::select::radix::select_k<T, IdxT, 11, 512>(in_val,
                                                        in_idx,
                                                        batch_size,
                                                        len,
                                                        k,
                                                        out_val,
                                                        out_idx,
                                                        select_min,
                                                        fused_last_filter,
                                                        stream,
                                                        mr);
      // NOTE(review): `sorted` is only honored on the radix path here;
      // presumably the warpsort paths below already emit sorted rows —
      // confirm against their implementation.
      if (sorted) {
        // Build per-row segment offsets [0, k, 2k, ...] by filling with k
        // and taking an exclusive scan, then sort each row of the
        // (batch_size x k) output in place.
        auto offsets = raft::make_device_vector<IdxT, IdxT>(handle, (IdxT)(batch_size + 1));
        raft::matrix::fill(handle, offsets.view(), (IdxT)k);
        thrust::exclusive_scan(raft::resource::get_thrust_policy(handle),
                               offsets.data_handle(),
                               offsets.data_handle() + offsets.size(),
                               offsets.data_handle(),
                               0);
        auto keys = raft::make_device_vector_view<T, IdxT>(out_val, (IdxT)(batch_size * k));
        auto vals = raft::make_device_vector_view<IdxT, IdxT>(out_idx, (IdxT)(batch_size * k));
        // Ascending order exactly when the k smallest values were selected.
        segmented_sort_by_key<T, IdxT>(
          handle, raft::make_const_mdspan(offsets.view()), keys, vals, select_min);
      }
      return;
    }
    case Algo::kWarpDistributedShm:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_distributed_ext>(
          in_val, in_idx, batch_size, len, k, out_val, out_idx, select_min, stream, mr);
    case Algo::kWarpImmediate:
      return detail::select::warpsort::
        select_k_impl<T, IdxT, detail::select::warpsort::warp_sort_immediate>(
          in_val, in_idx, batch_size, len, k, out_val, out_idx, select_min, stream, mr);
    default: RAFT_FAIL("K-selection Algorithm not supported.");
  }
}
} // namespace raft::matrix::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/print.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cache_util.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <cstddef>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft::matrix::detail {
template <typename m_t, typename idx_t = int>
void printHost(
  const m_t* in, idx_t n_rows, idx_t n_cols, char h_separator = ' ', char v_separator = '\n')
{
  // Print a column-major host matrix row by row: h_separator goes between
  // the elements of a row and v_separator terminates each row.
  // (The previous signature ended with a stray trailing comma after the
  // last parameter, which is a compile error.)
  for (idx_t i = 0; i < n_rows; i++) {
    for (idx_t j = 0; j < n_cols; j++) {
      printf("%1.4f%c", in[j * n_rows + i], j < n_cols - 1 ? h_separator : v_separator);
    }
  }
}
} // end namespace raft::matrix::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix | rapidsai_public_repos/raft/cpp/include/raft/matrix/detail/matrix.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/util/cache_util.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/exec_policy.hpp>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <algorithm>
#include <cstddef>
#include <cuda_runtime.h>
#include <cusolverDn.h>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace matrix {
namespace detail {
template <typename m_t, typename idx_array_t = int, typename idx_t = size_t>
void copyRows(const m_t* in,
              idx_t n_rows,
              idx_t n_cols,
              m_t* out,
              const idx_array_t* indices,
              idx_t n_rows_indices,
              cudaStream_t stream,
              bool rowMajor = false)
{
  // Gather the n_rows_indices rows of `in` listed in `indices` into `out`.
  if (rowMajor) {
    // Row-major: each selected row is contiguous, so reuse the cached
    // vector-gather kernel (one thread per output element).
    const idx_t TPB = 256;
    cache::get_vecs<<<raft::ceildiv(n_rows_indices * n_cols, TPB), TPB, 0, stream>>>(
      in, n_cols, indices, n_rows_indices, out);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    return;
  }
  // Column-major: one thrust work item per output element; the output is a
  // dense column-major matrix with n_rows_indices rows.
  idx_t size = n_rows_indices * n_cols;
  auto counting = thrust::make_counting_iterator<idx_t>(0);
  thrust::for_each(rmm::exec_policy(stream), counting, counting + size, [=] __device__(idx_t idx) {
    idx_t row = idx % n_rows_indices;
    idx_t col = idx / n_rows_indices;
    out[col * n_rows_indices + row] = in[col * n_rows + indices[row]];
  });
}
template <typename m_t, typename idx_t = int>
void truncZeroOrigin(
  const m_t* in, idx_t in_n_rows, m_t* out, idx_t out_n_rows, idx_t out_n_cols, cudaStream_t stream)
{
  // Copy the top-left out_n_rows x out_n_cols corner of the column-major
  // input (leading dimension in_n_rows) into the densely packed output.
  const idx_t out_ld  = out_n_rows;
  const idx_t in_ld   = in_n_rows;
  const idx_t n_elems = out_n_rows * out_n_cols;
  auto first = thrust::make_counting_iterator<idx_t>(0);
  thrust::for_each(
    rmm::exec_policy(stream), first, first + n_elems, [=] __device__(idx_t idx) {
      const idx_t r = idx % out_ld;
      const idx_t c = idx / out_ld;
      out[c * out_ld + r] = in[c * in_ld + r];
    });
}
template <typename m_t, typename idx_t = int>
void colReverse(m_t* inout, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  // Reverse the column order of a column-major matrix in place: element
  // (i, j) is exchanged with (i, n_cols - j - 1).
  auto n = n_cols;
  auto m = n_rows;
  idx_t size = n_rows * n_cols;
  auto d_q = inout;
  auto d_q_reversed = inout;
  auto counting = thrust::make_counting_iterator<idx_t>(0);
  // Only size / 2 work items: each one swaps a (dest, src) mirror pair.
  // Items that fall in the middle column of an odd-width matrix compute
  // src == dest and swap with themselves (a harmless no-op).
  thrust::for_each(
    rmm::exec_policy(stream), counting, counting + (size / 2), [=] __device__(idx_t idx) {
      idx_t dest_row = idx % m;
      idx_t dest_col = idx / m;
      idx_t src_row = dest_row;
      idx_t src_col = (n - dest_col) - 1;
      m_t temp = (m_t)d_q_reversed[idx];
      d_q_reversed[idx] = d_q[src_col * m + src_row];
      d_q[src_col * m + src_row] = temp;
    });
}
template <typename m_t, typename idx_t = int>
void rowReverse(m_t* inout, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  // Reverse the row order of a column-major matrix in place: element (i, j)
  // is exchanged with (n_rows - i - 1, j). Each work item swaps one element
  // from the top half of a column with its mirror in the bottom half; the
  // middle row of an odd-height matrix stays where it is.
  auto m = n_rows;
  auto half_rows = n_rows / 2;
  // Launch exactly half_rows * n_cols swaps. The previous count of
  // (n_rows * n_cols) / 2 combined with the (m / 2) partitioning below made
  // dest_col overrun n_cols for odd n_rows (out-of-bounds reads/writes) and
  // divided by zero inside the lambda when n_rows == 1.
  idx_t swaps = half_rows * n_cols;
  auto d_q = inout;
  auto d_q_reversed = inout;
  auto counting = thrust::make_counting_iterator<idx_t>(0);
  thrust::for_each(
    rmm::exec_policy(stream), counting, counting + swaps, [=] __device__(idx_t idx) {
      idx_t dest_row = idx % half_rows;
      idx_t dest_col = idx / half_rows;
      idx_t src_row = (m - dest_row) - 1;
      idx_t src_col = dest_col;
      m_t temp = (m_t)d_q_reversed[dest_col * m + dest_row];
      d_q_reversed[dest_col * m + dest_row] = d_q[src_col * m + src_row];
      d_q[src_col * m + src_row] = temp;
    });
}
template <typename m_t, typename idx_t = int>
void print(const m_t* in,
           idx_t n_rows,
           idx_t n_cols,
           char h_separator = ' ',
           char v_separator = '\n',
           cudaStream_t stream = rmm::cuda_stream_default)
{
  // Copy the column-major device matrix to the host and print it row by row.
  std::vector<m_t> h_matrix = std::vector<m_t>(n_cols * n_rows);
  raft::update_host(h_matrix.data(), in, n_cols * n_rows, stream);
  // update_host enqueues an asynchronous copy on `stream`; without this
  // synchronization the loop below may read the host buffer before the
  // copy has completed.
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  for (idx_t i = 0; i < n_rows; i++) {
    for (idx_t j = 0; j < n_cols; j++) {
      printf("%1.4f%c", h_matrix[j * n_rows + i], j < n_cols - 1 ? h_separator : v_separator);
    }
  }
}
template <typename m_t, typename idx_t = int>
void printHost(const m_t* in, idx_t n_rows, idx_t n_cols)
{
  // Print a column-major host matrix in row-major reading order, one matrix
  // row per output line, elements separated by a single space.
  for (idx_t r = 0; r < n_rows; r++) {
    for (idx_t c = 0; c < n_cols; c++) {
      printf("%1.4f ", in[c * n_rows + r]);
    }
    printf("\n");
  }
}
/**
* @brief Kernel for copying a slice of a big matrix to a small matrix with a
* size matches that slice
* @param src_d: input matrix
* @param m: number of rows of input matrix
* @param n: number of columns of input matrix
* @param dst_d: output matrix
* @param x1, y1: coordinate of the top-left point of the wanted area (0-based)
* @param x2, y2: coordinate of the bottom-right point of the wanted area
* (1-based)
*/
template <typename m_t, typename idx_t = int>
RAFT_KERNEL slice(const m_t* src_d, idx_t lda, m_t* dst_d, idx_t x1, idx_t y1, idx_t x2, idx_t y2)
{
  // One thread per destination element. The destination is densely packed
  // while the source is addressed with leading dimension `lda`.
  idx_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  idx_t dm = x2 - x1, dn = y2 - y1;  // extents of the slice
  if (idx < dm * dn) {
    idx_t i = idx % dm, j = idx / dm;  // coordinates within the slice
    idx_t is = i + x1, js = j + y1;    // corresponding source coordinates
    dst_d[idx] = src_d[is + js * lda];
  }
}
template <typename m_t, typename idx_t = int>
void sliceMatrix(const m_t* in,
                 idx_t n_rows,
                 idx_t n_cols,
                 m_t* out,
                 idx_t x1,
                 idx_t y1,
                 idx_t x2,
                 idx_t y2,
                 bool row_major,
                 cudaStream_t stream)
{
  // Copy the sub-matrix [x1, x2) x [y1, y2) of `in` into the densely packed
  // `out`. The `slice` kernel is written for column-major data; for a
  // row-major input we swap the roles of the row/column coordinates (and use
  // n_cols as the leading dimension) so the same kernel applies.
  auto lda = row_major ? n_cols : n_rows;
  dim3 block(64);
  dim3 grid(((x2 - x1) * (y2 - y1) + block.x - 1) / block.x);
  if (row_major)
    slice<<<grid, block, 0, stream>>>(in, lda, out, y1, x1, y2, x2);
  else
    slice<<<grid, block, 0, stream>>>(in, lda, out, x1, y1, x2, y2);
}
/**
* @brief Kernel for copying the upper triangular part of a matrix to another
* @param src: input matrix with a size of mxn
* @param dst: output matrix with a size of kxk
* @param n_rows: number of rows of input matrix
* @param n_cols: number of columns of input matrix
* @param k: min(n_rows, n_cols)
*/
template <typename m_t, typename idx_t = int>
RAFT_KERNEL getUpperTriangular(const m_t* src, m_t* dst, idx_t n_rows, idx_t n_cols, idx_t k)
{
  // One thread per element of the column-major source matrix.
  idx_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  idx_t m = n_rows, n = n_cols;
  if (idx < m * n) {
    idx_t i = idx % m, j = idx / m;
    // Copy only positions inside the leading k x k block that lie on or
    // above the diagonal (j >= i); other entries of dst are left untouched.
    if (i < k && j < k && j >= i) { dst[i + j * k] = src[idx]; }
  }
}
template <typename m_t, typename idx_t = int>
void copyUpperTriangular(const m_t* src, m_t* dst, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
{
  // Extract the upper-triangular part of the n_rows x n_cols matrix `src`
  // into the k x k matrix `dst`, where k = min(n_rows, n_cols). The kernel
  // is launched with one thread per source element.
  const idx_t k     = std::min(n_rows, n_cols);
  const idx_t total = n_rows * n_cols;
  dim3 block(64);
  dim3 grid((total + block.x - 1) / block.x);
  getUpperTriangular<<<grid, block, 0, stream>>>(src, dst, n_rows, n_cols, k);
}
/**
* @brief Copy a vector to the diagonal of a matrix
* @param vec: vector of length k = min(n_rows, n_cols)
* @param matrix: matrix of size n_rows x n_cols (leading dimension = lda)
* @param lda: leading dimension of the matrix
* @param k: dimensionality
*/
template <typename m_t, typename idx_t = int>
RAFT_KERNEL copyVectorToMatrixDiagonal(const m_t* vec, m_t* matrix, idx_t lda, idx_t k)
{
  // Thread t writes vec[t] onto the t-th diagonal element; consecutive
  // diagonal entries are lda + 1 apart in memory.
  const idx_t tid = threadIdx.x + blockDim.x * blockIdx.x;
  if (tid >= k) { return; }
  matrix[tid * (lda + 1)] = vec[tid];
}
/**
* @brief Copy matrix diagonal to vector
* @param vec: vector of length k = min(n_rows, n_cols)
* @param matrix: matrix of size n_rows x n_cols (leading dimension = lda)
* @param lda: leading dimension of the matrix
* @param k: dimensionality
*/
template <typename m_t, typename idx_t = int>
RAFT_KERNEL copyVectorFromMatrixDiagonal(m_t* vec, const m_t* matrix, idx_t lda, idx_t k)
{
  // Thread t reads the t-th diagonal element (stride lda + 1) into vec[t].
  const idx_t tid = threadIdx.x + blockDim.x * blockIdx.x;
  if (tid >= k) { return; }
  vec[tid] = matrix[tid * (lda + 1)];
}
template <typename m_t, typename idx_t = int>
void initializeDiagonalMatrix(
  const m_t* vec, m_t* matrix, idx_t n_rows, idx_t n_cols, bool row_major, cudaStream_t stream)
{
  // Scatter `vec` onto the main diagonal of the matrix. The diagonal has
  // min(n_rows, n_cols) entries; the leading dimension follows the layout.
  const idx_t k   = std::min(n_rows, n_cols);
  const idx_t lda = row_major ? n_cols : n_rows;
  const idx_t tpb = 64;
  dim3 block(tpb);
  dim3 grid((k + tpb - 1) / tpb);
  copyVectorToMatrixDiagonal<<<grid, block, 0, stream>>>(vec, matrix, lda, k);
}
template <typename m_t, typename idx_t = int>
void getDiagonalMatrix(
  m_t* vec, const m_t* matrix, idx_t n_rows, idx_t n_cols, bool row_major, cudaStream_t stream)
{
  // Gather the main diagonal of the matrix into `vec`. The diagonal has
  // min(n_rows, n_cols) entries; the leading dimension follows the layout.
  const idx_t k   = std::min(n_rows, n_cols);
  const idx_t lda = row_major ? n_cols : n_rows;
  const idx_t tpb = 64;
  dim3 block(tpb);
  dim3 grid((k + tpb - 1) / tpb);
  copyVectorFromMatrixDiagonal<<<grid, block, 0, stream>>>(vec, matrix, lda, k);
}
/**
* @brief Calculate the inverse of the diagonal of a square matrix
* element-wise and in place
* @param in: square input matrix with size len x len
* @param len: size of one side of the matrix
*/
template <typename m_t, typename idx_t = int>
RAFT_KERNEL matrixDiagonalInverse(m_t* in, idx_t len)
{
  // One thread per diagonal element of the len x len matrix.
  idx_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  // m_t(1) keeps the division in the element type; the previous `1.0`
  // literal promoted the computation to double even for float matrices.
  if (idx < len) { in[idx + idx * len] = m_t(1) / in[idx + idx * len]; }
}
template <typename m_t, typename idx_t = int>
void getDiagonalInverseMatrix(m_t* in, idx_t len, cudaStream_t stream)
{
  // Invert the diagonal of the len x len matrix in place, one thread per
  // diagonal element.
  const idx_t tpb = 64;
  dim3 block(tpb);
  dim3 grid((len + tpb - 1) / tpb);
  matrixDiagonalInverse<m_t><<<grid, block, 0, stream>>>(in, len);
}
template <typename m_t, typename idx_t = int>
m_t getL2Norm(raft::resources const& handle, const m_t* in, idx_t size, cudaStream_t stream)
{
  // Euclidean (L2) norm of a device array, computed via cuBLAS nrm2.
  cublasHandle_t cublasH = resource::get_cublas_handle(handle);
  m_t normval = 0;
  // cublasnrm2 takes an int length, so the requested size must fit in int.
  RAFT_EXPECTS(
    std::is_integral_v<idx_t> && (std::size_t)size <= (std::size_t)std::numeric_limits<int>::max(),
    "Index type not supported");
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublasnrm2(cublasH, static_cast<int>(size), in, 1, &normval, stream));
  return normval;
}
} // end namespace detail
} // end namespace matrix
} // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/matrix/specializations | rapidsai_public_repos/raft/cpp/include/raft/matrix/specializations/detail/select_k.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/common/seive.hpp | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
 * DISCLAIMER: this file is deprecated: use raft/util/seive.hpp instead
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft/util version instead.")
#include <raft/util/seive.hpp>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/common/cub_wrappers.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
 * DISCLAIMER: this file is deprecated: use raft/util/detail/cub_wrappers.cuh instead
 */
#pragma once

// Deprecation shim: emit a message and forward to the detail header.
// The continuation backslash at the end of the third string line was
// previously missing, which terminated the #pragma directive mid-argument
// and made this header fail to compile.
#pragma message(__FILE__ \
                " is deprecated and will be removed in a future release." \
                " Please note that there is no equivalent in RAFT's public API" \
                " so this file will eventually be removed altogether.")
#include <raft/util/detail/cub_wrappers.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/common/logger.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.08.
* Please use the include/core/logger.hpp instead.
*/
#pragma once
#include <raft/core/logger.hpp> | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/common/device_loads_stores.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
 * DISCLAIMER: this file is deprecated: use raft/util/device_loads_stores.cuh instead
 */
#pragma once
#pragma message(__FILE__ \
" is deprecated and will be removed in a future release." \
" Please use the raft/util version instead.")
#include <raft/util/device_loads_stores.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/common/scatter.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.06.
* Please use the cuh version instead.
*/
/**
 * DISCLAIMER: this file is deprecated: use raft/util/scatter.cuh instead
 */
#pragma once

// Deprecation shim. The message previously pointed users at a
// "raft/matrix version", but the replacement this header forwards to is
// raft/util/scatter.cuh.
#pragma message(__FILE__ \
                " is deprecated and will be removed in a future release." \
                " Please use the raft/util version instead.")
#include <raft/util/scatter.cuh>
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/common/nvtx.hpp | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file is deprecated and will be removed in release 22.08.
* Please use the include/core/nvtx.hpp instead.
*/
#pragma once
#include <raft/core/nvtx.hpp> | 0 |
rapidsai_public_repos/raft/cpp/include/raft/common | rapidsai_public_repos/raft/cpp/include/raft/common/detail/scatter.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/util/cuda_utils.cuh>
#include <raft/util/vectorized.cuh>
namespace raft::detail {
template <typename DataT, int VecLen, typename Lambda, typename IdxT>
RAFT_KERNEL scatterKernel(DataT* out, const DataT* in, const IdxT* idx, IdxT len, Lambda op)
{
  // Each thread handles VecLen consecutive output elements with vectorized
  // loads/stores: out[tid + i] = op(in[idx[tid + i]], tid + i).
  typedef TxN_t<DataT, VecLen> DataVec;
  typedef TxN_t<IdxT, VecLen> IdxVec;
  IdxT tid = threadIdx.x + ((IdxT)blockIdx.x * blockDim.x);
  tid *= VecLen;  // first element owned by this thread
  if (tid >= len) return;
  // Vectorized load of this thread's VecLen source positions.
  IdxVec idxIn;
  idxIn.load(idx, tid);
  DataVec dataIn;
#pragma unroll
  for (int i = 0; i < VecLen; ++i) {
    auto inPos = idxIn.val.data[i];
    // Read from the permuted position and apply the user-provided op.
    dataIn.val.data[i] = op(in[inPos], tid + i);
  }
  // Contiguous vectorized store to the output.
  dataIn.store(out, tid);
}
template <typename DataT, int VecLen, typename Lambda, typename IdxT, int TPB>
void scatterImpl(
  DataT* out, const DataT* in, const IdxT* idx, IdxT len, Lambda op, cudaStream_t stream)
{
  // Each thread consumes VecLen elements, so size the grid for len / VecLen
  // work items (or len when VecLen is 0).
  const IdxT work_items = VecLen ? len / VecLen : len;
  const IdxT n_blocks   = raft::ceildiv(work_items, static_cast<IdxT>(TPB));
  scatterKernel<DataT, VecLen, Lambda, IdxT><<<n_blocks, TPB, 0, stream>>>(out, in, idx, len, op);
  RAFT_CUDA_TRY(cudaGetLastError());
}
} // namespace raft::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/single_linkage_types.hpp | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/device_mdspan.hpp>
namespace raft::cluster::hierarchy {
/**
 * Determines the method for computing the minimum spanning tree (MST)
 * during single-linkage clustering.
 */
enum LinkageDistance {
  /**
   * Use a pairwise distance matrix as input to the mst. This
   * is very fast and the best option for fairly small datasets (~50k data points)
   */
  PAIRWISE = 0,
  /**
   * Construct a KNN graph as input to the mst and provide additional
   * edges if the mst does not converge. This is slower but scales
   * to very large datasets.
   */
  KNN_GRAPH = 1
};
}; // end namespace raft::cluster::hierarchy
// The code below is now considered legacy
namespace raft::cluster {
using hierarchy::LinkageDistance;
/**
 * Simple container object for consolidating linkage results. This closely
 * mirrors the trained instance variables populated in
 * Scikit-learn's AgglomerativeClustering estimator.
 * @tparam value_idx
 * @tparam value_t
 */
template <typename idx_t>
class linkage_output {
 public:
  idx_t m;                      // number of input samples
  idx_t n_clusters;             // number of clusters produced
  idx_t n_leaves;               // number of leaves in the dendrogram
  idx_t n_connected_components; // connected components in the input graph
  // TODO: These will be made private in a future release
  idx_t* labels;    // size: m
  idx_t* children;  // size: (m-1, 2)
  /** View over the per-sample cluster labels (length m). */
  raft::device_vector_view<idx_t> get_labels()
  {
    return raft::make_device_vector_view<idx_t>(labels, m);
  }
  /** View over the dendrogram merge table, shape (m - 1, 2). */
  raft::device_matrix_view<idx_t> get_children()
  {
    return raft::make_device_matrix_view<idx_t>(children, m - 1, 2);
  }
};
// Convenience concrete instantiations for the two supported index widths.
class linkage_output_int : public linkage_output<int> {};
class linkage_output_int64 : public linkage_output<int64_t> {};
}; // namespace raft::cluster
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/kmeans_balanced_types.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/cluster/kmeans_types.hpp>
#include <raft/core/logger.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/random/rng_state.hpp>
namespace raft::cluster::kmeans_balanced {
/**
 * Simple object to specify hyper-parameters to the balanced k-means algorithm.
 *
 * Inherits `metric` from kmeans_base_params. The following metrics are
 * currently supported in k-means balanced:
 * - InnerProduct
 * - L2Expanded
 * - L2SqrtExpanded
 */
struct kmeans_balanced_params : kmeans_base_params {
  /**
   * Number of training iterations
   */
  uint32_t n_iters = 20;
};
} // namespace raft::cluster::kmeans_balanced
namespace raft::cluster {
using kmeans_balanced::kmeans_balanced_params;
} // namespace raft::cluster
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/kmeans_deprecated.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/cluster/detail/kmeans_deprecated.cuh>
namespace raft {
namespace cluster {
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param residual On exit, residual sum of squares (sum of squares
* of distances between observation vectors and centroids).
* @param iters on exit, number of k-means iterations.
* @param seed random seed to be used.
* @return error flag
*/
template <typename index_type_t, typename value_type_t>
int kmeans(raft::resources const& handle,
           index_type_t n,
           index_type_t d,
           index_type_t k,
           value_type_t tol,
           index_type_t maxiter,
           const value_type_t* __restrict__ obs,
           index_type_t* __restrict__ codes,
           value_type_t& residual,
           index_type_t& iters,
           unsigned long long seed = 123456)
{
  // Thin wrapper: all of the work happens in the detail implementation.
  return detail::kmeans<index_type_t, value_type_t>(
    handle, n, d, k, tol, maxiter, obs, codes, residual, iters, seed);
}
} // namespace cluster
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/kmeans_types.hpp | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/logger.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/random/rng_state.hpp>
namespace raft::cluster {
/** Base structure for parameters that are common to all k-means algorithms */
struct kmeans_base_params {
  /**
   * Metric to use for distance computation. The supported metrics can vary per algorithm.
   */
  // Defaults to expanded L2 (see raft::distance::DistanceType for available metrics).
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded;
};
} // namespace raft::cluster
namespace raft::cluster::kmeans {
/**
* Simple object to specify hyper-parameters to the kmeans algorithm.
*/
struct KMeansParams : kmeans_base_params {
  /** Strategy used to pick the initial centroids. */
  enum InitMethod {
    /**
     * Sample the centroids using the kmeans++ strategy
     */
    KMeansPlusPlus,
    /**
     * Sample the centroids uniformly at random
     */
    Random,
    /**
     * User provides the array of initial centroids
     */
    Array
  };
  /**
   * The number of clusters to form as well as the number of centroids to generate (default:8).
   */
  int n_clusters = 8;
  /**
   * Method for initialization, defaults to k-means++:
   *  - InitMethod::KMeansPlusPlus (k-means++): Use scalable k-means++ algorithm
   * to select the initial cluster centers.
   *  - InitMethod::Random (random): Choose 'n_clusters' observations (rows) at
   * random from the input data for the initial centroids.
   *  - InitMethod::Array (ndarray): Use 'centroids' as initial cluster centers.
   */
  InitMethod init = KMeansPlusPlus;
  /**
   * Maximum number of iterations of the k-means algorithm for a single run.
   */
  int max_iter = 300;
  /**
   * Relative tolerance with regards to inertia to declare convergence.
   */
  double tol = 1e-4;
  /**
   * verbosity level.
   */
  // Initialized from the RAFT logger levels (see raft/core/logger.hpp).
  int verbosity = RAFT_LEVEL_INFO;
  /**
   * Seed to the random number generator.
   */
  raft::random::RngState rng_state{0};
  /**
   * Number of instance k-means algorithm will be run with different seeds.
   */
  int n_init = 1;
  /**
   * Oversampling factor for use in the k-means|| algorithm
   */
  double oversampling_factor = 2.0;
  // batch_samples and batch_centroids are used to tile the 1NN computation, which is
  // useful to optimize/control the memory footprint.
  // The default tile is [batch_samples x n_clusters], i.e. when batch_centroids is 0
  // the centroids are not tiled.
  int batch_samples = 1 << 15;
  /**
   * if 0 then batch_centroids = n_clusters
   */
  int batch_centroids = 0;  //
  // NOTE(review): presumably enables an extra convergence check based on the
  // inertia value each iteration — confirm against the detail implementation.
  bool inertia_check = false;
};
} // namespace raft::cluster::kmeans
namespace raft::cluster {
using kmeans::KMeansParams;
} // namespace raft::cluster
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/kmeans.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <optional>
#include <raft/cluster/detail/kmeans.cuh>
#include <raft/cluster/detail/kmeans_auto_find_k.cuh>
#include <raft/cluster/kmeans_types.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resource/cuda_stream.hpp>
namespace raft::cluster::kmeans {
/**
* Functor used for sampling centroids
*/
template <typename DataT, typename IndexT>
using SamplingOp = detail::SamplingOp<DataT, IndexT>;
/**
* Functor used to extract the index from a KeyValue pair
* storing both index and a distance.
*/
template <typename IndexT, typename DataT>
using KeyValueIndexOp = detail::KeyValueIndexOp<IndexT, DataT>;
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/cluster/kmeans.cuh>
* #include <raft/cluster/kmeans_types.hpp>
* using namespace raft::cluster;
* ...
 *   raft::resources handle;
 *   raft::cluster::KMeansParams params;
 *   int n_features = 15, n_iter;
 *   float inertia;
* auto centroids = raft::make_device_matrix<float, int>(handle, params.n_clusters, n_features);
*
* kmeans::fit(handle,
* params,
* X,
* std::nullopt,
* centroids,
* raft::make_scalar_view(&inertia),
* raft::make_scalar_view(&n_iter));
* @endcode
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
// Thin dispatcher: all validation and the actual fitting work happen in detail::kmeans_fit.
template <typename DataT, typename IndexT>
void fit(raft::resources const& handle,
         const KMeansParams& params,
         raft::device_matrix_view<const DataT, IndexT> X,
         std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
         raft::device_matrix_view<DataT, IndexT> centroids,
         raft::host_scalar_view<DataT> inertia,
         raft::host_scalar_view<IndexT> n_iter)
{
  detail::kmeans_fit<DataT, IndexT>(handle, params, X, sample_weight, centroids, inertia, n_iter);
}
/**
* @brief Predict the closest cluster each sample in X belongs to.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/cluster/kmeans.cuh>
* #include <raft/cluster/kmeans_types.hpp>
* using namespace raft::cluster;
* ...
 *   raft::resources handle;
 *   raft::cluster::KMeansParams params;
 *   int n_features = 15, n_iter;
 *   float inertia;
* auto centroids = raft::make_device_matrix<float, int>(handle, params.n_clusters, n_features);
*
* kmeans::fit(handle,
* params,
* X,
* std::nullopt,
* centroids.view(),
* raft::make_scalar_view(&inertia),
* raft::make_scalar_view(&n_iter));
* ...
* auto labels = raft::make_device_vector<int, int>(handle, X.extent(0));
*
 *   kmeans::predict(handle,
 *                   params,
 *                   X,
 *                   std::nullopt,
 *                   centroids.view(),
 *                   labels.view(),
 *                   false,
 *                   raft::make_scalar_view(&inertia));
* @endcode
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X New data to predict.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[in] centroids Cluster centroids. The data must be in
* row-major format.
* [dim = n_clusters x n_features]
* @param[in] normalize_weight True if the weights should be normalized
* @param[out] labels Index of the cluster each sample in X
* belongs to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to
* their closest cluster center.
*/
// Thin dispatcher into detail::kmeans_predict.
// Note the argument order: 'labels' comes before 'normalize_weight' in this signature.
template <typename DataT, typename IndexT>
void predict(raft::resources const& handle,
             const KMeansParams& params,
             raft::device_matrix_view<const DataT, IndexT> X,
             std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
             raft::device_matrix_view<const DataT, IndexT> centroids,
             raft::device_vector_view<IndexT, IndexT> labels,
             bool normalize_weight,
             raft::host_scalar_view<DataT> inertia)
{
  detail::kmeans_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, normalize_weight, inertia);
}
/**
* @brief Compute k-means clustering and predicts cluster index for each sample
* in the input.
*
* @code{.cpp}
* #include <raft/core/resources.hpp>
* #include <raft/cluster/kmeans.cuh>
* #include <raft/cluster/kmeans_types.hpp>
* using namespace raft::cluster;
* ...
 *   raft::resources handle;
 *   raft::cluster::KMeansParams params;
 *   int n_features = 15, n_iter;
 *   float inertia;
* auto centroids = raft::make_device_matrix<float, int>(handle, params.n_clusters, n_features);
* auto labels = raft::make_device_vector<int, int>(handle, X.extent(0));
*
* kmeans::fit_predict(handle,
* params,
* X,
* std::nullopt,
* centroids.view(),
* labels.view(),
* raft::make_scalar_view(&inertia),
* raft::make_scalar_view(&n_iter));
* @endcode
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must be
* in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids Optional
* [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] labels Index of the cluster each sample in X belongs
* to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
// Thin dispatcher into detail::kmeans_fit_predict (fit followed by predict on X).
// 'centroids' is optional: pass std::nullopt if the fitted centroids are not needed.
template <typename DataT, typename IndexT>
void fit_predict(raft::resources const& handle,
                 const KMeansParams& params,
                 raft::device_matrix_view<const DataT, IndexT> X,
                 std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                 std::optional<raft::device_matrix_view<DataT, IndexT>> centroids,
                 raft::device_vector_view<IndexT, IndexT> labels,
                 raft::host_scalar_view<DataT> inertia,
                 raft::host_scalar_view<IndexT> n_iter)
{
  detail::kmeans_fit_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, inertia, n_iter);
}
/**
* @brief Transform X to a cluster-distance space.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Cluster centroids. The data must be in row-major format.
* [dim = n_clusters x n_features]
* @param[out] X_new X transformed in the new space.
* [dim = n_samples x n_features]
*/
// Thin dispatcher into detail::kmeans_transform (mdspan overload).
template <typename DataT, typename IndexT>
void transform(raft::resources const& handle,
               const KMeansParams& params,
               raft::device_matrix_view<const DataT, IndexT> X,
               raft::device_matrix_view<const DataT, IndexT> centroids,
               raft::device_matrix_view<DataT, IndexT> X_new)
{
  detail::kmeans_transform<DataT, IndexT>(handle, params, X, centroids, X_new);
}
/**
 * @brief Transform X to a cluster-distance space (raw-pointer overload).
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 * @param[in]  handle     The raft handle.
 * @param[in]  params     Parameters for KMeans model.
 * @param[in]  X          Training instances, device pointer, row-major
 *                        [dim = n_samples x n_features]
 * @param[in]  centroids  Cluster centroids, device pointer, row-major
 *                        [dim = params.n_clusters x n_features]
 * @param[in]  n_samples  Number of rows in X
 * @param[in]  n_features Number of columns in X
 * @param[out] X_new      X transformed in the new space, device pointer
 */
template <typename DataT, typename IndexT>
void transform(raft::resources const& handle,
               const KMeansParams& params,
               const DataT* X,
               const DataT* centroids,
               IndexT n_samples,
               IndexT n_features,
               DataT* X_new)
{
  detail::kmeans_transform<DataT, IndexT>(
    handle, params, X, centroids, n_samples, n_features, X_new);
}
/**
* Automatically find the optimal value of k using a binary search.
* This method maximizes the Calinski-Harabasz Index while minimizing the per-cluster inertia.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <raft/cluster/kmeans.cuh>
* #include <raft/cluster/kmeans_types.hpp>
*
* #include <raft/random/make_blobs.cuh>
*
* using namespace raft::cluster;
*
* raft::handle_t handle;
* int n_samples = 100, n_features = 15, n_clusters = 10;
* auto X = raft::make_device_matrix<float, int>(handle, n_samples, n_features);
* auto labels = raft::make_device_vector<float, int>(handle, n_samples);
*
* raft::random::make_blobs(handle, X, labels, n_clusters);
*
* auto best_k = raft::make_host_scalar<int>(0);
* auto n_iter = raft::make_host_scalar<int>(0);
* auto inertia = raft::make_host_scalar<int>(0);
*
* kmeans::find_k(handle, X, best_k.view(), inertia.view(), n_iter.view(), n_clusters+1);
*
* @endcode
*
* @tparam idx_t indexing type (should be integral)
* @tparam value_t value type (should be floating point)
* @param handle raft handle
* @param X input observations (shape n_samples, n_dims)
* @param best_k best k found from binary search
* @param inertia inertia of best k found
* @param n_iter number of iterations used to find best k
* @param kmax maximum k to try in search
* @param kmin minimum k to try in search (should be >= 1)
* @param maxiter maximum number of iterations to run
* @param tol tolerance for early stopping convergence
*/
// Thin dispatcher into detail::find_k (binary search over k in [kmin, kmax]).
template <typename idx_t, typename value_t>
void find_k(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t> X,
            raft::host_scalar_view<idx_t> best_k,
            raft::host_scalar_view<value_t> inertia,
            raft::host_scalar_view<idx_t> n_iter,
            idx_t kmax,
            idx_t kmin    = 1,
            idx_t maxiter = 100,
            value_t tol   = 1e-3)
{
  detail::find_k(handle, X, best_k, inertia, n_iter, kmax, kmin, maxiter, tol);
}
/**
* @brief Select centroids according to a sampling operation
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] isSampleCentroid Flag the sample chosen as initial centroid
* [dim = n_samples]
* @param[in] select_op The sampling operation used to select the centroids
* @param[out] inRankCp The sampled centroids
* [dim = n_selected_centroids x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
// Thin dispatcher into detail::sampleCentroids. 'inRankCp' and 'workspace' are
// resizable scratch buffers owned by the caller and may be grown by the detail layer.
template <typename DataT, typename IndexT>
void sample_centroids(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT> X,
                      raft::device_vector_view<DataT, IndexT> minClusterDistance,
                      raft::device_vector_view<std::uint8_t, IndexT> isSampleCentroid,
                      SamplingOp<DataT, IndexT>& select_op,
                      rmm::device_uvector<DataT>& inRankCp,
                      rmm::device_uvector<char>& workspace)
{
  detail::sampleCentroids<DataT, IndexT>(
    handle, X, minClusterDistance, isSampleCentroid, select_op, inRankCp, workspace);
}
/**
* @brief Compute cluster cost
*
* @tparam DataT the type of data used for weights, distances.
* @tparam ReductionOpT the type of data used for the reduction operation.
*
* @param[in] handle The raft handle
* @param[in] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] clusterCost Resulting cluster cost
* @param[in] reduction_op The reduction operation used for the cost
*
*/
// Thin dispatcher into detail::computeClusterCost. The per-element transform is
// fixed to raft::identity_op here; only the reduction operation is configurable.
template <typename DataT, typename IndexT, typename ReductionOpT>
void cluster_cost(raft::resources const& handle,
                  raft::device_vector_view<DataT, IndexT> minClusterDistance,
                  rmm::device_uvector<char>& workspace,
                  raft::device_scalar_view<DataT> clusterCost,
                  ReductionOpT reduction_op)
{
  detail::computeClusterCost(
    handle, minClusterDistance, workspace, clusterCost, raft::identity_op{}, reduction_op);
}
/**
* @brief Update centroids given current centroids and number of points assigned to each centroid.
* This function also produces a vector of RAFT key/value pairs containing the cluster assignment
* for each point and its distance.
*
* @tparam DataT
* @tparam IndexT
* @param[in] handle: Raft handle to use for managing library resources
* @param[in] X: input matrix (size n_samples, n_features)
* @param[in] sample_weights: number of samples currently assigned to each centroid (size n_samples)
* @param[in] centroids: matrix of current centroids (size n_clusters, n_features)
* @param[in] labels: Iterator of labels (can also be a raw pointer)
* @param[out] weight_per_cluster: sum of sample weights per cluster (size n_clusters)
* @param[out] new_centroids: output matrix of updated centroids (size n_clusters, n_features)
*/
// Thin dispatcher into detail::update_centroids. Allocates a zero-sized scratch
// buffer on the handle's stream; the detail layer resizes it as needed (see the
// TODO below about why it is not worth accepting it as a parameter yet).
template <typename DataT, typename IndexT, typename LabelsIterator>
void update_centroids(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT, row_major> X,
                      raft::device_vector_view<const DataT, IndexT> sample_weights,
                      raft::device_matrix_view<const DataT, IndexT, row_major> centroids,
                      LabelsIterator labels,
                      raft::device_vector_view<DataT, IndexT> weight_per_cluster,
                      raft::device_matrix_view<DataT, IndexT, row_major> new_centroids)
{
  // TODO: Passing these into the algorithm doesn't really present much of a benefit
  // because they are being resized anyways.
  // ref https://github.com/rapidsai/raft/issues/930
  rmm::device_uvector<char> workspace(0, resource::get_cuda_stream(handle));
  detail::update_centroids<DataT, IndexT>(
    handle, X, sample_weights, centroids, labels, weight_per_cluster, new_centroids, workspace);
}
/**
* @brief Compute distance for every sample to it's nearest centroid
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] metric Distance metric to use
* @param[in] batch_samples batch size for input data samples
* @param[in] batch_centroids batch size for input centroids
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
// Thin dispatcher into detail::minClusterDistanceCompute. Batch sizes control how
// the n_samples x n_clusters pairwise-distance computation is tiled (see KMeansParams
// batch_samples / batch_centroids for the same knobs at the algorithm level).
template <typename DataT, typename IndexT>
void min_cluster_distance(raft::resources const& handle,
                          raft::device_matrix_view<const DataT, IndexT> X,
                          raft::device_matrix_view<DataT, IndexT> centroids,
                          raft::device_vector_view<DataT, IndexT> minClusterDistance,
                          raft::device_vector_view<DataT, IndexT> L2NormX,
                          rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
                          raft::distance::DistanceType metric,
                          int batch_samples,
                          int batch_centroids,
                          rmm::device_uvector<char>& workspace)
{
  detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                   X,
                                                   centroids,
                                                   minClusterDistance,
                                                   L2NormX,
                                                   L2NormBuf_OR_DistBuf,
                                                   metric,
                                                   batch_samples,
                                                   batch_centroids,
                                                   workspace);
}
/**
* @brief Calculates a <key, value> pair for every sample in input 'X' where key is an
* index of one of the 'centroids' (index of the nearest centroid) and 'value'
* is the distance between the sample and the 'centroid[key]'
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterAndDistance Distance vector that contains for every sample, the nearest
* centroid and it's distance
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] metric distance metric
* @param[in] batch_samples batch size of data samples
* @param[in] batch_centroids batch size of centroids
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
// Thin dispatcher into detail::minClusterAndDistanceCompute. Like
// min_cluster_distance, but also records the index of the nearest centroid as a
// raft::KeyValuePair<index, distance> per sample.
template <typename DataT, typename IndexT>
void min_cluster_and_distance(
  raft::resources const& handle,
  raft::device_matrix_view<const DataT, IndexT> X,
  raft::device_matrix_view<const DataT, IndexT> centroids,
  raft::device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT> minClusterAndDistance,
  raft::device_vector_view<DataT, IndexT> L2NormX,
  rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
  raft::distance::DistanceType metric,
  int batch_samples,
  int batch_centroids,
  rmm::device_uvector<char>& workspace)
{
  detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                      X,
                                                      centroids,
                                                      minClusterAndDistance,
                                                      L2NormX,
                                                      L2NormBuf_OR_DistBuf,
                                                      metric,
                                                      batch_samples,
                                                      batch_centroids,
                                                      workspace);
}
/**
* @brief Shuffle and randomly select 'n_samples_to_gather' from input 'in' and stores
* in 'out' does not modify the input
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] in The data to shuffle and gather
* [dim = n_samples x n_features]
* @param[out] out The sampled data
* [dim = n_samples_to_gather x n_features]
* @param[in] n_samples_to_gather Number of sample to gather
* @param[in] seed Seed for the shuffle
*
*/
// Thin dispatcher into detail::shuffleAndGather; 'in' is left unmodified.
template <typename DataT, typename IndexT>
void shuffle_and_gather(raft::resources const& handle,
                        raft::device_matrix_view<const DataT, IndexT> in,
                        raft::device_matrix_view<DataT, IndexT> out,
                        uint32_t n_samples_to_gather,
                        uint64_t seed)
{
  detail::shuffleAndGather<DataT, IndexT>(handle, in, out, n_samples_to_gather, seed);
}
/**
* @brief Count the number of samples in each cluster
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] sampleCountInCluster The count for each centroid
* [dim = n_cluster]
*
*/
// Thin dispatcher into detail::countSamplesInCluster.
template <typename DataT, typename IndexT>
void count_samples_in_cluster(raft::resources const& handle,
                              const KMeansParams& params,
                              raft::device_matrix_view<const DataT, IndexT> X,
                              raft::device_vector_view<DataT, IndexT> L2NormX,
                              raft::device_matrix_view<DataT, IndexT> centroids,
                              rmm::device_uvector<char>& workspace,
                              raft::device_vector_view<DataT, IndexT> sampleCountInCluster)
{
  detail::countSamplesInCluster<DataT, IndexT>(
    handle, params, X, L2NormX, centroids, workspace, sampleCountInCluster);
}
/**
* @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm.
*
* @see "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S.
* ACM-SIAM symposium on Discrete algorithms.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[out] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*/
// Thin dispatcher into detail::kmeansPlusPlus (k-means++ seeding of 'centroids').
template <typename DataT, typename IndexT>
void init_plus_plus(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<DataT, IndexT> centroids,
                    rmm::device_uvector<char>& workspace)
{
  detail::kmeansPlusPlus<DataT, IndexT>(handle, params, X, centroids, workspace);
}
/*
* @brief Main function used to fit KMeans (after cluster initialization)
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids [in] Initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
* @param[in] workspace Temporary workspace buffer which can get resized
*/
// Thin dispatcher into detail::kmeans_fit_main: runs the Lloyd iterations assuming
// 'centroids' already holds the initial cluster centers (no initialization here).
template <typename DataT, typename IndexT>
void fit_main(raft::resources const& handle,
              const KMeansParams& params,
              raft::device_matrix_view<const DataT, IndexT> X,
              raft::device_vector_view<const DataT, IndexT> sample_weights,
              raft::device_matrix_view<DataT, IndexT> centroids,
              raft::host_scalar_view<DataT> inertia,
              raft::host_scalar_view<IndexT> n_iter,
              rmm::device_uvector<char>& workspace)
{
  detail::kmeans_fit_main<DataT, IndexT>(
    handle, params, X, sample_weights, centroids, inertia, n_iter, workspace);
}
}; // end namespace raft::cluster::kmeans
namespace raft::cluster {
/**
* Note: All of the functions below in raft::cluster are deprecated and will
* be removed in a future release. Please use raft::cluster::kmeans instead.
*/
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers.
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
// Deprecated alias for raft::cluster::kmeans::fit (see note at the top of this namespace).
template <typename DataT, typename IndexT = int>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                raft::device_matrix_view<const DataT, IndexT> X,
                std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                raft::device_matrix_view<DataT, IndexT> centroids,
                raft::host_scalar_view<DataT> inertia,
                raft::host_scalar_view<IndexT> n_iter)
{
  kmeans::fit<DataT, IndexT>(handle, params, X, sample_weight, centroids, inertia, n_iter);
}
/**
 * @brief Deprecated raw-pointer overload of kmeans_fit.
 *
 * @param[in]    handle        The raft handle.
 * @param[in]    params        Parameters for KMeans model.
 * @param[in]    X             Training instances, device pointer, row-major
 *                             [dim = n_samples x n_features]
 * @param[in]    sample_weight Weights for each observation in X [len = n_samples]
 * @param[inout] centroids     Initial/fitted cluster centers [dim = n_clusters x n_features]
 * @param[in]    n_samples     Number of rows in X
 * @param[in]    n_features    Number of columns in X
 * @param[out]   inertia       Sum of squared distances to closest cluster center
 * @param[out]   n_iter        Number of iterations run
 */
// NOTE(review): forwards to a raw-pointer overload of kmeans::fit that is not
// visible in this portion of the header — confirm it is declared above.
template <typename DataT, typename IndexT = int>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                const DataT* X,
                const DataT* sample_weight,
                DataT* centroids,
                IndexT n_samples,
                IndexT n_features,
                DataT& inertia,
                IndexT& n_iter)
{
  kmeans::fit<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, n_samples, n_features, inertia, n_iter);
}
/**
* @brief Predict the closest cluster each sample in X belongs to.
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X New data to predict.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[in] centroids Cluster centroids. The data must be in
* row-major format.
* [dim = n_clusters x n_features]
* @param[in] normalize_weight True if the weights should be normalized
* @param[out] labels Index of the cluster each sample in X
* belongs to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to
* their closest cluster center.
*/
// Deprecated alias for raft::cluster::kmeans::predict.
template <typename DataT, typename IndexT = int>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                    raft::device_matrix_view<const DataT, IndexT> centroids,
                    raft::device_vector_view<IndexT, IndexT> labels,
                    bool normalize_weight,
                    raft::host_scalar_view<DataT> inertia)
{
  kmeans::predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, normalize_weight, inertia);
}
/**
 * @brief Deprecated raw-pointer overload of kmeans_predict.
 *
 * @param[in]  handle           The raft handle.
 * @param[in]  params           Parameters for KMeans model.
 * @param[in]  X                Data to predict, device pointer, row-major
 *                              [dim = n_samples x n_features]
 * @param[in]  sample_weight    Weights for each observation in X [len = n_samples]
 * @param[in]  centroids        Cluster centroids [dim = n_clusters x n_features]
 * @param[in]  n_samples        Number of rows in X
 * @param[in]  n_features       Number of columns in X
 * @param[out] labels           Cluster index per sample [len = n_samples]
 * @param[in]  normalize_weight True if the weights should be normalized
 * @param[out] inertia          Sum of squared distances to closest cluster center
 */
// NOTE(review): forwards to a raw-pointer overload of kmeans::predict that is not
// visible in this portion of the header — confirm it is declared above.
template <typename DataT, typename IndexT = int>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    const DataT* X,
                    const DataT* sample_weight,
                    const DataT* centroids,
                    IndexT n_samples,
                    IndexT n_features,
                    IndexT* labels,
                    bool normalize_weight,
                    DataT& inertia)
{
  kmeans::predict<DataT, IndexT>(handle,
                                 params,
                                 X,
                                 sample_weight,
                                 centroids,
                                 n_samples,
                                 n_features,
                                 labels,
                                 normalize_weight,
                                 inertia);
}
/**
* @brief Compute k-means clustering and predicts cluster index for each sample
* in the input.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must be
* in row-major format.
* [dim = n_samples x n_features]
* @param[in] sample_weight Optional weights for each observation in X.
* [len = n_samples]
* @param[inout] centroids Optional
* [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers
* [out] The generated centroids from the
* kmeans algorithm are stored at the address
* pointed by 'centroids'.
* [dim = n_clusters x n_features]
* @param[out] labels Index of the cluster each sample in X belongs
* to.
* [len = n_samples]
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
// Deprecated alias for raft::cluster::kmeans::fit_predict.
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        raft::device_matrix_view<const DataT, IndexT> X,
                        std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                        std::optional<raft::device_matrix_view<DataT, IndexT>> centroids,
                        raft::device_vector_view<IndexT, IndexT> labels,
                        raft::host_scalar_view<DataT> inertia,
                        raft::host_scalar_view<IndexT> n_iter)
{
  kmeans::fit_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, labels, inertia, n_iter);
}
/**
 * @brief Deprecated raw-pointer overload of kmeans_fit_predict.
 *
 * @param[in]    handle        The raft handle.
 * @param[in]    params        Parameters for KMeans model.
 * @param[in]    X             Training instances, device pointer, row-major
 *                             [dim = n_samples x n_features]
 * @param[in]    sample_weight Weights for each observation in X [len = n_samples]
 * @param[inout] centroids     Initial/fitted cluster centers [dim = n_clusters x n_features]
 * @param[in]    n_samples     Number of rows in X
 * @param[in]    n_features    Number of columns in X
 * @param[out]   labels        Cluster index per sample [len = n_samples]
 * @param[out]   inertia       Sum of squared distances to closest cluster center
 * @param[out]   n_iter        Number of iterations run
 */
// NOTE(review): forwards to a raw-pointer overload of kmeans::fit_predict that is
// not visible in this portion of the header — confirm it is declared above.
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        const DataT* X,
                        const DataT* sample_weight,
                        DataT* centroids,
                        IndexT n_samples,
                        IndexT n_features,
                        IndexT* labels,
                        DataT& inertia,
                        IndexT& n_iter)
{
  kmeans::fit_predict<DataT, IndexT>(
    handle, params, X, sample_weight, centroids, n_samples, n_features, labels, inertia, n_iter);
}
/**
* @brief Transform X to a cluster-distance space.
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Cluster centroids. The data must be in row-major format.
* [dim = n_clusters x n_features]
* @param[out] X_new X transformed in the new space.
* [dim = n_samples x n_features]
*/
// Deprecated alias for raft::cluster::kmeans::transform (mdspan overload).
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      raft::device_matrix_view<const DataT, IndexT> X,
                      raft::device_matrix_view<const DataT, IndexT> centroids,
                      raft::device_matrix_view<DataT, IndexT> X_new)
{
  kmeans::transform<DataT, IndexT>(handle, params, X, centroids, X_new);
}
/**
 * @brief Deprecated raw-pointer overload of kmeans_transform.
 *
 * @param[in]  handle     The raft handle.
 * @param[in]  params     Parameters for KMeans model.
 * @param[in]  X          Training instances, device pointer, row-major
 *                        [dim = n_samples x n_features]
 * @param[in]  centroids  Cluster centroids [dim = n_clusters x n_features]
 * @param[in]  n_samples  Number of rows in X
 * @param[in]  n_features Number of columns in X
 * @param[out] X_new      X transformed in the new space, device pointer
 */
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      const DataT* X,
                      const DataT* centroids,
                      IndexT n_samples,
                      IndexT n_features,
                      DataT* X_new)
{
  kmeans::transform<DataT, IndexT>(handle, params, X, centroids, n_samples, n_features, X_new);
}
// Backward-compatible alias for the sampling functor used by sampleCentroids.
template <typename DataT, typename IndexT>
using SamplingOp = kmeans::SamplingOp<DataT, IndexT>;
// Backward-compatible alias for the key-value index extraction functor.
template <typename IndexT, typename DataT>
using KeyValueIndexOp = kmeans::KeyValueIndexOp<IndexT, DataT>;
/**
* @brief Select centroids according to a sampling operation
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] isSampleCentroid Flag the sample chosen as initial centroid
* [dim = n_samples]
* @param[in] select_op The sampling operation used to select the centroids
* @param[out] inRankCp The sampled centroids
* [dim = n_selected_centroids x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void sampleCentroids(raft::resources const& handle,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<DataT, IndexT> minClusterDistance,
                     raft::device_vector_view<std::uint8_t, IndexT> isSampleCentroid,
                     SamplingOp<DataT, IndexT>& select_op,
                     rmm::device_uvector<DataT>& inRankCp,
                     rmm::device_uvector<char>& workspace)
{
  // Thin forwarding wrapper; all work happens in kmeans::sample_centroids.
  kmeans::sample_centroids<DataT, IndexT>(
    handle, X, minClusterDistance, isSampleCentroid, select_op, inRankCp, workspace);
}
/**
* @brief Compute cluster cost
*
* @tparam DataT the type of data used for weights, distances.
* @tparam ReductionOpT the type of data used for the reduction operation.
*
* @param[in] handle The raft handle
* @param[in] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] clusterCost Resulting cluster cost
* @param[in] reduction_op The reduction operation used for the cost
*
*/
// @tparam IndexT the type of data used for indexing (not documented in the block above).
template <typename DataT, typename IndexT, typename ReductionOpT>
void computeClusterCost(raft::resources const& handle,
                        raft::device_vector_view<DataT, IndexT> minClusterDistance,
                        rmm::device_uvector<char>& workspace,
                        raft::device_scalar_view<DataT> clusterCost,
                        ReductionOpT reduction_op)
{
  // Thin forwarding wrapper; all work happens in kmeans::cluster_cost.
  kmeans::cluster_cost(handle, minClusterDistance, workspace, clusterCost, reduction_op);
}
/**
* @brief Compute distance for every sample to it's nearest centroid
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterDistance Distance for every sample to it's nearest centroid
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void minClusterDistanceCompute(raft::resources const& handle,
                               const KMeansParams& params,
                               raft::device_matrix_view<const DataT, IndexT> X,
                               raft::device_matrix_view<DataT, IndexT> centroids,
                               raft::device_vector_view<DataT, IndexT> minClusterDistance,
                               raft::device_vector_view<DataT, IndexT> L2NormX,
                               rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
                               rmm::device_uvector<char>& workspace)
{
  // Forward to the implementation, unpacking the distance metric and batching
  // sizes from the KMeansParams struct.
  kmeans::min_cluster_distance<DataT, IndexT>(handle,
                                              X,
                                              centroids,
                                              minClusterDistance,
                                              L2NormX,
                                              L2NormBuf_OR_DistBuf,
                                              params.metric,
                                              params.batch_samples,
                                              params.batch_centroids,
                                              workspace);
}
/**
* @brief Calculates a <key, value> pair for every sample in input 'X' where key is an
* index of one of the 'centroids' (index of the nearest centroid) and 'value'
* is the distance between the sample and the 'centroid[key]'
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[out] minClusterAndDistance Distance vector that contains for every sample, the nearest
* centroid and it's distance
* [dim = n_samples]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[out] L2NormBuf_OR_DistBuf Resizable buffer to store L2 norm of centroids or distance
* matrix
* @param[in] workspace Temporary workspace buffer which can get resized
*
*/
template <typename DataT, typename IndexT>
void minClusterAndDistanceCompute(
  raft::resources const& handle,
  const KMeansParams& params,
  raft::device_matrix_view<const DataT, IndexT> X,
  raft::device_matrix_view<const DataT, IndexT> centroids,
  raft::device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT> minClusterAndDistance,
  raft::device_vector_view<DataT, IndexT> L2NormX,
  rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
  rmm::device_uvector<char>& workspace)
{
  // Forward to the implementation, unpacking the distance metric and batching
  // sizes from the KMeansParams struct.
  kmeans::min_cluster_and_distance<DataT, IndexT>(handle,
                                                  X,
                                                  centroids,
                                                  minClusterAndDistance,
                                                  L2NormX,
                                                  L2NormBuf_OR_DistBuf,
                                                  params.metric,
                                                  params.batch_samples,
                                                  params.batch_centroids,
                                                  workspace);
}
/**
* @brief Shuffle and randomly select 'n_samples_to_gather' from input 'in' and stores
* in 'out' does not modify the input
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] in The data to shuffle and gather
* [dim = n_samples x n_features]
* @param[out] out The sampled data
* [dim = n_samples_to_gather x n_features]
* @param[in] n_samples_to_gather Number of sample to gather
* @param[in] seed Seed for the shuffle
*
*/
template <typename DataT, typename IndexT>
void shuffleAndGather(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT> in,
                      raft::device_matrix_view<DataT, IndexT> out,
                      uint32_t n_samples_to_gather,
                      uint64_t seed)
{
  // Thin forwarding wrapper; the input matrix is not modified (see block above).
  kmeans::shuffle_and_gather<DataT, IndexT>(handle, in, out, n_samples_to_gather, seed);
}
/**
* @brief Count the number of samples in each cluster
*
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
*
* @param[in] handle The raft handle
* @param[in] params The parameters for KMeans
* @param[in] X The data in row-major format
* [dim = n_samples x n_features]
* @param[in] L2NormX L2 norm of X : ||x||^2
* [dim = n_samples]
* @param[in] centroids Centroids data
* [dim = n_cluster x n_features]
* @param[in] workspace Temporary workspace buffer which can get resized
* @param[out] sampleCountInCluster The count for each centroid
* [dim = n_cluster]
*
*/
template <typename DataT, typename IndexT>
void countSamplesInCluster(raft::resources const& handle,
                           const KMeansParams& params,
                           raft::device_matrix_view<const DataT, IndexT> X,
                           raft::device_vector_view<DataT, IndexT> L2NormX,
                           raft::device_matrix_view<DataT, IndexT> centroids,
                           rmm::device_uvector<char>& workspace,
                           raft::device_vector_view<DataT, IndexT> sampleCountInCluster)
{
  // Thin forwarding wrapper; all work happens in kmeans::count_samples_in_cluster.
  kmeans::count_samples_in_cluster<DataT, IndexT>(
    handle, params, X, L2NormX, centroids, workspace, sampleCountInCluster);
}
/**
 * @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm.
 * @note This is the algorithm described in
 * "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S.
 *  ACM-SIAM symposium on Discrete algorithms.
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 *
 * @param[in]  handle           The raft handle
 * @param[in]  params           The parameters for KMeans
 * @param[in]  X                The data in row-major format
 *                              [dim = n_samples x n_features]
 * @param[out] centroidsRawData Centroids data
 *                              [dim = n_cluster x n_features]
 * @param[in]  workspace        Temporary workspace buffer which can get resized
 */
template <typename DataT, typename IndexT>
void kmeansPlusPlus(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                    rmm::device_uvector<char>& workspace)
{
  // Thin forwarding wrapper; the kmeans++ seeding itself is in kmeans::init_plus_plus.
  kmeans::init_plus_plus<DataT, IndexT>(handle, params, X, centroidsRawData, workspace);
}
/**
 * @brief Main function used to fit KMeans (after cluster initialization)
 *
 * @tparam DataT the type of data used for weights, distances.
 * @tparam IndexT the type of data used for indexing.
 *
 * @param[in]     handle           The raft handle.
 * @param[in]     params           Parameters for KMeans model.
 * @param[in]     X                Training instances to cluster. The data must
 *                                 be in row-major format.
 *                                 [dim = n_samples x n_features]
 * @param[in]     weight           Weights for each observation in X.
 *                                 [len = n_samples]
 * @param[inout]  centroidsRawData [in] Initial cluster centers.
 *                                 [out] The generated centroids from the
 *                                 kmeans algorithm are stored at the address
 *                                 pointed by 'centroidsRawData'.
 *                                 [dim = n_clusters x n_features]
 * @param[out]    inertia          Sum of squared distances of samples to their
 *                                 closest cluster center.
 * @param[out]    n_iter           Number of iterations run.
 * @param[in]     workspace        Temporary workspace buffer which can get resized
 */
template <typename DataT, typename IndexT>
void kmeans_fit_main(raft::resources const& handle,
                     const KMeansParams& params,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<const DataT, IndexT> weight,
                     raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                     raft::host_scalar_view<DataT> inertia,
                     raft::host_scalar_view<IndexT> n_iter,
                     rmm::device_uvector<char>& workspace)
{
  // Thin forwarding wrapper; the Lloyd iterations are implemented in kmeans::fit_main.
  kmeans::fit_main<DataT, IndexT>(
    handle, params, X, weight, centroidsRawData, inertia, n_iter, workspace);
}
}; // namespace raft::cluster
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/single_linkage.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/cluster/detail/single_linkage.cuh>
#include <raft/cluster/single_linkage_types.hpp>
#include <raft/core/device_mdspan.hpp>
namespace raft::cluster {
/**
* Note: All of the functions below in the raft::cluster namespace are deprecated
* and will be removed in a future release. Please use raft::cluster::hierarchy
* instead.
*/
/**
* Single-linkage clustering, capable of constructing a KNN graph to
* scale the algorithm beyond the n^2 memory consumption of implementations
* that use the fully-connected graph of pairwise distances by connecting
* a knn graph when k is not large enough to connect it.
* @tparam value_idx
* @tparam value_t
* @tparam dist_type method to use for constructing connectivities graph
* @param[in] handle raft handle
* @param[in] X dense input matrix in row-major layout
* @param[in] m number of rows in X
* @param[in] n number of columns in X
 * @param[in] metric distance metric to use when constructing connectivities graph
* @param[out] out struct containing output dendrogram and cluster assignments
* @param[in] c a constant used when constructing connectivities from knn graph. Allows the indirect
control
* of k. The algorithm will set `k = log(n) + c`
* @param[in] n_clusters number of clusters to assign data samples
*/
template <typename value_idx,
          typename value_t,
          LinkageDistance dist_type = LinkageDistance::KNN_GRAPH>
void single_linkage(raft::resources const& handle,
                    const value_t* X,
                    size_t m,
                    size_t n,
                    raft::distance::DistanceType metric,
                    linkage_output<value_idx>* out,
                    int c,
                    size_t n_clusters)
{
  // Deprecated entry point (see the note above); delegates directly to the
  // detail implementation.
  detail::single_linkage<value_idx, value_t, dist_type>(
    handle, X, m, n, metric, out, c, n_clusters);
}
}; // namespace raft::cluster
namespace raft::cluster::hierarchy {
constexpr int DEFAULT_CONST_C = 15;
/**
* Single-linkage clustering, capable of constructing a KNN graph to
* scale the algorithm beyond the n^2 memory consumption of implementations
* that use the fully-connected graph of pairwise distances by connecting
* a knn graph when k is not large enough to connect it.
* @tparam value_idx
* @tparam value_t
* @tparam dist_type method to use for constructing connectivities graph
* @param[in] handle raft handle
* @param[in] X dense input matrix in row-major layout
* @param[out] dendrogram output dendrogram (size [n_rows - 1] * 2)
* @param[out] labels output labels vector (size n_rows)
 * @param[in] metric distance metric to use when constructing connectivities graph
* @param[in] n_clusters number of clusters to assign data samples
* @param[in] c a constant used when constructing connectivities from knn graph. Allows the indirect
control of k. The algorithm will set `k = log(n) + c`
*/
template <typename value_t, typename idx_t, LinkageDistance dist_type = LinkageDistance::KNN_GRAPH>
void single_linkage(raft::resources const& handle,
                    raft::device_matrix_view<const value_t, idx_t, row_major> X,
                    raft::device_matrix_view<idx_t, idx_t, row_major> dendrogram,
                    raft::device_vector_view<idx_t, idx_t> labels,
                    raft::distance::DistanceType metric,
                    size_t n_clusters,
                    std::optional<int> c = std::make_optional<int>(DEFAULT_CONST_C))
{
  // Adapt the mdspan outputs to the raw-pointer output struct expected by the
  // deprecated raft::cluster::single_linkage entry point.
  linkage_output<idx_t> output;
  output.children = dendrogram.data_handle();
  output.labels   = labels.data_handle();

  const auto n_rows = static_cast<std::size_t>(X.extent(0));
  const auto n_cols = static_cast<std::size_t>(X.extent(1));

  raft::cluster::single_linkage<idx_t, value_t, dist_type>(handle,
                                                           X.data_handle(),
                                                           n_rows,
                                                           n_cols,
                                                           metric,
                                                           &output,
                                                           c.value_or(DEFAULT_CONST_C),
                                                           n_clusters);
}
}; // namespace raft::cluster::hierarchy
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/specializations.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#pragma message( \
__FILE__ \
" is deprecated and will be removed." \
" Including specializations is not necessary any more." \
" For more information, see: https://docs.rapids.ai/api/raft/nightly/using_libraft.html")
| 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/cluster/kmeans_balanced.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/device_memory_resource.hpp>
#include <utility>
#include <raft/cluster/detail/kmeans_balanced.cuh>
#include <raft/core/mdarray.hpp>
#include <raft/util/cuda_utils.cuh>
namespace raft::cluster::kmeans_balanced {
/**
* @brief Find clusters of balanced sizes with a hierarchical k-means algorithm.
*
* This variant of the k-means algorithm first clusters the dataset in mesoclusters, then clusters
* the subsets associated to each mesocluster into fine clusters, and finally runs a few k-means
* iterations over the whole dataset and with all the centroids to obtain the final clusters.
*
* Each k-means iteration applies expectation-maximization-balancing:
* - Balancing: adjust centers for clusters that have a small number of entries. If the size of a
* cluster is below a threshold, the center is moved towards a bigger cluster.
* - Expectation: predict the labels (i.e find closest cluster centroid to each point)
* - Maximization: calculate optimal centroids (i.e find the center of gravity of each cluster)
*
* The number of mesoclusters is chosen by rounding the square root of the number of clusters. E.g
* for 512 clusters, we would have 23 mesoclusters. The number of fine clusters per mesocluster is
* chosen proportionally to the number of points in each mesocluster.
*
* This variant of k-means uses random initialization and a fixed number of iterations, though
* iterations can be repeated if the balancing step moved the centroids.
*
* Additionally, this algorithm supports quantized datasets in arbitrary types but the core part of
* the algorithm will work with a floating-point type, hence a conversion function can be provided
* to map the data type to the math type.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <raft/cluster/kmeans_balanced.cuh>
* #include <raft/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* raft::cluster::kmeans_balanced_params params;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* raft::cluster::kmeans_balanced::fit(handle, params, X, centroids.view());
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Training instances to cluster. The data must be in row-major format.
* [dim = n_samples x n_features]
* @param[out] centroids The generated centroids [dim = n_clusters x n_features]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the arithmetic
* datatype. If DataT == MathT, this must be the identity.
*/
template <typename DataT, typename MathT, typename IndexT, typename MappingOpT = raft::identity_op>
void fit(const raft::resources& handle,
         kmeans_balanced_params const& params,
         raft::device_matrix_view<const DataT, IndexT> X,
         raft::device_matrix_view<MathT, IndexT> centroids,
         MappingOpT mapping_op = raft::identity_op())
{
  // Validate input shapes before dispatching to the implementation.
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // The implementation indexes the flattened dataset with IndexT, so the total
  // number of elements must fit in that type.
  RAFT_EXPECTS(static_cast<uint64_t>(X.extent(0)) * static_cast<uint64_t>(X.extent(1)) <=
                 static_cast<uint64_t>(std::numeric_limits<IndexT>::max()),
               "The chosen index type cannot represent all indices for the given dataset");
  RAFT_EXPECTS(centroids.extent(0) > IndexT{0} && centroids.extent(0) <= X.extent(0),
               "The number of centroids must be strictly positive and cannot exceed the number of "
               "points in the training dataset.");
  // Dispatch to the hierarchical (meso/fine cluster) balanced k-means implementation.
  detail::build_hierarchical(handle,
                             params,
                             X.extent(1),
                             X.data_handle(),
                             X.extent(0),
                             centroids.data_handle(),
                             centroids.extent(0),
                             mapping_op);
}
/**
* @brief Predict the closest cluster each sample in X belongs to.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <raft/cluster/kmeans_balanced.cuh>
* #include <raft/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* raft::cluster::kmeans_balanced_params params;
* auto labels = raft::make_device_vector<float, int>(handle, n_rows);
* raft::cluster::kmeans_balanced::predict(handle, params, X, centroids, labels);
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Dataset for which to infer the closest clusters.
* [dim = n_samples x n_features]
* @param[in] centroids The input centroids [dim = n_clusters x n_features]
* @param[out] labels The output labels [dim = n_samples]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the arithmetic
* datatype. If DataT == MathT, this must be the identity.
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename MappingOpT = raft::identity_op>
void predict(const raft::resources& handle,
             kmeans_balanced_params const& params,
             raft::device_matrix_view<const DataT, IndexT> X,
             raft::device_matrix_view<const MathT, IndexT> centroids,
             raft::device_vector_view<LabelT, IndexT> labels,
             MappingOpT mapping_op = raft::identity_op())
{
  // Validate input shapes before dispatching to the implementation.
  RAFT_EXPECTS(X.extent(0) == labels.extent(0),
               "Number of rows in dataset and labels are different");
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // The implementation indexes the flattened dataset with IndexT, so the total
  // number of elements must fit in that type.
  RAFT_EXPECTS(static_cast<uint64_t>(X.extent(0)) * static_cast<uint64_t>(X.extent(1)) <=
                 static_cast<uint64_t>(std::numeric_limits<IndexT>::max()),
               "The chosen index type cannot represent all indices for the given dataset");
  // Every cluster index must be representable in the label type.
  RAFT_EXPECTS(static_cast<uint64_t>(centroids.extent(0)) <=
                 static_cast<uint64_t>(std::numeric_limits<LabelT>::max()),
               "The chosen label type cannot represent all cluster labels");
  detail::predict(handle,
                  params,
                  centroids.data_handle(),
                  centroids.extent(0),
                  X.extent(1),
                  X.data_handle(),
                  X.extent(0),
                  labels.data_handle(),
                  mapping_op);
}
/**
* @brief Compute hierarchical balanced k-means clustering and predict cluster index for each sample
* in the input.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <raft/cluster/kmeans_balanced.cuh>
* #include <raft/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* raft::cluster::kmeans_balanced_params params;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* auto labels = raft::make_device_vector<float, int>(handle, n_rows);
* raft::cluster::kmeans_balanced::fit_predict(
* handle, params, X, centroids.view(), labels.view());
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Training instances to cluster. The data must be in row-major format.
* [dim = n_samples x n_features]
* @param[out] centroids The output centroids [dim = n_clusters x n_features]
* @param[out] labels The output labels [dim = n_samples]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the arithmetic
* datatype. If DataT and MathT are the same, this must be the identity.
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename MappingOpT = raft::identity_op>
void fit_predict(const raft::resources& handle,
                 kmeans_balanced_params const& params,
                 raft::device_matrix_view<const DataT, IndexT> X,
                 raft::device_matrix_view<MathT, IndexT> centroids,
                 raft::device_vector_view<LabelT, IndexT> labels,
                 MappingOpT mapping_op = raft::identity_op())
{
  // Train the centroids on X first, then assign each sample to its nearest
  // centroid. The const view over the fitted centroids is what predict expects.
  raft::cluster::kmeans_balanced::fit(handle, params, X, centroids, mapping_op);
  auto const_centroids = raft::make_device_matrix_view<const MathT, IndexT>(
    centroids.data_handle(), centroids.extent(0), centroids.extent(1));
  raft::cluster::kmeans_balanced::predict(
    handle, params, X, const_centroids, labels, mapping_op);
}
namespace helpers {
/**
* @brief Randomly initialize centers and apply expectation-maximization-balancing iterations
*
* This is essentially the non-hierarchical balanced k-means algorithm which is used by the
* hierarchical algorithm once to build the mesoclusters and once per mesocluster to build the fine
* clusters.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <raft/cluster/kmeans_balanced.cuh>
* #include <raft/cluster/kmeans_balanced_types.hpp>
* ...
* raft::handle_t handle;
* raft::cluster::kmeans_balanced_params params;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* auto labels = raft::make_device_vector<int, int>(handle, n_samples);
* auto sizes = raft::make_device_vector<int, int>(handle, n_clusters);
* raft::cluster::kmeans_balanced::build_clusters(
* handle, params, X, centroids.view(), labels.view(), sizes.view());
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam CounterT Counter type supported by CUDA's native atomicAdd.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] params Structure containing the hyper-parameters
* @param[in] X Training instances to cluster. The data must be in row-major format.
* [dim = n_samples x n_features]
* @param[out] centroids The output centroids [dim = n_clusters x n_features]
* @param[out] labels The output labels [dim = n_samples]
* @param[out] cluster_sizes Size of each cluster [dim = n_clusters]
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the
* arithmetic datatype. If DataT == MathT, this must be the identity.
* @param[in] X_norm (optional) Dataset's row norms [dim = n_samples]
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void build_clusters(const raft::resources& handle,
                    const kmeans_balanced_params& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<MathT, IndexT> centroids,
                    raft::device_vector_view<LabelT, IndexT> labels,
                    raft::device_vector_view<CounterT, IndexT> cluster_sizes,
                    MappingOpT mapping_op = raft::identity_op(),
                    std::optional<raft::device_vector_view<const MathT>> X_norm = std::nullopt)
{
  // Validate that all views agree on the problem dimensions before dispatching.
  RAFT_EXPECTS(X.extent(0) == labels.extent(0),
               "Number of rows in dataset and labels are different");
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // Fixed typo in the error message: "clusyer_sizes" -> "cluster_sizes".
  RAFT_EXPECTS(centroids.extent(0) == cluster_sizes.extent(0),
               "Number of rows in centroids and cluster_sizes are different");
  detail::build_clusters(handle,
                         params,
                         X.extent(1),
                         X.data_handle(),
                         X.extent(0),
                         centroids.extent(0),
                         centroids.data_handle(),
                         labels.data_handle(),
                         cluster_sizes.data_handle(),
                         mapping_op,
                         resource::get_workspace_resource(handle),
                         // Pass the precomputed row norms when provided; the
                         // implementation accepts nullptr to compute them itself.
                         X_norm.has_value() ? X_norm.value().data_handle() : nullptr);
}
/**
* @brief Given the data and labels, calculate cluster centers and sizes in one sweep.
*
* Let `S_i = {x_k | x_k \in X & labels[k] == i}` be the vectors in the dataset with label i.
*
* On exit,
* `centers_i = (\sum_{x \in S_i} x + w_i * center_i) / (|S_i| + w_i)`,
* where `w_i = reset_counters ? 0 : cluster_size[i]`.
*
* In other words, the updated cluster centers are a weighted average of the existing cluster
* center, and the coordinates of the points labeled with i. _This allows calling this function
* multiple times with different datasets with the same effect as if calling this function once
* on the combined dataset_.
*
* @code{.cpp}
* #include <raft/core/handle.hpp>
* #include <raft/cluster/kmeans_balanced.cuh>
* ...
* raft::handle_t handle;
* auto centroids = raft::make_device_matrix<float, int>(handle, n_clusters, n_features);
* auto sizes = raft::make_device_vector<int, int>(handle, n_clusters);
* raft::cluster::kmeans_balanced::calc_centers_and_sizes(
* handle, X, labels, centroids.view(), sizes.view(), true);
* @endcode
*
* @tparam DataT Type of the input data.
* @tparam MathT Type of the centroids and mapped data.
* @tparam IndexT Type used for indexing.
* @tparam LabelT Type of the output labels.
* @tparam CounterT Counter type supported by CUDA's native atomicAdd.
* @tparam MappingOpT Type of the mapping function.
* @param[in] handle The raft resources
* @param[in] X Dataset for which to calculate cluster centers. The data must be in
* row-major format. [dim = n_samples x n_features]
* @param[in] labels The input labels [dim = n_samples]
* @param[out] centroids The output centroids [dim = n_clusters x n_features]
* @param[out] cluster_sizes Size of each cluster [dim = n_clusters]
* @param[in] reset_counters Whether to clear the output arrays before calculating.
* When set to `false`, this function may be used to update existing
* centers and sizes using the weighted average principle.
* @param[in] mapping_op (optional) Functor to convert from the input datatype to the
* arithmetic datatype. If DataT == MathT, this must be the identity.
*/
template <typename DataT,
          typename MathT,
          typename IndexT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT = raft::identity_op>
void calc_centers_and_sizes(const raft::resources& handle,
                            raft::device_matrix_view<const DataT, IndexT> X,
                            raft::device_vector_view<const LabelT, IndexT> labels,
                            raft::device_matrix_view<MathT, IndexT> centroids,
                            raft::device_vector_view<CounterT, IndexT> cluster_sizes,
                            bool reset_counters = true,
                            MappingOpT mapping_op = raft::identity_op())
{
  // Validate that all views agree on the problem dimensions before dispatching.
  RAFT_EXPECTS(X.extent(0) == labels.extent(0),
               "Number of rows in dataset and labels are different");
  RAFT_EXPECTS(X.extent(1) == centroids.extent(1),
               "Number of features in dataset and centroids are different");
  // Fixed typo in the error message: "clusyer_sizes" -> "cluster_sizes".
  RAFT_EXPECTS(centroids.extent(0) == cluster_sizes.extent(0),
               "Number of rows in centroids and cluster_sizes are different");
  detail::calc_centers_and_sizes(handle,
                                 centroids.data_handle(),
                                 cluster_sizes.data_handle(),
                                 centroids.extent(0),
                                 X.extent(1),
                                 X.data_handle(),
                                 X.extent(0),
                                 labels.data_handle(),
                                 reset_counters,
                                 mapping_op);
}
} // namespace helpers
} // namespace raft::cluster::kmeans_balanced
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/kmeans_deprecated.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Note: This file is deprecated and will be removed in a future release
* Please use include/raft/cluster/kmeans.cuh instead
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <cuda.h>
#include <thrust/binary_search.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/find.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/memory.h>
#include <thrust/random.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <raft/core/resources.hpp>
#include <raft/linalg/detail/cublas_wrappers.hpp>
#include <raft/spectral/detail/warn_dbg.hpp>
#include <raft/spectral/matrix_wrappers.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/util/device_atomics.cuh>
namespace raft {
namespace cluster {
namespace detail {
// =========================================================
// Useful grid settings
// =========================================================
constexpr unsigned int BLOCK_SIZE = 1024;
constexpr unsigned int WARP_SIZE = 32;
constexpr unsigned int BSIZE_DIV_WSIZE = (BLOCK_SIZE / WARP_SIZE);
// =========================================================
// CUDA kernels
// =========================================================
/**
* @brief Compute distances between observation vectors and centroids
* Block dimensions should be (warpSize, 1,
* blockSize/warpSize). Ideally, the grid is large enough so there
* are d threads in the x-direction, k threads in the y-direction,
* and n threads in the z-direction.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, d*n entries) Observation matrix. Matrix is
* stored column-major and each column is an observation
* vector. Matrix dimensions are d x n.
* @param centroids (Input, d*k entries) Centroid matrix. Matrix is
* stored column-major and each column is a centroid. Matrix
* dimensions are d x k.
* @param dists (Output, n*k entries) Distance matrix. Matrix is
* stored column-major and the (i,j)-entry is the square of the
* Euclidean distance between the ith observation vector and jth
* centroid. Matrix dimensions are n x k. Entries must be
* initialized to zero.
*/
template <typename index_type_t, typename value_type_t>
RAFT_KERNEL computeDistances(index_type_t n,
                             index_type_t d,
                             index_type_t k,
                             const value_type_t* __restrict__ obs,
                             const value_type_t* __restrict__ centroids,
                             value_type_t* __restrict__ dists)
{
  // Loop index
  index_type_t i;
  // Block indices
  index_type_t bidx;
  // Global indices
  index_type_t gidx, gidy, gidz;
  // Private memory
  value_type_t centroid_private, dist_private;
  // Global x-index indicates index of vector entry; each block covers one
  // blockDim.x-wide slab of the d coordinates and strides over the rest.
  bidx = blockIdx.x;
  while (bidx * blockDim.x < d) {
    gidx = threadIdx.x + bidx * blockDim.x;
    // Global y-index indicates centroid
    gidy = threadIdx.y + blockIdx.y * blockDim.y;
    while (gidy < k) {
      // Load centroid coordinate from global memory (out-of-range lanes load
      // 0 so they contribute nothing to the reduction below)
      centroid_private = (gidx < d) ? centroids[IDX(gidx, gidy, d)] : 0;
      // Global z-index indicates observation vector
      gidz = threadIdx.z + blockIdx.z * blockDim.z;
      while (gidz < n) {
        // Load observation vector coordinate from global memory
        dist_private = (gidx < d) ? obs[IDX(gidx, gidz, d)] : 0;
        // Compute contribution of current entry to squared distance
        dist_private = centroid_private - dist_private;
        dist_private = dist_private * dist_private;
        // Tree-reduce the partial squared distance across the warp. With the
        // documented (warpSize, 1, blockSize/warpSize) block shape each warp
        // spans the x dimension; the width argument 2*i confines each shuffle
        // step to the segment currently being folded.
        for (i = WARP_SIZE / 2; i > 0; i /= 2)
          dist_private += __shfl_down_sync(warp_full_mask(), dist_private, i, 2 * i);
        // Lane 0 accumulates this coordinate slab's contribution into the
        // (observation, centroid) entry. atomicAdd is required because
        // multiple x-blocks add into the same entry; dists must therefore be
        // zero-initialized by the caller (see function doc above).
        if (threadIdx.x == 0) atomicAdd(dists + IDX(gidz, gidy, n), dist_private);
        // Move to another observation vector
        gidz += blockDim.z * gridDim.z;
      }
      // Move to another centroid
      gidy += blockDim.y * gridDim.y;
    }
    // Move to another vector entry
    bidx += gridDim.x;
  }
}
/**
* @brief Find closest centroid to observation vectors.
* Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param n Number of observation vectors.
 * @param k Number of clusters.
* @param dists (Input/output, n*k entries) Distance matrix. Matrix
* is stored column-major and the (i,j)-entry is the square of
* the Euclidean distance between the ith observation vector and
* jth centroid. Matrix dimensions are n x k. On exit, the first
* n entries give the square of the Euclidean distance between
* observation vectors and closest centroids.
* @param codes (Output, n entries) Cluster assignments.
* @param clusterSizes (Output, k entries) Number of points in each
* cluster. Entries must be initialized to zero.
*/
template <typename index_type_t, typename value_type_t>
RAFT_KERNEL minDistances(index_type_t n,
                         index_type_t k,
                         value_type_t* __restrict__ dists,
                         index_type_t* __restrict__ codes,
                         index_type_t* __restrict__ clusterSizes)
{
  // One thread scans one row of the n x k (column-major) distance matrix,
  // grid-striding over observations.
  for (index_type_t row = threadIdx.x + blockIdx.x * blockDim.x; row < n;
       row += blockDim.x * gridDim.x) {
    // Find the centroid with the smallest squared distance to this point.
    index_type_t best_code = 0;
    value_type_t best_dist = dists[IDX(row, 0, n)];
    for (index_type_t col = 1; col < k; ++col) {
      value_type_t cand = dists[IDX(row, col, n)];
      if (cand < best_dist) {
        best_code = col;
        best_dist = cand;
      }
    }
    // Publish the assignment; the first n entries of dists now hold the
    // per-point minimum squared distance.
    dists[row] = best_dist;
    codes[row] = best_code;
    // Tally this point's cluster (clusterSizes must start at zero).
    atomicAdd(clusterSizes + best_code, 1);
  }
}
/**
* @brief Check if newly computed distances are smaller than old distances.
* Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param n Number of observation vectors.
* @param dists_old (Input/output, n entries) Distances between
* observation vectors and closest centroids. On exit, entries
* are replaced by entries in 'dists_new' if the corresponding
* observation vectors are closest to the new centroid.
* @param dists_new (Input, n entries) Distance between observation
* vectors and new centroid.
* @param codes_old (Input/output, n entries) Cluster
* assignments. On exit, entries are replaced with 'code_new' if
* the corresponding observation vectors are closest to the new
* centroid.
* @param code_new Index associated with new centroid.
*/
template <typename index_type_t, typename value_type_t>
RAFT_KERNEL minDistances2(index_type_t n,
                          value_type_t* __restrict__ dists_old,
                          const value_type_t* __restrict__ dists_new,
                          index_type_t* __restrict__ codes_old,
                          index_type_t code_new)
{
  // Grid-stride over observations; each thread owns distinct entries, so no
  // synchronization is needed for the read-modify-write below.
  for (index_type_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += blockDim.x * gridDim.x) {
    value_type_t candidate = dists_new[i];
    // Re-assign the point to the new centroid only if it is strictly closer.
    if (candidate < dists_old[i]) {
      dists_old[i] = candidate;
      codes_old[i] = code_new;
    }
  }
}
/**
* @brief Compute size of k-means clusters.
* Block and grid dimensions should be 1-dimensional. Ideally the
* grid is large enough so there are n threads.
* @tparam index_type_t the type of data used for indexing.
* @param n Number of observation vectors.
* @param k Number of clusters.
* @param codes (Input, n entries) Cluster assignments.
* @param clusterSizes (Output, k entries) Number of points in each
* cluster. Entries must be initialized to zero.
*/
template <typename index_type_t>
RAFT_KERNEL computeClusterSizes(index_type_t n,
                                const index_type_t* __restrict__ codes,
                                index_type_t* __restrict__ clusterSizes)
{
  // Histogram the cluster assignments: one atomic increment per observation.
  // clusterSizes must be zero-initialized by the caller.
  for (index_type_t i = threadIdx.x + blockIdx.x * blockDim.x; i < n;
       i += blockDim.x * gridDim.x) {
    atomicAdd(clusterSizes + codes[i], 1);
  }
}
/**
* @brief Divide rows of centroid matrix by cluster sizes.
* Divides the ith column of the sum matrix by the size of the ith
* cluster. If the sum matrix has been initialized so that the ith
* row is the sum of all observation vectors in the ith cluster,
* this kernel produces cluster centroids. The grid and block
* dimensions should be 2-dimensional. Ideally the grid is large
* enough so there are d threads in the x-direction and k threads
* in the y-direction.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param clusterSizes (Input, k entries) Number of points in each
* cluster.
* @param centroids (Input/output, d*k entries) Sum matrix. Matrix
* is stored column-major and matrix dimensions are d x k. The
* ith column is the sum of all observation vectors in the ith
* cluster. On exit, the matrix is the centroid matrix (each
* column is the mean position of a cluster).
*/
template <typename index_type_t, typename value_type_t>
RAFT_KERNEL divideCentroids(index_type_t d,
                            index_type_t k,
                            const index_type_t* __restrict__ clusterSizes,
                            value_type_t* __restrict__ centroids)
{
  // Global y-index selects a centroid (column of the d x k matrix).
  for (index_type_t col = threadIdx.y + blockIdx.y * blockDim.y; col < k;
       col += blockDim.y * gridDim.y) {
    // Read the cluster size once per column.
    index_type_t cluster_size = clusterSizes[col];
    // Global x-index selects a vector entry; divide each coordinate of the
    // column sum by the cluster size to turn the sum into the mean.
    for (index_type_t row = threadIdx.x + blockIdx.x * blockDim.x; row < d;
         row += blockDim.x * gridDim.x) {
      centroids[IDX(row, col, d)] /= cluster_size;
    }
  }
}
// =========================================================
// Helper functions
// =========================================================
/**
* @brief Randomly choose new centroids.
* Centroid is randomly chosen with k-means++ algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
 * @param rand Random number drawn uniformly from [0,1).
 * @param obs (Input, device memory, d*n entries) Observation
 * matrix. Matrix is stored column-major and each column is an
 * observation vector. Matrix dimensions are d x n.
* @param dists (Input, device memory, 2*n entries) Workspace. The
* first n entries should be the distance between observation
* vectors and the closest centroid.
* @param centroid (Output, device memory, d entries) Centroid
* coordinates.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
static int chooseNewCentroid(raft::resources const& handle,
                             index_type_t n,
                             index_type_t d,
                             value_type_t rand,
                             const value_type_t* __restrict__ obs,
                             value_type_t* __restrict__ dists,
                             value_type_t* __restrict__ centroid)
{
  // Cumulative sum of distances (second half of the 2*n workspace)
  value_type_t* distsCumSum = dists + n;
  // Residual sum of squares (total of the per-point distances)
  value_type_t distsSum{0};
  // Observation vector that is chosen as new centroid
  index_type_t obsIndex;
  auto stream             = resource::get_cuda_stream(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);
  // Compute cumulative sum of distances
  thrust::inclusive_scan(thrust_exec_policy,
                         thrust::device_pointer_cast(dists),
                         thrust::device_pointer_cast(dists + n),
                         thrust::device_pointer_cast(distsCumSum));
  RAFT_CHECK_CUDA(stream);
  RAFT_CUDA_TRY(cudaMemcpyAsync(
    &distsSum, distsCumSum + n - 1, sizeof(value_type_t), cudaMemcpyDeviceToHost, stream));
  // Randomly choose observation vector
  // Probabilities are proportional to square of distance to closest
  // centroid (see k-means++ algorithm)
  //
  // seg-faults due to Thrust bug
  // on binary-search-like algorithms
  // when run with stream dependent
  // execution policies; fixed on Thrust GitHub
  // hence replace w/ linear interpolation,
  // until the Thrust issue gets resolved:
  //
  // obsIndex = (thrust::lower_bound(
  //               thrust_exec_policy, thrust::device_pointer_cast(distsCumSum),
  //               thrust::device_pointer_cast(distsCumSum + n), distsSum * rand) -
  //             thrust::device_pointer_cast(distsCumSum));
  //
  // linear interpolation logic:
  //{
  value_type_t minSum{0};
  RAFT_CUDA_TRY(
    cudaMemcpyAsync(&minSum, distsCumSum, sizeof(value_type_t), cudaMemcpyDeviceToHost, stream));
  // Both async copies above target pageable host stack variables; completion
  // is not guaranteed before the host reads them (RAFT_CHECK_CUDA only
  // synchronizes in debug builds), so synchronize the stream explicitly.
  RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  RAFT_CHECK_CUDA(stream);
  if (distsSum > minSum) {
    // Pick an index by linearly interpolating rand's position in the
    // cumulative distribution (approximation of the lower_bound above).
    value_type_t vIndex = static_cast<value_type_t>(n - 1);
    obsIndex = static_cast<index_type_t>(vIndex * (distsSum * rand - minSum) / (distsSum - minSum));
  } else {
    // Degenerate distribution (all mass at the first entry)
    obsIndex = 0;
  }
  //}
  RAFT_CHECK_CUDA(stream);
  // Clamp to the valid observation range [0, n-1]
  obsIndex = std::max(obsIndex, static_cast<index_type_t>(0));
  obsIndex = std::min(obsIndex, n - 1);
  // Record new centroid position (copy the chosen column of obs)
  RAFT_CUDA_TRY(cudaMemcpyAsync(centroid,
                                obs + IDX(0, obsIndex, d),
                                d * sizeof(value_type_t),
                                cudaMemcpyDeviceToDevice,
                                stream));
  return 0;
}
/**
* @brief Choose initial cluster centroids for k-means algorithm.
* Centroids are randomly chosen with k-means++ algorithm
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param dists (Output, device memory, 2*n entries) Workspace. On
* exit, the first n entries give the square of the Euclidean
* distance between observation vectors and the closest centroid.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
static int initializeCentroids(raft::resources const& handle,
                               index_type_t n,
                               index_type_t d,
                               index_type_t k,
                               const value_type_t* __restrict__ obs,
                               value_type_t* __restrict__ centroids,
                               index_type_t* __restrict__ codes,
                               index_type_t* __restrict__ clusterSizes,
                               value_type_t* __restrict__ dists,
                               unsigned long long seed)
{
  // -------------------------------------------------------
  // Variable declarations
  // -------------------------------------------------------
  // Loop index
  index_type_t i;
  // Host-side random number generator driving the k-means++ draws
  thrust::default_random_engine rng(seed);
  thrust::uniform_real_distribution<value_type_t> uniformDist(0, 1);
  auto stream             = resource::get_cuda_stream(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);
  // CUDA grids are limited to 65535 blocks per dimension; clamp to that.
  constexpr unsigned grid_lower_bound{65535};
  // -------------------------------------------------------
  // Implementation
  // -------------------------------------------------------
  // Initialize grid dimensions: warp-shaped block for computeDistances
  dim3 blockDim_warp{WARP_SIZE, 1, BSIZE_DIV_WSIZE};
  // CUDA grid dimensions for computeDistances (coordinates in x, points in z)
  dim3 gridDim_warp{std::min(ceildiv<unsigned>(d, WARP_SIZE), grid_lower_bound),
                    1,
                    std::min(ceildiv<unsigned>(n, BSIZE_DIV_WSIZE), grid_lower_bound)};
  // CUDA grid dimensions for the 1-D kernels (one thread per point)
  dim3 gridDim_block{std::min(ceildiv<unsigned>(n, BLOCK_SIZE), grid_lower_bound), 1, 1};
  // Assign observation vectors to code 0
  RAFT_CUDA_TRY(cudaMemsetAsync(codes, 0, n * sizeof(index_type_t), stream));
  // Choose first centroid: seed all distances with 1 so chooseNewCentroid's
  // weighted draw is uniform over the n observations
  thrust::fill(thrust_exec_policy,
               thrust::device_pointer_cast(dists),
               thrust::device_pointer_cast(dists + n),
               1);
  RAFT_CHECK_CUDA(stream);
  if (chooseNewCentroid(handle, n, d, uniformDist(rng), obs, dists, centroids))
    WARNING("error in k-means++ (could not pick centroid)");
  // Compute distances from first centroid (computeDistances accumulates with
  // atomicAdd, so the first n entries must be zeroed first)
  RAFT_CUDA_TRY(cudaMemsetAsync(dists, 0, n * sizeof(value_type_t), stream));
  computeDistances<<<gridDim_warp, blockDim_warp, 0, stream>>>(n, d, 1, obs, centroids, dists);
  RAFT_CHECK_CUDA(stream);
  // Choose remaining centroids with k-means++: sample proportionally to the
  // current squared distance to the closest centroid, then fold the new
  // centroid's distances into the running minimum
  for (i = 1; i < k; ++i) {
    // Choose ith centroid (dists[0..n) holds the current min distances and
    // doubles as the sampling weights; dists[n..2n) is scratch)
    if (chooseNewCentroid(handle, n, d, uniformDist(rng), obs, dists, centroids + IDX(0, i, d)))
      WARNING("error in k-means++ (could not pick centroid)");
    // Compute distances from ith centroid into the second half of dists
    RAFT_CUDA_TRY(cudaMemsetAsync(dists + n, 0, n * sizeof(value_type_t), stream));
    computeDistances<<<gridDim_warp, blockDim_warp, 0, stream>>>(
      n, d, 1, obs, centroids + IDX(0, i, d), dists + n);
    RAFT_CHECK_CUDA(stream);
    // Recompute minimum distances and update assignments for points that are
    // now closer to centroid i
    minDistances2<<<gridDim_block, BLOCK_SIZE, 0, stream>>>(n, dists, dists + n, codes, i);
    RAFT_CHECK_CUDA(stream);
  }
  // Compute cluster sizes from the final assignments
  RAFT_CUDA_TRY(cudaMemsetAsync(clusterSizes, 0, k * sizeof(index_type_t), stream));
  computeClusterSizes<<<gridDim_block, BLOCK_SIZE, 0, stream>>>(n, codes, clusterSizes);
  RAFT_CHECK_CUDA(stream);
  return 0;
}
/**
* @brief Find cluster centroids closest to observation vectors.
* Distance is measured with Euclidean norm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param centroids (Input, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param dists (Output, device memory, n*k entries) Workspace. On
* exit, the first n entries give the square of the Euclidean
* distance between observation vectors and the closest centroid.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param residual_host (Output, host memory, 1 entry) Residual sum
* of squares of assignment.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
static int assignCentroids(raft::resources const& handle,
                           index_type_t n,
                           index_type_t d,
                           index_type_t k,
                           const value_type_t* __restrict__ obs,
                           const value_type_t* __restrict__ centroids,
                           value_type_t* __restrict__ dists,
                           index_type_t* __restrict__ codes,
                           index_type_t* __restrict__ clusterSizes,
                           value_type_t* residual_host)
{
  auto stream             = resource::get_cuda_stream(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);
  // Compute distance between centroids and observation vectors.
  // computeDistances accumulates with atomicAdd, so dists must start zeroed.
  RAFT_CUDA_TRY(cudaMemsetAsync(dists, 0, n * k * sizeof(value_type_t), stream));
  // CUDA grid dimensions: coordinates in x, centroids in y, points in z;
  // each dimension clamped to the 65535-block grid limit
  dim3 blockDim{WARP_SIZE, 1, BLOCK_SIZE / WARP_SIZE};
  dim3 gridDim;
  constexpr unsigned grid_lower_bound{65535};
  gridDim.x = std::min(ceildiv<unsigned>(d, WARP_SIZE), grid_lower_bound);
  gridDim.y = std::min(static_cast<unsigned>(k), grid_lower_bound);
  gridDim.z = std::min(ceildiv<unsigned>(n, BSIZE_DIV_WSIZE), grid_lower_bound);
  computeDistances<<<gridDim, blockDim, 0, stream>>>(n, d, k, obs, centroids, dists);
  RAFT_CHECK_CUDA(stream);
  // Find centroid closest to each observation vector; reshape the launch to
  // 1-D (one thread per point) for the argmin pass
  RAFT_CUDA_TRY(cudaMemsetAsync(clusterSizes, 0, k * sizeof(index_type_t), stream));
  blockDim.x = BLOCK_SIZE;
  blockDim.y = 1;
  blockDim.z = 1;
  gridDim.x  = std::min(ceildiv<unsigned>(n, BLOCK_SIZE), grid_lower_bound);
  gridDim.y  = 1;
  gridDim.z  = 1;
  minDistances<<<gridDim, blockDim, 0, stream>>>(n, k, dists, codes, clusterSizes);
  RAFT_CHECK_CUDA(stream);
  // Compute residual sum of squares: minDistances left the per-point minimum
  // squared distance in the first n entries of dists
  *residual_host = thrust::reduce(
    thrust_exec_policy, thrust::device_pointer_cast(dists), thrust::device_pointer_cast(dists + n));
  return 0;
}
/**
* @brief Update cluster centroids for k-means algorithm.
* All clusters are assumed to be non-empty.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Input, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Input, device memory, k entries) Number of
* points in each cluster.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param work (Output, device memory, n*d entries) Workspace.
* @param work_int (Output, device memory, 2*d*n entries)
* Workspace.
* @return Zero if successful. Otherwise non-zero.
*/
template <typename index_type_t, typename value_type_t>
static int updateCentroids(raft::resources const& handle,
                           index_type_t n,
                           index_type_t d,
                           index_type_t k,
                           const value_type_t* __restrict__ obs,
                           const index_type_t* __restrict__ codes,
                           const index_type_t* __restrict__ clusterSizes,
                           value_type_t* __restrict__ centroids,
                           value_type_t* __restrict__ work,
                           index_type_t* __restrict__ work_int)
{
  // -------------------------------------------------------
  // Variable declarations
  // -------------------------------------------------------
  // Useful constants (geam coefficients for a pure transpose)
  const value_type_t one  = 1;
  const value_type_t zero = 0;
  // CUDA grids are limited to 65535 blocks per dimension; clamp to that.
  constexpr unsigned grid_lower_bound{65535};
  auto stream             = resource::get_cuda_stream(handle);
  auto cublas_h           = resource::get_cublas_handle(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);
  // Device workspace views: obs_copy receives the transpose of obs (d*n
  // values); codes_copy and rows are two d*n integer scratch arrays
  thrust::device_ptr<value_type_t> obs_copy(work);
  thrust::device_ptr<index_type_t> codes_copy(work_int);
  thrust::device_ptr<index_type_t> rows(work_int + d * n);
  // Take transpose of observation matrix (geam with alpha=1, beta=0 performs
  // an out-of-place transpose: obs_copy = obs^T, n x d column-major)
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(raft::linalg::detail::cublasgeam(cublas_h,
                                                   CUBLAS_OP_T,
                                                   CUBLAS_OP_N,
                                                   n,
                                                   d,
                                                   &one,
                                                   obs,
                                                   d,
                                                   &zero,
                                                   (value_type_t*)NULL,
                                                   n,
                                                   thrust::raw_pointer_cast(obs_copy),
                                                   n,
                                                   stream));
  // Cluster assigned to each observation matrix entry: linear entry i of the
  // transposed matrix belongs to observation (i % n), so gather codes[i % n]
  thrust::sequence(thrust_exec_policy, rows, rows + d * n);
  RAFT_CHECK_CUDA(stream);
  thrust::transform(thrust_exec_policy,
                    rows,
                    rows + d * n,
                    thrust::make_constant_iterator<index_type_t>(n),
                    rows,
                    thrust::modulus<index_type_t>());
  RAFT_CHECK_CUDA(stream);
  thrust::gather(
    thrust_exec_policy, rows, rows + d * n, thrust::device_pointer_cast(codes), codes_copy);
  RAFT_CHECK_CUDA(stream);
  // Row (coordinate) associated with each observation matrix entry: i / n
  thrust::sequence(thrust_exec_policy, rows, rows + d * n);
  RAFT_CHECK_CUDA(stream);
  thrust::transform(thrust_exec_policy,
                    rows,
                    rows + d * n,
                    thrust::make_constant_iterator<index_type_t>(n),
                    rows,
                    thrust::divides<index_type_t>());
  RAFT_CHECK_CUDA(stream);
  // Sort and reduce to add observation vectors in same cluster. The stable
  // sort groups entries by cluster while preserving the (row-major) coordinate
  // order, so the subsequent reduce_by_key over `rows` emits, for each cluster
  // in turn, its d coordinate sums contiguously — exactly the d x k
  // column-major layout of `centroids`. This relies on every cluster being
  // non-empty (see function doc above); an empty cluster would shift the
  // output columns.
  thrust::stable_sort_by_key(thrust_exec_policy,
                             codes_copy,
                             codes_copy + d * n,
                             make_zip_iterator(make_tuple(obs_copy, rows)));
  RAFT_CHECK_CUDA(stream);
  thrust::reduce_by_key(thrust_exec_policy,
                        rows,
                        rows + d * n,
                        obs_copy,
                        codes_copy,  // Output to codes_copy is ignored
                        thrust::device_pointer_cast(centroids));
  RAFT_CHECK_CUDA(stream);
  // Divide sums by cluster size to get centroid matrix
  //
  // CUDA block dimensions (2-D: coordinates in x, clusters in y)
  dim3 blockDim{WARP_SIZE, BLOCK_SIZE / WARP_SIZE, 1};
  // CUDA grid dimensions
  dim3 gridDim{std::min(ceildiv<unsigned>(d, WARP_SIZE), grid_lower_bound),
               std::min(ceildiv<unsigned>(k, BSIZE_DIV_WSIZE), grid_lower_bound),
               1};
  divideCentroids<<<gridDim, blockDim, 0, stream>>>(d, k, clusterSizes, centroids);
  RAFT_CHECK_CUDA(stream);
  return 0;
}
// =========================================================
// k-means algorithm
// =========================================================
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param clusterSizes (Output, device memory, k entries) Number of
* points in each cluster.
* @param centroids (Output, device memory, d*k entries) Centroid
* matrix. Matrix is stored column-major and each column is a
* centroid. Matrix dimensions are d x k.
* @param work (Output, device memory, n*max(k,d) entries)
* Workspace.
* @param work_int (Output, device memory, 2*d*n entries)
* Workspace.
* @param residual_host (Output, host memory, 1 entry) Residual sum
* of squares (sum of squares of distances between observation
* vectors and centroids).
* @param iters_host (Output, host memory, 1 entry) Number of
* k-means iterations.
* @param seed random seed to be used.
* @return error flag.
*/
template <typename index_type_t, typename value_type_t>
int kmeans(raft::resources const& handle,
           index_type_t n,
           index_type_t d,
           index_type_t k,
           value_type_t tol,
           index_type_t maxiter,
           const value_type_t* __restrict__ obs,
           index_type_t* __restrict__ codes,
           index_type_t* __restrict__ clusterSizes,
           value_type_t* __restrict__ centroids,
           value_type_t* __restrict__ work,
           index_type_t* __restrict__ work_int,
           value_type_t* residual_host,
           index_type_t* iters_host,
           unsigned long long seed)
{
  // -------------------------------------------------------
  // Variable declarations
  // -------------------------------------------------------
  // Current iteration
  index_type_t iter;
  // CUDA grids are limited to 65535 blocks per dimension; clamp to that.
  constexpr unsigned grid_lower_bound{65535};
  // Residual sum of squares at previous iteration
  value_type_t residualPrev = 0;
  // Random number generator (host-side; drives k-means++ reseeding)
  thrust::default_random_engine rng(seed);
  thrust::uniform_real_distribution<value_type_t> uniformDist(0, 1);
  // -------------------------------------------------------
  // Initialization
  // -------------------------------------------------------
  auto stream             = resource::get_cuda_stream(handle);
  auto cublas_h           = resource::get_cublas_handle(handle);
  auto thrust_exec_policy = resource::get_thrust_policy(handle);
  // Trivial case k == 1: every point belongs to the single cluster, the
  // centroid is the mean of all observations, and the residual is the total
  // scatter about it.
  if (k == 1) {
    RAFT_CUDA_TRY(cudaMemsetAsync(codes, 0, n * sizeof(index_type_t), stream));
    RAFT_CUDA_TRY(
      cudaMemcpyAsync(clusterSizes, &n, sizeof(index_type_t), cudaMemcpyHostToDevice, stream));
    if (updateCentroids(handle, n, d, k, obs, codes, clusterSizes, centroids, work, work_int))
      WARNING("could not compute k-means centroids");
    dim3 blockDim{WARP_SIZE, 1, BLOCK_SIZE / WARP_SIZE};
    dim3 gridDim{std::min(ceildiv<unsigned>(d, WARP_SIZE), grid_lower_bound),
                 1,
                 std::min(ceildiv<unsigned>(n, BLOCK_SIZE / WARP_SIZE), grid_lower_bound)};
    // computeDistances accumulates with atomicAdd; zero the workspace first
    RAFT_CUDA_TRY(cudaMemsetAsync(work, 0, n * k * sizeof(value_type_t), stream));
    computeDistances<<<gridDim, blockDim, 0, stream>>>(n, d, 1, obs, centroids, work);
    RAFT_CHECK_CUDA(stream);
    *residual_host = thrust::reduce(
      thrust_exec_policy, thrust::device_pointer_cast(work), thrust::device_pointer_cast(work + n));
    RAFT_CHECK_CUDA(stream);
    return 0;
  }
  // Trivial case n <= k: each point is its own cluster (extra clusters stay
  // empty) and the residual is exactly zero.
  if (n <= k) {
    thrust::sequence(thrust_exec_policy,
                     thrust::device_pointer_cast(codes),
                     thrust::device_pointer_cast(codes + n));
    RAFT_CHECK_CUDA(stream);
    thrust::fill_n(thrust_exec_policy, thrust::device_pointer_cast(clusterSizes), n, 1);
    RAFT_CHECK_CUDA(stream);
    if (n < k)
      RAFT_CUDA_TRY(cudaMemsetAsync(clusterSizes + n, 0, (k - n) * sizeof(index_type_t), stream));
    RAFT_CUDA_TRY(cudaMemcpyAsync(
      centroids, obs, d * n * sizeof(value_type_t), cudaMemcpyDeviceToDevice, stream));
    *residual_host = 0;
    return 0;
  }
  // Initialize cuBLAS (scalars are passed from host memory below)
  // #TODO: Call from public API when ready
  RAFT_CUBLAS_TRY(
    raft::linalg::detail::cublassetpointermode(cublas_h, CUBLAS_POINTER_MODE_HOST, stream));
  // -------------------------------------------------------
  // k-means++ algorithm
  // -------------------------------------------------------
  // Choose initial cluster centroids
  if (initializeCentroids(handle, n, d, k, obs, centroids, codes, clusterSizes, work, seed))
    WARNING("could not initialize k-means centroids");
  // Apply k-means iteration until convergence
  for (iter = 0; iter < maxiter; ++iter) {
    // Update cluster centroids
    if (updateCentroids(handle, n, d, k, obs, codes, clusterSizes, centroids, work, work_int))
      WARNING("could not update k-means centroids");
    // Determine centroid closest to each observation.
    // NOTE(review): on the first iteration this reads *residual_host before
    // this function has written it, so the first convergence test compares
    // against whatever the caller left there — TODO confirm intended.
    residualPrev = *residual_host;
    if (assignCentroids(handle, n, d, k, obs, centroids, work, codes, clusterSizes, residual_host))
      WARNING("could not assign observation vectors to k-means clusters");
    // Reinitialize empty clusters with new centroids (thrust::find returns
    // the index of the first zero-sized cluster, or k if none)
    index_type_t emptyCentroid = (thrust::find(thrust_exec_policy,
                                               thrust::device_pointer_cast(clusterSizes),
                                               thrust::device_pointer_cast(clusterSizes + k),
                                               0) -
                                  thrust::device_pointer_cast(clusterSizes));
    // FIXME: emptyCentroid never reaches k (infinite loop) under certain
    // conditions, such as if obs is corrupt (as seen as a result of a
    // DataFrame column of NULL edge vals used to create the Graph)
    while (emptyCentroid < k) {
      if (chooseNewCentroid(
            handle, n, d, uniformDist(rng), obs, work, centroids + IDX(0, emptyCentroid, d)))
        WARNING("could not replace empty centroid");
      if (assignCentroids(
            handle, n, d, k, obs, centroids, work, codes, clusterSizes, residual_host))
        WARNING("could not assign observation vectors to k-means clusters");
      emptyCentroid = (thrust::find(thrust_exec_policy,
                                    thrust::device_pointer_cast(clusterSizes),
                                    thrust::device_pointer_cast(clusterSizes + k),
                                    0) -
                       thrust::device_pointer_cast(clusterSizes));
      RAFT_CHECK_CUDA(stream);
    }
    // Check for convergence: stop when the per-point residual change is
    // below tol (iter is bumped so iters_host counts the completed pass)
    if (std::fabs(residualPrev - (*residual_host)) / n < tol) {
      ++iter;
      break;
    }
  }
  // Warning if k-means has failed to converge
  if (std::fabs(residualPrev - (*residual_host)) / n >= tol) WARNING("k-means failed to converge");
  *iters_host = iter;
  return 0;
}
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam index_type_t the type of data used for indexing.
* @tparam value_type_t the type of data used for weights, distances.
* @param handle the raft handle.
* @param n Number of observation vectors.
* @param d Dimension of observation vectors.
* @param k Number of clusters.
* @param tol Tolerance for convergence. k-means stops when the
* change in residual divided by n is less than tol.
* @param maxiter Maximum number of k-means iterations.
* @param obs (Input, device memory, d*n entries) Observation
* matrix. Matrix is stored column-major and each column is an
* observation vector. Matrix dimensions are d x n.
* @param codes (Output, device memory, n entries) Cluster
* assignments.
* @param residual On exit, residual sum of squares (sum of squares
* of distances between observation vectors and centroids).
* @param iters on exit, number of k-means iterations.
* @param seed random seed to be used.
* @return error flag
*/
template <typename index_type_t, typename value_type_t>
int kmeans(raft::resources const& handle,
           index_type_t n,
           index_type_t d,
           index_type_t k,
           value_type_t tol,
           index_type_t maxiter,
           const value_type_t* __restrict__ obs,
           index_type_t* __restrict__ codes,
           value_type_t& residual,
           index_type_t& iters,
           unsigned long long seed = 123456)
{
  // Validate parameters before touching device memory
  RAFT_EXPECTS(n > 0, "invalid parameter (n<1)");
  RAFT_EXPECTS(d > 0, "invalid parameter (d<1)");
  RAFT_EXPECTS(k > 0, "invalid parameter (k<1)");
  RAFT_EXPECTS(tol > 0, "invalid parameter (tol<=0)");
  RAFT_EXPECTS(maxiter >= 0, "invalid parameter (maxiter<0)");
  // Allocate the device scratch buffers the full implementation requires:
  // cluster sizes, the centroid matrix, and the two workspaces
  raft::spectral::matrix::vector_t<index_type_t> clusterSizes(handle, k);
  raft::spectral::matrix::vector_t<value_type_t> centroids(handle, d * k);
  raft::spectral::matrix::vector_t<value_type_t> work(handle, n * std::max(k, d));
  raft::spectral::matrix::vector_t<index_type_t> work_int(handle, 2 * d * n);
  // Delegate to the full k-means implementation, surfacing only the cluster
  // assignments, residual, and iteration count to the caller
  return kmeans<index_type_t, value_type_t>(handle,
                                            n,
                                            d,
                                            k,
                                            tol,
                                            maxiter,
                                            obs,
                                            codes,
                                            clusterSizes.raw(),
                                            centroids.raw(),
                                            work.raw(),
                                            work_int.raw(),
                                            &residual,
                                            &iters,
                                            seed);
}
} // namespace detail
} // namespace cluster
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/connectivities.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/cluster/single_linkage_types.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/sparse/convert/csr.cuh>
#include <raft/sparse/coo.hpp>
#include <raft/sparse/neighbors/knn_graph.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>

#include <rmm/device_uvector.hpp>

#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>

#include <limits>
namespace raft::cluster::detail {
/**
 * Primary template for building the connectivities (distance) graph used by
 * single-linkage clustering. Each supported LinkageDistance value gets its own
 * specialization below; this primary template only declares the interface.
 */
template <raft::cluster::LinkageDistance dist_type, typename value_idx, typename value_t>
struct distance_graph_impl {
  // Builds a CSR graph (indptr / indices / data) from the m x n matrix X.
  // 'c' is only meaningful for the kNN-based specialization, where it tunes
  // the neighborhood size (k <= log(n) + c).
  void run(raft::resources const& handle,
           const value_t* X,
           size_t m,
           size_t n,
           raft::distance::DistanceType metric,
           rmm::device_uvector<value_idx>& indptr,
           rmm::device_uvector<value_idx>& indices,
           rmm::device_uvector<value_t>& data,
           int c);
};
/**
 * Connectivities specialization to build a knn graph.
 *
 * Produces a CSR adjacency from a symmetrized kNN graph of X, with self-loop
 * weights replaced by the maximum representable distance so they are never
 * selected by the MST.
 *
 * @tparam value_idx index type of the output CSR arrays
 * @tparam value_t   floating-point value type of X and the edge weights
 */
template <typename value_idx, typename value_t>
struct distance_graph_impl<raft::cluster::LinkageDistance::KNN_GRAPH, value_idx, value_t> {
  void run(raft::resources const& handle,
           const value_t* X,
           size_t m,
           size_t n,
           raft::distance::DistanceType metric,
           rmm::device_uvector<value_idx>& indptr,
           rmm::device_uvector<value_idx>& indices,
           rmm::device_uvector<value_t>& data,
           int c)
  {
    auto stream = resource::get_cuda_stream(handle);
    auto thrust_policy = resource::get_thrust_policy(handle);
    // Need to symmetrize knn into undirected graph
    raft::sparse::COO<value_t, value_idx> knn_graph_coo(stream);
    raft::sparse::neighbors::knn_graph(handle, X, m, n, metric, knn_graph_coo, c);
    // Output buffers sized to the symmetrized edge count.
    indices.resize(knn_graph_coo.nnz, stream);
    data.resize(knn_graph_coo.nnz, stream);
    // self-loops get max distance
    // (branchless select: the bool arithmetic picks either max() or the
    // original value without divergent control flow)
    auto transform_in = thrust::make_zip_iterator(
      thrust::make_tuple(knn_graph_coo.rows(), knn_graph_coo.cols(), knn_graph_coo.vals()));
    thrust::transform(thrust_policy,
                      transform_in,
                      transform_in + knn_graph_coo.nnz,
                      knn_graph_coo.vals(),
                      [=] __device__(const thrust::tuple<value_idx, value_idx, value_t>& tup) {
                        bool self_loop = thrust::get<0>(tup) == thrust::get<1>(tup);
                        return (self_loop * std::numeric_limits<value_t>::max()) +
                               (!self_loop * thrust::get<2>(tup));
                      });
    // COO rows are already sorted, so compress them directly into indptr.
    raft::sparse::convert::sorted_coo_to_csr(
      knn_graph_coo.rows(), knn_graph_coo.nnz, indptr.data(), m + 1, stream);
    // TODO: Wouldn't need to copy here if we could compute knn
    // graph directly on the device uvectors
    // ref: https://github.com/rapidsai/raft/issues/227
    raft::copy_async(indices.data(), knn_graph_coo.cols(), knn_graph_coo.nnz, stream);
    raft::copy_async(data.data(), knn_graph_coo.vals(), knn_graph_coo.nnz, stream);
  }
};
/**
 * Kernel filling the column-index array of a dense m x m adjacency pattern:
 * writes indices[i] = i % m for each of the nnz entries (one thread per entry,
 * out-of-range threads do nothing).
 */
template <typename value_idx>
RAFT_KERNEL fill_indices2(value_idx* indices, size_t m, size_t nnz)
{
  value_idx tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  // Guard the grid tail: the launch rounds up to whole blocks.
  if (tid < nnz) { indices[tid] = tid % m; }
}
/**
 * Compute a fully-connected CSR of pairwise distances (dense m x m graph in
 * sparse form), with self-loops (diagonal entries) set to the maximum
 * representable distance so the MST never picks them.
 *
 * @tparam value_idx index type of the CSR arrays
 * @tparam value_t   floating-point value type
 * @param handle raft handle
 * @param X row-major input matrix (m x n)
 * @param m number of rows (samples) in X
 * @param n number of columns (features) in X
 * @param metric distance metric to use
 * @param[out] indptr  CSR row offsets (m + 1 entries)
 * @param[out] indices CSR column indices (m * m entries)
 * @param[out] data    CSR distances (m * m entries)
 */
template <typename value_idx, typename value_t>
void pairwise_distances(const raft::resources& handle,
                        const value_t* X,
                        size_t m,
                        size_t n,
                        raft::distance::DistanceType metric,
                        value_idx* indptr,
                        value_idx* indices,
                        value_t* data)
{
  auto stream = resource::get_cuda_stream(handle);
  auto exec_policy = resource::get_thrust_policy(handle);
  // NOTE(review): m * m may overflow value_idx for large m when value_idx is
  // a 32-bit type — callers should ensure value_idx is wide enough.
  value_idx nnz = m * m;
  value_idx blocks = raft::ceildiv(nnz, (value_idx)256);
  // Every row has exactly m neighbors: columns are 0..m-1 repeated.
  fill_indices2<value_idx><<<blocks, 256, 0, stream>>>(indices, m, nnz);
  // Row offsets are 0, m, 2m, ... with the final nnz sentinel appended below.
  thrust::sequence(exec_policy, indptr, indptr + m, 0, (int)m);
  raft::update_device(indptr + m, &nnz, 1, stream);
  // TODO: It would ultimately be nice if the MST could accept
  // dense inputs directly so we don't need to double the memory
  // usage to hand it a sparse array here.
  distance::pairwise_distance<value_t, value_idx>(handle, X, X, data, m, m, n, metric);
  // self-loops get max distance
  // (idx % m == idx / m identifies the diagonal of the row-major m x m matrix;
  // the bool arithmetic selects branchlessly)
  auto transform_in =
    thrust::make_zip_iterator(thrust::make_tuple(thrust::make_counting_iterator(0), data));
  thrust::transform(exec_policy,
                    transform_in,
                    transform_in + nnz,
                    data,
                    [=] __device__(const thrust::tuple<value_idx, value_t>& tup) {
                      value_idx idx = thrust::get<0>(tup);
                      bool self_loop = idx % m == idx / m;
                      return (self_loop * std::numeric_limits<value_t>::max()) +
                             (!self_loop * thrust::get<1>(tup));
                    });
}
/**
 * Connectivities specialization for pairwise distances.
 *
 * Builds a fully-connected m x m distance graph in CSR form by sizing the
 * output buffers and delegating to pairwise_distances().
 *
 * @tparam value_idx index type of the CSR arrays
 * @tparam value_t   floating-point value type
 */
template <typename value_idx, typename value_t>
struct distance_graph_impl<raft::cluster::LinkageDistance::PAIRWISE, value_idx, value_t> {
  void run(const raft::resources& handle,
           const value_t* X,
           size_t m,
           size_t n,
           raft::distance::DistanceType metric,
           rmm::device_uvector<value_idx>& indptr,
           rmm::device_uvector<value_idx>& indices,
           rmm::device_uvector<value_t>& data,
           int c)
  {
    auto cuda_stream = resource::get_cuda_stream(handle);
    // Dense graph: one edge per ordered pair of samples.
    size_t n_entries = m * m;
    data.resize(n_entries, cuda_stream);
    indices.resize(n_entries, cuda_stream);
    pairwise_distances(handle, X, m, n, metric, indptr.data(), indices.data(), data.data());
  }
};
/**
 * Returns a CSR connectivities graph based on the given linkage distance.
 *
 * Dispatches at compile time to the distance_graph_impl specialization
 * selected by dist_type.
 *
 * @tparam value_idx index type of the CSR arrays
 * @tparam value_t   floating-point value type
 * @tparam dist_type linkage distance variant (KNN_GRAPH or PAIRWISE)
 * @param[in] handle raft handle
 * @param[in] X dense data for which to construct connectivities
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[in] metric distance metric to use
 * @param[out] indptr indptr array of connectivities graph
 * @param[out] indices column indices array of connectivities graph
 * @param[out] data distances array of connectivities graph
 * @param[in] c constant 'c' used for nearest neighbors-based distances
 *            which will guarantee k <= log(n) + c
 */
template <typename value_idx, typename value_t, raft::cluster::LinkageDistance dist_type>
void get_distance_graph(raft::resources const& handle,
                        const value_t* X,
                        size_t m,
                        size_t n,
                        raft::distance::DistanceType metric,
                        rmm::device_uvector<value_idx>& indptr,
                        rmm::device_uvector<value_idx>& indices,
                        rmm::device_uvector<value_t>& data,
                        int c)
{
  auto cuda_stream = resource::get_cuda_stream(handle);
  // One row offset per sample plus the trailing nnz sentinel.
  indptr.resize(m + 1, cuda_stream);

  distance_graph_impl<dist_type, value_idx, value_t> graph_builder;
  graph_builder.run(handle, X, m, n, metric, indptr, indices, data, c);
}
}; // namespace raft::cluster::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/mst.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <raft/sparse/neighbors/cross_component_nn.cuh>
#include <raft/sparse/op/sort.cuh>
#include <raft/sparse/solver/mst.cuh>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace raft::cluster::detail {
/**
 * Appends the edges of coo2 onto coo1 in place (coo1 becomes the merged
 * edge list; coo2 is left untouched).
 *
 * @tparam value_idx index type of the edge lists
 * @tparam value_t   edge-weight type
 * @param[inout] coo1 destination edge list; resized to hold both sets of edges
 * @param[in]    coo2 edge list whose edges are appended to coo1
 * @param[in]    stream CUDA stream all resizes/copies are ordered on
 */
template <typename value_idx, typename value_t>
void merge_msts(sparse::solver::Graph_COO<value_idx, value_idx, value_t>& coo1,
                sparse::solver::Graph_COO<value_idx, value_idx, value_t>& coo2,
                cudaStream_t stream)
{
  /** Add edges to existing mst **/
  // Use the edge-count type of the COO itself rather than a narrowing `int`,
  // so edge counts beyond INT_MAX are not truncated.
  auto final_nnz = coo2.n_edges + coo1.n_edges;

  coo1.src.resize(final_nnz, stream);
  coo1.dst.resize(final_nnz, stream);
  coo1.weights.resize(final_nnz, stream);

  /**
   * Construct final edge list
   */
  raft::copy_async(coo1.src.data() + coo1.n_edges, coo2.src.data(), coo2.n_edges, stream);
  raft::copy_async(coo1.dst.data() + coo1.n_edges, coo2.dst.data(), coo2.n_edges, stream);
  raft::copy_async(coo1.weights.data() + coo1.n_edges, coo2.weights.data(), coo2.n_edges, stream);

  coo1.n_edges = final_nnz;
}
/**
 * Connect an unconnected knn graph (one in which mst returns an msf). The
 * device buffers underlying the Graph_COO object are modified in-place.
 *
 * Finds cross-component nearest neighbors to produce candidate connecting
 * edges, re-runs the MST restricted to those new edges (keeping the existing
 * colors), and merges the resulting edges into `msf`.
 *
 * @tparam value_idx index type
 * @tparam value_t floating-point value type
 * @tparam red_op reduction functor used by cross_component_nn
 * @param[in] handle raft handle
 * @param[in] X original dense data from which knn graph was constructed
 * @param[inout] msf edge list containing the mst result
 * @param[in] m number of rows in X
 * @param[in] n number of columns in X
 * @param[inout] color the color labels array returned from the mst invocation
 * @param[in] reduction_op reduction functor forwarded to cross_component_nn
 * @param[in] metric distance metric.
 *   NOTE(review): `metric` is accepted but not forwarded anywhere in this
 *   body — cross_component_nn is called without it. Presumably it assumes
 *   L2SqrtExpanded; confirm before relying on other metrics.
 */
template <typename value_idx, typename value_t, typename red_op>
void connect_knn_graph(
  raft::resources const& handle,
  const value_t* X,
  sparse::solver::Graph_COO<value_idx, value_idx, value_t>& msf,
  size_t m,
  size_t n,
  value_idx* color,
  red_op reduction_op,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded)
{
  auto stream = resource::get_cuda_stream(handle);
  raft::sparse::COO<value_t, value_idx> connected_edges(stream);
  // default row and column batch sizes are chosen for computing cross component nearest neighbors.
  // Reference: PR #1445
  static constexpr size_t default_row_batch_size = 4096;
  static constexpr size_t default_col_batch_size = 16;
  raft::sparse::neighbors::cross_component_nn<value_idx, value_t>(handle,
                                                                  connected_edges,
                                                                  X,
                                                                  color,
                                                                  m,
                                                                  n,
                                                                  reduction_op,
                                                                  min(m, default_row_batch_size),
                                                                  min(n, default_col_batch_size));
  // Compress the candidate connecting edges into CSR for the MST solver.
  rmm::device_uvector<value_idx> indptr2(m + 1, stream);
  raft::sparse::convert::sorted_coo_to_csr(
    connected_edges.rows(), connected_edges.nnz, indptr2.data(), m + 1, stream);
  // On the second call, we hand the MST the original colors
  // and the new set of edges and let it restart the optimization process
  auto new_mst =
    raft::sparse::solver::mst<value_idx, value_idx, value_t, double>(handle,
                                                                     indptr2.data(),
                                                                     connected_edges.cols(),
                                                                     connected_edges.vals(),
                                                                     m,
                                                                     connected_edges.nnz,
                                                                     color,
                                                                     stream,
                                                                     false,
                                                                     false);
  // Append the newly-found connecting edges onto the existing forest.
  merge_msts<value_idx, value_t>(msf, new_mst, stream);
}
/**
 * Constructs an MST and sorts the resulting edges in ascending
 * order by their weight.
 *
 * Hierarchical clustering heavily relies upon the ordering
 * and vertices returned in the MST. If the result of the
 * MST was actually a minimum-spanning forest, the CSR
 * being passed into the MST is not connected. In such a
 * case, this graph will be connected by performing a
 * KNN across the components.
 *
 * @tparam value_idx index type
 * @tparam value_t floating-point value type
 * @tparam red_op reduction functor forwarded to connect_knn_graph
 * @param[in] handle raft handle
 * @param[in] X original dense data (used only if the graph must be connected)
 * @param[in] indptr CSR indptr of connectivities graph
 * @param[in] indices CSR indices array of connectivities graph
 * @param[in] pw_dists CSR weights array of connectivities graph
 * @param[in] m number of rows in X / src vertices in connectivities graph
 * @param[in] n number of columns in X
 * @param[out] mst_src output src edges
 * @param[out] mst_dst output dst edges
 * @param[out] mst_weight output weights (distances)
 * @param[inout] color per-vertex component labels; initialized by the MST on
 *   the first call and updated as components get merged
 * @param[in] nnz number of non-zeros in the input CSR
 * @param[in] reduction_op reduction functor used when connecting components
 * @param[in] metric distance metric.
 *   NOTE(review): `metric` is not forwarded in this body — connect_knn_graph
 *   is invoked with its own default. Confirm before using non-default metrics.
 * @param[in] max_iter maximum iterations to run knn graph connection. This
 * argument is really just a safeguard against the potential for infinite loops.
 */
template <typename value_idx, typename value_t, typename red_op>
void build_sorted_mst(
  raft::resources const& handle,
  const value_t* X,
  const value_idx* indptr,
  const value_idx* indices,
  const value_t* pw_dists,
  size_t m,
  size_t n,
  value_idx* mst_src,
  value_idx* mst_dst,
  value_t* mst_weight,
  value_idx* color,
  size_t nnz,
  red_op reduction_op,
  raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded,
  int max_iter = 10)
{
  auto stream = resource::get_cuda_stream(handle);
  // We want to have MST initialize colors on first call.
  auto mst_coo = raft::sparse::solver::mst<value_idx, value_idx, value_t, double>(
    handle, indptr, indices, pw_dists, (value_idx)m, nnz, color, stream, false, true);
  int iters = 1;
  int n_components = raft::sparse::neighbors::get_n_components(color, m, stream);
  // Repeatedly bridge components with cross-component 1-nn edges until the
  // forest becomes a single tree (or max_iter is exhausted).
  while (n_components > 1 && iters < max_iter) {
    connect_knn_graph<value_idx, value_t>(handle, X, mst_coo, m, n, color, reduction_op);
    iters++;
    n_components = raft::sparse::neighbors::get_n_components(color, m, stream);
  }
  /**
   * The `max_iter` argument was introduced only to prevent the potential for an infinite loop.
   * Ideally the log2(n) guarantees of the MST should be enough to connect KNN graphs with a
   * massive number of data samples in very few iterations. If it does not, there are 3 likely
   * reasons why (in order of their likelihood):
   * 1. There is a bug in this code somewhere
   * 2. Either the given KNN graph wasn't generated from X or the same metric is not being used
   *    to generate the 1-nn (currently only L2SqrtExpanded is supported).
   * 3. max_iter was not large enough to connect the graph (less likely).
   *
   * Note that a KNN graph generated from 50 random isotropic balls (with significant overlap)
   * was able to be connected in a single iteration.
   */
  RAFT_EXPECTS(n_components == 1,
               "KNN graph could not be connected in %d iterations. "
               "Please verify that the input knn graph is generated from X "
               "(and the same distance metric used),"
               " or increase 'max_iter'",
               max_iter);
  // Sort edges ascending by weight — downstream agglomerative steps depend
  // on this ordering.
  raft::sparse::op::coo_sort_by_weight(
    mst_coo.src.data(), mst_coo.dst.data(), mst_coo.weights.data(), mst_coo.n_edges, stream);
  raft::copy_async(mst_src, mst_coo.src.data(), mst_coo.n_edges, stream);
  raft::copy_async(mst_dst, mst_coo.dst.data(), mst_coo.n_edges, stream);
  raft::copy_async(mst_weight, mst_coo.weights.data(), mst_coo.n_edges, stream);
}
}; // namespace raft::cluster::detail | 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/kmeans.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <random>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/transform.h>
#include <raft/cluster/detail/kmeans_common.cuh>
#include <raft/cluster/kmeans_types.hpp>
#include <raft/common/nvtx.hpp>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/linalg/map_then_reduce.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace cluster {
namespace detail {
// =========================================================
// Init functions
// =========================================================
// Selects 'n_clusters' samples uniformly at random from X (via shuffle +
// gather) and writes them into 'centroids' as the initial cluster centers.
template <typename DataT, typename IndexT>
void initRandom(raft::resources const& handle,
                const KMeansParams& params,
                raft::device_matrix_view<const DataT, IndexT> X,
                raft::device_matrix_view<DataT, IndexT> centroids)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("initRandom");
  // All device work is delegated to shuffleAndGather, which obtains the CUDA
  // stream from `handle` itself — the previous local `stream` was unused.
  detail::shuffleAndGather<DataT, IndexT>(
    handle, X, centroids, params.n_clusters, params.rng_state.seed);
}
/*
 * @brief Selects 'n_clusters' samples from the input X using kmeans++ algorithm.
 * @note This is the algorithm described in
 * "k-means++: the advantages of careful seeding". 2007, Arthur, D. and Vassilvitskii, S.
 * ACM-SIAM symposium on Discrete algorithms.
 *
 * Scalable kmeans++ pseudocode
 * 1: C = sample a point uniformly at random from X
 * 2: while |C| < k
 * 3:   Sample x in X with probability p_x = d^2(x, C) / phi_X (C)
 * 4:   C = C U {x}
 * 5: end for
 *
 * Selected centroids are written into `centroidsRawData`
 * (n_clusters x n_features, row-major). `workspace` is resized as needed for
 * the CUB argmin reduction.
 */
template <typename DataT, typename IndexT>
void kmeansPlusPlus(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                    rmm::device_uvector<char>& workspace)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("kmeansPlusPlus");
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = params.n_clusters;
  auto metric = params.metric;
  // number of seeding trials for each center (except the first)
  auto n_trials = 2 + static_cast<int>(std::ceil(log(n_clusters)));
  RAFT_LOG_DEBUG(
    "Run sequential k-means++ to select %d centroids from %d input samples "
    "(%d seeding trials per iterations)",
    n_clusters,
    n_samples,
    n_trials);
  // NOTE(review): dataBatchSize is computed but never read below — candidate
  // for removal.
  auto dataBatchSize = getDataBatchSize(params.batch_samples, n_samples);
  // temporary buffers
  auto indices = raft::make_device_vector<IndexT, IndexT>(handle, n_trials);
  auto centroidCandidates = raft::make_device_matrix<DataT, IndexT>(handle, n_trials, n_features);
  auto costPerCandidate = raft::make_device_vector<DataT, IndexT>(handle, n_trials);
  auto minClusterDistance = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  auto distBuffer = raft::make_device_matrix<DataT, IndexT>(handle, n_trials, n_samples);
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // NOTE(review): clusterCost appears unused in this function body.
  rmm::device_scalar<DataT> clusterCost(stream);
  rmm::device_scalar<cub::KeyValuePair<int, DataT>> minClusterIndexAndDistance(stream);
  // Device and matrix views
  raft::device_vector_view<IndexT, IndexT> indices_view(indices.data_handle(), n_trials);
  auto const_weights_view =
    raft::make_device_vector_view<const DataT, IndexT>(minClusterDistance.data_handle(), n_samples);
  auto const_indices_view =
    raft::make_device_vector_view<const IndexT, IndexT>(indices.data_handle(), n_trials);
  auto const_X_view =
    raft::make_device_matrix_view<const DataT, IndexT>(X.data_handle(), n_samples, n_features);
  raft::device_matrix_view<DataT, IndexT> candidates_view(
    centroidCandidates.data_handle(), n_trials, n_features);
  // L2 norm of X: ||c||^2
  // (only needed for the expanded L2 distance variants)
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (metric == raft::distance::DistanceType::L2Expanded ||
      metric == raft::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }
  // Host-side RNG picks the very first centroid; device RNG drives the
  // weighted candidate sampling inside the loop.
  raft::random::RngState rng(params.rng_state.seed, params.rng_state.type);
  std::mt19937 gen(params.rng_state.seed);
  std::uniform_int_distribution<> dis(0, n_samples - 1);
  // <<< Step-1 >>>: C <-- sample a point uniformly at random from X
  auto initialCentroid = raft::make_device_matrix_view<const DataT, IndexT>(
    X.data_handle() + dis(gen) * n_features, 1, n_features);
  int n_clusters_picked = 1;
  // store the chosen centroid in the buffer
  raft::copy(
    centroidsRawData.data_handle(), initialCentroid.data_handle(), initialCentroid.size(), stream);
  // C = initial set of centroids
  auto centroids = raft::make_device_matrix_view<DataT, IndexT>(
    centroidsRawData.data_handle(), initialCentroid.extent(0), initialCentroid.extent(1));
  // <<< End of Step-1 >>>
  // Calculate cluster distance, d^2(x, C), for all the points x in X to the nearest centroid
  detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                   X,
                                                   centroids,
                                                   minClusterDistance.view(),
                                                   L2NormX.view(),
                                                   L2NormBuf_OR_DistBuf,
                                                   params.metric,
                                                   params.batch_samples,
                                                   params.batch_centroids,
                                                   workspace);
  RAFT_LOG_DEBUG(" k-means++ - Sampled %d/%d centroids", n_clusters_picked, n_clusters);
  // <<<< Step-2 >>> : while |C| < k
  while (n_clusters_picked < n_clusters) {
    // <<< Step-3 >>> : Sample x in X with probability p_x = d^2(x, C) / phi_X (C)
    // Choose 'n_trials' centroid candidates from X with probability proportional to the squared
    // distance to the nearest existing cluster
    raft::random::discrete(handle, rng, indices_view, const_weights_view);
    raft::matrix::gather(handle, const_X_view, const_indices_view, candidates_view);
    // Calculate pairwise distance between X and the centroid candidates
    // Output - pwd [n_trials x n_samples]
    auto pwd = distBuffer.view();
    detail::pairwise_distance_kmeans<DataT, IndexT>(
      handle, centroidCandidates.view(), X, pwd, workspace, metric);
    // Update nearest cluster distance for each centroid candidate
    // Note pwd and minDistBuf points to same buffer which currently holds pairwise distance values.
    // Outputs minDistanceBuf[n_trials x n_samples] where minDistance[i, :] contains updated
    // minClusterDistance that includes candidate-i
    auto minDistBuf = distBuffer.view();
    raft::linalg::matrixVectorOp(minDistBuf.data_handle(),
                                 pwd.data_handle(),
                                 minClusterDistance.data_handle(),
                                 pwd.extent(1),
                                 pwd.extent(0),
                                 true,
                                 true,
                                 raft::min_op{},
                                 stream);
    // Calculate costPerCandidate[n_trials] where costPerCandidate[i] is the cluster cost when using
    // centroid candidate-i
    raft::linalg::reduce(costPerCandidate.data_handle(),
                         minDistBuf.data_handle(),
                         minDistBuf.extent(1),
                         minDistBuf.extent(0),
                         static_cast<DataT>(0),
                         true,
                         true,
                         stream);
    // Greedy Choice - Choose the candidate that has minimum cluster cost
    // ArgMin operation below identifies the index of minimum cost in costPerCandidate
    {
      // Determine temporary device storage requirements
      size_t temp_storage_bytes = 0;
      cub::DeviceReduce::ArgMin(nullptr,
                                temp_storage_bytes,
                                costPerCandidate.data_handle(),
                                minClusterIndexAndDistance.data(),
                                costPerCandidate.extent(0),
                                stream);
      // Allocate temporary storage
      workspace.resize(temp_storage_bytes, stream);
      // Run argmin-reduction
      cub::DeviceReduce::ArgMin(workspace.data(),
                                temp_storage_bytes,
                                costPerCandidate.data_handle(),
                                minClusterIndexAndDistance.data(),
                                costPerCandidate.extent(0),
                                stream);
      int bestCandidateIdx = -1;
      raft::copy(&bestCandidateIdx, &minClusterIndexAndDistance.data()->key, 1, stream);
      // Sync so bestCandidateIdx is valid on the host before indexing with it.
      resource::sync_stream(handle);
      /// <<< End of Step-3 >>>
      /// <<< Step-4 >>>: C = C U {x}
      // Update minimum cluster distance corresponding to the chosen centroid candidate
      raft::copy(minClusterDistance.data_handle(),
                 minDistBuf.data_handle() + bestCandidateIdx * n_samples,
                 n_samples,
                 stream);
      raft::copy(centroidsRawData.data_handle() + n_clusters_picked * n_features,
                 centroidCandidates.data_handle() + bestCandidateIdx * n_features,
                 n_features,
                 stream);
      ++n_clusters_picked;
      /// <<< End of Step-4 >>>
    }
    RAFT_LOG_DEBUG(" k-means++ - Sampled %d/%d centroids", n_clusters_picked, n_clusters);
  } /// <<<< Step-5 >>>
}
/**
 * @brief Perform the centroid-update half of one Lloyd iteration: compute new
 * centroids as the weighted mean of the samples assigned to each cluster.
 *
 * Clusters whose total weight is zero keep their previous centroid instead of
 * being zeroed out.
 *
 * @tparam DataT element type
 * @tparam IndexT index type
 * @tparam LabelsIterator iterator yielding each sample's nearest-cluster index
 * @param handle raft handle
 * @param[in] X input matrix (size n_samples x n_features)
 * @param[in] sample_weights per-sample weights (size n_samples)
 * @param[in] centroids current centroids (size n_clusters x n_features)
 * @param[in] cluster_labels iterator over per-sample cluster assignments
 * @param[out] weight_per_cluster total sample weight per cluster (size n_clusters)
 * @param[out] new_centroids updated centroids (size n_clusters x n_features)
 * @param[inout] workspace temporary device buffer, resized as needed
 */
template <typename DataT, typename IndexT, typename LabelsIterator>
void update_centroids(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT, row_major> X,
                      raft::device_vector_view<const DataT, IndexT> sample_weights,
                      raft::device_matrix_view<const DataT, IndexT, row_major> centroids,
                      // TODO: Figure out how to best wrap iterator types in mdspan
                      LabelsIterator cluster_labels,
                      raft::device_vector_view<DataT, IndexT> weight_per_cluster,
                      raft::device_matrix_view<DataT, IndexT, row_major> new_centroids,
                      rmm::device_uvector<char>& workspace)
{
  auto n_clusters = centroids.extent(0);
  auto n_samples = X.extent(0);
  workspace.resize(n_samples, resource::get_cuda_stream(handle));
  // Calculates weighted sum of all the samples assigned to cluster-i and stores the
  // result in new_centroids[i]
  raft::linalg::reduce_rows_by_key((DataT*)X.data_handle(),
                                   X.extent(1),
                                   cluster_labels,
                                   sample_weights.data_handle(),
                                   workspace.data(),
                                   X.extent(0),
                                   X.extent(1),
                                   n_clusters,
                                   new_centroids.data_handle(),
                                   resource::get_cuda_stream(handle));
  // Reduce weights by key to compute weight in each cluster
  raft::linalg::reduce_cols_by_key(sample_weights.data_handle(),
                                   cluster_labels,
                                   weight_per_cluster.data_handle(),
                                   (IndexT)1,
                                   (IndexT)sample_weights.extent(0),
                                   (IndexT)n_clusters,
                                   resource::get_cuda_stream(handle));
  // Computes new_centroids[i] = new_centroids[i]/weight_per_cluster[i] where
  //   new_centroids[n_clusters x n_features] - 2D array, new_centroids[i] has sum of all the
  //     samples assigned to cluster-i
  //   weight_per_cluster[n_clusters] - 1D array, weight_per_cluster[i] contains sum of weights in
  //     cluster-i.
  // Note - when weight_per_cluster[i] is 0, new_centroids[i] is reset to 0
  raft::linalg::matrixVectorOp(new_centroids.data_handle(),
                               new_centroids.data_handle(),
                               weight_per_cluster.data_handle(),
                               new_centroids.extent(1),
                               new_centroids.extent(0),
                               true,
                               false,
                               raft::div_checkzero_op{},
                               resource::get_cuda_stream(handle));
  // copy centroids[i] to new_centroids[i] when weight_per_cluster[i] is 0
  // (gather_if's predicate keys off the weight paired with each row index)
  cub::ArgIndexInputIterator<DataT*> itr_wt(weight_per_cluster.data_handle());
  raft::matrix::gather_if(
    const_cast<DataT*>(centroids.data_handle()),
    static_cast<int>(centroids.extent(1)),
    static_cast<int>(centroids.extent(0)),
    itr_wt,
    itr_wt,
    static_cast<int>(weight_per_cluster.size()),
    new_centroids.data_handle(),
    [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> map) { // predicate
      // copy when the sum of weights in the cluster is 0
      return map.value == 0;
    },
    raft::key_op{},
    resource::get_cuda_stream(handle));
}
// TODO: Resizing is needed to use mdarray instead of rmm::device_uvector
/**
 * @brief Core Lloyd-iteration loop of KMeans.fit.
 *
 * Starting from the centroids already stored in `centroidsRawData`, repeats
 * (assign samples to nearest centroid, recompute centroids) until either the
 * squared centroid movement falls below params.tol, the (optional) inertia
 * convergence check triggers, or params.max_iter iterations have run.
 * On exit, `inertia[0]` holds the final weighted cluster cost and `n_iter[0]`
 * the number of iterations executed.
 */
template <typename DataT, typename IndexT>
void kmeans_fit_main(raft::resources const& handle,
                     const KMeansParams& params,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<const DataT, IndexT> weight,
                     raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                     raft::host_scalar_view<DataT> inertia,
                     raft::host_scalar_view<IndexT> n_iter,
                     rmm::device_uvector<char>& workspace)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("kmeans_fit_main");
  logger::get(RAFT_NAME).set_level(params.verbosity);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = params.n_clusters;
  auto metric = params.metric;
  // stores (key, value) pair corresponding to each sample where
  //   - key is the index of nearest cluster
  //   - value is the distance to the nearest cluster
  auto minClusterAndDistance =
    raft::make_device_vector<raft::KeyValuePair<IndexT, DataT>, IndexT>(handle, n_samples);
  // temporary buffer to store L2 norm of centroids or distance matrix,
  // destructor releases the resource
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // temporary buffer to store intermediate centroids, destructor releases the
  // resource
  auto newCentroids = raft::make_device_matrix<DataT, IndexT>(handle, n_clusters, n_features);
  // temporary buffer to store weights per cluster, destructor releases the
  // resource
  auto wtInCluster = raft::make_device_vector<DataT, IndexT>(handle, n_clusters);
  rmm::device_scalar<DataT> clusterCostD(stream);
  // L2 norm of X: ||x||^2
  // (precomputed only for the expanded L2 distance variants)
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  auto l2normx_view =
    raft::make_device_vector_view<const DataT, IndexT>(L2NormX.data_handle(), n_samples);
  if (metric == raft::distance::DistanceType::L2Expanded ||
      metric == raft::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }
  RAFT_LOG_DEBUG(
    "Calling KMeans.fit with %d samples of input data and the initialized "
    "cluster centers",
    n_samples);
  DataT priorClusteringCost = 0;
  for (n_iter[0] = 1; n_iter[0] <= params.max_iter; ++n_iter[0]) {
    RAFT_LOG_DEBUG(
      "KMeans.fit: Iteration-%d: fitting the model using the initialized "
      "cluster centers",
      n_iter[0]);
    auto centroids = raft::make_device_matrix_view<DataT, IndexT>(
      centroidsRawData.data_handle(), n_clusters, n_features);
    // computes minClusterAndDistance[0:n_samples) where
    // minClusterAndDistance[i] is a <key, value> pair where
    //   'key' is index to a sample in 'centroids' (index of the nearest
    //   centroid) and 'value' is the distance between the sample 'X[i]' and the
    //   'centroid[key]'
    detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                        X,
                                                        centroids,
                                                        minClusterAndDistance.view(),
                                                        l2normx_view,
                                                        L2NormBuf_OR_DistBuf,
                                                        params.metric,
                                                        params.batch_samples,
                                                        params.batch_centroids,
                                                        workspace);
    // Using TransformInputIteratorT to dereference an array of
    // raft::KeyValuePair and converting them to just return the Key to be used
    // in reduce_rows_by_key prims
    detail::KeyValueIndexOp<IndexT, DataT> conversion_op;
    cub::TransformInputIterator<IndexT,
                                detail::KeyValueIndexOp<IndexT, DataT>,
                                raft::KeyValuePair<IndexT, DataT>*>
      itr(minClusterAndDistance.data_handle(), conversion_op);
    // Recompute centroids as weighted means of their assigned samples.
    update_centroids(handle,
                     X,
                     weight,
                     raft::make_device_matrix_view<const DataT, IndexT>(
                       centroidsRawData.data_handle(), n_clusters, n_features),
                     itr,
                     wtInCluster.view(),
                     newCentroids.view(),
                     workspace);
    // compute the squared norm between the newCentroids and the original
    // centroids, destructor releases the resource
    auto sqrdNorm = raft::make_device_scalar(handle, DataT(0));
    raft::linalg::mapThenSumReduce(sqrdNorm.data_handle(),
                                   newCentroids.size(),
                                   raft::sqdiff_op{},
                                   stream,
                                   centroids.data_handle(),
                                   newCentroids.data_handle());
    DataT sqrdNormError = 0;
    raft::copy(&sqrdNormError, sqrdNorm.data_handle(), sqrdNorm.size(), stream);
    raft::copy(
      centroidsRawData.data_handle(), newCentroids.data_handle(), newCentroids.size(), stream);
    bool done = false;
    if (params.inertia_check) {
      // calculate cluster cost phi_x(C)
      detail::computeClusterCost(handle,
                                 minClusterAndDistance.view(),
                                 workspace,
                                 raft::make_device_scalar_view(clusterCostD.data()),
                                 raft::value_op{},
                                 raft::add_op{});
      DataT curClusteringCost = clusterCostD.value(stream);
      ASSERT(curClusteringCost != (DataT)0.0,
             "Too few points and centroids being found is getting 0 cost from "
             "centers");
      if (n_iter[0] > 1) {
        // Converged when the relative cost decrease drops below tol.
        DataT delta = curClusteringCost / priorClusteringCost;
        if (delta > 1 - params.tol) done = true;
      }
      priorClusteringCost = curClusteringCost;
    }
    // Sync before reading sqrdNormError (filled by an async copy above).
    resource::sync_stream(handle, stream);
    if (sqrdNormError < params.tol) done = true;
    if (done) {
      RAFT_LOG_DEBUG("Threshold triggered after %d iterations. Terminating early.", n_iter[0]);
      break;
    }
  }
  // Final assignment pass against the converged centroids so the reported
  // inertia matches the centroids being returned.
  auto centroids = raft::make_device_matrix_view<DataT, IndexT>(
    centroidsRawData.data_handle(), n_clusters, n_features);
  detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                      X,
                                                      centroids,
                                                      minClusterAndDistance.view(),
                                                      l2normx_view,
                                                      L2NormBuf_OR_DistBuf,
                                                      params.metric,
                                                      params.batch_samples,
                                                      params.batch_centroids,
                                                      workspace);
  // TODO: add different templates for InType of binaryOp to avoid thrust transform
  // Scale each sample's nearest-centroid distance by its weight.
  thrust::transform(resource::get_thrust_policy(handle),
                    minClusterAndDistance.data_handle(),
                    minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                    weight.data_handle(),
                    minClusterAndDistance.data_handle(),
                    [=] __device__(const raft::KeyValuePair<IndexT, DataT> kvp, DataT wt) {
                      raft::KeyValuePair<IndexT, DataT> res;
                      res.value = kvp.value * wt;
                      res.key = kvp.key;
                      return res;
                    });
  // calculate cluster cost phi_x(C)
  detail::computeClusterCost(handle,
                             minClusterAndDistance.view(),
                             workspace,
                             raft::make_device_scalar_view(clusterCostD.data()),
                             raft::value_op{},
                             raft::add_op{});
  inertia[0] = clusterCostD.value(stream);
  RAFT_LOG_DEBUG("KMeans.fit: completed after %d iterations with %f inertia[0] ",
                 n_iter[0] > params.max_iter ? n_iter[0] - 1 : n_iter[0],
                 inertia[0]);
}
/*
* @brief Selects 'n_clusters' samples from X using scalable kmeans++ algorithm.
* @note This is the algorithm described in
* "Scalable K-Means++", 2012, Bahman Bahmani, Benjamin Moseley,
* Andrea Vattani, Ravi Kumar, Sergei Vassilvitskii,
* https://arxiv.org/abs/1203.6402
* Scalable kmeans++ pseudocode
* 1: C = sample a point uniformly at random from X
* 2: psi = phi_X (C)
* 3: for O( log(psi) ) times do
* 4: C' = sample each point x in X independently with probability
* p_x = l * (d^2(x, C) / phi_X (C) )
* 5: C = C U C'
* 6: end for
* 7: For x in C, set w_x to be the number of points in X closer to x than any
* other point in C
* 8: Recluster the weighted points in C into k clusters
* TODO: Resizing is needed to use mdarray instead of rmm::device_uvector
*/
template <typename DataT, typename IndexT>
void initScalableKMeansPlusPlus(raft::resources const& handle,
                                const KMeansParams& params,
                                raft::device_matrix_view<const DataT, IndexT> X,
                                raft::device_matrix_view<DataT, IndexT> centroidsRawData,
                                rmm::device_uvector<char>& workspace)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("initScalableKMeansPlusPlus");
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  auto metric         = params.metric;
  raft::random::RngState rng(params.rng_state.seed, params.rng_state.type);
  // <<<< Step-1 >>> : C <- sample a point uniformly at random from X
  // Host-side RNG is used only for this single index draw.
  std::mt19937 gen(params.rng_state.seed);
  std::uniform_int_distribution<> dis(0, n_samples - 1);
  auto cIdx            = dis(gen);
  auto initialCentroid = raft::make_device_matrix_view<const DataT, IndexT>(
    X.data_handle() + cIdx * n_features, 1, n_features);
  // flag the sample that is chosen as initial centroid
  std::vector<uint8_t> h_isSampleCentroid(n_samples);
  std::fill(h_isSampleCentroid.begin(), h_isSampleCentroid.end(), 0);
  h_isSampleCentroid[cIdx] = 1;
  // device buffer to flag the sample that is chosen as initial centroid
  auto isSampleCentroid = raft::make_device_vector<uint8_t, IndexT>(handle, n_samples);
  raft::copy(
    isSampleCentroid.data_handle(), h_isSampleCentroid.data(), isSampleCentroid.size(), stream);
  // Growable buffer holding all candidate centroids; resized each round as C grows.
  rmm::device_uvector<DataT> centroidsBuf(initialCentroid.size(), stream);
  // reset buffer to store the chosen centroid
  raft::copy(centroidsBuf.data(), initialCentroid.data_handle(), initialCentroid.size(), stream);
  // NOTE: this view is re-created after every resize below because resizing
  // may reallocate the underlying buffer.
  auto potentialCentroids = raft::make_device_matrix_view<DataT, IndexT>(
    centroidsBuf.data(), initialCentroid.extent(0), initialCentroid.extent(1));
  // <<< End of Step-1 >>>
  // temporary buffer to store L2 norm of centroids or distance matrix,
  // destructor releases the resource
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // L2 norm of X: ||x||^2 — only needed for the expanded L2 distance variants.
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (metric == raft::distance::DistanceType::L2Expanded ||
      metric == raft::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }
  auto minClusterDistanceVec = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  auto uniformRands          = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  rmm::device_scalar<DataT> clusterCost(stream);
  // <<< Step-2 >>>: psi <- phi_X (C)
  detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                   X,
                                                   potentialCentroids,
                                                   minClusterDistanceVec.view(),
                                                   L2NormX.view(),
                                                   L2NormBuf_OR_DistBuf,
                                                   params.metric,
                                                   params.batch_samples,
                                                   params.batch_centroids,
                                                   workspace);
  // compute partial cluster cost from the samples in rank
  detail::computeClusterCost(handle,
                             minClusterDistanceVec.view(),
                             workspace,
                             raft::make_device_scalar_view(clusterCost.data()),
                             raft::identity_op{},
                             raft::add_op{});
  auto psi = clusterCost.value(stream);
  // <<< End of Step-2 >>>
  // Scalable kmeans++ paper claims 8 rounds is sufficient
  resource::sync_stream(handle, stream);
  int niter = std::min(8, (int)ceil(log(psi)));
  RAFT_LOG_DEBUG("KMeans||: psi = %g, log(psi) = %g, niter = %d ", psi, log(psi), niter);
  // <<<< Step-3 >>> : for O( log(psi) ) times do
  for (int iter = 0; iter < niter; ++iter) {
    RAFT_LOG_DEBUG("KMeans|| - Iteration %d: # potential centroids sampled - %d",
                   iter,
                   potentialCentroids.extent(0));
    // Recompute d^2(x, C) against the current candidate set.
    detail::minClusterDistanceCompute<DataT, IndexT>(handle,
                                                     X,
                                                     potentialCentroids,
                                                     minClusterDistanceVec.view(),
                                                     L2NormX.view(),
                                                     L2NormBuf_OR_DistBuf,
                                                     params.metric,
                                                     params.batch_samples,
                                                     params.batch_centroids,
                                                     workspace);
    detail::computeClusterCost(handle,
                               minClusterDistanceVec.view(),
                               workspace,
                               raft::make_device_scalar_view<DataT>(clusterCost.data()),
                               raft::identity_op{},
                               raft::add_op{});
    psi = clusterCost.value(stream);
    // <<<< Step-4 >>> : Sample each point x in X independently and identify new
    // potentialCentroids
    raft::random::uniform(
      handle, rng, uniformRands.data_handle(), uniformRands.extent(0), (DataT)0, (DataT)1);
    detail::SamplingOp<DataT, IndexT> select_op(psi,
                                                params.oversampling_factor,
                                                n_clusters,
                                                uniformRands.data_handle(),
                                                isSampleCentroid.data_handle());
    rmm::device_uvector<DataT> CpRaw(0, stream);
    detail::sampleCentroids<DataT, IndexT>(handle,
                                           X,
                                           minClusterDistanceVec.view(),
                                           isSampleCentroid.view(),
                                           select_op,
                                           CpRaw,
                                           workspace);
    auto Cp = raft::make_device_matrix_view<DataT, IndexT>(
      CpRaw.data(), CpRaw.size() / n_features, n_features);
    /// <<<< End of Step-4 >>>>
    /// <<<< Step-5 >>> : C = C U C'
    // append the data in Cp to the buffer holding the potentialCentroids
    centroidsBuf.resize(centroidsBuf.size() + Cp.size(), stream);
    raft::copy(
      centroidsBuf.data() + centroidsBuf.size() - Cp.size(), Cp.data_handle(), Cp.size(), stream);
    IndexT tot_centroids = potentialCentroids.extent(0) + Cp.extent(0);
    // Re-create the view: resize may have moved the buffer.
    potentialCentroids =
      raft::make_device_matrix_view<DataT, IndexT>(centroidsBuf.data(), tot_centroids, n_features);
    /// <<<< End of Step-5 >>>
  }  /// <<<< Step-6 >>>
  RAFT_LOG_DEBUG("KMeans||: total # potential centroids sampled - %d",
                 potentialCentroids.extent(0));
  if ((int)potentialCentroids.extent(0) > n_clusters) {
    // <<< Step-7 >>>: For x in C, set w_x to be the number of pts closest to X
    // temporary buffer to store the sample count per cluster, destructor
    // releases the resource
    auto weight = raft::make_device_vector<DataT, IndexT>(handle, potentialCentroids.extent(0));
    detail::countSamplesInCluster<DataT, IndexT>(
      handle, params, X, L2NormX.view(), potentialCentroids, workspace, weight.view());
    // <<< end of Step-7 >>>
    // Step-8: Recluster the weighted points in C into k clusters
    // kmeansPlusPlus seeds, then a weighted kmeans_fit_main refines.
    detail::kmeansPlusPlus<DataT, IndexT>(
      handle, params, potentialCentroids, centroidsRawData, workspace);
    auto inertia = make_host_scalar<DataT>(0);
    auto n_iter  = make_host_scalar<IndexT>(0);
    KMeansParams default_params;
    default_params.n_clusters = params.n_clusters;
    detail::kmeans_fit_main<DataT, IndexT>(handle,
                                           default_params,
                                           potentialCentroids,
                                           weight.view(),
                                           centroidsRawData,
                                           inertia.view(),
                                           n_iter.view(),
                                           workspace);
  } else if ((int)potentialCentroids.extent(0) < n_clusters) {
    // supplement with random
    auto n_random_clusters = n_clusters - potentialCentroids.extent(0);
    RAFT_LOG_DEBUG(
      "[Warning!] KMeans||: found fewer than %d centroids during "
      "initialization (found %d centroids, remaining %d centroids will be "
      "chosen randomly from input samples)",
      n_clusters,
      potentialCentroids.extent(0),
      n_random_clusters);
    // generate `n_random_clusters` centroids
    KMeansParams rand_params;
    rand_params.init       = KMeansParams::InitMethod::Random;
    rand_params.n_clusters = n_random_clusters;
    initRandom<DataT, IndexT>(handle, rand_params, X, centroidsRawData);
    // copy centroids generated during kmeans|| iteration to the buffer
    raft::copy(centroidsRawData.data_handle() + n_random_clusters * n_features,
               potentialCentroids.data_handle(),
               potentialCentroids.size(),
               stream);
  } else {
    // found the required n_clusters
    raft::copy(centroidsRawData.data_handle(),
               potentialCentroids.data_handle(),
               potentialCentroids.size(),
               stream);
  }
}
/**
* @brief Find clusters with k-means algorithm.
* Initial centroids are chosen with k-means++ algorithm. Empty
* clusters are reinitialized by choosing new centroids with
* k-means++ algorithm.
* @tparam DataT the type of data used for weights, distances.
* @tparam IndexT the type of data used for indexing.
* @param[in] handle The raft handle.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. It must be noted
* that the data must be in row-major format and stored in device accessible
* location.
* @param[in] n_samples Number of samples in the input X.
* @param[in] n_features Number of features or the dimensions of each
* sample.
* @param[in] sample_weight Optional weights for each observation in X.
* @param[inout] centroids [in] When init is InitMethod::Array, use
* centroids as the initial cluster centers
* [out] Otherwise, generated centroids from the
* kmeans algorithm is stored at the address pointed by 'centroids'.
* @param[out] inertia Sum of squared distances of samples to their
* closest cluster center.
* @param[out] n_iter Number of iterations run.
*/
template <typename DataT, typename IndexT>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                raft::device_matrix_view<const DataT, IndexT> X,
                std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                raft::device_matrix_view<DataT, IndexT> centroids,
                raft::host_scalar_view<DataT> inertia,
                raft::host_scalar_view<IndexT> n_iter)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("kmeans_fit");
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // Check that parameters are valid
  if (sample_weight.has_value())
    RAFT_EXPECTS(sample_weight.value().extent(0) == n_samples,
                 "invalid parameter (sample_weight!=n_samples)");
  RAFT_EXPECTS(n_clusters > 0, "invalid parameter (n_clusters<=0)");
  RAFT_EXPECTS(params.tol > 0, "invalid parameter (tol<=0)");
  RAFT_EXPECTS(params.oversampling_factor >= 0, "invalid parameter (oversampling_factor<0)");
  RAFT_EXPECTS((int)centroids.extent(0) == params.n_clusters,
               "invalid parameter (centroids.extent(0) != n_clusters)");
  RAFT_EXPECTS(centroids.extent(1) == n_features,
               "invalid parameter (centroids.extent(1) != n_features)");
  // Display a message if the batch size is smaller than n_samples but will be ignored
  if (params.batch_samples < (int)n_samples &&
      (params.metric == raft::distance::DistanceType::L2Expanded ||
       params.metric == raft::distance::DistanceType::L2SqrtExpanded)) {
    RAFT_LOG_DEBUG(
      "batch_samples=%d was passed, but batch_samples=%d will be used (reason: "
      "batch_samples has no impact on the memory footprint when FusedL2NN can be used)",
      params.batch_samples,
      (int)n_samples);
  }
  // Display a message if batch_centroids is set and a fusedL2NN-compatible metric is used
  if (params.batch_centroids != 0 && params.batch_centroids != params.n_clusters &&
      (params.metric == raft::distance::DistanceType::L2Expanded ||
       params.metric == raft::distance::DistanceType::L2SqrtExpanded)) {
    RAFT_LOG_DEBUG(
      "batch_centroids=%d was passed, but batch_centroids=%d will be used (reason: "
      "batch_centroids has no impact on the memory footprint when FusedL2NN can be used)",
      params.batch_centroids,
      params.n_clusters);
  }
  logger::get(RAFT_NAME).set_level(params.verbosity);
  // Allocate memory
  rmm::device_uvector<char> workspace(0, stream);
  // Per-sample weights; default to 1 when the caller provides none.
  // FIX: pass IndexT explicitly (previously defaulted) so the vector's index
  // type matches the device_vector_view<const DataT, IndexT> consumed by
  // kmeans_fit_main, consistent with kmeans_predict below.
  auto weight = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (sample_weight.has_value())
    raft::copy(weight.data_handle(), sample_weight.value().data_handle(), n_samples, stream);
  else
    thrust::fill(resource::get_thrust_policy(handle),
                 weight.data_handle(),
                 weight.data_handle() + weight.size(),
                 1);
  // check if weights sum up to n_samples
  checkWeight<DataT>(handle, weight.view(), workspace);
  auto centroidsRawData = raft::make_device_matrix<DataT, IndexT>(handle, n_clusters, n_features);
  auto n_init = params.n_init;
  if (params.init == KMeansParams::InitMethod::Array && n_init != 1) {
    RAFT_LOG_DEBUG(
      "Explicit initial center position passed: performing only one init in "
      "k-means instead of n_init=%d",
      n_init);
    n_init = 1;
  }
  // Host RNG produces a fresh seed per restart so each init differs.
  std::mt19937 gen(params.rng_state.seed);
  inertia[0] = std::numeric_limits<DataT>::max();
  // Run n_init restarts and keep the centroids of the best (lowest-inertia) run.
  for (auto seed_iter = 0; seed_iter < n_init; ++seed_iter) {
    KMeansParams iter_params   = params;
    iter_params.rng_state.seed = gen();
    DataT iter_inertia         = std::numeric_limits<DataT>::max();
    IndexT n_current_iter      = 0;
    if (iter_params.init == KMeansParams::InitMethod::Random) {
      // initializing with random samples from input dataset
      RAFT_LOG_DEBUG(
        "KMeans.fit (Iteration-%d/%d): initialize cluster centers by "
        "randomly choosing from the "
        "input data.",
        seed_iter + 1,
        n_init);
      initRandom<DataT, IndexT>(handle, iter_params, X, centroidsRawData.view());
    } else if (iter_params.init == KMeansParams::InitMethod::KMeansPlusPlus) {
      // default method to initialize is kmeans++
      RAFT_LOG_DEBUG(
        "KMeans.fit (Iteration-%d/%d): initialize cluster centers using "
        "k-means++ algorithm.",
        seed_iter + 1,
        n_init);
      // oversampling_factor == 0 selects plain kmeans++; otherwise kmeans||.
      if (iter_params.oversampling_factor == 0)
        detail::kmeansPlusPlus<DataT, IndexT>(
          handle, iter_params, X, centroidsRawData.view(), workspace);
      else
        detail::initScalableKMeansPlusPlus<DataT, IndexT>(
          handle, iter_params, X, centroidsRawData.view(), workspace);
    } else if (iter_params.init == KMeansParams::InitMethod::Array) {
      RAFT_LOG_DEBUG(
        "KMeans.fit (Iteration-%d/%d): initialize cluster centers from "
        "the ndarray array input "
        "passed to init argument.",
        seed_iter + 1,
        n_init);
      raft::copy(
        centroidsRawData.data_handle(), centroids.data_handle(), n_clusters * n_features, stream);
    } else {
      THROW("unknown initialization method to select initial centers");
    }
    detail::kmeans_fit_main<DataT, IndexT>(handle,
                                           iter_params,
                                           X,
                                           weight.view(),
                                           centroidsRawData.view(),
                                           raft::make_host_scalar_view<DataT>(&iter_inertia),
                                           raft::make_host_scalar_view<IndexT>(&n_current_iter),
                                           workspace);
    // Keep the best restart seen so far.
    if (iter_inertia < inertia[0]) {
      inertia[0] = iter_inertia;
      n_iter[0]  = n_current_iter;
      raft::copy(
        centroids.data_handle(), centroidsRawData.data_handle(), n_clusters * n_features, stream);
    }
    RAFT_LOG_DEBUG("KMeans.fit after iteration-%d/%d: inertia - %f, n_iter[0] - %d",
                   seed_iter + 1,
                   n_init,
                   inertia[0],
                   n_iter[0]);
  }
  RAFT_LOG_DEBUG("KMeans.fit: async call returned (fit could still be running on the device)");
}
/**
 * @brief Raw-pointer overload of kmeans_fit; wraps the inputs in mdspan views
 * and forwards to the view-based implementation.
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit(raft::resources const& handle,
                const KMeansParams& params,
                const DataT* X,
                const DataT* sample_weight,
                DataT* centroids,
                IndexT n_samples,
                IndexT n_features,
                DataT& inertia,
                IndexT& n_iter)
{
  auto XView = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto centroidsView =
    raft::make_device_matrix_view<DataT, IndexT>(centroids, params.n_clusters, n_features);
  // FIX: declare the optional with IndexT and emplace the view (previously the
  // optional's view type omitted IndexT, relying on a cross-index-type
  // conversion); this matches the kmeans_predict overload below.
  std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weightView{std::nullopt};
  if (sample_weight)
    sample_weightView.emplace(
      raft::make_device_vector_view<const DataT, IndexT>(sample_weight, n_samples));
  auto inertiaView = raft::make_host_scalar_view(&inertia);
  auto n_iterView  = raft::make_host_scalar_view(&n_iter);
  detail::kmeans_fit<DataT, IndexT>(
    handle, params, XView, sample_weightView, centroidsView, inertiaView, n_iterView);
}
/**
 * @brief Assign each sample in X to its nearest centroid and compute the
 * (weighted) inertia.
 *
 * @param[in]  handle           raft resources
 * @param[in]  params           k-means parameters (n_clusters, metric, batching, ...)
 * @param[in]  X                [n_samples x n_features] row-major input
 * @param[in]  sample_weight    optional per-sample weights (defaults to 1)
 * @param[in]  centroids        [n_clusters x n_features] row-major centroids
 * @param[out] labels           index of the nearest centroid per sample
 * @param[in]  normalize_weight if true, rescale weights so they sum to n_samples
 * @param[out] inertia          weighted sum of distances to the closest centroid
 */
template <typename DataT, typename IndexT>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    raft::device_matrix_view<const DataT, IndexT> X,
                    std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                    raft::device_matrix_view<const DataT, IndexT> centroids,
                    raft::device_vector_view<IndexT, IndexT> labels,
                    bool normalize_weight,
                    raft::host_scalar_view<DataT> inertia)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("kmeans_predict");
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // Check that parameters are valid
  if (sample_weight.has_value())
    RAFT_EXPECTS(sample_weight.value().extent(0) == n_samples,
                 "invalid parameter (sample_weight!=n_samples)");
  RAFT_EXPECTS(params.n_clusters > 0, "invalid parameter (n_clusters<=0)");
  RAFT_EXPECTS(params.tol > 0, "invalid parameter (tol<=0)");
  RAFT_EXPECTS(params.oversampling_factor >= 0, "invalid parameter (oversampling_factor<0)");
  RAFT_EXPECTS((int)centroids.extent(0) == params.n_clusters,
               "invalid parameter (centroids.extent(0) != n_clusters)");
  RAFT_EXPECTS(centroids.extent(1) == n_features,
               "invalid parameter (centroids.extent(1) != n_features)");
  logger::get(RAFT_NAME).set_level(params.verbosity);
  auto metric = params.metric;
  // Allocate memory
  // Device-accessible allocation of expandable storage used as temporary buffers
  rmm::device_uvector<char> workspace(0, stream);
  auto weight = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (sample_weight.has_value())
    raft::copy(weight.data_handle(), sample_weight.value().data_handle(), n_samples, stream);
  else
    thrust::fill(resource::get_thrust_policy(handle),
                 weight.data_handle(),
                 weight.data_handle() + weight.size(),
                 1);
  // check if weights sum up to n_samples
  if (normalize_weight) checkWeight(handle, weight.view(), workspace);
  auto minClusterAndDistance =
    raft::make_device_vector<raft::KeyValuePair<IndexT, DataT>, IndexT>(handle, n_samples);
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // L2 norm of X: ||x||^2 — precomputed only for the expanded L2 metrics.
  auto L2NormX = raft::make_device_vector<DataT, IndexT>(handle, n_samples);
  if (metric == raft::distance::DistanceType::L2Expanded ||
      metric == raft::distance::DistanceType::L2SqrtExpanded) {
    raft::linalg::rowNorm(L2NormX.data_handle(),
                          X.data_handle(),
                          X.extent(1),
                          X.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  }
  // computes minClusterAndDistance[0:n_samples) where minClusterAndDistance[i]
  // is a <key, value> pair where
  // 'key' is index to a sample in 'centroids' (index of the nearest
  // centroid) and 'value' is the distance between the sample 'X[i]' and the
  // 'centroid[key]'
  auto l2normx_view =
    raft::make_device_vector_view<const DataT, IndexT>(L2NormX.data_handle(), n_samples);
  detail::minClusterAndDistanceCompute<DataT, IndexT>(handle,
                                                      X,
                                                      centroids,
                                                      minClusterAndDistance.view(),
                                                      l2normx_view,
                                                      L2NormBuf_OR_DistBuf,
                                                      params.metric,
                                                      params.batch_samples,
                                                      params.batch_centroids,
                                                      workspace);
  // calculate cluster cost phi_x(C)
  rmm::device_scalar<DataT> clusterCostD(stream);
  // TODO: add different templates for InType of binaryOp to avoid thrust transform
  // Scale each sample's min distance by its weight (in place).
  thrust::transform(resource::get_thrust_policy(handle),
                    minClusterAndDistance.data_handle(),
                    minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                    weight.data_handle(),
                    minClusterAndDistance.data_handle(),
                    [=] __device__(const raft::KeyValuePair<IndexT, DataT> kvp, DataT wt) {
                      raft::KeyValuePair<IndexT, DataT> res;
                      res.value = kvp.value * wt;
                      res.key   = kvp.key;
                      return res;
                    });
  // Sum the weighted distances into a single device scalar.
  detail::computeClusterCost(handle,
                             minClusterAndDistance.view(),
                             workspace,
                             raft::make_device_scalar_view(clusterCostD.data()),
                             raft::value_op{},
                             raft::add_op{});
  // Extract the nearest-centroid index (key) of each pair into 'labels'.
  thrust::transform(resource::get_thrust_policy(handle),
                    minClusterAndDistance.data_handle(),
                    minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                    labels.data_handle(),
                    raft::key_op{});
  // Blocking read of the cluster cost back to the host.
  inertia[0] = clusterCostD.value(stream);
}
/**
 * @brief Raw-pointer overload of kmeans_predict; wraps the pointers in mdspan
 * views and delegates to the view-based implementation.
 */
template <typename DataT, typename IndexT = int>
void kmeans_predict(raft::resources const& handle,
                    const KMeansParams& params,
                    const DataT* X,
                    const DataT* sample_weight,
                    const DataT* centroids,
                    IndexT n_samples,
                    IndexT n_features,
                    IndexT* labels,
                    bool normalize_weight,
                    DataT& inertia)
{
  auto x_view = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto centroids_view =
    raft::make_device_matrix_view<const DataT, IndexT>(centroids, params.n_clusters, n_features);
  // Sample weights are optional; wrap only when a pointer was supplied.
  std::optional<raft::device_vector_view<const DataT, IndexT>> weight_view{std::nullopt};
  if (sample_weight != nullptr) {
    weight_view.emplace(
      raft::make_device_vector_view<const DataT, IndexT>(sample_weight, n_samples));
  }
  auto labels_view  = raft::make_device_vector_view<IndexT, IndexT>(labels, n_samples);
  auto inertia_view = raft::make_host_scalar_view(&inertia);
  detail::kmeans_predict<DataT, IndexT>(handle,
                                        params,
                                        x_view,
                                        weight_view,
                                        centroids_view,
                                        labels_view,
                                        normalize_weight,
                                        inertia_view);
}
/**
 * @brief Fit k-means on X, then label every sample with its nearest centroid.
 * If no centroid storage is provided, a temporary matrix is allocated for the
 * duration of the call.
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        raft::device_matrix_view<const DataT, IndexT> X,
                        std::optional<raft::device_vector_view<const DataT, IndexT>> sample_weight,
                        std::optional<raft::device_matrix_view<DataT, IndexT>> centroids,
                        raft::device_vector_view<IndexT, IndexT> labels,
                        raft::host_scalar_view<DataT> inertia,
                        raft::host_scalar_view<IndexT> n_iter)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("kmeans_fit_predict");
  if (centroids.has_value()) {
    // Caller supplied centroid storage: fit writes the final centers there.
    detail::kmeans_fit<DataT, IndexT>(
      handle, params, X, sample_weight, centroids.value(), inertia, n_iter);
    detail::kmeans_predict<DataT, IndexT>(
      handle, params, X, sample_weight, centroids.value(), labels, true, inertia);
  } else {
    // No storage supplied: use a scratch centroid matrix that lives only for
    // the fit + predict pair.
    auto scratch_centroids =
      raft::make_device_matrix<DataT, IndexT>(handle, params.n_clusters, X.extent(1));
    detail::kmeans_fit<DataT, IndexT>(
      handle, params, X, sample_weight, scratch_centroids.view(), inertia, n_iter);
    detail::kmeans_predict<DataT, IndexT>(
      handle, params, X, sample_weight, scratch_centroids.view(), labels, true, inertia);
  }
}
/**
 * @brief Raw-pointer overload of kmeans_fit_predict; builds mdspan views over
 * the pointers and delegates to the view-based implementation.
 */
template <typename DataT, typename IndexT = int>
void kmeans_fit_predict(raft::resources const& handle,
                        const KMeansParams& params,
                        const DataT* X,
                        const DataT* sample_weight,
                        DataT* centroids,
                        IndexT n_samples,
                        IndexT n_features,
                        IndexT* labels,
                        DataT& inertia,
                        IndexT& n_iter)
{
  auto x_view = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  // Both sample_weight and centroids may be null; wrap only what was provided.
  std::optional<raft::device_vector_view<const DataT, IndexT>> weight_view{std::nullopt};
  if (sample_weight != nullptr) {
    weight_view.emplace(
      raft::make_device_vector_view<const DataT, IndexT>(sample_weight, n_samples));
  }
  std::optional<raft::device_matrix_view<DataT, IndexT>> centroids_view{std::nullopt};
  if (centroids != nullptr) {
    centroids_view.emplace(
      raft::make_device_matrix_view<DataT, IndexT>(centroids, params.n_clusters, n_features));
  }
  auto labels_view  = raft::make_device_vector_view<IndexT, IndexT>(labels, n_samples);
  auto inertia_view = raft::make_host_scalar_view(&inertia);
  auto n_iter_view  = raft::make_host_scalar_view(&n_iter);
  detail::kmeans_fit_predict<DataT, IndexT>(handle,
                                            params,
                                            x_view,
                                            weight_view,
                                            centroids_view,
                                            labels_view,
                                            inertia_view,
                                            n_iter_view);
}
/**
* @brief Transform X to a cluster-distance space.
*
* @param[in] handle The handle to the cuML library context that
* manages the CUDA resources.
* @param[in] params Parameters for KMeans model.
* @param[in] X Training instances to cluster. The data must
* be in row-major format
* @param[in] centroids Cluster centroids. The data must be in row-major format.
 * @param[out] X_new X transformed in the new space.
*/
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      raft::device_matrix_view<const DataT> X,
                      raft::device_matrix_view<const DataT> centroids,
                      raft::device_matrix_view<DataT> X_new)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("kmeans_transform");
  logger::get(RAFT_NAME).set_level(params.verbosity);
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples      = X.extent(0);
  auto n_features     = X.extent(1);
  auto n_clusters     = params.n_clusters;
  auto metric         = params.metric;
  // Device-accessible allocation of expandable storage used as temporary buffers
  rmm::device_uvector<char> workspace(0, stream);
  // Batch size over samples, bounded by params.batch_samples.
  auto dataBatchSize = getDataBatchSize(params.batch_samples, n_samples);
  // tile over the input data and calculate distance matrix [n_samples x
  // n_clusters]
  for (IndexT dIdx = 0; dIdx < (IndexT)n_samples; dIdx += dataBatchSize) {
    // # of samples for the current batch (the last batch may be smaller)
    auto ns = std::min(static_cast<IndexT>(dataBatchSize), static_cast<IndexT>(n_samples - dIdx));
    // datasetView [ns x n_features] - view representing the current batch of
    // input dataset
    auto datasetView = raft::make_device_matrix_view<const DataT, IndexT>(
      X.data_handle() + n_features * dIdx, ns, n_features);
    // pairwiseDistanceView [ns x n_clusters] - the corresponding rows of the
    // output matrix X_new
    auto pairwiseDistanceView = raft::make_device_matrix_view<DataT, IndexT>(
      X_new.data_handle() + n_clusters * dIdx, ns, n_clusters);
    // calculate pairwise distance between cluster centroids and current batch
    // of input dataset
    pairwise_distance_kmeans<DataT, IndexT>(
      handle, datasetView, centroids, pairwiseDistanceView, workspace, metric);
  }
}
/**
 * @brief Raw-pointer overload of kmeans_transform; wraps the pointers in
 * mdspan views and forwards to the view-based implementation.
 */
template <typename DataT, typename IndexT = int>
void kmeans_transform(raft::resources const& handle,
                      const KMeansParams& params,
                      const DataT* X,
                      const DataT* centroids,
                      IndexT n_samples,
                      IndexT n_features,
                      DataT* X_new)
{
  auto x_view = raft::make_device_matrix_view<const DataT, IndexT>(X, n_samples, n_features);
  auto centroids_view =
    raft::make_device_matrix_view<const DataT, IndexT>(centroids, params.n_clusters, n_features);
  auto x_new_view = raft::make_device_matrix_view<DataT, IndexT>(X_new, n_samples, n_features);
  detail::kmeans_transform<DataT, IndexT>(handle, params, x_view, centroids_view, x_new_view);
}
} // namespace detail
} // namespace cluster
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/single_linkage.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <raft/cluster/detail/agglomerative.cuh>
#include <raft/cluster/detail/connectivities.cuh>
#include <raft/cluster/detail/mst.cuh>
#include <raft/cluster/single_linkage_types.hpp>
namespace raft::cluster::detail {
static const size_t EMPTY = 0;
/**
* Single-linkage clustering, capable of constructing a KNN graph to
* scale the algorithm beyond the n^2 memory consumption of implementations
* that use the fully-connected graph of pairwise distances by connecting
* a knn graph when k is not large enough to connect it.
* @tparam value_idx
* @tparam value_t
* @tparam dist_type method to use for constructing connectivities graph
* @param[in] handle raft handle
* @param[in] X dense input matrix in row-major layout
* @param[in] m number of rows in X
* @param[in] n number of columns in X
* @param[in] metric distance metrix to use when constructing connectivities graph
* @param[out] out struct containing output dendrogram and cluster assignments
* @param[in] c a constant used when constructing connectivities from knn graph. Allows the indirect
control
* of k. The algorithm will set `k = log(n) + c`
* @param[in] n_clusters number of clusters to assign data samples
*/
template <typename value_idx, typename value_t, LinkageDistance dist_type>
void single_linkage(raft::resources const& handle,
                    const value_t* X,
                    size_t m,
                    size_t n,
                    raft::distance::DistanceType metric,
                    linkage_output<value_idx>* out,
                    int c,
                    size_t n_clusters)
{
  ASSERT(n_clusters <= m, "n_clusters must be less than or equal to the number of data points");
  auto stream = resource::get_cuda_stream(handle);
  // CSR buffers for the connectivities graph; sized by get_distance_graph.
  rmm::device_uvector<value_idx> indptr(EMPTY, stream);
  rmm::device_uvector<value_idx> indices(EMPTY, stream);
  rmm::device_uvector<value_t> pw_dists(EMPTY, stream);
  /**
   * 1. Construct distance graph
   */
  detail::get_distance_graph<value_idx, value_t, dist_type>(
    handle, X, m, n, metric, indptr, indices, pw_dists, c);
  // A spanning tree over m vertices has exactly m - 1 edges.
  rmm::device_uvector<value_idx> mst_rows(m - 1, stream);
  rmm::device_uvector<value_idx> mst_cols(m - 1, stream);
  rmm::device_uvector<value_t> mst_data(m - 1, stream);
  /**
   * 2. Construct MST, sorted by weights
   */
  rmm::device_uvector<value_idx> color(m, stream);
  // Reduction op used to reconnect the graph if the knn graph is disconnected.
  raft::sparse::neighbors::FixConnectivitiesRedOp<value_idx, value_t> op(m);
  detail::build_sorted_mst<value_idx, value_t>(handle,
                                               X,
                                               indptr.data(),
                                               indices.data(),
                                               pw_dists.data(),
                                               m,
                                               n,
                                               mst_rows.data(),
                                               mst_cols.data(),
                                               mst_data.data(),
                                               color.data(),
                                               indices.size(),
                                               op,
                                               metric);
  // Pairwise distances are no longer needed; free them before labeling.
  pw_dists.release();
  /**
   * Perform hierarchical labeling
   */
  size_t n_edges = mst_rows.size();
  rmm::device_uvector<value_t> out_delta(n_edges, stream);
  rmm::device_uvector<value_idx> out_size(n_edges, stream);
  // Create dendrogram
  detail::build_dendrogram_host<value_idx, value_t>(handle,
                                                    mst_rows.data(),
                                                    mst_cols.data(),
                                                    mst_data.data(),
                                                    n_edges,
                                                    out->children,
                                                    out_delta.data(),
                                                    out_size.data());
  // Cut the dendrogram into 'n_clusters' flat cluster assignments.
  detail::extract_flattened_clusters(handle, out->labels, out->children, n_clusters, m);
  out->m                      = m;
  out->n_clusters             = n_clusters;
  out->n_leaves               = m;
  out->n_connected_components = 1;
}
}; // namespace raft::cluster::detail | 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/kmeans_common.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <ctime>
#include <optional>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <random>
#include <cub/cub.cuh>
#include <cuda.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <raft/cluster/kmeans_types.hpp>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/kvp.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/mdarray.hpp>
#include <raft/core/operators.hpp>
#include <raft/core/resources.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/random/permute.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cuda_utils.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace cluster {
namespace detail {
// Selection functor for kmeans|| oversampling: a sample is kept as a centroid
// candidate when it has not been chosen before and its cost-proportional
// probability exceeds a precomputed uniform random draw.
template <typename DataT, typename IndexT>
struct SamplingOp {
  DataT* rnd;                  // per-sample uniform random draws
  uint8_t* flag;               // per-sample "already selected" markers
  DataT cluster_cost;          // current total clustering cost (normalizer)
  double oversampling_factor;  // kmeans|| oversampling factor (l)
  IndexT n_clusters;

  CUB_RUNTIME_FUNCTION __forceinline__
  SamplingOp(DataT c, double l, IndexT k, DataT* rand, uint8_t* ptr)
    : cluster_cost(c), oversampling_factor(l), n_clusters(k), rnd(rand), flag(ptr)
  {
  }

  __host__ __device__ __forceinline__ bool operator()(
    const raft::KeyValuePair<ptrdiff_t, DataT>& a) const
  {
    const auto idx = a.key;
    // Already picked in a previous round: never select twice.
    if (flag[idx]) { return false; }
    const DataT threshold = rnd[idx];
    const DataT prob      = (oversampling_factor * n_clusters * a.value) / cluster_cost;
    return prob > threshold;
  }
};
// Projects a raft::KeyValuePair onto its key. Used with transform iterators to
// expose only the cluster index from <index, distance> pairs.
template <typename IndexT, typename DataT>
struct KeyValueIndexOp {
  __host__ __device__ __forceinline__ IndexT
  operator()(const raft::KeyValuePair<IndexT, DataT>& a) const
  {
    const IndexT key = a.key;
    return key;
  }
};
// Computes the intensity histogram from a sequence of labels
/**
 * @brief Counts how many entries of `labels` fall into each cluster bin.
 *
 * Produces a histogram with `n_clusters` evenly-spaced bins over [0, n_clusters)
 * into `count` (device memory), using the standard two-phase CUB pattern:
 * the first call (null storage pointer) only computes the required temporary
 * storage size, the second call performs the histogram using `workspace`.
 *
 * @param[in]  handle     raft handle providing the CUDA stream
 * @param[in]  labels     iterator over n_samples label values
 * @param[out] count      output histogram, one counter per cluster
 * @param[in]  n_samples  number of labels to count
 * @param[in]  n_clusters number of histogram bins
 * @param[in]  workspace  scratch buffer, resized to CUB's requirement
 */
template <typename SampleIteratorT, typename CounterT, typename IndexT>
void countLabels(raft::resources const& handle,
                 SampleIteratorT labels,
                 CounterT* count,
                 IndexT n_samples,
                 IndexT n_clusters,
                 rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // CUB::DeviceHistogram requires a signed index type
  typedef typename std::make_signed_t<IndexT> CubIndexT;
  // num_levels is bin-edge count: n_clusters bins need n_clusters + 1 edges.
  CubIndexT num_levels = n_clusters + 1;
  CubIndexT lower_level = 0;
  CubIndexT upper_level = n_clusters;
  // Phase 1: size query only (storage pointer is null).
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(nullptr,
                                                    temp_storage_bytes,
                                                    labels,
                                                    count,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    static_cast<CubIndexT>(n_samples),
                                                    stream));
  workspace.resize(temp_storage_bytes, stream);
  // Phase 2: actual histogram computation.
  RAFT_CUDA_TRY(cub::DeviceHistogram::HistogramEven(workspace.data(),
                                                    temp_storage_bytes,
                                                    labels,
                                                    count,
                                                    num_levels,
                                                    lower_level,
                                                    upper_level,
                                                    static_cast<CubIndexT>(n_samples),
                                                    stream));
}
/**
 * @brief Validates sample weights and normalizes them in place if needed.
 *
 * Sums the `weight` vector on the device (two-phase CUB reduce), copies the sum
 * to the host, and — when the sum differs from `n_samples` — rescales every
 * weight by `n_samples / sum` so that the weights sum to the sample count, as
 * the k-means implementation expects.
 *
 * @param[in]    handle    raft handle providing the CUDA stream
 * @param[inout] weight    per-sample weights, possibly rescaled in place
 * @param[in]    workspace scratch buffer, resized to CUB's requirement
 */
template <typename DataT, typename IndexT>
void checkWeight(raft::resources const& handle,
                 raft::device_vector_view<DataT, IndexT> weight,
                 rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto wt_aggr = raft::make_device_scalar<DataT>(handle, 0);
  auto n_samples = weight.extent(0);
  // Phase 1: size query; phase 2: actual sum into wt_aggr.
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceReduce::Sum(
    nullptr, temp_storage_bytes, weight.data_handle(), wt_aggr.data_handle(), n_samples, stream));
  workspace.resize(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceReduce::Sum(workspace.data(),
                                       temp_storage_bytes,
                                       weight.data_handle(),
                                       wt_aggr.data_handle(),
                                       n_samples,
                                       stream));
  DataT wt_sum = 0;
  raft::copy(&wt_sum, wt_aggr.data_handle(), 1, stream);
  // Blocking: wt_sum is read on the host right after this.
  resource::sync_stream(handle, stream);
  if (wt_sum != n_samples) {
    // Cast to int: `n_samples` is an mdspan extent that may be a 64-bit type;
    // passing it to a %d conversion would be undefined behavior in
    // printf-style formatting.
    RAFT_LOG_DEBUG(
      "[Warning!] KMeans: normalizing the user provided sample weight to "
      "sum up to %d samples",
      static_cast<int>(n_samples));
    auto scale = static_cast<DataT>(n_samples) / wt_sum;
    raft::linalg::unaryOp(weight.data_handle(),
                          weight.data_handle(),
                          n_samples,
                          raft::mul_const_op<DataT>{scale},
                          stream);
  }
}
/**
 * Returns the number of samples to process per batch: the requested batch size
 * clamped to the dataset size. A requested value of zero means "no batching"
 * and yields the full dataset size.
 */
template <typename IndexT>
IndexT getDataBatchSize(int batch_samples, IndexT n_samples)
{
  const IndexT requested = static_cast<IndexT>(batch_samples);
  const IndexT clamped   = requested < n_samples ? requested : n_samples;
  return clamped == 0 ? n_samples : clamped;
}
/**
 * Returns the number of centroids to process per batch: the requested batch
 * size clamped to the local cluster count. A requested value of zero means
 * "no batching" and yields all local clusters at once.
 */
template <typename IndexT>
IndexT getCentroidsBatchSize(int batch_centroids, IndexT n_local_clusters)
{
  const IndexT requested = static_cast<IndexT>(batch_centroids);
  const IndexT clamped   = requested < n_local_clusters ? requested : n_local_clusters;
  return clamped == 0 ? n_local_clusters : clamped;
}
/**
 * @brief Reduces the per-sample minimum cluster distances into a single cost.
 *
 * Applies `main_op` element-wise to `minClusterDistance` through a transform
 * iterator, then folds the transformed values with `reduction_op` (identity
 * `OutputT()`, i.e. value-initialized) into the device scalar `clusterCost`,
 * using the two-phase CUB reduce pattern.
 *
 * @param[in]  handle             raft handle providing the CUDA stream
 * @param[in]  minClusterDistance per-sample distances to nearest centroid
 * @param[in]  workspace          scratch buffer, resized to CUB's requirement
 * @param[out] clusterCost        device scalar receiving the reduction result
 * @param[in]  main_op            per-element transform applied before reducing
 * @param[in]  reduction_op       binary reduction operator
 */
template <typename InputT,
          typename OutputT,
          typename MainOpT,
          typename ReductionOpT,
          typename IndexT = int>
void computeClusterCost(raft::resources const& handle,
                        raft::device_vector_view<InputT, IndexT> minClusterDistance,
                        rmm::device_uvector<char>& workspace,
                        raft::device_scalar_view<OutputT> clusterCost,
                        MainOpT main_op,
                        ReductionOpT reduction_op)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  // Lazily applies main_op to each element as CUB reads it.
  cub::TransformInputIterator<OutputT, MainOpT, InputT*> itr(minClusterDistance.data_handle(),
                                                             main_op);
  // Phase 1: size query only (null storage pointer).
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceReduce::Reduce(nullptr,
                                          temp_storage_bytes,
                                          itr,
                                          clusterCost.data_handle(),
                                          minClusterDistance.size(),
                                          reduction_op,
                                          OutputT(),
                                          stream));
  workspace.resize(temp_storage_bytes, stream);
  // Phase 2: actual reduction.
  RAFT_CUDA_TRY(cub::DeviceReduce::Reduce(workspace.data(),
                                          temp_storage_bytes,
                                          itr,
                                          clusterCost.data_handle(),
                                          minClusterDistance.size(),
                                          reduction_op,
                                          OutputT(),
                                          stream));
}
/**
 * @brief Probabilistically samples candidate centroids (kmeans|| style).
 *
 * Runs cub::DeviceSelect::If over <sample index, min-cluster-distance> pairs
 * using `select_op` to decide which samples become candidates, marks the
 * selected samples in `isSampleCentroid`, and gathers the corresponding rows
 * of `X` into `inRankCp` (resized to the selection count).
 *
 * @param[in]    handle             raft handle
 * @param[in]    X                  dataset [n_local_samples x n_features]
 * @param[in]    minClusterDistance per-sample distance to nearest centroid
 * @param[inout] isSampleCentroid   per-sample selection flags, updated in place
 * @param[in]    select_op          sampling predicate (see SamplingOp)
 * @param[out]   inRankCp           gathered candidate rows, resized here
 * @param[in]    workspace          scratch buffer for CUB
 */
template <typename DataT, typename IndexT>
void sampleCentroids(raft::resources const& handle,
                     raft::device_matrix_view<const DataT, IndexT> X,
                     raft::device_vector_view<DataT, IndexT> minClusterDistance,
                     raft::device_vector_view<uint8_t, IndexT> isSampleCentroid,
                     SamplingOp<DataT, IndexT>& select_op,
                     rmm::device_uvector<DataT>& inRankCp,
                     rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_local_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto nSelected = raft::make_device_scalar<IndexT>(handle, 0);
  // Pairs each distance with its position so the predicate can look up flags.
  cub::ArgIndexInputIterator<DataT*> ip_itr(minClusterDistance.data_handle());
  auto sampledMinClusterDistance =
    raft::make_device_vector<raft::KeyValuePair<ptrdiff_t, DataT>, IndexT>(handle, n_local_samples);
  // Two-phase CUB select: size query, then compaction of selected pairs.
  size_t temp_storage_bytes = 0;
  RAFT_CUDA_TRY(cub::DeviceSelect::If(nullptr,
                                      temp_storage_bytes,
                                      ip_itr,
                                      sampledMinClusterDistance.data_handle(),
                                      nSelected.data_handle(),
                                      n_local_samples,
                                      select_op,
                                      stream));
  workspace.resize(temp_storage_bytes, stream);
  RAFT_CUDA_TRY(cub::DeviceSelect::If(workspace.data(),
                                      temp_storage_bytes,
                                      ip_itr,
                                      sampledMinClusterDistance.data_handle(),
                                      nSelected.data_handle(),
                                      n_local_samples,
                                      select_op,
                                      stream));
  // Selection count is needed on the host below; sync before reading.
  IndexT nPtsSampledInRank = 0;
  raft::copy(&nPtsSampledInRank, nSelected.data_handle(), 1, stream);
  resource::sync_stream(handle, stream);
  // Mark every selected sample so later rounds will not pick it again.
  uint8_t* rawPtr_isSampleCentroid = isSampleCentroid.data_handle();
  thrust::for_each_n(resource::get_thrust_policy(handle),
                     sampledMinClusterDistance.data_handle(),
                     nPtsSampledInRank,
                     [=] __device__(raft::KeyValuePair<ptrdiff_t, DataT> val) {
                       rawPtr_isSampleCentroid[val.key] = 1;
                     });
  // Gather the selected rows of X; key_op extracts the row index from each pair.
  inRankCp.resize(nPtsSampledInRank * n_features, stream);
  raft::matrix::gather((DataT*)X.data_handle(),
                       X.extent(1),
                       X.extent(0),
                       sampledMinClusterDistance.data_handle(),
                       nPtsSampledInRank,
                       inRankCp.data(),
                       raft::key_op{},
                       stream);
}
// Computes the full pairwise distance matrix between 'X' [n x d] and
// 'centroids' [k x d] under the given metric; the result is written to
// 'pairwiseDistance' [n x k]. 'workspace' is scratch space for the
// distance primitive.
template <typename DataT, typename IndexT>
void pairwise_distance_kmeans(raft::resources const& handle,
                              raft::device_matrix_view<const DataT, IndexT> X,
                              raft::device_matrix_view<const DataT, IndexT> centroids,
                              raft::device_matrix_view<DataT, IndexT> pairwiseDistance,
                              rmm::device_uvector<char>& workspace,
                              raft::distance::DistanceType metric)
{
  // Both operands must share the same feature dimensionality.
  ASSERT(X.extent(1) == centroids.extent(1),
         "# features in dataset and centroids are different (must be same)");

  raft::distance::pairwise_distance(handle,
                                    X.data_handle(),
                                    centroids.data_handle(),
                                    pairwiseDistance.data_handle(),
                                    X.extent(0),
                                    centroids.extent(0),
                                    X.extent(1),
                                    workspace,
                                    metric);
}
// shuffle and randomly select 'n_samples_to_gather' from input 'in' and stores
// in 'out' does not modify the input
/**
 * @brief Randomly samples rows from 'in' into 'out' without modifying 'in'.
 *
 * Generates a random permutation of the row indices on the device, then
 * gathers the first 'n_samples_to_gather' permuted rows of 'in' into 'out'.
 *
 * NOTE(review): 'seed' is accepted but not visibly passed to
 * raft::random::permute here — the permutation's randomness source is
 * determined by the permute implementation; confirm whether 'seed' should be
 * wired through.
 */
template <typename DataT, typename IndexT>
void shuffleAndGather(raft::resources const& handle,
                      raft::device_matrix_view<const DataT, IndexT> in,
                      raft::device_matrix_view<DataT, IndexT> out,
                      uint32_t n_samples_to_gather,
                      uint64_t seed)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = in.extent(0);
  auto n_features = in.extent(1);
  auto indices = raft::make_device_vector<IndexT, IndexT>(handle, n_samples);
  // shuffle indices on device
  raft::random::permute<DataT, IndexT, IndexT>(indices.data_handle(),
                                               nullptr,
                                               nullptr,
                                               (IndexT)in.extent(1),
                                               (IndexT)in.extent(0),
                                               true,
                                               stream);
  // Gather the rows of 'in' addressed by the first n_samples_to_gather
  // shuffled indices. The const_cast-style (DataT*) cast matches the gather
  // API's non-const input parameter; 'in' is not written.
  raft::matrix::gather((DataT*)in.data_handle(),
                       in.extent(1),
                       in.extent(0),
                       indices.data_handle(),
                       static_cast<IndexT>(n_samples_to_gather),
                       out.data_handle(),
                       stream);
}
// Calculates a <key, value> pair for every sample in input 'X' where key is an
// index to an sample in 'centroids' (index of the nearest centroid) and 'value'
// is the distance between the sample and the 'centroid[key]'
/**
 * @brief For each sample, finds the nearest centroid and the distance to it.
 *
 * Two code paths:
 * - Fused (L2Expanded / L2SqrtExpanded): a single fusedL2NN pass over all
 *   samples; no pairwise distance matrix is materialized. Requires
 *   precomputed row norms of X ('L2NormX') and of the centroids.
 * - Tiled (all other metrics): iterates over sample/centroid tiles, computes
 *   the pairwise distance tile, then argmin-reduces each row.
 *
 * 'L2NormBuf_OR_DistBuf' is deliberately shared between the two paths: it
 * holds centroid norms in the fused path and the distance tile otherwise.
 */
template <typename DataT, typename IndexT>
void minClusterAndDistanceCompute(
  raft::resources const& handle,
  raft::device_matrix_view<const DataT, IndexT> X,
  raft::device_matrix_view<const DataT, IndexT> centroids,
  raft::device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT> minClusterAndDistance,
  raft::device_vector_view<const DataT, IndexT> L2NormX,
  rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
  raft::distance::DistanceType metric,
  int batch_samples,
  int batch_centroids,
  rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = centroids.extent(0);
  // todo(lsugy): change batch size computation when using fusedL2NN!
  bool is_fused = metric == raft::distance::DistanceType::L2Expanded ||
                  metric == raft::distance::DistanceType::L2SqrtExpanded;
  // Fused path processes all samples at once; tiled path batches them.
  auto dataBatchSize = is_fused ? (IndexT)n_samples : getDataBatchSize(batch_samples, n_samples);
  auto centroidsBatchSize = getCentroidsBatchSize(batch_centroids, n_clusters);
  if (is_fused) {
    L2NormBuf_OR_DistBuf.resize(n_clusters, stream);
    raft::linalg::rowNorm(L2NormBuf_OR_DistBuf.data(),
                          centroids.data_handle(),
                          centroids.extent(1),
                          centroids.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  } else {
    // TODO: Unless pool allocator is used, passing in a workspace for this
    // isn't really increasing performance because this needs to do a re-allocation
    // anyways. ref https://github.com/rapidsai/raft/issues/930
    L2NormBuf_OR_DistBuf.resize(dataBatchSize * centroidsBatchSize, stream);
  }
  // Note - pairwiseDistance and centroidsNorm share the same buffer
  // centroidsNorm [n_clusters] - tensor wrapper around centroids L2 Norm
  auto centroidsNorm =
    raft::make_device_vector_view<DataT, IndexT>(L2NormBuf_OR_DistBuf.data(), n_clusters);
  // pairwiseDistance[ns x nc] - tensor wrapper around the distance buffer
  auto pairwiseDistance = raft::make_device_matrix_view<DataT, IndexT>(
    L2NormBuf_OR_DistBuf.data(), dataBatchSize, centroidsBatchSize);
  // Initialize all pairs to <0, +inf> so any real distance wins the argmin.
  raft::KeyValuePair<IndexT, DataT> initial_value(0, std::numeric_limits<DataT>::max());
  thrust::fill(resource::get_thrust_policy(handle),
               minClusterAndDistance.data_handle(),
               minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
               initial_value);
  // tile over the input dataset
  for (IndexT dIdx = 0; dIdx < n_samples; dIdx += dataBatchSize) {
    // # of samples for the current batch
    auto ns = std::min((IndexT)dataBatchSize, n_samples - dIdx);
    // datasetView [ns x n_features] - view representing the current batch of
    // input dataset
    auto datasetView = raft::make_device_matrix_view<const DataT, IndexT>(
      X.data_handle() + (dIdx * n_features), ns, n_features);
    // minClusterAndDistanceView [ns x n_clusters]
    auto minClusterAndDistanceView =
      raft::make_device_vector_view<raft::KeyValuePair<IndexT, DataT>, IndexT>(
        minClusterAndDistance.data_handle() + dIdx, ns);
    auto L2NormXView =
      raft::make_device_vector_view<const DataT, IndexT>(L2NormX.data_handle() + dIdx, ns);
    if (is_fused) {
      workspace.resize((sizeof(int)) * ns, stream);
      // todo(lsugy): remove cIdx
      // Fused distance + argmin in one kernel; the boolean selects sqrt for
      // L2SqrtExpanded.
      raft::distance::fusedL2NNMinReduce<DataT, raft::KeyValuePair<IndexT, DataT>, IndexT>(
        minClusterAndDistanceView.data_handle(),
        datasetView.data_handle(),
        centroids.data_handle(),
        L2NormXView.data_handle(),
        centroidsNorm.data_handle(),
        ns,
        n_clusters,
        n_features,
        (void*)workspace.data(),
        metric != raft::distance::DistanceType::L2Expanded,
        false,
        stream);
    } else {
      // tile over the centroids
      for (IndexT cIdx = 0; cIdx < n_clusters; cIdx += centroidsBatchSize) {
        // # of centroids for the current batch
        auto nc = std::min((IndexT)centroidsBatchSize, n_clusters - cIdx);
        // centroidsView [nc x n_features] - view representing the current batch
        // of centroids
        auto centroidsView = raft::make_device_matrix_view<const DataT, IndexT>(
          centroids.data_handle() + (cIdx * n_features), nc, n_features);
        // pairwiseDistanceView [ns x nc] - view representing the pairwise
        // distance for current batch
        auto pairwiseDistanceView =
          raft::make_device_matrix_view<DataT, IndexT>(pairwiseDistance.data_handle(), ns, nc);
        // calculate pairwise distance between current tile of cluster centroids
        // and input dataset
        pairwise_distance_kmeans<DataT, IndexT>(
          handle, datasetView, centroidsView, pairwiseDistanceView, workspace, metric);
        // argmin reduction returning <index, value> pair
        // calculates the closest centroid and the distance to the closest
        // centroid
        // Note: cIdx + i converts the tile-local centroid index back to a
        // global cluster index before the argmin merge.
        raft::linalg::coalescedReduction(
          minClusterAndDistanceView.data_handle(),
          pairwiseDistanceView.data_handle(),
          pairwiseDistanceView.extent(1),
          pairwiseDistanceView.extent(0),
          initial_value,
          stream,
          true,
          [=] __device__(const DataT val, const IndexT i) {
            raft::KeyValuePair<IndexT, DataT> pair;
            pair.key   = cIdx + i;
            pair.value = val;
            return pair;
          },
          raft::argmin_op{},
          raft::identity_op{});
      }
    }
  }
}
/**
 * @brief For each sample, computes only the distance to its nearest centroid.
 *
 * Companion to minClusterAndDistanceCompute that keeps just the distance
 * (no centroid index). Same dual structure: a fused fusedL2NN pass for
 * L2Expanded / L2SqrtExpanded, and a tiled pairwise-distance + row-min
 * reduction for other metrics. 'L2NormBuf_OR_DistBuf' again doubles as the
 * centroid-norm buffer (fused) or the distance tile (tiled).
 */
template <typename DataT, typename IndexT>
void minClusterDistanceCompute(raft::resources const& handle,
                               raft::device_matrix_view<const DataT, IndexT> X,
                               raft::device_matrix_view<DataT, IndexT> centroids,
                               raft::device_vector_view<DataT, IndexT> minClusterDistance,
                               raft::device_vector_view<DataT, IndexT> L2NormX,
                               rmm::device_uvector<DataT>& L2NormBuf_OR_DistBuf,
                               raft::distance::DistanceType metric,
                               int batch_samples,
                               int batch_centroids,
                               rmm::device_uvector<char>& workspace)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = centroids.extent(0);
  bool is_fused = metric == raft::distance::DistanceType::L2Expanded ||
                  metric == raft::distance::DistanceType::L2SqrtExpanded;
  // Fused path processes all samples at once; tiled path batches them.
  auto dataBatchSize = is_fused ? (IndexT)n_samples : getDataBatchSize(batch_samples, n_samples);
  auto centroidsBatchSize = getCentroidsBatchSize(batch_centroids, n_clusters);
  if (is_fused) {
    L2NormBuf_OR_DistBuf.resize(n_clusters, stream);
    raft::linalg::rowNorm(L2NormBuf_OR_DistBuf.data(),
                          centroids.data_handle(),
                          centroids.extent(1),
                          centroids.extent(0),
                          raft::linalg::L2Norm,
                          true,
                          stream);
  } else {
    L2NormBuf_OR_DistBuf.resize(dataBatchSize * centroidsBatchSize, stream);
  }
  // Note - pairwiseDistance and centroidsNorm share the same buffer
  // centroidsNorm [n_clusters] - tensor wrapper around centroids L2 Norm
  auto centroidsNorm =
    raft::make_device_vector_view<DataT, IndexT>(L2NormBuf_OR_DistBuf.data(), n_clusters);
  // pairwiseDistance[ns x nc] - tensor wrapper around the distance buffer
  auto pairwiseDistance = raft::make_device_matrix_view<DataT, IndexT>(
    L2NormBuf_OR_DistBuf.data(), dataBatchSize, centroidsBatchSize);
  // Initialize to +inf so any computed distance wins the min reduction.
  thrust::fill(resource::get_thrust_policy(handle),
               minClusterDistance.data_handle(),
               minClusterDistance.data_handle() + minClusterDistance.size(),
               std::numeric_limits<DataT>::max());
  // tile over the input data and calculate distance matrix [n_samples x
  // n_clusters]
  for (IndexT dIdx = 0; dIdx < n_samples; dIdx += dataBatchSize) {
    // # of samples for the current batch
    auto ns = std::min((IndexT)dataBatchSize, n_samples - dIdx);
    // datasetView [ns x n_features] - view representing the current batch of
    // input dataset
    auto datasetView = raft::make_device_matrix_view<const DataT, IndexT>(
      X.data_handle() + dIdx * n_features, ns, n_features);
    // minClusterDistanceView [ns x n_clusters]
    auto minClusterDistanceView =
      raft::make_device_vector_view<DataT, IndexT>(minClusterDistance.data_handle() + dIdx, ns);
    auto L2NormXView =
      raft::make_device_vector_view<DataT, IndexT>(L2NormX.data_handle() + dIdx, ns);
    if (is_fused) {
      workspace.resize((sizeof(IndexT)) * ns, stream);
      // Fused distance + min in one kernel; the boolean selects sqrt for
      // L2SqrtExpanded.
      raft::distance::fusedL2NNMinReduce<DataT, DataT, IndexT>(
        minClusterDistanceView.data_handle(),
        datasetView.data_handle(),
        centroids.data_handle(),
        L2NormXView.data_handle(),
        centroidsNorm.data_handle(),
        ns,
        n_clusters,
        n_features,
        (void*)workspace.data(),
        metric != raft::distance::DistanceType::L2Expanded,
        false,
        stream);
    } else {
      // tile over the centroids
      for (IndexT cIdx = 0; cIdx < n_clusters; cIdx += centroidsBatchSize) {
        // # of centroids for the current batch
        auto nc = std::min((IndexT)centroidsBatchSize, n_clusters - cIdx);
        // centroidsView [nc x n_features] - view representing the current batch
        // of centroids
        auto centroidsView = raft::make_device_matrix_view<DataT, IndexT>(
          centroids.data_handle() + cIdx * n_features, nc, n_features);
        // pairwiseDistanceView [ns x nc] - view representing the pairwise
        // distance for current batch
        auto pairwiseDistanceView =
          raft::make_device_matrix_view<DataT, IndexT>(pairwiseDistance.data_handle(), ns, nc);
        // calculate pairwise distance between current tile of cluster centroids
        // and input dataset
        pairwise_distance_kmeans<DataT, IndexT>(
          handle, datasetView, centroidsView, pairwiseDistanceView, workspace, metric);
        // Row-wise min merges this centroid tile into the running minimum.
        raft::linalg::coalescedReduction(minClusterDistanceView.data_handle(),
                                         pairwiseDistanceView.data_handle(),
                                         pairwiseDistanceView.extent(1),
                                         pairwiseDistanceView.extent(0),
                                         std::numeric_limits<DataT>::max(),
                                         stream,
                                         true,
                                         raft::identity_op{},
                                         raft::min_op{},
                                         raft::identity_op{});
      }
    }
  }
}
/**
 * @brief Counts how many samples are assigned to each centroid.
 *
 * First computes each sample's nearest centroid via
 * minClusterAndDistanceCompute, then histograms the resulting centroid
 * indices (extracted from the <key, value> pairs through a transform
 * iterator) into 'sampleCountInCluster'.
 *
 * @param[in]  handle               raft handle
 * @param[in]  params               k-means hyper-parameters (metric, batching)
 * @param[in]  X                    dataset [n_samples x n_features]
 * @param[in]  L2NormX              precomputed row norms of X (fused L2 path)
 * @param[in]  centroids            cluster centers [n_clusters x n_features]
 * @param[in]  workspace            scratch buffer
 * @param[out] sampleCountInCluster per-cluster sample counts
 */
template <typename DataT, typename IndexT>
void countSamplesInCluster(raft::resources const& handle,
                           const KMeansParams& params,
                           raft::device_matrix_view<const DataT, IndexT> X,
                           raft::device_vector_view<const DataT, IndexT> L2NormX,
                           raft::device_matrix_view<DataT, IndexT> centroids,
                           rmm::device_uvector<char>& workspace,
                           raft::device_vector_view<DataT, IndexT> sampleCountInCluster)
{
  cudaStream_t stream = resource::get_cuda_stream(handle);
  auto n_samples = X.extent(0);
  auto n_features = X.extent(1);
  auto n_clusters = centroids.extent(0);
  // stores (key, value) pair corresponding to each sample where
  //   - key is the index of nearest cluster
  //   - value is the distance to the nearest cluster
  auto minClusterAndDistance =
    raft::make_device_vector<raft::KeyValuePair<IndexT, DataT>, IndexT>(handle, n_samples);
  // temporary buffer to store distance matrix, destructor releases the resource
  rmm::device_uvector<DataT> L2NormBuf_OR_DistBuf(0, stream);
  // computes minClusterAndDistance[0:n_samples) where minClusterAndDistance[i]
  // is a <key, value> pair where
  //   'key' is index to an sample in 'centroids' (index of the nearest
  //   centroid) and 'value' is the distance between the sample 'X[i]' and the
  //   'centroid[key]'
  detail::minClusterAndDistanceCompute(handle,
                                       X,
                                       (raft::device_matrix_view<const DataT, IndexT>)centroids,
                                       minClusterAndDistance.view(),
                                       L2NormX,
                                       L2NormBuf_OR_DistBuf,
                                       params.metric,
                                       params.batch_samples,
                                       params.batch_centroids,
                                       workspace);
  // Using TransformInputIteratorT to dereference an array of raft::KeyValuePair
  // and converting them to just return the Key to be used in reduce_rows_by_key
  // prims
  detail::KeyValueIndexOp<IndexT, DataT> conversion_op;
  cub::TransformInputIterator<IndexT,
                              detail::KeyValueIndexOp<IndexT, DataT>,
                              raft::KeyValuePair<IndexT, DataT>*>
    itr(minClusterAndDistance.data_handle(), conversion_op);
  // count # of samples in each cluster
  countLabels(handle,
              itr,
              sampleCountInCluster.data_handle(),
              (IndexT)n_samples,
              (IndexT)n_clusters,
              workspace);
}
} // namespace detail
} // namespace cluster
} // namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/agglomerative.cuh | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <raft/core/resources.hpp>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <cstddef>
namespace raft::cluster::detail {
/**
 * @brief Host-side union-find over dendrogram nodes.
 *
 * Sized for 2N-1 nodes: N leaves plus N-1 internal merge nodes. A parent
 * value of -1 marks a root. 'size' is initialized to 1 for leaves and 0 for
 * the (not-yet-created) internal nodes; 'next_label' is the id the next
 * internal node will receive.
 */
template <typename value_idx, typename value_t>
class UnionFind {
 public:
  value_idx next_label;            // id assigned to the next merge node
  std::vector<value_idx> parent;   // parent id per node, -1 for roots
  std::vector<value_idx> size;     // subtree size per node
  value_idx n_indices;             // total node capacity (2N-1)
  UnionFind(value_idx N_)
    : n_indices(2 * N_ - 1), parent(2 * N_ - 1, -1), size(2 * N_ - 1, 1), next_label(N_)
  {
    // Internal (merge) nodes start with size 0; only leaves count as 1.
    memset(size.data() + N_, 0, (size.size() - N_) * sizeof(value_idx));
  }
  // Returns the root of n's set, compressing part of the path along the way.
  value_idx find(value_idx n)
  {
    value_idx p;
    p = n;
    while (parent[n] != -1)
      n = parent[n];
    // path compression
    // NOTE(review): the `p == -1` guards look defensive (clamping to the last
    // node); with valid inputs p should never become -1 here — confirm intent.
    while (parent[p] != n) {
      p = parent[p == -1 ? n_indices - 1 : p];
      parent[p == -1 ? n_indices - 1 : p] = n;
    }
    return n;
  }
  // Merges the sets rooted at m and n under a fresh internal node.
  void perform_union(value_idx m, value_idx n)
  {
    size[next_label] = size[m] + size[n];
    parent[m] = next_label;
    parent[n] = next_label;
    next_label += 1;
  }
};
/**
* Agglomerative labeling on host. This has not been found to be a bottleneck
* in the algorithm. A parallel version of this can be done using a parallel
* variant of Kruskal's MST algorithm
* (ref http://cucis.ece.northwestern.edu/publications/pdf/HenPat12.pdf),
* which breaks apart the sorted MST results into overlapping subsets and
* independently runs Kruskal's algorithm on each subset, merging them back
* together into a single hierarchy when complete. Unfortunately,
* this is nontrivial and the speedup wouldn't be useful until this
* becomes a bottleneck.
*
* @tparam value_idx
* @tparam value_t
* @param[in] handle the raft handle
* @param[in] rows src edges of the sorted MST
* @param[in] cols dst edges of the sorted MST
* @param[in] nnz the number of edges in the sorted MST
* @param[out] out_src parents of output
* @param[out] out_dst children of output
* @param[out] out_delta distances of output
* @param[out] out_size cluster sizes of output
*/
template <typename value_idx, typename value_t>
void build_dendrogram_host(raft::resources const& handle,
                           const value_idx* rows,
                           const value_idx* cols,
                           const value_t* data,
                           size_t nnz,
                           value_idx* children,
                           value_t* out_delta,
                           value_idx* out_size)
{
  auto stream = resource::get_cuda_stream(handle);
  value_idx n_edges = nnz;

  // Bring the (already weight-sorted) MST edge list over to the host.
  std::vector<value_idx> src_h(n_edges);
  std::vector<value_idx> dst_h(n_edges);
  std::vector<value_t> weights_h(n_edges);
  update_host(src_h.data(), rows, n_edges, stream);
  update_host(dst_h.data(), cols, n_edges, stream);
  update_host(weights_h.data(), data, n_edges, stream);
  resource::sync_stream(handle, stream);

  std::vector<value_idx> children_h(n_edges * 2);
  std::vector<value_idx> sizes_h(n_edges);
  std::vector<value_t> deltas_h(n_edges);

  // Kruskal-style labeling: each edge merges the two current roots of its
  // endpoints under a fresh internal dendrogram node.
  UnionFind<value_idx, value_t> U(nnz + 1);
  for (std::size_t i = 0; i < nnz; i++) {
    value_idx root_a = U.find(src_h[i]);
    value_idx root_b = U.find(dst_h[i]);

    // Two children per merge step; size is the merged subtree size.
    children_h[i * 2]     = root_a;
    children_h[i * 2 + 1] = root_b;
    deltas_h[i]           = weights_h[i];
    sizes_h[i]            = U.size[root_a] + U.size[root_b];

    U.perform_union(root_a, root_b);
  }

  // Push the completed dendrogram back to device memory.
  raft::update_device(children, children_h.data(), n_edges * 2, stream);
  raft::update_device(out_size, sizes_h.data(), n_edges, stream);
  raft::update_device(out_delta, deltas_h.data(), n_edges, stream);
}
// For every entry of 'children', records which merge level (tid / 2 — two
// children per dendrogram node) that child was merged at, writing it into
// 'parents' indexed by the child's node id. One thread per entry.
template <typename value_idx>
RAFT_KERNEL write_levels_kernel(const value_idx* children, value_idx* parents, value_idx n_vertices)
{
  value_idx idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n_vertices) { return; }
  value_idx merge_level = idx / 2;
  parents[children[idx]] = merge_level;
}
/**
* Instead of propagating a label from roots to children,
* the children each iterate up the tree until they find
* the label of their parent. This increases the potential
* parallelism.
* @tparam value_idx
* @param children
* @param parents
* @param n_leaves
* @param labels
*/
template <typename value_idx>
RAFT_KERNEL inherit_labels(const value_idx* children,
                           const value_idx* levels,
                           std::size_t n_leaves,
                           value_idx* labels,
                           int cut_level,
                           value_idx n_vertices)
{
  value_idx tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < n_vertices) {
    value_idx node = children[tid];
    // Two children per dendrogram node: entry tid belongs to merge level tid/2.
    value_idx cur_level = tid / 2;
    /**
     * Any roots above the cut level should be ignored.
     * Any leaves at the cut level should already be labeled
     */
    if (cur_level > cut_level) return;
    value_idx cur_parent = node;
    value_idx label = labels[cur_parent];
    // Climb toward the root until an already-labeled ancestor is found.
    // Internal node ids are offset by n_leaves (node id = level + n_leaves).
    while (label == -1) {
      cur_parent = cur_level + n_leaves;
      cur_level = levels[cur_parent];
      label = labels[cur_parent];
    }
    labels[node] = label;
  }
}
// Thrust functor assigning sequential labels to cluster-root nodes: each
// zipped tuple pairs a label value with the node index that receives it.
template <typename value_idx>
struct init_label_roots {
  init_label_roots(value_idx* labels_) : labels(labels_) {}

  template <typename Tuple>
  __host__ __device__ void operator()(Tuple t)
  {
    const auto label = thrust::get<0>(t);
    const auto root  = thrust::get<1>(t);
    labels[root]     = label;
  }

 private:
  value_idx* labels;
};
/**
* Cuts the dendrogram at a particular level where the number of nodes
* is equal to n_clusters, then propagates the resulting labels
* to all the children.
*
* @tparam value_idx
* @param handle
* @param labels
* @param children
* @param n_clusters
* @param n_leaves
*/
template <typename value_idx, int tpb = 256>
void extract_flattened_clusters(raft::resources const& handle,
                                value_idx* labels,
                                const value_idx* children,
                                size_t n_clusters,
                                size_t n_leaves)
{
  auto stream = resource::get_cuda_stream(handle);
  auto thrust_policy = resource::get_thrust_policy(handle);
  // Handle special case where n_clusters == 1
  if (n_clusters == 1) {
    thrust::fill(thrust_policy, labels, labels + n_leaves, 0);
  } else {
    /**
     * Compute levels for each node
     *
     * 1. Initialize "levels" array of size n_leaves * 2
     *
     * 2. For each entry in children, write parent
     * out for each of the children
     */
    auto n_edges = (n_leaves - 1) * 2;
    thrust::device_ptr<const value_idx> d_ptr = thrust::device_pointer_cast(children);
    // Highest node id in the dendrogram determines the vertex count.
    value_idx n_vertices = *(thrust::max_element(thrust_policy, d_ptr, d_ptr + n_edges)) + 1;
    // Prevent potential infinite loop from labeling disconnected
    // connectivities graph.
    RAFT_EXPECTS(n_leaves > 0, "n_leaves must be positive");
    RAFT_EXPECTS(
      static_cast<std::size_t>(n_vertices) == static_cast<std::size_t>((n_leaves - 1) * 2),
      "Multiple components found in MST or MST is invalid. "
      "Cannot find single-linkage solution.");
    // levels[node] = merge level at which 'node' was absorbed into a parent.
    rmm::device_uvector<value_idx> levels(n_vertices, stream);
    value_idx n_blocks = ceildiv(n_vertices, (value_idx)tpb);
    write_levels_kernel<<<n_blocks, tpb, 0, stream>>>(children, levels.data(), n_vertices);
    /**
     * Step 1: Find label roots:
     *
     *     1. Copying children[children.size()-(n_clusters-1):] entries to
     *        separate arrayo
     *     2. sort array
     *     3. take first n_clusters entries
     */
    value_idx child_size = (n_clusters - 1) * 2;
    rmm::device_uvector<value_idx> label_roots(child_size, stream);
    // The last n_clusters-1 merges are the ones "cut away"; their children
    // are the roots of the flattened clusters.
    value_idx children_cpy_start = n_edges - child_size;
    raft::copy_async(label_roots.data(), children + children_cpy_start, child_size, stream);
    thrust::sort(thrust_policy,
                 label_roots.data(),
                 label_roots.data() + (child_size),
                 thrust::greater<value_idx>());
    rmm::device_uvector<value_idx> tmp_labels(n_vertices, stream);
    // Init labels to -1
    thrust::fill(thrust_policy, tmp_labels.data(), tmp_labels.data() + n_vertices, -1);
    // Write labels for cluster roots to "labels"
    thrust::counting_iterator<uint> first(0);
    // Zip (label value, root id): the n_clusters smallest of the sorted roots
    // receive labels 0..n_clusters-1.
    auto z_iter = thrust::make_zip_iterator(
      thrust::make_tuple(first, label_roots.data() + (label_roots.size() - n_clusters)));
    thrust::for_each(
      thrust_policy, z_iter, z_iter + n_clusters, init_label_roots<value_idx>(tmp_labels.data()));
    /**
     * Step 2: Propagate labels by having children iterate through their parents
     *     1. Initialize labels to -1
     *     2. For each element in levels array, propagate until parent's
     *        label is !=-1
     */
    value_idx cut_level = (n_edges / 2) - (n_clusters - 1);
    inherit_labels<<<n_blocks, tpb, 0, stream>>>(
      children, levels.data(), n_leaves, tmp_labels.data(), cut_level, n_vertices);
    // copy tmp labels to actual labels
    raft::copy_async(labels, tmp_labels.data(), n_leaves, stream);
  }
}
}; // namespace raft::cluster::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/kmeans_balanced.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resource/device_memory_resource.hpp>
#include <raft/core/resource/thrust_policy.hpp>
#include <type_traits>
#include <raft/cluster/detail/kmeans_common.cuh>
#include <raft/cluster/kmeans_balanced_types.hpp>
#include <raft/common/nvtx.hpp>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/logger.hpp>
#include <raft/core/operators.hpp>
#include <raft/distance/distance.cuh>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/add.cuh>
#include <raft/linalg/gemm.cuh>
#include <raft/linalg/map.cuh>
#include <raft/linalg/matrix_vector_op.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/linalg/normalize.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/matrix/argmin.cuh>
#include <raft/matrix/gather.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/device_atomics.cuh>
#include <raft/util/integer_utils.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <thrust/gather.h>
#include <thrust/transform.h>
#include <tuple>
namespace raft::cluster::detail {
constexpr static inline float kAdjustCentersWeight = 7.0f;
/**
* @brief Predict labels for the dataset; floating-point types only.
*
* NB: no minibatch splitting is done here, it may require large amount of temporary memory (n_rows
* * n_cluster * sizeof(MathT)).
*
* @tparam MathT type of the centroids and mapped data
* @tparam IdxT index type
* @tparam LabelT label type
*
* @param[in] handle The raft handle.
* @param[in] params Structure containing the hyper-parameters
* @param[in] centers Pointer to the row-major matrix of cluster centers [n_clusters, dim]
* @param[in] n_clusters Number of clusters/centers
* @param[in] dim Dimensionality of the data
* @param[in] dataset Pointer to the data [n_rows, dim]
* @param[in] dataset_norm Pointer to the precomputed norm (for L2 metrics only) [n_rows]
* @param[in] n_rows Number samples in the `dataset`
* @param[out] labels Output predictions [n_rows]
* @param[inout] mr (optional) Memory resource to use for temporary allocations
*/
template <typename MathT, typename IdxT, typename LabelT>
inline std::enable_if_t<std::is_floating_point_v<MathT>> predict_core(
  const raft::resources& handle,
  const kmeans_balanced_params& params,
  const MathT* centers,
  IdxT n_clusters,
  IdxT dim,
  const MathT* dataset,
  const MathT* dataset_norm,
  IdxT n_rows,
  LabelT* labels,
  rmm::mr::device_memory_resource* mr)
{
  auto stream = resource::get_cuda_stream(handle);
  switch (params.metric) {
    case raft::distance::DistanceType::L2Expanded:
    case raft::distance::DistanceType::L2SqrtExpanded: {
      // Workspace for fusedL2NN: sizeof(int) bytes per row (used internally by the reduction).
      auto workspace = raft::make_device_mdarray<char, IdxT>(
        handle, mr, make_extents<IdxT>((sizeof(int)) * n_rows));

      // Per-row (argmin cluster, min distance) pairs filled by fusedL2NNMinReduce.
      auto minClusterAndDistance = raft::make_device_mdarray<raft::KeyValuePair<IdxT, MathT>, IdxT>(
        handle, mr, make_extents<IdxT>(n_rows));
      raft::KeyValuePair<IdxT, MathT> initial_value(0, std::numeric_limits<MathT>::max());
      thrust::fill(resource::get_thrust_policy(handle),
                   minClusterAndDistance.data_handle(),
                   minClusterAndDistance.data_handle() + minClusterAndDistance.size(),
                   initial_value);

      // Norms of the centers; the dataset norms were precomputed by the caller.
      auto centroidsNorm =
        raft::make_device_mdarray<MathT, IdxT>(handle, mr, make_extents<IdxT>(n_clusters));
      raft::linalg::rowNorm<MathT, IdxT>(
        centroidsNorm.data_handle(), centers, dim, n_clusters, raft::linalg::L2Norm, true, stream);

      // The boolean argument selects whether to take the square root of the distances:
      // true for L2SqrtExpanded only.
      raft::distance::fusedL2NNMinReduce<MathT, raft::KeyValuePair<IdxT, MathT>, IdxT>(
        minClusterAndDistance.data_handle(),
        dataset,
        centers,
        dataset_norm,
        centroidsNorm.data_handle(),
        n_rows,
        n_clusters,
        dim,
        (void*)workspace.data_handle(),
        (params.metric == raft::distance::DistanceType::L2Expanded) ? false : true,
        false,
        stream);

      // todo(lsugy): use KVP + iterator in caller.
      // Copy the keys (cluster indices) of the key-value pairs into the output labels.
      thrust::transform(resource::get_thrust_policy(handle),
                        minClusterAndDistance.data_handle(),
                        minClusterAndDistance.data_handle() + n_rows,
                        labels,
                        raft::compose_op<raft::cast_op<LabelT>, raft::key_op>());
      break;
    }
    case raft::distance::DistanceType::InnerProduct: {
      // TODO: pass buffer
      // Inner product is maximized, so compute -(centers^T x dataset) and take an argmin.
      rmm::device_uvector<MathT> distances(n_rows * n_clusters, stream, mr);

      MathT alpha = -1.0;
      MathT beta  = 0.0;

      linalg::gemm(handle,
                   true,
                   false,
                   n_clusters,
                   n_rows,
                   dim,
                   &alpha,
                   centers,
                   dim,
                   dataset,
                   dim,
                   &beta,
                   distances.data(),
                   n_clusters,
                   stream);

      // Row-wise argmin over the [n_rows, n_clusters] negated-similarity matrix.
      auto distances_const_view = raft::make_device_matrix_view<const MathT, IdxT, row_major>(
        distances.data(), n_rows, n_clusters);
      auto labels_view = raft::make_device_vector_view<LabelT, IdxT>(labels, n_rows);
      raft::matrix::argmin(handle, distances_const_view, labels_view);
      break;
    }
    default: {
      RAFT_FAIL("The chosen distance metric is not supported (%d)", int(params.metric));
    }
  }
}
/**
 * @brief Suggest a minibatch size for kmeans prediction.
 *
 * This function is used as a heuristic to split the work over a large dataset
 * to reduce the size of temporary memory allocations.
 *
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 *
 * @param[in] n_clusters number of clusters in kmeans clustering
 * @param[in] n_rows Number of samples in the dataset
 * @param[in] dim Number of features in the dataset
 * @param[in] metric Distance metric
 * @param[in] is_same_type Whether the dataset is already in MathT (callers pass
 *   `std::is_same_v<T, MathT>`); when false, extra space for the converted minibatch
 *   is accounted for. (Renamed from the misleading `needs_conversion`, whose name and
 *   documentation were inverted relative to the value callers pass; behavior unchanged.)
 * @return A suggested minibatch size and the expected memory cost per-row (in bytes)
 */
template <typename MathT, typename IdxT>
constexpr auto calc_minibatch_size(IdxT n_clusters,
                                   IdxT n_rows,
                                   IdxT dim,
                                   raft::distance::DistanceType metric,
                                   bool is_same_type) -> std::tuple<IdxT, size_t>
{
  n_clusters = std::max<IdxT>(1, n_clusters);

  // Estimate memory needs per row (i.e element of the batch).
  size_t mem_per_row = 0;
  switch (metric) {
    // fusedL2NN needs a mutex and a key-value pair for each row.
    case distance::DistanceType::L2Expanded:
    case distance::DistanceType::L2SqrtExpanded: {
      mem_per_row += sizeof(int);
      mem_per_row += sizeof(raft::KeyValuePair<IdxT, MathT>);
    } break;
    // Other metrics require storing a distance matrix.
    default: {
      mem_per_row += sizeof(MathT) * n_clusters;
    }
  }

  // If the dataset is not already in MathT, each minibatch is converted into a
  // temporary buffer; account for the space of that converted copy.
  if (!is_same_type) { mem_per_row += sizeof(MathT) * dim; }

  // Heuristic: calculate the minibatch size in order to use at most 1GB of memory.
  IdxT minibatch_size = (1 << 30) / mem_per_row;
  minibatch_size      = 64 * div_rounding_up_safe(minibatch_size, IdxT{64});
  minibatch_size      = std::min<IdxT>(minibatch_size, n_rows);
  return std::make_tuple(minibatch_size, mem_per_row);
}
/**
 * @brief Given the data and labels, calculate cluster centers and sizes in one sweep.
 *
 * @note all pointers must be accessible on the device.
 *
 * @tparam T element type
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 * @tparam LabelT label type
 * @tparam CounterT counter type supported by CUDA's native atomicAdd
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[in] handle The raft handle.
 * @param[inout] centers Pointer to the output [n_clusters, dim]
 * @param[inout] cluster_sizes Number of rows in each cluster [n_clusters]
 * @param[in] n_clusters Number of clusters/centers
 * @param[in] dim Dimensionality of the data
 * @param[in] dataset Pointer to the data [n_rows, dim]
 * @param[in] n_rows Number of samples in the `dataset`
 * @param[in] labels Output predictions [n_rows]
 * @param[in] reset_counters Whether to clear the output arrays before calculating.
 *    When set to `false`, this function may be used to update existing centers and sizes using
 *    the weighted average principle.
 * @param[in] mapping_op Mapping operation from T to MathT
 * @param[inout] mr (optional) Memory resource to use for temporary allocations on the device
 */
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void calc_centers_and_sizes(const raft::resources& handle,
                            MathT* centers,
                            CounterT* cluster_sizes,
                            IdxT n_clusters,
                            IdxT dim,
                            const T* dataset,
                            IdxT n_rows,
                            const LabelT* labels,
                            bool reset_counters,
                            MappingOpT mapping_op,
                            rmm::mr::device_memory_resource* mr = nullptr)
{
  auto stream = resource::get_cuda_stream(handle);
  if (mr == nullptr) { mr = resource::get_workspace_resource(handle); }

  // When updating existing centers (weighted-average mode), first scale the stored means
  // back into per-cluster sums by multiplying with the previous cluster sizes, so the new
  // rows can be accumulated on top of them.
  if (!reset_counters) {
    raft::linalg::matrixVectorOp(
      centers, centers, cluster_sizes, dim, n_clusters, true, false, raft::mul_op(), stream);
  }

  rmm::device_uvector<char> workspace(0, stream, mr);

  // If we reset the counters, we can compute directly the new sizes in cluster_sizes.
  // If we don't reset, we compute in a temporary buffer and add in a separate step.
  rmm::device_uvector<CounterT> temp_cluster_sizes(0, stream, mr);
  CounterT* temp_sizes = cluster_sizes;
  if (!reset_counters) {
    temp_cluster_sizes.resize(n_clusters, stream);
    temp_sizes = temp_cluster_sizes.data();
  }

  // Accumulate per-cluster sums of the rows into `centers`.
  // Apply mapping only when the data and math types are different.
  if constexpr (std::is_same_v<T, MathT>) {
    raft::linalg::reduce_rows_by_key(
      dataset, dim, labels, nullptr, n_rows, dim, n_clusters, centers, stream, reset_counters);
  } else {
    // todo(lsugy): use iterator from KV output of fusedL2NN
    cub::TransformInputIterator<MathT, MappingOpT, const T*> mapping_itr(dataset, mapping_op);
    raft::linalg::reduce_rows_by_key(
      mapping_itr, dim, labels, nullptr, n_rows, dim, n_clusters, centers, stream, reset_counters);
  }

  // Compute weight of each cluster
  raft::cluster::detail::countLabels(handle, labels, temp_sizes, n_rows, n_clusters, workspace);

  // Add previous sizes if necessary
  if (!reset_counters) {
    raft::linalg::add(cluster_sizes, cluster_sizes, temp_sizes, n_clusters, stream);
  }

  // Turn the per-cluster sums back into means; div_checkzero_op leaves clusters with
  // zero rows at zero instead of dividing by zero.
  raft::linalg::matrixVectorOp(centers,
                               centers,
                               cluster_sizes,
                               dim,
                               n_clusters,
                               true,
                               false,
                               raft::div_checkzero_op(),
                               stream);
}
/**
 * @brief Computes the row-wise L2 norm of the dataset, converting to MathT if necessary.
 *
 * @tparam T element type
 * @tparam MathT type of the mapped data
 * @tparam IdxT index type
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[in] handle The raft handle
 * @param[out] dataset_norm Output norms, one per row [n_rows]
 * @param[in] dataset Pointer to the data [n_rows, dim]
 * @param[in] dim Number of features in the dataset
 * @param[in] n_rows Number of rows in the dataset
 * @param[in] mapping_op Mapping operation from T to MathT
 * @param[inout] mr (optional) Memory resource for the temporary converted copy
 */
template <typename T, typename MathT, typename IdxT, typename MappingOpT>
void compute_norm(const raft::resources& handle,
                  MathT* dataset_norm,
                  const T* dataset,
                  IdxT dim,
                  IdxT n_rows,
                  MappingOpT mapping_op,
                  rmm::mr::device_memory_resource* mr = nullptr)
{
  common::nvtx::range<common::nvtx::domain::raft> fun_scope("compute_norm");
  auto stream = resource::get_cuda_stream(handle);
  if (mr == nullptr) { mr = resource::get_workspace_resource(handle); }
  rmm::device_uvector<MathT> mapped_dataset(0, stream, mr);

  const MathT* dataset_ptr = nullptr;

  // Compile-time dispatch (was a runtime `if`): when the types already match, read the
  // input directly; otherwise convert the whole batch into a temporary MathT buffer.
  // This matches the `if constexpr` pattern used by calc_centers_and_sizes.
  if constexpr (std::is_same_v<MathT, T>) {
    dataset_ptr = reinterpret_cast<const MathT*>(dataset);
  } else {
    mapped_dataset.resize(n_rows * dim, stream);
    linalg::unaryOp(mapped_dataset.data(), dataset, n_rows * dim, mapping_op, stream);
    dataset_ptr = static_cast<const MathT*>(mapped_dataset.data());
  }

  raft::linalg::rowNorm<MathT, IdxT>(
    dataset_norm, dataset_ptr, dim, n_rows, raft::linalg::L2Norm, true, stream);
}
/**
 * @brief Predict labels for the dataset.
 *
 * The work is split into minibatches (see `calc_minibatch_size`) to bound the size of
 * temporary allocations; each minibatch is optionally converted to MathT and dispatched
 * to `predict_core`.
 *
 * @tparam T element type
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 * @tparam LabelT label type
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[in] handle The raft handle
 * @param[in] params Structure containing the hyper-parameters
 * @param[in] centers Pointer to the row-major matrix of cluster centers [n_clusters, dim]
 * @param[in] n_clusters Number of clusters/centers
 * @param[in] dim Dimensionality of the data
 * @param[in] dataset Pointer to the data [n_rows, dim]
 * @param[in] n_rows Number of samples in the `dataset`
 * @param[out] labels Output predictions [n_rows]
 * @param[in] mapping_op Mapping operation from T to MathT
 * @param[inout] mr (optional) memory resource to use for temporary allocations
 * @param[in] dataset_norm (optional) Pre-computed norms of each row in the dataset [n_rows]
 */
template <typename T, typename MathT, typename IdxT, typename LabelT, typename MappingOpT>
void predict(const raft::resources& handle,
             const kmeans_balanced_params& params,
             const MathT* centers,
             IdxT n_clusters,
             IdxT dim,
             const T* dataset,
             IdxT n_rows,
             LabelT* labels,
             MappingOpT mapping_op,
             rmm::mr::device_memory_resource* mr = nullptr,
             const MathT* dataset_norm = nullptr)
{
  auto stream = resource::get_cuda_stream(handle);
  // NOTE(review): the '%u' specifier assumes n_clusters fits in 32 bits — confirm for 64-bit IdxT.
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "predict(%zu, %u)", static_cast<size_t>(n_rows), n_clusters);
  if (mr == nullptr) { mr = resource::get_workspace_resource(handle); }
  // The last argument indicates the dataset is already in MathT (no conversion buffer needed).
  auto [max_minibatch_size, _mem_per_row] =
    calc_minibatch_size<MathT>(n_clusters, n_rows, dim, params.metric, std::is_same_v<T, MathT>);
  // Scratch buffer for the type-converted minibatch (left empty when T == MathT).
  rmm::device_uvector<MathT> cur_dataset(
    std::is_same_v<T, MathT> ? 0 : max_minibatch_size * dim, stream, mr);
  // The expanded L2 metrics need row norms; compute them per-minibatch unless the caller
  // supplied precomputed norms.
  bool need_compute_norm =
    dataset_norm == nullptr && (params.metric == raft::distance::DistanceType::L2Expanded ||
                                params.metric == raft::distance::DistanceType::L2SqrtExpanded);
  rmm::device_uvector<MathT> cur_dataset_norm(
    need_compute_norm ? max_minibatch_size : 0, stream, mr);
  const MathT* dataset_norm_ptr = nullptr;
  auto cur_dataset_ptr          = cur_dataset.data();
  for (IdxT offset = 0; offset < n_rows; offset += max_minibatch_size) {
    IdxT minibatch_size = std::min<IdxT>(max_minibatch_size, n_rows - offset);

    if constexpr (std::is_same_v<T, MathT>) {
      // No conversion needed: point straight at the current slice of the input.
      cur_dataset_ptr = const_cast<MathT*>(dataset + offset * dim);
    } else {
      // Convert the current minibatch into the math type.
      linalg::unaryOp(
        cur_dataset_ptr, dataset + offset * dim, minibatch_size * dim, mapping_op, stream);
    }

    // Compute the norm now if it hasn't been pre-computed.
    if (need_compute_norm) {
      compute_norm(
        handle, cur_dataset_norm.data(), cur_dataset_ptr, dim, minibatch_size, mapping_op, mr);
      dataset_norm_ptr = cur_dataset_norm.data();
    } else if (dataset_norm != nullptr) {
      dataset_norm_ptr = dataset_norm + offset;
    }

    predict_core(handle,
                 params,
                 centers,
                 n_clusters,
                 dim,
                 cur_dataset_ptr,
                 dataset_norm_ptr,
                 minibatch_size,
                 labels + offset,
                 mr);
  }
}
/**
 * Pull the center of each small cluster towards a data point drawn from a large cluster.
 *
 * Launch layout (see `adjust_centers`): blockDim = (WarpSize, BlockDimY, 1) and gridDim.y
 * covers the clusters, so each cluster is processed by exactly one warp.
 *
 * `count` is a device-global counter shared by all warps; combined with the prime `seed`
 * (chosen by the caller to not divide n_rows) it generates pseudo-random row indices.
 */
template <uint32_t BlockDimY,
          typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
__launch_bounds__((WarpSize * BlockDimY)) RAFT_KERNEL
  adjust_centers_kernel(MathT* centers,  // [n_clusters, dim]
                        IdxT n_clusters,
                        IdxT dim,
                        const T* dataset,  // [n_rows, dim]
                        IdxT n_rows,
                        const LabelT* labels,           // [n_rows]
                        const CounterT* cluster_sizes,  // [n_clusters]
                        MathT threshold,
                        IdxT average,
                        IdxT seed,
                        IdxT* count,
                        MappingOpT mapping_op)
{
  // Cluster index handled by this warp: BlockDimY warps per block, tiled over gridDim.y.
  IdxT l = threadIdx.y + BlockDimY * static_cast<IdxT>(blockIdx.y);
  if (l >= n_clusters) return;
  auto csize = static_cast<IdxT>(cluster_sizes[l]);
  // skip big clusters
  if (csize > static_cast<IdxT>(average * threshold)) return;

  // choose a "random" i that belongs to a rather large cluster
  IdxT i;
  IdxT j = laneId();
  if (j == 0) {
    // Only lane 0 searches; `i` is uninitialized in the other lanes until the shuffle below.
    do {
      auto old = atomicAdd(count, IdxT{1});
      i        = (seed * (old + 1)) % n_rows;
    } while (static_cast<IdxT>(cluster_sizes[labels[i]]) < average);
  }
  // Broadcast the selected row index from lane 0 to the whole warp.
  i = raft::shfl(i, 0);

  // Adjust the center of the selected smaller cluster to gravitate towards
  // a sample from the selected larger cluster.
  const IdxT li = static_cast<IdxT>(labels[i]);
  // Weight of the current center for the weighted average.
  // We dump it for anomalously small clusters, but keep constant otherwise.
  const MathT wc = min(static_cast<MathT>(csize), static_cast<MathT>(kAdjustCentersWeight));
  // Weight for the datapoint used to shift the center.
  const MathT wd = 1.0;
  // Lanes stride over the feature dimension, blending the center with the sampled point.
  for (; j < dim; j += WarpSize) {
    MathT val = 0;
    val += wc * centers[j + dim * li];
    val += wd * mapping_op(dataset[j + dim * i]);
    val /= wc + wd;
    centers[j + dim * l] = val;
  }
}
/**
 * @brief Adjust centers for clusters that have small number of entries.
 *
 * For each cluster, where the cluster size is not bigger than a threshold, the center is moved
 * towards a data point that belongs to a large cluster.
 *
 * NB: if this function returns `true`, you should update the labels.
 *
 * NB: all pointers must be on the device side.
 *
 * NB: this function keeps a `static` counter to rotate through the seed primes, so it is
 * stateful across calls and not thread-safe on the host.
 *
 * @tparam T element type
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 * @tparam LabelT label type
 * @tparam CounterT counter type supported by CUDA's native atomicAdd
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[inout] centers cluster centers [n_clusters, dim]
 * @param[in] n_clusters number of rows in `centers`
 * @param[in] dim number of columns in `centers` and `dataset`
 * @param[in] dataset a host pointer to the row-major data matrix [n_rows, dim]
 * @param[in] n_rows number of rows in `dataset`
 * @param[in] labels a host pointer to the cluster indices [n_rows]
 * @param[in] cluster_sizes number of rows in each cluster [n_clusters]
 * @param[in] threshold defines a criterion for adjusting a cluster
 *    (cluster_sizes <= average_size * threshold)
 *    0 <= threshold < 1
 * @param[in] mapping_op Mapping operation from T to MathT
 * @param[in] stream CUDA stream
 * @param[inout] device_memory memory resource to use for temporary allocations
 *
 * @return whether any of the centers has been updated (and thus, `labels` need to be recalculated).
 */
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
auto adjust_centers(MathT* centers,
                    IdxT n_clusters,
                    IdxT dim,
                    const T* dataset,
                    IdxT n_rows,
                    const LabelT* labels,
                    const CounterT* cluster_sizes,
                    MathT threshold,
                    MappingOpT mapping_op,
                    rmm::cuda_stream_view stream,
                    rmm::mr::device_memory_resource* device_memory) -> bool
{
  // Fix: pass n_clusters as size_t to match '%zu'; the previous '%u' invoked undefined
  // behavior in the varargs call whenever IdxT is a 64-bit type.
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "adjust_centers(%zu, %zu)", static_cast<size_t>(n_rows), static_cast<size_t>(n_clusters));
  if (n_clusters == 0) { return false; }
  constexpr static std::array kPrimes{29,   71,   113,  173,  229,  281,  349,  409,  463,  541,
                                      601,  659,  733,  809,  863,  941,  1013, 1069, 1151, 1223,
                                      1291, 1373, 1451, 1511, 1583, 1657, 1733, 1811, 1889, 1987,
                                      2053, 2129, 2213, 2287, 2357, 2423, 2531, 2617, 2687, 2741};
  // Rotate to the next prime that does not divide n_rows: with gcd(seed, n_rows) == 1,
  // the kernel's sequence (seed * k) % n_rows visits every row.
  // (An unused `static IdxT i` that was never read has been removed.)
  static IdxT i_primes = 0;

  IdxT average = n_rows / n_clusters;
  IdxT ofst;
  do {
    i_primes = (i_primes + 1) % kPrimes.size();
    ofst     = kPrimes[i_primes];
  } while (n_rows % ofst == 0);

  constexpr uint32_t kBlockDimY = 4;
  const dim3 block_dim(WarpSize, kBlockDimY, 1);
  // One warp (x dimension) per cluster; the y grid dimension covers all clusters.
  const dim3 grid_dim(1, raft::ceildiv(n_clusters, static_cast<IdxT>(kBlockDimY)), 1);
  rmm::device_scalar<IdxT> update_count(0, stream, device_memory);
  adjust_centers_kernel<kBlockDimY><<<grid_dim, block_dim, 0, stream>>>(centers,
                                                                        n_clusters,
                                                                        dim,
                                                                        dataset,
                                                                        n_rows,
                                                                        labels,
                                                                        cluster_sizes,
                                                                        threshold,
                                                                        average,
                                                                        ofst,
                                                                        update_count.data(),
                                                                        mapping_op);
  // Surface launch-configuration errors immediately instead of at the next sync.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
  return update_count.value(stream) > 0;  // NB: rmm scalar performs the sync
}
/**
 * @brief Expectation-maximization-balancing combined in an iterative process.
 *
 * Note, the `cluster_centers` is assumed to be already initialized here.
 * Thus, this function can be used for fine-tuning existing clusters;
 * to train from scratch, use `build_clusters` function below.
 *
 * @tparam T element type
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 * @tparam LabelT label type
 * @tparam CounterT counter type supported by CUDA's native atomicAdd
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[in] handle The raft handle
 * @param[in] params Structure containing the hyper-parameters
 * @param[in] n_iters Requested number of iterations (can differ from params.n_iter!)
 * @param[in] dim Dimensionality of the dataset
 * @param[in] dataset Pointer to a managed row-major array [n_rows, dim]
 * @param[in] dataset_norm Pointer to the precomputed norm (for L2 metrics only) [n_rows]
 * @param[in] n_rows Number of rows in the dataset
 * @param[in] n_clusters Requested number of clusters
 * @param[inout] cluster_centers Pointer to a managed row-major array [n_clusters, dim]
 * @param[out] cluster_labels Pointer to a managed row-major array [n_rows]
 * @param[out] cluster_sizes Pointer to a managed row-major array [n_clusters]
 * @param[in] balancing_pullback
 *   if the cluster centers are rebalanced on this number of iterations,
 *   one extra iteration is performed (this could happen several times) (default should be `2`).
 *   In other words, the first and then every `balancing_pullback`-th rebalancing operation adds
 *   one more iteration to the main cycle.
 * @param[in] balancing_threshold
 *   the rebalancing takes place if any cluster is smaller than `avg_size * balancing_threshold`
 *   on a given iteration (default should be `~ 0.25`).
 * @param[in] mapping_op Mapping operation from T to MathT
 * @param[inout] device_memory
 *   A memory resource for device allocations (makes sense to provide a memory pool here)
 */
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void balancing_em_iters(const raft::resources& handle,
                        const kmeans_balanced_params& params,
                        uint32_t n_iters,
                        IdxT dim,
                        const T* dataset,
                        const MathT* dataset_norm,
                        IdxT n_rows,
                        IdxT n_clusters,
                        MathT* cluster_centers,
                        LabelT* cluster_labels,
                        CounterT* cluster_sizes,
                        uint32_t balancing_pullback,
                        MathT balancing_threshold,
                        MappingOpT mapping_op,
                        rmm::mr::device_memory_resource* device_memory)
{
  auto stream                = resource::get_cuda_stream(handle);
  uint32_t balancing_counter = balancing_pullback;
  for (uint32_t iter = 0; iter < n_iters; iter++) {
    // Balancing step - move the centers around to equalize cluster sizes
    // (but not on the first iteration)
    if (iter > 0 && adjust_centers(cluster_centers,
                                   n_clusters,
                                   dim,
                                   dataset,
                                   n_rows,
                                   cluster_labels,
                                   cluster_sizes,
                                   balancing_threshold,
                                   mapping_op,
                                   stream,
                                   device_memory)) {
      // Rebalancing invalidated the labels; every `balancing_pullback`-th rebalance grants
      // one extra iteration (note: this intentionally increments the loop bound `n_iters`).
      if (balancing_counter++ >= balancing_pullback) {
        balancing_counter -= balancing_pullback;
        n_iters++;
      }
    }
    switch (params.metric) {
      // For some metrics, cluster calculation and adjustment tends to favor zero center vectors.
      // To avoid converging to zero, we normalize the center vectors on every iteration.
      case raft::distance::DistanceType::InnerProduct:
      case raft::distance::DistanceType::CosineExpanded:
      case raft::distance::DistanceType::CorrelationExpanded: {
        // In-place L2 row normalization of the centers (the two views alias the same buffer).
        auto clusters_in_view = raft::make_device_matrix_view<const MathT, IdxT, raft::row_major>(
          cluster_centers, n_clusters, dim);
        auto clusters_out_view = raft::make_device_matrix_view<MathT, IdxT, raft::row_major>(
          cluster_centers, n_clusters, dim);
        raft::linalg::row_normalize(
          handle, clusters_in_view, clusters_out_view, raft::linalg::L2Norm);
        break;
      }
      default: break;
    }
    // E: Expectation step - predict labels
    predict(handle,
            params,
            cluster_centers,
            n_clusters,
            dim,
            dataset,
            n_rows,
            cluster_labels,
            mapping_op,
            device_memory,
            dataset_norm);
    // M: Maximization step - calculate optimal cluster centers
    calc_centers_and_sizes(handle,
                           cluster_centers,
                           cluster_sizes,
                           n_clusters,
                           dim,
                           dataset,
                           n_rows,
                           cluster_labels,
                           true,
                           mapping_op,
                           device_memory);
  }
}
/**
 * @brief Randomly initialize cluster centers and then call `balancing_em_iters`.
 *
 * @tparam T element type
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 * @tparam LabelT label type
 * @tparam CounterT counter type supported by CUDA's native atomicAdd
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[in] handle The raft handle
 * @param[in] params Structure containing the hyper-parameters
 * @param[in] dim Dimensionality of the dataset
 * @param[in] dataset Pointer to the data [n_rows, dim]
 * @param[in] n_rows Number of samples in the `dataset`
 * @param[in] n_clusters Requested number of clusters
 * @param[out] cluster_centers Output centers [n_clusters, dim]
 * @param[out] cluster_labels Output labels [n_rows]
 * @param[out] cluster_sizes Number of rows in each cluster [n_clusters]
 * @param[in] mapping_op Mapping operation from T to MathT
 * @param[inout] device_memory Memory resource for device allocations
 * @param[in] dataset_norm (optional) Pre-computed norms of each row in the dataset [n_rows]
 */
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
void build_clusters(const raft::resources& handle,
                    const kmeans_balanced_params& params,
                    IdxT dim,
                    const T* dataset,
                    IdxT n_rows,
                    IdxT n_clusters,
                    MathT* cluster_centers,
                    LabelT* cluster_labels,
                    CounterT* cluster_sizes,
                    MappingOpT mapping_op,
                    rmm::mr::device_memory_resource* device_memory,
                    const MathT* dataset_norm = nullptr)
{
  // (removed an unused local `stream`; all callees take the handle and fetch it themselves)

  // "randomly" initialize labels: row i starts in cluster (i % n_clusters).
  auto labels_view = raft::make_device_vector_view<LabelT, IdxT>(cluster_labels, n_rows);
  linalg::map_offset(
    handle,
    labels_view,
    raft::compose_op(raft::cast_op<LabelT>(), raft::mod_const_op<IdxT>(n_clusters)));

  // update centers to match the initialized labels.
  calc_centers_and_sizes(handle,
                         cluster_centers,
                         cluster_sizes,
                         n_clusters,
                         dim,
                         dataset,
                         n_rows,
                         cluster_labels,
                         true,
                         mapping_op,
                         device_memory);

  // run EM
  balancing_em_iters(handle,
                     params,
                     params.n_iters,
                     dim,
                     dataset,
                     dataset_norm,
                     n_rows,
                     n_clusters,
                     cluster_centers,
                     cluster_labels,
                     cluster_sizes,
                     2,
                     MathT{0.25},
                     mapping_op,
                     device_memory);
}
/**
 * Calculate how many fine clusters should belong to each mesocluster.
 *
 * Fine clusters are distributed proportionally to the mesocluster sizes, while
 * guaranteeing at least one fine cluster per non-empty mesocluster. Returns a tuple of
 * (largest mesocluster size, largest fine-cluster count, per-mesocluster counts,
 * exclusive prefix sum of those counts).
 */
template <typename IdxT, typename CounterT>
inline auto arrange_fine_clusters(IdxT n_clusters,
                                  IdxT n_mesoclusters,
                                  IdxT n_rows,
                                  const CounterT* mesocluster_sizes)
{
  std::vector<IdxT> fine_clusters_nums(n_mesoclusters);
  std::vector<IdxT> fine_clusters_csum(n_mesoclusters + 1);
  fine_clusters_csum[0] = 0;

  // Count the mesoclusters that actually contain data.
  IdxT n_nonempty_ms_rem = 0;
  for (IdxT i = 0; i < n_mesoclusters; i++) {
    if (mesocluster_sizes[i] > CounterT{0}) { n_nonempty_ms_rem++; }
  }

  IdxT n_lists_rem              = n_clusters;
  IdxT n_rows_rem               = n_rows;
  CounterT mesocluster_size_sum = 0;
  CounterT mesocluster_size_max = 0;
  IdxT fine_clusters_nums_max   = 0;

  for (IdxT i = 0; i < n_mesoclusters; i++) {
    if (i == n_mesoclusters - 1) {
      // The last mesocluster gets everything that is left.
      fine_clusters_nums[i] = n_lists_rem;
    } else if (mesocluster_sizes[i] == 0) {
      // Although the algorithm is meant to produce balanced clusters, when something
      // goes wrong, we may get empty clusters (e.g. during development/debugging);
      // empty mesoclusters receive no fine clusters.
      fine_clusters_nums[i] = 0;
    } else {
      // Proportional share (rounded to nearest), capped so that every remaining
      // non-empty mesocluster can still get at least one fine cluster, and floored at one.
      n_nonempty_ms_rem--;
      auto share = static_cast<IdxT>(
        static_cast<double>(n_lists_rem * mesocluster_sizes[i]) / n_rows_rem + .5);
      share                 = std::min<IdxT>(share, n_lists_rem - n_nonempty_ms_rem);
      fine_clusters_nums[i] = std::max(share, IdxT{1});
    }
    n_lists_rem -= fine_clusters_nums[i];
    n_rows_rem -= mesocluster_sizes[i];
    mesocluster_size_max = std::max(mesocluster_size_max, mesocluster_sizes[i]);
    mesocluster_size_sum += mesocluster_sizes[i];
    fine_clusters_nums_max    = std::max(fine_clusters_nums_max, fine_clusters_nums[i]);
    fine_clusters_csum[i + 1] = fine_clusters_csum[i] + fine_clusters_nums[i];
  }

  RAFT_EXPECTS(static_cast<IdxT>(mesocluster_size_sum) == n_rows,
               "mesocluster sizes do not add up (%zu) to the total trainset size (%zu)",
               static_cast<size_t>(mesocluster_size_sum),
               static_cast<size_t>(n_rows));
  RAFT_EXPECTS(fine_clusters_csum[n_mesoclusters] == n_clusters,
               "fine cluster numbers do not add up (%zu) to the total number of clusters (%zu)",
               static_cast<size_t>(fine_clusters_csum[n_mesoclusters]),
               static_cast<size_t>(n_clusters));

  return std::make_tuple(static_cast<IdxT>(mesocluster_size_max),
                         fine_clusters_nums_max,
                         std::move(fine_clusters_nums),
                         std::move(fine_clusters_csum));
}
/**
 * Given the (coarse) mesoclusters and the distribution of fine clusters within them,
 * build the fine clusters.
 *
 * Processing one mesocluster at a time:
 *   1. Copy mesocluster data into a separate buffer
 *   2. Predict fine cluster
 *   3. Refine the fine cluster centers
 *
 * As a result, the fine clusters are what is returned by `build_hierarchical`;
 * this function returns the total number of fine clusters, which can be checked to be
 * the same as the requested number of clusters.
 *
 * Note: this function uses at most `fine_clusters_nums_max` points per mesocluster for training;
 * if one of the clusters is larger than that (as given by `mesocluster_sizes`), the extra data
 * is ignored and a warning is reported.
 */
template <typename T,
          typename MathT,
          typename IdxT,
          typename LabelT,
          typename CounterT,
          typename MappingOpT>
auto build_fine_clusters(const raft::resources& handle,
                         const kmeans_balanced_params& params,
                         IdxT dim,
                         const T* dataset_mptr,
                         const MathT* dataset_norm_mptr,
                         const LabelT* labels_mptr,
                         IdxT n_rows,
                         const IdxT* fine_clusters_nums,
                         const IdxT* fine_clusters_csum,
                         const CounterT* mesocluster_sizes,
                         IdxT n_mesoclusters,
                         IdxT mesocluster_size_max,
                         IdxT fine_clusters_nums_max,
                         MathT* cluster_centers,
                         MappingOpT mapping_op,
                         rmm::mr::device_memory_resource* managed_memory,
                         rmm::mr::device_memory_resource* device_memory) -> IdxT
{
  auto stream = resource::get_cuda_stream(handle);
  // The row-id buffer is written by the host loop below, hence the managed memory resource.
  rmm::device_uvector<IdxT> mc_trainset_ids_buf(mesocluster_size_max, stream, managed_memory);
  rmm::device_uvector<MathT> mc_trainset_buf(mesocluster_size_max * dim, stream, device_memory);
  rmm::device_uvector<MathT> mc_trainset_norm_buf(mesocluster_size_max, stream, device_memory);
  auto mc_trainset_ids  = mc_trainset_ids_buf.data();
  auto mc_trainset      = mc_trainset_buf.data();
  auto mc_trainset_norm = mc_trainset_norm_buf.data();

  // label (cluster ID) of each vector
  rmm::device_uvector<LabelT> mc_trainset_labels(mesocluster_size_max, stream, device_memory);

  rmm::device_uvector<MathT> mc_trainset_ccenters(
    fine_clusters_nums_max * dim, stream, device_memory);
  // number of vectors in each cluster
  rmm::device_uvector<CounterT> mc_trainset_csizes_tmp(
    fine_clusters_nums_max, stream, device_memory);

  // Training clusters in each meso-cluster
  IdxT n_clusters_done = 0;
  for (IdxT i = 0; i < n_mesoclusters; i++) {
    IdxT k = 0;
    // Collect (on the host) the ids of the rows belonging to mesocluster i, capped at
    // mesocluster_size_max (extra rows are ignored — see the function-level note).
    for (IdxT j = 0; j < n_rows && k < mesocluster_size_max; j++) {
      if (labels_mptr[j] == LabelT(i)) { mc_trainset_ids[k++] = j; }
    }
    if (k != static_cast<IdxT>(mesocluster_sizes[i]))
      RAFT_LOG_WARN("Incorrect mesocluster size at %d. %zu vs %zu",
                    static_cast<int>(i),
                    static_cast<size_t>(k),
                    static_cast<size_t>(mesocluster_sizes[i]));
    if (k == 0) {
      RAFT_LOG_DEBUG("Empty cluster %d", i);
      RAFT_EXPECTS(fine_clusters_nums[i] == 0,
                   "Number of fine clusters must be zero for the empty mesocluster (got %d)",
                   static_cast<int>(fine_clusters_nums[i]));
      continue;
    } else {
      RAFT_EXPECTS(fine_clusters_nums[i] > 0,
                   "Number of fine clusters must be non-zero for a non-empty mesocluster");
    }

    // Gather the mesocluster rows (converted to MathT on the fly) into a dense buffer.
    cub::TransformInputIterator<MathT, MappingOpT, const T*> mapping_itr(dataset_mptr, mapping_op);
    raft::matrix::gather(mapping_itr, dim, n_rows, mc_trainset_ids, k, mc_trainset, stream);
    if (params.metric == raft::distance::DistanceType::L2Expanded ||
        params.metric == raft::distance::DistanceType::L2SqrtExpanded) {
      // Also gather the matching precomputed norms for the expanded L2 metrics.
      thrust::gather(resource::get_thrust_policy(handle),
                     mc_trainset_ids,
                     mc_trainset_ids + k,
                     dataset_norm_mptr,
                     mc_trainset_norm);
    }

    // Cluster this mesocluster's rows into its assigned number of fine clusters.
    build_clusters(handle,
                   params,
                   dim,
                   mc_trainset,
                   k,
                   fine_clusters_nums[i],
                   mc_trainset_ccenters.data(),
                   mc_trainset_labels.data(),
                   mc_trainset_csizes_tmp.data(),
                   mapping_op,
                   device_memory,
                   mc_trainset_norm);
    // Write the fine centers at this mesocluster's offset within the global output.
    raft::copy(cluster_centers + (dim * fine_clusters_csum[i]),
               mc_trainset_ccenters.data(),
               fine_clusters_nums[i] * dim,
               stream);
    resource::sync_stream(handle, stream);
    n_clusters_done += fine_clusters_nums[i];
  }
  return n_clusters_done;
}
/**
 * @brief Hierarchical balanced k-means.
 *
 * Top-level entry point: first groups the data into ~sqrt(n_clusters) balanced
 * "mesoclusters", then splits each mesocluster into a proportional number of fine
 * clusters, and finally fine-tunes all clusters together with a few EM iterations.
 *
 * (The previous doc comment documented nonexistent `metric`/`stream` parameters;
 * the metric is taken from `params` and the stream from `handle`.)
 *
 * @tparam T element type
 * @tparam MathT type of the centroids and mapped data
 * @tparam IdxT index type
 * @tparam MappingOpT type of the mapping operation
 *
 * @param[in] handle The raft handle.
 * @param[in] params Structure containing the hyper-parameters
 * @param[in] dim number of columns in `cluster_centers` and `dataset`
 * @param[in] dataset a device pointer to the source dataset [n_rows, dim]
 * @param[in] n_rows number of rows in the input
 * @param[out] cluster_centers a device pointer to the found cluster centers [n_clusters, dim]
 * @param[in] n_clusters requested number of clusters
 * @param[in] mapping_op Mapping operation from T to MathT
 */
template <typename T, typename MathT, typename IdxT, typename MappingOpT>
void build_hierarchical(const raft::resources& handle,
                        const kmeans_balanced_params& params,
                        IdxT dim,
                        const T* dataset,
                        IdxT n_rows,
                        MathT* cluster_centers,
                        IdxT n_clusters,
                        MappingOpT mapping_op)
{
  auto stream  = resource::get_cuda_stream(handle);
  using LabelT = uint32_t;

  // NOTE(review): '%u' assumes n_clusters fits in 32 bits — confirm for 64-bit IdxT.
  common::nvtx::range<common::nvtx::domain::raft> fun_scope(
    "build_hierarchical(%zu, %u)", static_cast<size_t>(n_rows), n_clusters);

  // Number of coarse clusters: ~sqrt(n_clusters), rounded, capped by n_clusters.
  IdxT n_mesoclusters = std::min(n_clusters, static_cast<IdxT>(std::sqrt(n_clusters) + 0.5));
  RAFT_LOG_DEBUG("build_hierarchical: n_mesoclusters: %u", n_mesoclusters);

  // Managed memory backs the buffers that are also read on the host further below.
  rmm::mr::managed_memory_resource managed_memory;
  rmm::mr::device_memory_resource* device_memory = resource::get_workspace_resource(handle);
  auto [max_minibatch_size, mem_per_row] =
    calc_minibatch_size<MathT>(n_clusters, n_rows, dim, params.metric, std::is_same_v<T, MathT>);
  // Back the workspace with a pool sized for one minibatch worth of temporaries.
  auto pool_guard =
    raft::get_pool_memory_resource(device_memory, mem_per_row * size_t(max_minibatch_size));
  if (pool_guard) {
    RAFT_LOG_DEBUG("build_hierarchical: using pool memory resource with initial size %zu bytes",
                   mem_per_row * size_t(max_minibatch_size));
  }

  // Precompute the L2 norm of the dataset if relevant.
  const MathT* dataset_norm = nullptr;
  rmm::device_uvector<MathT> dataset_norm_buf(0, stream, device_memory);
  if (params.metric == raft::distance::DistanceType::L2Expanded ||
      params.metric == raft::distance::DistanceType::L2SqrtExpanded) {
    dataset_norm_buf.resize(n_rows, stream);
    // Minibatched to bound the temporary conversion buffer inside compute_norm.
    for (IdxT offset = 0; offset < n_rows; offset += max_minibatch_size) {
      IdxT minibatch_size = std::min<IdxT>(max_minibatch_size, n_rows - offset);
      compute_norm(handle,
                   dataset_norm_buf.data() + offset,
                   dataset + dim * offset,
                   dim,
                   minibatch_size,
                   mapping_op,
                   device_memory);
    }
    dataset_norm = (const MathT*)dataset_norm_buf.data();
  }

  /* Temporary workaround to cub::DeviceHistogram not supporting any type that isn't natively
   * supported by atomicAdd: find a supported CounterT based on the IdxT. */
  typedef typename std::conditional_t<sizeof(IdxT) == 8, unsigned long long int, unsigned int>
    CounterT;

  // build coarse clusters (mesoclusters)
  rmm::device_uvector<LabelT> mesocluster_labels_buf(n_rows, stream, &managed_memory);
  rmm::device_uvector<CounterT> mesocluster_sizes_buf(n_mesoclusters, stream, &managed_memory);
  {
    // Scoped so the mesocluster centers are freed before the fine-cluster stage allocates.
    rmm::device_uvector<MathT> mesocluster_centers_buf(n_mesoclusters * dim, stream, device_memory);
    build_clusters(handle,
                   params,
                   dim,
                   dataset,
                   n_rows,
                   n_mesoclusters,
                   mesocluster_centers_buf.data(),
                   mesocluster_labels_buf.data(),
                   mesocluster_sizes_buf.data(),
                   mapping_op,
                   device_memory,
                   dataset_norm);
  }

  auto mesocluster_sizes  = mesocluster_sizes_buf.data();
  auto mesocluster_labels = mesocluster_labels_buf.data();

  // The managed labels/sizes are read on the host below; wait for the device to finish.
  resource::sync_stream(handle, stream);

  // build fine clusters
  auto [mesocluster_size_max, fine_clusters_nums_max, fine_clusters_nums, fine_clusters_csum] =
    arrange_fine_clusters(n_clusters, n_mesoclusters, n_rows, mesocluster_sizes);

  // Cap the per-mesocluster training size at twice the perfectly balanced size.
  const IdxT mesocluster_size_max_balanced = div_rounding_up_safe<size_t>(
    2lu * size_t(n_rows), std::max<size_t>(size_t(n_mesoclusters), 1lu));
  if (mesocluster_size_max > mesocluster_size_max_balanced) {
    RAFT_LOG_WARN(
      "build_hierarchical: built unbalanced mesoclusters (max_mesocluster_size == %u > %u). "
      "At most %u points will be used for training within each mesocluster. "
      "Consider increasing the number of training iterations `n_iters`.",
      mesocluster_size_max,
      mesocluster_size_max_balanced,
      mesocluster_size_max_balanced);
    RAFT_LOG_TRACE_VEC(mesocluster_sizes, n_mesoclusters);
    RAFT_LOG_TRACE_VEC(fine_clusters_nums.data(), n_mesoclusters);
    mesocluster_size_max = mesocluster_size_max_balanced;
  }

  auto n_clusters_done = build_fine_clusters(handle,
                                             params,
                                             dim,
                                             dataset,
                                             dataset_norm,
                                             mesocluster_labels,
                                             n_rows,
                                             fine_clusters_nums.data(),
                                             fine_clusters_csum.data(),
                                             mesocluster_sizes,
                                             n_mesoclusters,
                                             mesocluster_size_max,
                                             fine_clusters_nums_max,
                                             cluster_centers,
                                             mapping_op,
                                             &managed_memory,
                                             device_memory);
  RAFT_EXPECTS(n_clusters_done == n_clusters, "Didn't process all clusters.");

  rmm::device_uvector<CounterT> cluster_sizes(n_clusters, stream, device_memory);
  rmm::device_uvector<LabelT> labels(n_rows, stream, device_memory);

  // Fine-tuning k-means for all clusters
  //
  // (*) Since the likely cluster centroids have been calculated hierarchically already, the number
  // of iterations for fine-tuning kmeans for whole clusters should be reduced. However, there is a
  // possibility that the clusters could be unbalanced here, in which case the actual number of
  // iterations would be increased.
  //
  balancing_em_iters(handle,
                     params,
                     std::max<uint32_t>(params.n_iters / 10, 2),
                     dim,
                     dataset,
                     dataset_norm,
                     n_rows,
                     n_clusters,
                     cluster_centers,
                     labels.data(),
                     cluster_sizes.data(),
                     5,
                     MathT{0.2},
                     mapping_op,
                     device_memory);
}
} // namespace raft::cluster::detail
| 0 |
rapidsai_public_repos/raft/cpp/include/raft/cluster | rapidsai_public_repos/raft/cpp/include/raft/cluster/detail/kmeans_auto_find_k.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <thrust/host_vector.h>
#include <raft/core/logger.hpp>
#include <raft/cluster/detail/kmeans.cuh>
#include <raft/core/error.hpp>
#include <raft/core/resources.hpp>
#include <raft/stats/dispersion.cuh>
namespace raft::cluster::detail {
/**
 * Fit k-means with `val` clusters and record its quality metrics.
 *
 * Runs kmeans_fit_predict on X with params.n_clusters = val (params is
 * mutated), counts the resulting cluster sizes, and stores the fit residual
 * in resultsView[val] and the cluster dispersion in clusterDispertionView[val]
 * so that find_k() can compare candidate values of k.
 *
 * @param[in] handle raft resource handle
 * @param[in] X input data, size [n, d]
 * @param[inout] params k-means parameters; n_clusters is overwritten with val
 * @param[inout] centroids_view device buffer for centroids, at least [val, d]
 * @param[out] labels per-sample cluster assignments, size [n]
 * @param[out] clusterSizes per-cluster counts, at least [val]
 * @param[inout] workspace scratch buffer for countLabels
 * @param[out] clusterDispertionView host vector indexed by k; entry [val] is written
 * @param[out] resultsView host vector indexed by k; entry [val] receives the residual
 * @param[out] residual residual of this fit
 * @param[out] n_iter iterations used by this fit
 * @param[in] val candidate number of clusters to evaluate
 * @param[in] n number of samples
 * @param[in] d number of features
 */
template <typename value_t, typename idx_t>
void compute_dispersion(raft::resources const& handle,
                        raft::device_matrix_view<const value_t, idx_t> X,
                        KMeansParams& params,
                        raft::device_matrix_view<value_t, idx_t> centroids_view,
                        raft::device_vector_view<idx_t> labels,
                        raft::device_vector_view<idx_t> clusterSizes,
                        rmm::device_uvector<char>& workspace,
                        raft::host_vector_view<value_t> clusterDispertionView,
                        raft::host_vector_view<value_t> resultsView,
                        raft::host_scalar_view<value_t> residual,
                        raft::host_scalar_view<idx_t> n_iter,
                        int val,
                        idx_t n,
                        idx_t d)
{
  // Const view over the first `val` rows of the centroid buffer.
  auto centroids_const_view =
    raft::make_device_matrix_view<const value_t, idx_t>(centroids_view.data_handle(), val, d);
  idx_t* clusterSizes_ptr = clusterSizes.data_handle();
  auto cluster_sizes_view =
    raft::make_device_vector_view<const idx_t, idx_t>(clusterSizes_ptr, val);
  params.n_clusters = val;
  // Fit k-means with k = val; residual and n_iter are written through the views.
  raft::cluster::detail::kmeans_fit_predict<value_t, idx_t>(
    handle, params, X, std::nullopt, std::make_optional(centroids_view), labels, residual, n_iter);
  // Histogram the labels to get the size of each of the `val` clusters.
  detail::countLabels(handle, labels.data_handle(), clusterSizes.data_handle(), n, val, workspace);
  resultsView[val] = residual[0];
  clusterDispertionView[val] = raft::stats::cluster_dispersion(
    handle, centroids_const_view, cluster_sizes_view, std::nullopt, n);
}
/**
 * Automatically select a good number of clusters k for X in [kmin, kmax].
 *
 * Performs a binary search over k: each candidate is scored by a
 * Calinski-Harabasz-like objective (cluster dispersion over residual) computed
 * by compute_dispersion(). On return best_k holds the chosen k, and
 * residual/n_iter correspond to a k-means fit with that k.
 *
 * @param[in] handle raft resource handle
 * @param[in] X input data, size [n, d]
 * @param[out] best_k selected number of clusters
 * @param[out] residual residual of the final k-means fit
 * @param[out] n_iter iterations used by the final k-means fit
 * @param[in] kmax upper bound of the search range (<= n)
 * @param[in] kmin lower bound of the search range (raised to 2 internally)
 * @param[in] maxiter maximum k-means iterations per fit
 * @param[in] tol k-means convergence tolerance
 */
template <typename idx_t, typename value_t>
void find_k(raft::resources const& handle,
            raft::device_matrix_view<const value_t, idx_t> X,
            raft::host_scalar_view<idx_t> best_k,
            raft::host_scalar_view<value_t> residual,
            raft::host_scalar_view<idx_t> n_iter,
            idx_t kmax,
            idx_t kmin = 1,
            idx_t maxiter = 100,
            value_t tol = 1e-2)
{
  idx_t n = X.extent(0);
  idx_t d = X.extent(1);
  RAFT_EXPECTS(n >= 1, "n must be >= 1");
  RAFT_EXPECTS(d >= 1, "d must be >= 1");
  RAFT_EXPECTS(kmin >= 1, "kmin must be >= 1");
  RAFT_EXPECTS(kmax <= n, "kmax must be <= number of data samples in X");
  RAFT_EXPECTS(tol >= 0, "tolerance must be >= 0");
  RAFT_EXPECTS(maxiter >= 0, "maxiter must be >= 0");
  // Allocate memory
  // Device memory: centroids/clusterSizes are sized for the largest k (kmax)
  // and reused for every candidate.
  auto centroids = raft::make_device_matrix<value_t, idx_t>(handle, kmax, X.extent(1));
  auto clusterSizes = raft::make_device_vector<idx_t>(handle, kmax);
  auto labels = raft::make_device_vector<idx_t>(handle, n);
  rmm::device_uvector<char> workspace(0, resource::get_cuda_stream(handle));
  idx_t* clusterSizes_ptr = clusterSizes.data_handle();
  // Host memory: indexed directly by k, hence size kmax + 1.
  auto results = raft::make_host_vector<value_t>(kmax + 1);
  auto clusterDispersion = raft::make_host_vector<value_t>(kmax + 1);
  auto clusterDispertionView = clusterDispersion.view();
  auto resultsView = results.view();
  // Loop to find *best* k
  // Perform k-means in binary search
  int left = kmin; // must be at least 2
  int right = kmax; // int(floor(len(data)/2)) #assumption of clusters of size 2 at least
  int mid = ((unsigned int)left + (unsigned int)right) >> 1; // overflow-safe midpoint
  int oldmid = mid;
  int tests = 0;
  double objective[3]; // 0= left of mid, 1= right of mid
  if (left == 1) left = 2; // at least do 2 clusters
  KMeansParams params;
  params.max_iter = maxiter;
  params.tol = tol;
  // Evaluate the left edge of the search range.
  auto centroids_view =
    raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), left, d);
  compute_dispersion<value_t, idx_t>(handle,
                                     X,
                                     params,
                                     centroids_view,
                                     labels.view(),
                                     clusterSizes.view(),
                                     workspace,
                                     clusterDispertionView,
                                     resultsView,
                                     residual,
                                     n_iter,
                                     left,
                                     n,
                                     d);
  // eval right edge0
  // 1e20 acts as a "not yet evaluated" sentinel; retry up to 3 times if the
  // fit at `right` scored worse than the one at `left`.
  resultsView[right] = 1e20;
  while (resultsView[right] > resultsView[left] && tests < 3) {
    centroids_view =
      raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), right, d);
    compute_dispersion<value_t, idx_t>(handle,
                                       X,
                                       params,
                                       centroids_view,
                                       labels.view(),
                                       clusterSizes.view(),
                                       workspace,
                                       clusterDispertionView,
                                       resultsView,
                                       residual,
                                       n_iter,
                                       right,
                                       n,
                                       d);
    tests += 1;
  }
  // NOTE(review): (n - left) / (left - 1) mixes idx_t and int, so the division
  // truncates before the double multiply — confirm this is intended.
  objective[0] = (n - left) / (left - 1) * clusterDispertionView[left] / resultsView[left];
  objective[1] = (n - right) / (right - 1) * clusterDispertionView[right] / resultsView[right];
  // Binary search: shrink [left, right] until adjacent.
  while (left < right - 1) {
    resultsView[mid] = 1e20;
    tests = 0;
    // Evaluate `mid`; if it scores worse than `left`, nudge mid towards a
    // neighboring k and retry (at most 3 attempts).
    while (resultsView[mid] > resultsView[left] && tests < 3) {
      centroids_view =
        raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), mid, d);
      compute_dispersion<value_t, idx_t>(handle,
                                         X,
                                         params,
                                         centroids_view,
                                         labels.view(),
                                         clusterSizes.view(),
                                         workspace,
                                         clusterDispertionView,
                                         resultsView,
                                         residual,
                                         n_iter,
                                         mid,
                                         n,
                                         d);
      if (resultsView[mid] > resultsView[left] && (mid + 1) < right) {
        mid += 1;
        resultsView[mid] = 1e20;
      } else if (resultsView[mid] > resultsView[left] && (mid - 1) > left) {
        mid -= 1;
        resultsView[mid] = 1e20;
      }
      tests += 1;
    }
    // maximize Calinski-Harabasz Index, minimize resid/ cluster
    objective[0] = (n - left) / (left - 1) * clusterDispertionView[left] / resultsView[left];
    objective[1] = (n - right) / (right - 1) * clusterDispertionView[right] / resultsView[right];
    objective[2] = (n - mid) / (mid - 1) * clusterDispertionView[mid] / resultsView[mid];
    // Convert to slopes of the objective on either side of mid; a maximum lies
    // between a rising left slope and a falling right slope.
    objective[0] = (objective[2] - objective[0]) / (mid - left);
    objective[1] = (objective[1] - objective[2]) / (right - mid);
    if (objective[0] > 0 && objective[1] < 0) {
      // our point is in the left-of-mid side
      right = mid;
    } else {
      left = mid;
    }
    oldmid = mid;
    mid = ((unsigned int)right + (unsigned int)left) >> 1;
  }
  // Pick the better of the two remaining candidates.
  best_k[0] = right;
  objective[0] = (n - left) / (left - 1) * clusterDispertionView[left] / resultsView[left];
  objective[1] = (n - oldmid) / (oldmid - 1) * clusterDispertionView[oldmid] / resultsView[oldmid];
  if (objective[1] < objective[0]) { best_k[0] = left; }
  // if best_k isn't what we just ran, re-run to get correct centroids and dist data on return->
  // this saves memory
  if (best_k[0] != oldmid) {
    auto centroids_view =
      raft::make_device_matrix_view<value_t, idx_t>(centroids.data_handle(), best_k[0], d);
    params.n_clusters = best_k[0];
    raft::cluster::detail::kmeans_fit_predict<value_t, idx_t>(handle,
                                                              params,
                                                              X,
                                                              std::nullopt,
                                                              std::make_optional(centroids_view),
                                                              labels.view(),
                                                              residual,
                                                              n_iter);
  }
}
} // namespace raft::cluster::detail | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/label/merge_labels.cuh | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MERGE_LABELS_H
#define __MERGE_LABELS_H
#pragma once
#include <raft/label/detail/merge_labels.cuh>
namespace raft {
namespace label {
/**
* @brief Merge two labellings in-place, according to a core mask
*
* A labelling is a representation of disjoint sets (groups) where points that
* belong to the same group have the same label. It is assumed that group
* labels take values between 1 and N. labels relate to points, i.e a label i+1
* means that you belong to the same group as the point i.
* The special value MAX_LABEL is used to mark points that are not labelled.
*
* The two label arrays A and B induce two sets of groups over points 0..N-1.
* If a point is labelled i in A and j in B and the mask is true for this
* point, then i and j are equivalent labels and their groups are merged by
* relabeling the elements of both groups to have the same label. The new label
* is the smaller one from the original labels.
* It is required that if the mask is true for a point, this point is labelled
* (i.e its label is different than the special value MAX_LABEL).
*
* One use case is finding connected components: the two input label arrays can
* represent the connected components of graphs G_A and G_B, and the output
* would be the connected components labels of G_A \union G_B.
*
* @param[inout] labels_a First input, and output label array (in-place)
* @param[in] labels_b Second input label array
* @param[in] mask Core point mask
* @param[out] R label equivalence map
* @param[in] m Working flag
* @param[in] N Number of points in the dataset
* @param[in] stream CUDA stream
*/
template <typename value_idx = int, int TPB_X = 256>
void merge_labels(value_idx* labels_a,
                  const value_idx* labels_b,
                  const bool* mask,
                  value_idx* R,
                  bool* m,
                  value_idx N,
                  cudaStream_t stream)
{
  // Thin public wrapper: all work happens in detail::merge_labels (see the
  // contract documented above).
  detail::merge_labels<value_idx, TPB_X>(labels_a, labels_b, mask, R, m, N, stream);
}
}; // namespace label
}; // namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft | rapidsai_public_repos/raft/cpp/include/raft/label/classlabels.cuh | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CLASS_LABELS_H
#define __CLASS_LABELS_H
#pragma once
#include <raft/label/detail/classlabels.cuh>
namespace raft {
namespace label {
/**
 * Get unique class labels.
 *
 * The y array is assumed to store class labels. The unique values are selected
 * from this array.
 *
 * @tparam value_t numeric type of the arrays with class labels
 * @param [inout] unique output device vector of unique labels; may be
 *   unallocated on entry, resized to [n_unique] on exit
 * @param [in] y device array of labels, size [n]
 * @param [in] n number of labels
 * @param [in] stream cuda stream
 * @returns the number of unique labels found in y
 */
template <typename value_t>
int getUniquelabels(rmm::device_uvector<value_t>& unique, value_t* y, size_t n, cudaStream_t stream)
{
  // Forwards to the detail implementation (CUB radix sort + unique selection).
  return detail::getUniquelabels<value_t>(unique, y, n, stream);
}
/**
 * Assign one versus rest labels.
 *
 * The output labels will have values +/-1:
 * y_out = (y == y_unique[idx]) ? +1 : -1;
 *
 * The output type currently is set to value_t, but for SVM in principle we are
 * free to choose other type for y_out (it should represent +/-1, and it is used
 * in floating point arithmetics).
 *
 * @param [in] y device array of input labels, size [n]
 * @param [in] n number of labels
 * @param [in] y_unique device array of unique labels, size [n_classes]
 * @param [in] n_classes number of unique labels
 * @param [out] y_out device array of output labels
 * @param [in] idx index of unique label that should be labeled as 1
 * @param [in] stream cuda stream
 */
template <typename value_t>
void getOvrlabels(
  value_t* y, int n, value_t* y_unique, int n_classes, value_t* y_out, int idx, cudaStream_t stream)
{
  // Thin public wrapper over the detail implementation.
  detail::getOvrlabels<value_t>(y, n, y_unique, n_classes, y_out, idx, stream);
}
/**
* Maps an input array containing a series of numbers into a new array
* where numbers have been mapped to a monotonically increasing set
* of labels. This can be useful in machine learning algorithms, for instance,
* where a given set of labels is not taken from a monotonically increasing
* set. This can happen if they are filtered or if only a subset of the
* total labels are used in a dataset. This is also useful in graph algorithms
* where a set of vertices need to be labeled in a monotonically increasing
* order.
* @tparam Type the numeric type of the input and output arrays
* @tparam Lambda the type of an optional filter function, which determines
* which items in the array to map.
* @param[out] out the output monotonic array
* @param[in] in input label array
* @param[in] N number of elements in the input array
* @param[in] stream cuda stream to use
* @param[in] filter_op an optional function for specifying which values
* should have monotonically increasing labels applied to them.
* @param[in] zero_based force monotonic set to start at 0?
*/
template <typename Type, typename Lambda>
void make_monotonic(
  Type* out, Type* in, size_t N, cudaStream_t stream, Lambda filter_op, bool zero_based = false)
{
  // Thin public wrapper over the detail implementation (see contract above).
  detail::make_monotonic<Type, Lambda>(out, in, N, stream, filter_op, zero_based);
}
/**
* Maps an input array containing a series of numbers into a new array
* where numbers have been mapped to a monotonically increasing set
* of labels. This can be useful in machine learning algorithms, for instance,
* where a given set of labels is not taken from a monotonically increasing
* set. This can happen if they are filtered or if only a subset of the
* total labels are used in a dataset. This is also useful in graph algorithms
* where a set of vertices need to be labeled in a monotonically increasing
* order.
* @tparam Type the numeric type of the input and output arrays
* @param[out] out output label array with labels assigned monotonically
* @param[in] in input label array
* @param[in] N number of elements in the input array
* @param[in] stream cuda stream to use
* @param[in] zero_based force monotonic label set to start at 0?
*/
template <typename Type>
void make_monotonic(Type* out, Type* in, size_t N, cudaStream_t stream, bool zero_based = false)
{
  // No filter variant: the detail implementation applies a constant-false
  // filter, so every input value is relabeled.
  detail::make_monotonic<Type>(out, in, N, stream, zero_based);
}
}; // namespace label
}; // end namespace raft
#endif | 0 |
rapidsai_public_repos/raft/cpp/include/raft/label | rapidsai_public_repos/raft/cpp/include/raft/label/detail/merge_labels.cuh | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <limits>
#include <math.h>
#include <raft/linalg/init.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft {
namespace label {
namespace detail {
/** Note: this is one possible implementation where we represent the label
* equivalence graph implicitly using labels_a, labels_b and mask.
* For an additional cost we can build the graph with edges
* E={(A[i], B[i]) | M[i]=1} and make this step faster */
/**
 * One propagation pass over the label equivalence map R (one thread per
 * point). Whenever the two labels of a masked point resolve to different
 * representatives, both entries of R are atomically lowered towards the
 * smaller representative and *m is set so the host repeats the pass.
 */
template <typename value_idx, int TPB_X = 256>
RAFT_KERNEL __launch_bounds__(TPB_X) propagate_label_kernel(const value_idx* __restrict__ labels_a,
                                                            const value_idx* __restrict__ labels_b,
                                                            value_idx* __restrict__ R,
                                                            const bool* __restrict__ mask,
                                                            bool* __restrict__ m,
                                                            value_idx N)
{
  value_idx tid = threadIdx.x + blockIdx.x * TPB_X;
  if (tid < N) {
    // Load the bool mask through the read-only cache (via a char* view).
    if (__ldg((char*)mask + tid)) {
      // Note: labels are from 1 to N
      value_idx la = __ldg(labels_a + tid) - 1;
      value_idx lb = __ldg(labels_b + tid) - 1;
      value_idx ra = R[la];
      value_idx rb = R[lb];
      if (ra != rb) {
        // Representatives differ: flag that another pass is needed.
        *m = true;
        // min(ra, rb) would be sufficient but this speeds up convergence
        value_idx rmin = R[min(ra, rb)];
        // atomicMin has no generic overload; dispatch on the width of value_idx.
        if (sizeof(value_idx) == 4) {
          atomicMin((int*)(R + la), rmin);
          atomicMin((int*)(R + lb), rmin);
        } else if (sizeof(value_idx) == 8) {
          atomicMin((long long int*)(R + la), rmin);
          atomicMin((long long int*)(R + lb), rmin);
        }
      }
    }
  }
}
/**
 * Rewrite each entry of labels_a to the smaller of the two equivalent labels
 * resolved through the equivalence map R (one thread per point). Unlabelled
 * points (MAX_LABEL) resolve to MAX_LABEL.
 */
template <typename value_idx, int TPB_X = 256>
RAFT_KERNEL __launch_bounds__(TPB_X) reassign_label_kernel(value_idx* __restrict__ labels_a,
                                                           const value_idx* __restrict__ labels_b,
                                                           const value_idx* __restrict__ R,
                                                           value_idx N,
                                                           value_idx MAX_LABEL)
{
  value_idx idx = threadIdx.x + blockIdx.x * TPB_X;
  if (idx >= N) return;

  // Labels are 1-based; R maps 0-based labels to their 0-based representative.
  value_idx label_a = labels_a[idx];
  value_idx label_b = __ldg(labels_b + idx);

  value_idx resolved_a = MAX_LABEL;
  if (label_a != MAX_LABEL) { resolved_a = __ldg(R + (label_a - 1)) + 1; }
  value_idx resolved_b = MAX_LABEL;
  if (label_b != MAX_LABEL) { resolved_b = __ldg(R + (label_b - 1)) + 1; }

  // Keep the smaller of the two equivalent labels.
  labels_a[idx] = min(resolved_a, resolved_b);
}
/**
* @brief Merge two labellings in-place, according to a core mask
*
* A labelling is a representation of disjoint sets (groups) where points that
* belong to the same group have the same label. It is assumed that group
* labels take values between 1 and N. labels relate to points, i.e a label i+1
* means that you belong to the same group as the point i.
* The special value MAX_LABEL is used to mark points that are not labelled.
*
* The two label arrays A and B induce two sets of groups over points 0..N-1.
* If a point is labelled i in A and j in B and the mask is true for this
* point, then i and j are equivalent labels and their groups are merged by
* relabeling the elements of both groups to have the same label. The new label
* is the smaller one from the original labels.
* It is required that if the mask is true for a point, this point is labelled
* (i.e its label is different than the special value MAX_LABEL).
*
* One use case is finding connected components: the two input label arrays can
* represent the connected components of graphs G_A and G_B, and the output
* would be the connected components labels of G_A \union G_B.
*
* @param[inout] labels_a First input, and output label array (in-place)
* @param[in] labels_b Second input label array
* @param[in] mask Core point mask
* @param[out] R label equivalence map
* @param[in] m Working flag
* @param[in] N Number of points in the dataset
* @param[in] stream CUDA stream
*/
template <typename value_idx = int, int TPB_X = 256>
void merge_labels(value_idx* labels_a,
                  const value_idx* labels_b,
                  const bool* mask,
                  value_idx* R,
                  bool* m,
                  value_idx N,
                  cudaStream_t stream)
{
  dim3 blocks(raft::ceildiv(N, value_idx(TPB_X)));
  dim3 threads(TPB_X);
  value_idx MAX_LABEL = std::numeric_limits<value_idx>::max();
  // Initialize R. R defines the relabeling rules; after merging the input
  // arrays, label l will be reassigned as R[l-1]+1.
  // Start from the identity map: R[i] = i.
  raft::linalg::range(R, N, stream);
  // We define the label equivalence graph: G = (V, E), where:
  // - V is the set of unique values from labels_a and labels_b
  // - E = {(labels_a[k], labels_b[k]) | mask[k] == true and k \in 0..n-1 }
  // The edges connect groups from the two labellings. Only points with true
  // mask can induce connection between groups.
  // Step 1: compute connected components in the label equivalence graph
  // Iterate propagation passes until a fixed point: the kernel sets *m
  // whenever it changed an entry of R, and we stop once a pass changes nothing.
  bool host_m;
  do {
    RAFT_CUDA_TRY(cudaMemsetAsync(m, false, sizeof(bool), stream));
    propagate_label_kernel<value_idx, TPB_X>
      <<<blocks, threads, 0, stream>>>(labels_a, labels_b, R, mask, m, N);
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    // Copy the "changed" flag to the host; the sync makes it valid to read.
    raft::update_host(&host_m, m, 1, stream);
    RAFT_CUDA_TRY(cudaStreamSynchronize(stream));
  } while (host_m);
  // Step 2: re-assign minimum equivalent label
  reassign_label_kernel<value_idx, TPB_X>
    <<<blocks, threads, 0, stream>>>(labels_a, labels_b, R, N, MAX_LABEL);
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
} // namespace detail
}; // namespace label
}; // namespace raft | 0 |
rapidsai_public_repos/raft/cpp/include/raft/label | rapidsai_public_repos/raft/cpp/include/raft/label/detail/classlabels.cuh | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cub/cub.cuh>
#include <raft/core/operators.hpp>
#include <raft/linalg/unary_op.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <algorithm>
namespace raft {
namespace label {
namespace detail {
/**
* Get unique class labels.
*
* The y array is assumed to store class labels. The unique values are selected
* from this array.
*
* \tparam value_t numeric type of the arrays with class labels
* \param [in] y device array of labels, size [n]
* \param [in] n number of labels
* \param [out] unique device array of unique labels, unallocated on entry,
* on exit it has size [n_unique]
* \param [out] n_unique number of unique labels
* \param [in] stream cuda stream
*/
template <typename value_t>
int getUniquelabels(rmm::device_uvector<value_t>& unique, value_t* y, size_t n, cudaStream_t stream)
{
  rmm::device_scalar<int> d_num_selected(stream);
  rmm::device_uvector<value_t> workspace(n, stream);
  size_t bytes = 0;
  size_t bytes2 = 0;
  // Query how much temporary storage we will need for cub operations
  // and allocate it
  cub::DeviceRadixSort::SortKeys(
    NULL, bytes, y, workspace.data(), n, 0, sizeof(value_t) * 8, stream);
  cub::DeviceSelect::Unique(
    NULL, bytes2, workspace.data(), workspace.data(), d_num_selected.data(), n, stream);
  // A single scratch buffer large enough for both operations.
  bytes = std::max(bytes, bytes2);
  rmm::device_uvector<char> cub_storage(bytes, stream);
  // Select Unique classes
  // Sort a copy of y into `workspace` (y itself is left untouched), then
  // compact consecutive duplicates in place.
  cub::DeviceRadixSort::SortKeys(
    cub_storage.data(), bytes, y, workspace.data(), n, 0, sizeof(value_t) * 8, stream);
  cub::DeviceSelect::Unique(cub_storage.data(),
                            bytes,
                            workspace.data(),
                            workspace.data(),
                            d_num_selected.data(),
                            n,
                            stream);
  // Read the count back to the host (stream-synchronizing read).
  int n_unique = d_num_selected.value(stream);
  // Copy unique classes to output
  unique.resize(n_unique, stream);
  raft::copy(unique.data(), workspace.data(), n_unique, stream);
  return n_unique;
}
/**
* Assign one versus rest labels.
*
* The output labels will have values +/-1:
* y_out = (y == y_unique[idx]) ? +1 : -1;
*
* The output type currently is set to value_t, but for SVM in principle we are
* free to choose other type for y_out (it should represent +/-1, and it is used
* in floating point arithmetics).
*
 * \param [in] y device array of input labels, size [n]
* \param [in] n number of labels
* \param [in] y_unique device array of unique labels, size [n_classes]
* \param [in] n_classes number of unique labels
* \param [out] y_out device array of output labels
* \param [in] idx index of unique label that should be labeled as 1
* \param [in] stream cuda stream
*/
template <typename value_t>
void getOvrlabels(
  value_t* y, int n, value_t* y_unique, int n_classes, value_t* y_out, int idx, cudaStream_t stream)
{
  ASSERT(idx < n_classes,
         "Parameter idx should not be larger than the number "
         "of classes");
  // Element-wise relabel via an extended __device__ lambda:
  // +1 for the selected class, -1 for everything else.
  raft::linalg::unaryOp(
    y_out,
    y,
    n,
    [idx, y_unique] __device__(value_t y) { return y == y_unique[idx] ? +1 : -1; },
    stream);
  // Surface launch errors from the unaryOp kernel immediately.
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
// TODO: add one-versus-one selection: select two classes, relabel them to
// +/-1, return array with the new class labels and corresponding indices.
/**
 * Kernel: relabel each input element to its position within the unique-label
 * array map_ids (plus one when labels are 1-based). Elements for which
 * filter_op returns true are skipped and their `out` slot is left untouched.
 */
template <typename Type, int TPB_X, typename Lambda>
RAFT_KERNEL map_label_kernel(Type* map_ids,
                             size_t N_labels,
                             Type* in,
                             Type* out,
                             size_t N,
                             Lambda filter_op,
                             bool zero_based = false)
{
  int idx = threadIdx.x + blockIdx.x * TPB_X;
  if (idx >= N) return;

  const Type query = in[idx];
  // Filtered-out values are not remapped.
  if (filter_op(query)) return;

  // Linear scan over the unique labels; the matching position becomes the
  // new (monotonic) label.
  for (size_t j = 0; j < N_labels; j++) {
    if (map_ids[j] == query) {
      out[idx] = j + !zero_based;
      break;
    }
  }
}
/**
 * Maps an input array containing a series of numbers into a new array
 * where numbers have been mapped to a monotonically increasing set
 * of labels. This can be useful in machine learning algorithms, for instance,
 * where a given set of labels is not taken from a monotonically increasing
 * set. This can happen if they are filtered or if only a subset of the
 * total labels are used in a dataset. This is also useful in graph algorithms
 * where a set of vertices need to be labeled in a monotonically increasing
 * order.
 * @tparam Type the numeric type of the input and output arrays
 * @tparam Lambda the type of an optional filter function, which determines
 * which items in the array to map.
 * @param out the output monotonic array
 * @param in input label array
 * @param N number of elements in the input array
 * @param stream cuda stream to use
 * @param filter_op an optional function for specifying which values
 * should have monotonically increasing labels applied to them.
 * @param zero_based force the monotonic label set to start at 0 instead of 1
 */
template <typename Type, typename Lambda>
void make_monotonic(
  Type* out, Type* in, size_t N, cudaStream_t stream, Lambda filter_op, bool zero_based = false)
{
  static const size_t TPB_X = 256;

  dim3 blocks(raft::ceildiv(N, TPB_X));
  dim3 threads(TPB_X);

  // Collect the sorted set of unique labels; the position of a label in this
  // array determines its new (monotonic) value.
  rmm::device_uvector<Type> map_ids(0, stream);
  int num_clusters = getUniquelabels(map_ids, in, N, stream);

  map_label_kernel<Type, TPB_X><<<blocks, threads, 0, stream>>>(
    map_ids.data(), num_clusters, in, out, N, filter_op, zero_based);
  // Surface kernel launch errors immediately (consistent with getOvrlabels).
  RAFT_CUDA_TRY(cudaPeekAtLastError());
}
/**
 * Maps an input array containing a series of numbers into a new array
 * where numbers have been mapped to a monotonically increasing set
 * of labels. This can be useful in machine learning algorithms, for instance,
 * where a given set of labels is not taken from a monotonically increasing
 * set. This can happen if they are filtered or if only a subset of the
 * total labels are used in a dataset. This is also useful in graph algorithms
 * where a set of vertices need to be labeled in a monotonically increasing
 * order.
 * @tparam Type the numeric type of the input and output arrays
 * @param out output label array with labels assigned monotonically
 * @param in input label array
 * @param N number of elements in the input array
 * @param stream cuda stream to use
 * @param zero_based force the monotonic label set to start at 0 instead of 1
 */
template <typename Type>
void make_monotonic(Type* out, Type* in, size_t N, cudaStream_t stream, bool zero_based = false)
{
  // Constant-false filter: every input value participates in the relabeling.
  make_monotonic<Type>(out, in, N, stream, raft::const_op(false), zero_based);
}
}; // namespace detail
}; // namespace label
}; // end namespace raft
| 0 |
rapidsai_public_repos/raft/cpp/bench | rapidsai_public_repos/raft/cpp/bench/prims/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# ##################################################################################################
# * compiler function -----------------------------------------------------------------------------
# Build and install one prims benchmark executable.
#
#   ConfigureBench(NAME <target> PATH <sources...> [LIB] [EXPLICIT_INSTANTIATE_ONLY])
#
# * LIB links the target against the precompiled raft::compiled library.
# * EXPLICIT_INSTANTIATE_ONLY defines RAFT_EXPLICIT_INSTANTIATE_ONLY for the target.
function(ConfigureBench)

  set(options OPTIONAL LIB EXPLICIT_INSTANTIATE_ONLY)
  set(oneValueArgs NAME)
  set(multiValueArgs PATH TARGETS CONFIGURATIONS)

  cmake_parse_arguments(ConfigureBench "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

  set(BENCH_NAME ${ConfigureBench_NAME})

  add_executable(${BENCH_NAME} ${ConfigureBench_PATH})

  target_link_libraries(
    ${BENCH_NAME}
    PRIVATE raft::raft
            raft_internal
            $<$<BOOL:${ConfigureBench_LIB}>:raft::compiled>
            ${RAFT_CTK_MATH_DEPENDENCIES}
            benchmark::benchmark
            Threads::Threads
            $<TARGET_NAME_IF_EXISTS:OpenMP::OpenMP_CXX>
            $<TARGET_NAME_IF_EXISTS:conda_env>
  )

  set_target_properties(
    ${BENCH_NAME}
    PROPERTIES # set target compile options
               INSTALL_RPATH "\$ORIGIN/../../../lib"
               CXX_STANDARD 17
               CXX_STANDARD_REQUIRED ON
               CUDA_STANDARD 17
               CUDA_STANDARD_REQUIRED ON
               POSITION_INDEPENDENT_CODE ON
               INTERFACE_POSITION_INDEPENDENT_CODE ON
  )

  target_compile_options(
    ${BENCH_NAME} PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${RAFT_CXX_FLAGS}>"
                          "$<$<COMPILE_LANGUAGE:CUDA>:${RAFT_CUDA_FLAGS}>"
  )

  # Bugfix: the parsed-argument prefix is "ConfigureBench" (set by
  # cmake_parse_arguments above), not "ConfigureTest". The previous spelling
  # tested an always-undefined variable, so RAFT_EXPLICIT_INSTANTIATE_ONLY was
  # never defined for benchmarks that requested it.
  if(ConfigureBench_EXPLICIT_INSTANTIATE_ONLY)
    target_compile_definitions(${BENCH_NAME} PRIVATE "RAFT_EXPLICIT_INSTANTIATE_ONLY")
  endif()

  target_include_directories(
    ${BENCH_NAME} PUBLIC "$<BUILD_INTERFACE:${RAFT_SOURCE_DIR}/bench/prims>"
  )

  install(
    TARGETS ${BENCH_NAME}
    COMPONENT testing
    DESTINATION bin/gbench/prims/libraft
    EXCLUDE_FROM_ALL
  )

endfunction()
# Register every prims micro-benchmark target, one ConfigureBench call per
# functional area. Each executable shares the common bench/prims/main.cpp
# entry point.
if(BUILD_PRIMS_BENCH)
  # Core utilities (bitset, mdspan copy)
  ConfigureBench(
    NAME CORE_BENCH PATH bench/prims/core/bitset.cu bench/prims/core/copy.cu bench/prims/main.cpp
  )

  # Clustering primitives
  ConfigureBench(
    NAME CLUSTER_BENCH PATH bench/prims/cluster/kmeans_balanced.cu bench/prims/cluster/kmeans.cu
    bench/prims/main.cpp OPTIONAL LIB EXPLICIT_INSTANTIATE_ONLY
  )

  # Pairwise-distance kernel tuning harness
  ConfigureBench(
    NAME TUNE_DISTANCE PATH bench/prims/distance/tune_pairwise/kernel.cu
    bench/prims/distance/tune_pairwise/bench.cu bench/prims/main.cpp
  )

  # Distance primitives
  ConfigureBench(
    NAME
    DISTANCE_BENCH
    PATH
    bench/prims/distance/distance_cosine.cu
    bench/prims/distance/distance_exp_l2.cu
    bench/prims/distance/distance_l1.cu
    bench/prims/distance/distance_unexp_l2.cu
    bench/prims/distance/fused_l2_nn.cu
    bench/prims/distance/masked_nn.cu
    bench/prims/distance/kernels.cu
    bench/prims/main.cpp
    OPTIONAL
    LIB
    EXPLICIT_INSTANTIATE_ONLY
  )

  # Dense linear algebra primitives
  ConfigureBench(
    NAME
    LINALG_BENCH
    PATH
    bench/prims/linalg/add.cu
    bench/prims/linalg/map_then_reduce.cu
    bench/prims/linalg/matrix_vector_op.cu
    bench/prims/linalg/norm.cu
    bench/prims/linalg/normalize.cu
    bench/prims/linalg/reduce_cols_by_key.cu
    bench/prims/linalg/reduce_rows_by_key.cu
    bench/prims/linalg/reduce.cu
    bench/prims/main.cpp
  )

  # Matrix primitives
  ConfigureBench(
    NAME
    MATRIX_BENCH
    PATH
    bench/prims/matrix/argmin.cu
    bench/prims/matrix/gather.cu
    bench/prims/matrix/select_k.cu
    bench/prims/matrix/main.cpp
    OPTIONAL
    LIB
    EXPLICIT_INSTANTIATE_ONLY
  )

  # Random generation primitives
  ConfigureBench(
    NAME RANDOM_BENCH PATH bench/prims/random/make_blobs.cu bench/prims/random/permute.cu
    bench/prims/random/rng.cu bench/prims/main.cpp
  )

  # Sparse primitives
  ConfigureBench(NAME SPARSE_BENCH PATH bench/prims/sparse/convert_csr.cu bench/prims/main.cpp)

  # Nearest-neighbor primitives (brute force, CAGRA, IVF-Flat, IVF-PQ, refine)
  ConfigureBench(
    NAME
    NEIGHBORS_BENCH
    PATH
    bench/prims/neighbors/knn/brute_force_float_int64_t.cu
    bench/prims/neighbors/knn/brute_force_float_uint32_t.cu
    bench/prims/neighbors/knn/cagra_float_uint32_t.cu
    bench/prims/neighbors/knn/ivf_flat_filter_float_int64_t.cu
    bench/prims/neighbors/knn/ivf_flat_float_int64_t.cu
    bench/prims/neighbors/knn/ivf_flat_int8_t_int64_t.cu
    bench/prims/neighbors/knn/ivf_flat_uint8_t_int64_t.cu
    bench/prims/neighbors/knn/ivf_pq_float_int64_t.cu
    bench/prims/neighbors/knn/ivf_pq_filter_float_int64_t.cu
    bench/prims/neighbors/knn/ivf_pq_int8_t_int64_t.cu
    bench/prims/neighbors/knn/ivf_pq_uint8_t_int64_t.cu
    bench/prims/neighbors/refine_float_int64_t.cu
    bench/prims/neighbors/refine_uint8_t_int64_t.cu
    bench/prims/main.cpp
    OPTIONAL
    LIB
    EXPLICIT_INSTANTIATE_ONLY
  )
endif()
| 0 |
rapidsai_public_repos/raft/cpp/bench | rapidsai_public_repos/raft/cpp/bench/prims/main.cpp | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmark/benchmark.h> // NOLINT
// Expands to a main() that parses benchmark flags and runs every registered
// benchmark; shared by all prims benchmark executables.
BENCHMARK_MAIN();
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/core/bitset.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/core/bitset.cuh>
#include <raft/core/device_mdspan.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::core {
// Benchmark dimensions for the bitset fixture below.
struct bitset_inputs {
  uint32_t bitset_len;  // number of bits in the bitset
  uint32_t mask_len;    // number of bit indices used to build the bitset
  uint32_t query_len;   // number of indices passed to bitset::test
};  // struct bitset_inputs
// Times construction of a raft::core::bitset from a list of bit indices plus a
// batched test() over `query_len` indices.
template <typename bitset_t, typename index_t>
struct bitset_bench : public fixture {
  // Fills `mask` with uniform random bit indices in [0, bitset_len).
  // NOTE(review): `queries` is allocated but never initialized, so test() runs
  // on indeterminate indices — confirm this is intentional for a throughput-only
  // benchmark and that bitset::test tolerates out-of-range values.
  bitset_bench(const bitset_inputs& p)
    : params(p),
      mask{raft::make_device_vector<index_t, index_t>(res, p.mask_len)},
      queries{raft::make_device_vector<index_t, index_t>(res, p.query_len)},
      outputs{raft::make_device_vector<bool, index_t>(res, p.query_len)}
  {
    raft::random::RngState state{42};
    raft::random::uniformInt(res, state, mask.view(), index_t{0}, index_t{p.bitset_len});
  }

  // Each iteration rebuilds the bitset from `mask` and tests all queries.
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      auto my_bitset = raft::core::bitset<bitset_t, index_t>(
        this->res, raft::make_const_mdspan(mask.view()), params.bitset_len);
      my_bitset.test(this->res, raft::make_const_mdspan(queries.view()), outputs.view());
    });
  }

 private:
  raft::resources res;
  bitset_inputs params;
  raft::device_vector<index_t, index_t> mask, queries;
  raft::device_vector<bool, index_t> outputs;
};  // struct bitset
// {bitset_len, mask_len, query_len}
const std::vector<bitset_inputs> bitset_input_vecs{
  {256 * 1024 * 1024, 64 * 1024 * 1024, 256 * 1024 * 1024},    // Standard Bench
  {256 * 1024 * 1024, 64 * 1024 * 1024, 1024 * 1024 * 1024},   // Extra queries
  {128 * 1024 * 1024, 1024 * 1024 * 1024, 256 * 1024 * 1024},  // Extra mask to test atomics impact
};

using Uint8_32 = bitset_bench<uint8_t, uint32_t>;
// NOTE(review): despite the "_64" suffix this alias uses a 32-bit index type —
// confirm whether uint64_t was intended (cf. Uint32_64 below).
using Uint16_64 = bitset_bench<uint16_t, uint32_t>;
using Uint32_32 = bitset_bench<uint32_t, uint32_t>;
using Uint32_64 = bitset_bench<uint32_t, uint64_t>;

RAFT_BENCH_REGISTER(Uint8_32, "", bitset_input_vecs);
RAFT_BENCH_REGISTER(Uint16_64, "", bitset_input_vecs);
RAFT_BENCH_REGISTER(Uint32_32, "", bitset_input_vecs);
RAFT_BENCH_REGISTER(Uint32_64, "", bitset_input_vecs);
} // namespace raft::bench::core
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/core/copy.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <cstdint>
#include <raft/core/copy.cuh>
#include <raft/core/cudart_utils.hpp>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/core/host_mdarray.hpp>
#include <raft/core/mdspan_types.hpp>
#include <raft/core/memory_type.hpp>
#include <raft/thirdparty/mdspan/include/experimental/mdspan>
namespace raft::bench::core {
// Default benchmark extents per rank. Ranks 1-3 get hand-picked sizes via the
// specializations below; any other rank falls back to extent 2 along every
// dimension.
template <typename IdxT, std::size_t Rank>
auto constexpr const default_dims = []() {
  auto dims = std::array<IdxT, Rank>{};
  std::fill(dims.begin(), dims.end(), 2);
  return dims;
}();

// 1-D: 3M elements.
template <typename IdxT>
auto constexpr const default_dims<IdxT, std::size_t{1}> = std::array<IdxT, 1>{3000000};

// 2-D: 1000 x 3000.
template <typename IdxT>
auto constexpr const default_dims<IdxT, std::size_t{2}> = std::array<IdxT, 2>{1000, 3000};

// 3-D: 20 x 300 x 500.
template <typename IdxT>
auto constexpr const default_dims<IdxT, std::size_t{3}> = std::array<IdxT, 3>{20, 300, 500};

// Maps (element type, index type, layout, memory type, rank) to the matching
// raft mdarray type with all-dynamic extents; the index_sequence parameter only
// exists to expand one dynamic_extent per rank.
template <typename T,
          typename IdxT,
          typename LayoutPolicy,
          memory_type MemType,
          std::size_t Rank,
          typename = std::make_index_sequence<Rank>>
struct bench_array_type;

template <typename T,
          typename IdxT,
          typename LayoutPolicy,
          memory_type MemType,
          std::size_t Rank,
          std::size_t... S>
struct bench_array_type<T, IdxT, LayoutPolicy, MemType, Rank, std::index_sequence<S...>> {
  // Every extent is dynamic; S is only used to repeat this Rank times.
  template <std::size_t>
  auto static constexpr const extent_type = raft::dynamic_extent;
  using type =
    std::conditional_t<MemType == memory_type::host,
                       host_mdarray<T, extents<IdxT, extent_type<S>...>, LayoutPolicy>,
                       device_mdarray<T, extents<IdxT, extent_type<S>...>, LayoutPolicy>>;
};

// Full description of one copy benchmark: source/destination array types plus
// the shared extents (defaulted per rank above).
template <typename SrcT,
          typename DstT,
          typename IdxT,
          typename SrcLayoutPolicy,
          typename DstLayoutPolicy,
          memory_type SrcMemType,
          memory_type DstMemType,
          std::size_t Rank>
struct params {
  std::array<IdxT, Rank> dims = default_dims<IdxT, Rank>;
  using src_array_type =
    typename bench_array_type<SrcT, IdxT, SrcLayoutPolicy, SrcMemType, Rank>::type;
  using dst_array_type =
    typename bench_array_type<DstT, IdxT, DstLayoutPolicy, DstMemType, Rank>::type;
};
// Fixture benchmarking raft::copy between the two mdarrays described by a
// params<...> instantiation. The source is filled with an iota sequence once at
// construction; each timed iteration copies src into dst.
template <typename SrcT,
          typename DstT,
          typename IdxT,
          typename SrcLayoutPolicy,
          typename DstLayoutPolicy,
          memory_type SrcMemType,
          memory_type DstMemType,
          std::size_t Rank>
struct CopyBench : public fixture {
  using params_type =
    params<SrcT, DstT, IdxT, SrcLayoutPolicy, DstLayoutPolicy, SrcMemType, DstMemType, Rank>;
  using src_array_type = typename params_type::src_array_type;
  using dst_array_type = typename params_type::dst_array_type;
  // Builds src/dst with the mapping derived from ps.dims and uploads the iota
  // data into the source array.
  explicit CopyBench(const params_type& ps)
    : fixture{true},
      res_{},
      params_{ps},
      src_{
        res_,
        typename src_array_type::mapping_type{
          std::apply([](auto... exts) { return make_extents<IdxT>(exts...); }, ps.dims)},
        typename src_array_type::container_policy_type{},
      },
      dst_{
        res_,
        typename dst_array_type::mapping_type{
          std::apply([](auto... exts) { return make_extents<IdxT>(exts...); }, ps.dims)},
        typename dst_array_type::container_policy_type{},
      }
  {
    // Initialize the cuBLAS handle up front, presumably so handle creation is
    // not attributed to the first timed copy (some layout-changing copies use
    // cuBLAS) — confirm against raft::copy internals.
    res_.get_cublas_handle();  // initialize cublas handle
    auto src_data = std::vector<SrcT>(src_.size());
    std::iota(src_data.begin(), src_data.end(), SrcT{});
    raft::copy(src_.data_handle(), src_data.data(), src_.size(), res_.get_stream());
  }

  // Timed body: one full src -> dst copy per iteration.
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() { raft::copy(res_, dst_.view(), src_.view()); });
  }

 private:
  raft::device_resources res_;
  params_type params_;
  src_array_type src_;
  dst_array_type dst_;
};
// One default-constructed parameter set per benchmark instantiation.
template <typename ParamsT>
auto static const inputs = std::vector<ParamsT>{ParamsT{}};

// NOTE(review): the literal string "BenchT" (not the stringized #BenchT) is
// passed as the name argument of every registration — confirm that
// RAFT_BENCH_REGISTER derives the distinguishing benchmark name from the type
// itself, otherwise all copy benchmarks share the same label.
#define COPY_REGISTER(BenchT) \
  RAFT_BENCH_REGISTER(BenchT, "BenchT", inputs<typename BenchT::params_type>)
// Concrete benchmark instantiations. Template arguments, in order:
// CopyBench<SrcT, DstT, IdxT, SrcLayout, DstLayout, SrcMemType, DstMemType, Rank>.
// clang-format off

// device -> device
using copy_bench_device_device_1d_same_dtype_same_layout        = CopyBench<int,   int,    int, layout_c_contiguous, layout_c_contiguous, memory_type::device, memory_type::device, 1>;
using copy_bench_device_device_1d_same_dtype_diff_layout        = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::device, 1>;
using copy_bench_device_device_1d_diff_dtype_diff_layout        = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::device, 1>;
using copy_bench_device_device_2d_same_dtype_diff_layout        = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::device, 2>;
using copy_bench_device_device_2d_same_dtype_diff_layout_cublas = CopyBench<float, float,  int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::device, 2>;
using copy_bench_device_device_3d_diff_dtype_diff_layout        = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::device, 3>;
using copy_bench_device_device_3d_diff_dtype_same_layout        = CopyBench<float, double, int, layout_c_contiguous, layout_c_contiguous, memory_type::device, memory_type::device, 3>;

// host -> host
using copy_bench_host_host_1d_same_dtype_same_layout            = CopyBench<int,   int,    int, layout_c_contiguous, layout_c_contiguous, memory_type::host, memory_type::host, 1>;
using copy_bench_host_host_1d_same_dtype_diff_layout            = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::host, 1>;
using copy_bench_host_host_1d_diff_dtype_diff_layout            = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::host, 1>;
using copy_bench_host_host_2d_same_dtype_diff_layout            = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::host, 2>;
using copy_bench_host_host_2d_same_dtype_diff_layout_float_float = CopyBench<float, float, int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::host, 2>;
using copy_bench_host_host_3d_diff_dtype_same_layout            = CopyBench<float, double, int, layout_c_contiguous, layout_c_contiguous, memory_type::host, memory_type::host, 3>;
using copy_bench_host_host_3d_diff_dtype_diff_layout            = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::host, 3>;

// device -> host
using copy_bench_device_host_1d_same_dtype_same_layout          = CopyBench<int,   int,    int, layout_c_contiguous, layout_c_contiguous, memory_type::device, memory_type::host, 1>;
using copy_bench_device_host_1d_same_dtype_diff_layout          = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::host, 1>;
using copy_bench_device_host_1d_diff_dtype_diff_layout          = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::host, 1>;
using copy_bench_device_host_2d_same_dtype_diff_layout          = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::host, 2>;
using copy_bench_device_host_2d_same_dtype_diff_layout_cublas   = CopyBench<float, float,  int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::host, 2>;
using copy_bench_device_host_3d_diff_dtype_same_layout          = CopyBench<float, double, int, layout_c_contiguous, layout_c_contiguous, memory_type::device, memory_type::host, 3>;
using copy_bench_device_host_3d_diff_dtype_diff_layout          = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::device, memory_type::host, 3>;

// host -> device
using copy_bench_host_device_1d_same_dtype_same_layout          = CopyBench<int,   int,    int, layout_c_contiguous, layout_c_contiguous, memory_type::host, memory_type::device, 1>;
using copy_bench_host_device_1d_same_dtype_diff_layout          = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::device, 1>;
using copy_bench_host_device_1d_diff_dtype_diff_layout          = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::device, 1>;
using copy_bench_host_device_2d_same_dtype_diff_layout          = CopyBench<int,   int,    int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::device, 2>;
using copy_bench_host_device_2d_same_dtype_diff_layout_cublas   = CopyBench<float, float,  int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::device, 2>;
using copy_bench_host_device_3d_diff_dtype_diff_layout          = CopyBench<float, double, int, layout_c_contiguous, layout_f_contiguous, memory_type::host, memory_type::device, 3>;
using copy_bench_host_device_3d_diff_dtype_same_layout          = CopyBench<float, double, int, layout_c_contiguous, layout_c_contiguous, memory_type::host, memory_type::device, 3>;
// clang-format on

// Registration order is preserved from the original file.
// COPY_REGISTER(copy_bench_same_dtype_1d_host_host);
COPY_REGISTER(copy_bench_device_device_1d_same_dtype_same_layout);
COPY_REGISTER(copy_bench_device_device_1d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_device_1d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_device_2d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_device_2d_same_dtype_diff_layout_cublas);
COPY_REGISTER(copy_bench_device_device_3d_diff_dtype_same_layout);
COPY_REGISTER(copy_bench_device_device_3d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_host_1d_same_dtype_same_layout);
COPY_REGISTER(copy_bench_host_host_1d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_host_1d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_host_2d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_host_2d_same_dtype_diff_layout_float_float);
COPY_REGISTER(copy_bench_host_host_3d_diff_dtype_same_layout);
COPY_REGISTER(copy_bench_host_host_3d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_host_1d_same_dtype_same_layout);
COPY_REGISTER(copy_bench_device_host_1d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_host_1d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_host_2d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_device_host_2d_same_dtype_diff_layout_cublas);
COPY_REGISTER(copy_bench_device_host_3d_diff_dtype_same_layout);
COPY_REGISTER(copy_bench_device_host_3d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_device_1d_same_dtype_same_layout);
COPY_REGISTER(copy_bench_host_device_1d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_device_1d_diff_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_device_2d_same_dtype_diff_layout);
COPY_REGISTER(copy_bench_host_device_2d_same_dtype_diff_layout_cublas);
COPY_REGISTER(copy_bench_host_device_3d_diff_dtype_same_layout);
COPY_REGISTER(copy_bench_host_device_3d_diff_dtype_diff_layout);
} // namespace raft::bench::core
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/sparse/convert_csr.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <common/benchmark.hpp>
#include <raft/sparse/convert/csr.cuh>
#include <rmm/device_uvector.hpp>
namespace raft::bench::sparse {
// Benchmark dimensions: the dense adjacency matrix is num_rows x num_cols and
// every divisor-th column is nonzero (so the fraction of nonzeros is 1/divisor).
template <typename index_t>
struct bench_param {
  index_t num_cols;
  index_t num_rows;
  index_t divisor;
};
// Initializes the dense adjacency matrix: adj[r][c] = (c % divisor == 0).
// Grid-strides over both dimensions so any 2D launch configuration covers the
// whole num_rows x num_cols matrix.
template <typename index_t>
RAFT_KERNEL init_adj_kernel(bool* adj, index_t num_rows, index_t num_cols, index_t divisor)
{
  // Bug fix: the column index must be re-initialized for every row iteration.
  // Previously `c` was declared once outside both loops, so after the first
  // y-stride pass it stayed >= num_cols and later rows were never written
  // (masked in practice only because the launch grid covered all rows at once).
  const index_t c0 = blockDim.x * blockIdx.x + threadIdx.x;
  for (index_t r = blockDim.y * blockIdx.y + threadIdx.y; r < num_rows;
       r += gridDim.y * blockDim.y) {
    for (index_t c = c0; c < num_cols; c += gridDim.x * blockDim.x) {
      adj[r * num_cols + c] = c % divisor == 0;
    }
  }
}
// Host launcher for init_adj_kernel: one thread per element where possible.
template <typename index_t>
void init_adj(bool* adj, index_t num_rows, index_t num_cols, index_t divisor, cudaStream_t stream)
{
  // adj matrix: element a_ij is set to one if j is divisible by divisor.
  dim3 block(32, 32);
  // gridDim.y is capped at the hardware limit of 65535; the kernel grid-strides
  // in y, so rows beyond the first pass are still covered.
  const index_t max_y_grid_dim = 65535;
  dim3 grid(num_cols / 32 + 1, (int)min(num_rows / 32 + 1, max_y_grid_dim));
  init_adj_kernel<index_t><<<grid, block, 0, stream>>>(adj, num_rows, num_cols, divisor);
  RAFT_CHECK_CUDA(stream);
}
// Benchmarks raft::sparse::convert::adj_to_csr and reports effective bandwidth.
template <typename index_t>
struct bench_base : public fixture {
  // Members are initialized in declaration order (handle first) to avoid the
  // -Wreorder mismatch the previous initializer list had.
  bench_base(const bench_param<index_t>& p)
    : handle(stream),
      params(p),
      adj(p.num_rows * p.num_cols, stream),
      row_ind(p.num_rows, stream),
      row_ind_host(p.num_rows),
      row_counters(p.num_rows, stream),
      // col_ind is over-dimensioned because nnz is unknown at this point
      col_ind(p.num_rows * p.num_cols, stream)
  {
    init_adj(adj.data(), p.num_rows, p.num_cols, p.divisor, stream);
    // Fill the member row_ind_host (a local of the same name previously
    // shadowed it) with the exclusive scan of per-row nonzero counts. Every
    // row has the same count, so the scan is a simple multiple.
    const index_t nnz_per_row = raft::ceildiv(p.num_cols, p.divisor);
    for (size_t i = 0; i < row_ind_host.size(); ++i) {
      row_ind_host[i] = nnz_per_row * static_cast<index_t>(i);
    }
    raft::update_device(row_ind.data(), row_ind_host.data(), row_ind.size(), stream);
  }

  // Timed body plus bandwidth counters derived from the matrix shape.
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      raft::sparse::convert::adj_to_csr<index_t>(handle,
                                                 adj.data(),
                                                 row_ind.data(),
                                                 params.num_rows,
                                                 params.num_cols,
                                                 row_counters.data(),
                                                 col_ind.data());
    });

    // Estimate bandwidth: the kernel reads the full bool matrix and writes one
    // index per nonzero (1/divisor of the elements).
    index_t num_entries = params.num_rows * params.num_cols;
    index_t bytes_read  = num_entries * sizeof(bool);
    index_t bytes_write = num_entries / params.divisor * sizeof(index_t);

    state.counters["BW"]         = benchmark::Counter(bytes_read + bytes_write,
                                              benchmark::Counter::kIsIterationInvariantRate,
                                              benchmark::Counter::OneK::kIs1024);
    state.counters["BW read"]    = benchmark::Counter(
      bytes_read, benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024);
    state.counters["BW write"]   = benchmark::Counter(bytes_write,
                                                    benchmark::Counter::kIsIterationInvariantRate,
                                                    benchmark::Counter::OneK::kIs1024);
    state.counters["Fraction nz"] = benchmark::Counter(100.0 / ((double)params.divisor));
    state.counters["Columns"]     = benchmark::Counter(params.num_cols);
    state.counters["Rows"]        = benchmark::Counter(params.num_rows);
  }

 protected:
  raft::device_resources handle;
  bench_param<index_t> params;
  rmm::device_uvector<bool> adj;            // dense input adjacency matrix
  rmm::device_uvector<index_t> row_ind;     // device copy of the row offsets
  std::vector<index_t> row_ind_host;        // host-side row offsets
  rmm::device_uvector<index_t> row_counters;
  rmm::device_uvector<index_t> col_ind;     // CSR column indices (output)
};  // struct bench_base
// Each case keeps the total adjacency size at 2^30 elements while varying the
// aspect ratio and sparsity. Fields: {num_cols, num_rows, divisor}.
const int64_t num_cols = 1 << 30;

const std::vector<bench_param<int64_t>> bench_params = {
  {num_cols, 1, 8},
  {num_cols >> 3, 1 << 3, 8},
  {num_cols >> 6, 1 << 6, 8},

  {num_cols, 1, 64},
  {num_cols >> 3, 1 << 3, 64},
  {num_cols >> 6, 1 << 6, 64},

  {num_cols, 1, 2048},
  {num_cols >> 3, 1 << 3, 2048},
  {num_cols >> 6, 1 << 6, 2048},
};

RAFT_BENCH_REGISTER(bench_base<int64_t>, "", bench_params);
} // namespace raft::bench::sparse
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/random/permute.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/random/permute.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::random {
// Benchmark dimensions for the permute fixture below.
struct permute_inputs {
  int rows, cols;
  // needPerms: allocate/emit the permutation vector; rowMajor: input layout.
  // NOTE(review): needShuffle is never read by the fixture below — confirm.
  bool needPerms, needShuffle, rowMajor;
};  // struct permute_inputs
// Benchmarks raft::random::permute over an in/out matrix pair, optionally
// emitting the permutation vector.
template <typename T>
struct permute : public fixture {
  // Initializer list reordered to match member declaration order (out, in,
  // perms) to silence -Wreorder; behavior is unchanged.
  permute(const permute_inputs& p)
    : params(p),
      out(p.rows * p.cols, stream),
      in(p.rows * p.cols, stream),
      perms(p.needPerms ? p.rows : 0, stream)
  {
    raft::random::RngState r(123456ULL);
    // NOTE(review): only the first p.rows values of `in` are randomized, not
    // all p.rows * p.cols — harmless for timing, but confirm intent.
    uniform(handle, r, in.data(), p.rows, T(-1.0), T(1.0));
  }

  void run_benchmark(::benchmark::State& state) override
  {
    // The RngState formerly declared here was captured but never used by the
    // loop body, so it has been removed.
    loop_on_state(state, [this]() {
      raft::random::permute(
        perms.data(), out.data(), in.data(), params.cols, params.rows, params.rowMajor, stream);
    });
  }

 private:
  raft::device_resources handle;
  permute_inputs params;
  rmm::device_uvector<T> out, in;
  rmm::device_uvector<int> perms;
};  // struct permute
// {rows, cols, needPerms, needShuffle, rowMajor}; column counts of 128, 129 and
// 130 exercise aligned and misaligned widths in both layouts.
const std::vector<permute_inputs> permute_input_vecs = {
  {32 * 1024, 128, true, true, true},
  {1024 * 1024, 128, true, true, true},
  {32 * 1024, 128 + 2, true, true, true},
  {1024 * 1024, 128 + 2, true, true, true},
  {32 * 1024, 128 + 1, true, true, true},
  {1024 * 1024, 128 + 1, true, true, true},
  {32 * 1024, 128, true, true, false},
  {1024 * 1024, 128, true, true, false},
  {32 * 1024, 128 + 2, true, true, false},
  {1024 * 1024, 128 + 2, true, true, false},
  {32 * 1024, 128 + 1, true, true, false},
  {1024 * 1024, 128 + 1, true, true, false},
};

RAFT_BENCH_REGISTER(permute<float>, "", permute_input_vecs);
RAFT_BENCH_REGISTER(permute<double>, "", permute_input_vecs);
} // namespace raft::bench::random
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/random/make_blobs.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/random/make_blobs.cuh>
#include <rmm/device_uvector.hpp>
#include <vector>
namespace raft::bench::random {
// Benchmark dimensions for make_blobs: samples x features, cluster count, and
// output layout.
struct make_blobs_inputs {
  int rows, cols, clusters;
  bool row_major;
};  // struct make_blobs_inputs
// Streams the parameters as a compact "rows#cols#clusters#row_major" label
// (used as the benchmark state label below).
inline std::ostream& operator<<(std::ostream& os, const make_blobs_inputs& p)
{
  return os << p.rows << "#" << p.cols << "#" << p.clusters << "#" << p.row_major;
}
// Benchmarks raft::random::make_blobs: generates `rows` samples of `cols`
// features assigned to `clusters` clusters, in the requested layout.
template <typename T>
struct make_blobs : public fixture {
  make_blobs(const make_blobs_inputs& p)
    : params(p), data(p.rows * p.cols, stream), labels(p.rows, stream)
  {
  }

  void run_benchmark(::benchmark::State& state) override
  {
    // Attach the "rows#cols#clusters#row_major" label (via operator<<).
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      raft::random::make_blobs(data.data(),
                               labels.data(),
                               params.rows,
                               params.cols,
                               params.clusters,
                               this->stream,
                               params.row_major);
    });
  }

 private:
  make_blobs_inputs params;
  rmm::device_uvector<T> data;     // rows * cols feature matrix
  rmm::device_uvector<int> labels; // per-sample labels filled by make_blobs
};  // struct MakeBlobs
// Builds the Cartesian product of benchmark sizes; every (rows, cols, clusters)
// combination is emitted twice — once row-major, once column-major.
static std::vector<make_blobs_inputs> get_make_blobs_input_vecs()
{
  std::vector<make_blobs_inputs> out;
  for (int rows : {100000, 1000000}) {
    for (int cols : {10, 100}) {
      for (int clusters : {2, 10, 100}) {
        out.push_back({rows, cols, clusters, true});
        out.push_back({rows, cols, clusters, false});
      }
    }
  }
  return out;
}
// Register the benchmark for both single and double precision.
RAFT_BENCH_REGISTER(make_blobs<float>, "", get_make_blobs_input_vecs());
RAFT_BENCH_REGISTER(make_blobs<double>, "", get_make_blobs_input_vecs());
} // namespace raft::bench::random
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/random/rng.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::random {
// Distribution selector for the rng benchmark below.
enum RandomType {
  RNG_Normal,
  RNG_LogNormal,
  RNG_Uniform,
  RNG_Gumbel,
  RNG_Logistic,
  RNG_Exp,
  RNG_Rayleigh,
  RNG_Laplace,
  RNG_Fill
};  // enum RandomType

// One benchmark case: `len` values of the chosen distribution drawn with
// generator `gtype`. `start`/`end` are the two distribution parameters;
// single-parameter cases (Exp, Rayleigh, Fill) use only `start`.
template <typename T>
struct rng_inputs {
  int len;
  RandomType type;
  raft::random::GeneratorType gtype;
  T start, end;
};  // struct rng_inputs
// Benchmarks raft::random generation: fills a device buffer of params.len
// elements with the distribution selected by params.type.
template <typename T>
struct rng : public fixture {
  rng(const rng_inputs<T>& p) : params(p), ptr(p.len, stream) {}

  void run_benchmark(::benchmark::State& state) override
  {
    // One generator state per benchmark run; every timed iteration advances it.
    raft::random::RngState rng_state(123456ULL, params.gtype);
    loop_on_state(state, [this, &rng_state]() {
      T* buf      = ptr.data();
      const int n = params.len;
      const T a   = params.start;
      const T b   = params.end;
      switch (params.type) {
        case RNG_Normal: normal(handle, rng_state, buf, n, a, b); break;
        case RNG_LogNormal: lognormal(handle, rng_state, buf, n, a, b); break;
        case RNG_Uniform: uniform(handle, rng_state, buf, n, a, b); break;
        case RNG_Gumbel: gumbel(handle, rng_state, buf, n, a, b); break;
        case RNG_Logistic: logistic(handle, rng_state, buf, n, a, b); break;
        case RNG_Exp: exponential(handle, rng_state, buf, n, a); break;
        case RNG_Rayleigh: rayleigh(handle, rng_state, buf, n, a); break;
        case RNG_Laplace: laplace(handle, rng_state, buf, n, a, b); break;
        case RNG_Fill: fill(handle, rng_state, buf, n, a); break;
      }
    });
  }

 private:
  rng_inputs<T> params;
  rmm::device_uvector<T> ptr;
};  // struct RngBench
// Builds the benchmark matrix: three (distribution, generator) sweeps —
// Uniform/Philox, Uniform/PC, Fill/Philox — each over lengths {1M, 32M, 1G}
// exactly aligned, then misaligned by +2, then by +1 (same order as the
// original hand-written table).
template <typename T>
static std::vector<rng_inputs<T>> get_rng_input_vecs()
{
  using namespace raft::random;
  struct sweep {
    RandomType type;
    GeneratorType gtype;
  };
  const sweep sweeps[] = {
    {RNG_Uniform, GenPhilox}, {RNG_Uniform, GenPC}, {RNG_Fill, GenPhilox}};
  const int bases[]   = {1024 * 1024, 32 * 1024 * 1024, 1024 * 1024 * 1024};
  const int offsets[] = {0, 2, 1};

  std::vector<rng_inputs<T>> out;
  for (const auto& s : sweeps) {
    for (int off : offsets) {
      for (int base : bases) {
        out.push_back({base + off, s.type, s.gtype, T(-1.0), T(1.0)});
      }
    }
  }
  return out;
}
// Register the benchmark for both single and double precision.
RAFT_BENCH_REGISTER(rng<float>, "", get_rng_input_vecs<float>());
RAFT_BENCH_REGISTER(rng<double>, "", get_rng_input_vecs<double>());
} // namespace raft::bench::random
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/distance_l1.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace raft::bench::distance {
// Register the L1 (Manhattan) distance benchmark using the shared harness
// from distance_common.cuh.
DIST_BENCH_REGISTER(DistanceL1, raft::distance::DistanceType::L1);
} // namespace raft::bench::distance
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/masked_nn.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <common/benchmark.hpp>
#include <limits>
#include <raft/core/device_mdarray.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/handle.hpp>
#include <raft/distance/masked_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/cudart_utils.hpp>
namespace raft::bench::distance::masked_nn {
// Introduce various sparsity patterns
// Sparsity pattern of the (row, group) adjacency matrix; see init_adj below
// for the exact definition of each pattern.
enum AdjacencyPattern {
  checkerboard    = 0,  // alternates every row
  checkerboard_4  = 1,  // alternates every 4 rows
  checkerboard_64 = 2,  // alternates every 64 rows
  all_true        = 3,  // dense: no computation can be skipped
  all_false       = 4   // empty: everything can be skipped
};

// Problem shape: m x k query matrix, n x k reference matrix, `num_groups`
// reference groups, and the adjacency pattern to benchmark.
struct Params {
  int m, n, k, num_groups;
  AdjacencyPattern pattern;
};  // struct Params
// Fills the m x num_groups adjacency matrix according to `pattern` and writes
// the inclusive group boundaries into group_idxs. Grid-strides in both
// dimensions, so any 2D launch configuration covers the whole matrix.
RAFT_KERNEL init_adj(AdjacencyPattern pattern,
                     int n,
                     raft::device_matrix_view<bool, int, raft::layout_c_contiguous> adj,
                     raft::device_vector_view<int, int, raft::layout_c_contiguous> group_idxs)
{
  int m          = adj.extent(0);
  int num_groups = adj.extent(1);

  for (int idx_m = blockIdx.y * blockDim.y + threadIdx.y; idx_m < m;
       idx_m += blockDim.y * gridDim.y) {
    for (int idx_g = blockIdx.x * blockDim.x + threadIdx.x; idx_g < num_groups;
         idx_g += blockDim.x * gridDim.x) {
      switch (pattern) {
        case checkerboard: adj(idx_m, idx_g) = (idx_m + idx_g) % 2; break;
        case checkerboard_4: adj(idx_m, idx_g) = (idx_m / 4 + idx_g) % 2; break;
        case checkerboard_64: adj(idx_m, idx_g) = (idx_m / 64 + idx_g) % 2; break;
        case all_true: adj(idx_m, idx_g) = true; break;
        case all_false: adj(idx_m, idx_g) = false; break;
        default: assert(false && "unknown pattern");
      }
    }
  }
  // Each group is of size n / num_groups.
  //
  // - group_idxs[j] indicates the start of group j + 1 (i.e. is the inclusive
  // scan of the group lengths)
  //
  // - The first group always starts at index zero, so we do not store it.
  //
  // - The group_idxs[num_groups - 1] should always equal n.
  if (blockIdx.y == 0 && threadIdx.y == 0) {
    const int g_stride = blockDim.x * gridDim.x;
    for (int idx_g = blockIdx.x * blockDim.x + threadIdx.x; idx_g < num_groups; idx_g += g_stride) {
      group_idxs(idx_g) = (idx_g + 1) * (n / num_groups);
    }
    // Every thread in row 0 of every x-block executes this store, but they all
    // write the same value, so the race is benign.
    group_idxs(num_groups - 1) = n;
  }
}
// Benchmark for the masked L2 nearest-neighbours primitive: for each of the m
// rows of x, find the nearest of the n rows of y, where the adjacency matrix
// `adj` (m x num_groups) masks out blocks of the computation.
template <typename T>
struct masked_l2_nn : public fixture {
  using DataT = T;
  using IdxT = int;
  // Result per query row: index of the nearest neighbour plus its distance.
  using OutT = raft::KeyValuePair<IdxT, DataT>;
  using RedOpT = raft::distance::MinAndDistanceReduceOp<int, DataT>;
  using PairRedOpT = raft::distance::KVPMinReduce<int, DataT>;
  using ParamT = raft::distance::masked_l2_nn_params<RedOpT, PairRedOpT>;
  // Parameters
  Params params;
  // Data
  raft::device_vector<OutT, IdxT> out;  // one result per row of x (size m)
  raft::device_matrix<T, IdxT> x, y;    // x: m x k queries, y: n x k candidates
  raft::device_vector<DataT, IdxT> xn, yn;  // precomputed row norms of x and y
  raft::device_matrix<bool, IdxT> adj;      // m x num_groups adjacency mask
  raft::device_vector<IdxT, IdxT> group_idxs;  // group boundaries in y, filled by init_adj
  masked_l2_nn(const Params& p)
    : params(p),
      out{raft::make_device_vector<OutT, IdxT>(handle, p.m)},
      x{raft::make_device_matrix<DataT, IdxT>(handle, p.m, p.k)},
      y{raft::make_device_matrix<DataT, IdxT>(handle, p.n, p.k)},
      xn{raft::make_device_vector<DataT, IdxT>(handle, p.m)},
      yn{raft::make_device_vector<DataT, IdxT>(handle, p.n)},
      adj{raft::make_device_matrix<bool, IdxT>(handle, p.m, p.num_groups)},
      group_idxs{raft::make_device_vector<IdxT, IdxT>(handle, p.num_groups)}
  {
    // Fixed seed keeps runs comparable across benchmark invocations.
    raft::random::RngState r(123456ULL);
    uniform(handle, r, x.data_handle(), p.m * p.k, T(-1.0), T(1.0));
    uniform(handle, r, y.data_handle(), p.n * p.k, T(-1.0), T(1.0));
    // Precompute row-wise L2 norms of x and y; masked_l2_nn consumes them below.
    raft::linalg::rowNorm(
      xn.data_handle(), x.data_handle(), p.k, p.m, raft::linalg::L2Norm, true, stream);
    raft::linalg::rowNorm(
      yn.data_handle(), y.data_handle(), p.k, p.n, raft::linalg::L2Norm, true, stream);
    // Initialize the output buffer with "max distance" sentinels.
    raft::distance::initialize<T, raft::KeyValuePair<int, T>, int>(
      handle, out.data_handle(), p.m, std::numeric_limits<T>::max(), RedOpT{});
    // Fill adj and group_idxs on device according to the requested pattern.
    // NOTE(review): the grid is fixed-size; init_adj presumably covers the full
    // matrix via grid-stride loops -- confirm against its definition above.
    dim3 block(32, 32);
    dim3 grid(10, 10);
    init_adj<<<grid, block, 0, stream>>>(p.pattern, p.n, adj.view(), group_idxs.view());
    RAFT_CUDA_TRY(cudaGetLastError());  // surface launch-configuration errors
  }
  void run_benchmark(::benchmark::State& state) override
  {
    bool init_out = true;
    bool sqrt = false;
    ParamT masked_l2_params{RedOpT{}, PairRedOpT{}, sqrt, init_out};
    loop_on_state(state, [this, masked_l2_params]() {
      // It is sufficient to only benchmark the L2-squared metric
      raft::distance::masked_l2_nn<DataT, OutT, IdxT>(handle,
                                                      masked_l2_params,
                                                      x.view(),
                                                      y.view(),
                                                      xn.view(),
                                                      yn.view(),
                                                      adj.view(),
                                                      group_idxs.view(),
                                                      out.view());
    });
    // Virtual flop count if no skipping had occurred.
    size_t virtual_flops = size_t(2) * size_t(params.m) * size_t(params.n) * size_t(params.k);
    int64_t read_elts = params.n * params.k + params.m * params.k;
    int64_t write_elts = params.m;
    // Virtual min flops is the number of flops that would have been executed if
    // the algorithm had actually skipped each computation that it could have
    // skipped.
    size_t virtual_min_flops = 0;
    switch (params.pattern) {
      case checkerboard:
      case checkerboard_4:
      case checkerboard_64: virtual_min_flops = virtual_flops / 2; break;
      case all_true: virtual_min_flops = virtual_flops; break;
      case all_false: virtual_min_flops = 0; break;
      default: assert(false && "unknown pattern");
    }
    // VFLOP/s is the "virtual" flop count that would have executed if there was
    // no adjacency pattern. This is useful for comparing to fusedL2NN
    state.counters["VFLOP/s"] = benchmark::Counter(virtual_flops,
                                                   benchmark::Counter::kIsIterationInvariantRate,
                                                   benchmark::Counter::OneK::kIs1000);
    // Virtual min flops is the number of flops that would have been executed if
    // the algorithm had actually skipped each computation that it could have
    // skipped.
    state.counters["VminFLOP/s"] = benchmark::Counter(virtual_min_flops,
                                                      benchmark::Counter::kIsIterationInvariantRate,
                                                      benchmark::Counter::OneK::kIs1000);
    state.counters["BW Wr"] = benchmark::Counter(write_elts * sizeof(OutT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
    state.counters["BW Rd"] = benchmark::Counter(read_elts * sizeof(DataT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
    // Raw problem-shape counters for easier filtering of the report.
    state.counters["m"] = benchmark::Counter(params.m);
    state.counters["n"] = benchmark::Counter(params.n);
    state.counters["k"] = benchmark::Counter(params.k);
    state.counters["num_groups"] = benchmark::Counter(params.num_groups);
    state.counters["group size"] = benchmark::Counter(params.n / params.num_groups);
    state.counters["Pat"] = benchmark::Counter(static_cast<int>(params.pattern));
    state.counters["SM count"] = raft::getMultiProcessorCount();
  }
};
// Benchmark cases, each {m, n, k, num_groups, pattern}.
const std::vector<Params> masked_l2_nn_input_vecs = {
  // Very fat matrices...
  {32, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {64, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {128, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {256, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {512, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {1024, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 32, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 64, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 128, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 256, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 512, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 1024, 16384, 32, AdjacencyPattern::checkerboard},
  // Representative matrices...
  {16384, 16384, 32, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 64, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 128, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 256, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 512, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 1024, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 16384, 32, AdjacencyPattern::checkerboard},
  {16384, 16384, 32, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 64, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 128, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 256, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 512, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 1024, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 16384, 32, AdjacencyPattern::checkerboard_4},
  {16384, 16384, 32, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 64, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 128, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 256, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 512, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 1024, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 16384, 32, AdjacencyPattern::checkerboard_64},
  {16384, 16384, 32, 32, AdjacencyPattern::all_true},
  {16384, 16384, 64, 32, AdjacencyPattern::all_true},
  {16384, 16384, 128, 32, AdjacencyPattern::all_true},
  {16384, 16384, 256, 32, AdjacencyPattern::all_true},
  {16384, 16384, 512, 32, AdjacencyPattern::all_true},
  {16384, 16384, 1024, 32, AdjacencyPattern::all_true},
  {16384, 16384, 16384, 32, AdjacencyPattern::all_true},
  {16384, 16384, 32, 32, AdjacencyPattern::all_false},
  {16384, 16384, 64, 32, AdjacencyPattern::all_false},
  {16384, 16384, 128, 32, AdjacencyPattern::all_false},
  {16384, 16384, 256, 32, AdjacencyPattern::all_false},
  {16384, 16384, 512, 32, AdjacencyPattern::all_false},
  {16384, 16384, 1024, 32, AdjacencyPattern::all_false},
  {16384, 16384, 16384, 32, AdjacencyPattern::all_false},
};
// Register the float instantiation only (see the note below about doubles).
RAFT_BENCH_REGISTER(masked_l2_nn<float>, "", masked_l2_nn_input_vecs);
// We don't benchmark double to keep compile times in check when not using the
// distance library.
} // namespace raft::bench::distance::masked_nn
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/distance_exp_l2.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace raft::bench::distance {
// Register expanded-form L2 benchmarks: squared and square-rooted variants.
DIST_BENCH_REGISTER(DistanceL2Sq, raft::distance::DistanceType::L2Expanded);
DIST_BENCH_REGISTER(DistanceL2Sqrt, raft::distance::DistanceType::L2SqrtExpanded);
} // namespace raft::bench::distance
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/kernels.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <memory>
#include <raft/core/device_resources.hpp>
#include <raft/core/resource/cublas_handle.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/distance/kernels.cuh>
#include <raft/random/rng.cuh>
#include <sstream>
#include <string>
#include <vector>
namespace raft::bench::distance::kernels {
using namespace raft::distance::kernels;
// Problem description for one Gram-matrix benchmark case.
struct GramTestParams {
  int m; // m parameter of the GEMM
  int k; // k parameter of the GEMM
  int n; // n parameter of the GEMM
  KernelParams kernel_params;  // which kernel function and its coefficients
  bool is_row_major;           // memory layout used for A, B, and C
}; // struct GramTestParams
/**
 * Benchmark computing a Gram (kernel) matrix C (m x n) from input matrices
 * A (m x k) and B (k x n) with the configured kernel function.
 */
template <typename T>
struct GramMatrix : public fixture {
  /** Allocates A, B, C and fills A and B with uniform random values in [-1, 1]. */
  GramMatrix(const GramTestParams& p)
    : params(p), handle(stream), A(0, stream), B(0, stream), C(0, stream)
  {
    // Create the kernel functor; its validity is checked in run_benchmark.
    kernel = std::unique_ptr<GramMatrixBase<T>>(
      KernelFactory<T>::create(p.kernel_params, resource::get_cublas_handle(handle)));

    A.resize(params.m * params.k, stream);
    B.resize(params.k * params.n, stream);
    C.resize(params.m * params.n, stream);
    raft::random::RngState rng(123456ULL);  // fixed seed for reproducibility
    raft::random::uniform(handle, rng, A.data(), params.m * params.k, T(-1.0), T(1.0));
    raft::random::uniform(handle, rng, B.data(), params.k * params.n, T(-1.0), T(1.0));
  }
  ~GramMatrix()
  {
    A.release();
    B.release();
    C.release();
  }
  void run_benchmark(::benchmark::State& state) override
  {
    if (!this->kernel) {
      state.SkipWithError("Kernel matrix is not initialized");
      // Fix: return right after skipping; previously control fell through to
      // the benchmark loop below with a null kernel.
      return;
    }
    loop_on_state(state, [this]() {
      (*this->kernel)(A.data(),
                      this->params.m,
                      this->params.k,
                      B.data(),
                      this->params.n,
                      C.data(),
                      this->params.is_row_major,
                      this->stream);
    });
  }
 private:
  const raft::device_resources handle;
  std::unique_ptr<GramMatrixBase<T>> kernel;  // computes C = kernel(A, B)
  GramTestParams params;
  rmm::device_uvector<T> A; // input matrix A, size [m * k]
  rmm::device_uvector<T> B; // input matrix B, size [k * n]
  rmm::device_uvector<T> C; // output matrix C, size [m * n]
};
/**
 * Builds the cartesian product of kernel variants, data sizes, and layouts
 * that the GramMatrix benchmark is registered with.
 */
static std::vector<GramTestParams> getInputs()
{
  // Kernel functions to sweep over.
  const std::vector<KernelParams> kernel_variants{KernelParams{LINEAR, 3, 1, 0},
                                                  KernelParams{POLYNOMIAL, 2, 1.3, 1},
                                                  KernelParams{TANH, 2, 0.5, 2.4},
                                                  KernelParams{RBF, 2, 0.5, 0}};
  // GEMM-style problem shapes.
  struct TestSize {
    int m;
    int k;
    int n;
  };
  const std::vector<TestSize> sizes{{4096, 10, 1024},
                                    {4096, 100, 1024},
                                    {4096, 1000, 1024},
                                    {4096, 10000, 1024},
                                    {100000, 10, 1024},
                                    {100000, 100, 1024},
                                    {100000, 1000, 1024}};
  std::vector<GramTestParams> cases;
  cases.reserve(kernel_variants.size() * sizes.size());
  for (const auto& sz : sizes) {
    for (const auto& kp : kernel_variants) {
      for (bool row_major : {false, true}) {
        cases.push_back(GramTestParams{sz.m, sz.k, sz.n, kp, row_major});
      }
    }
  }
  return cases;
}
RAFT_BENCH_REGISTER(GramMatrix<float>, "", getInputs());
RAFT_BENCH_REGISTER(GramMatrix<double>, "", getInputs());
} // namespace raft::bench::distance::kernels
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/distance_common.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/distance/distance.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::distance {
// Shape of one pairwise-distance case: an m x n distance matrix from
// k-dimensional inputs.
struct distance_params {
  int m, n, k;
  bool isRowMajor;  // memory layout of inputs and output
}; // struct distance_params
// Benchmark for the generic pairwise-distance primitive with metric DType.
// Inputs are zero-filled; only throughput is measured, outputs are not checked.
template <typename T, raft::distance::DistanceType DType>
struct distance : public fixture {
  // Allocates x (m*k), y (n*k), out (m*n), and the metric-specific workspace.
  distance(const distance_params& p)
    : params(p),
      x(p.m * p.k, stream),
      y(p.n * p.k, stream),
      out(p.m * p.n, stream),
      workspace(0, stream)
  {
    RAFT_CUDA_TRY(cudaMemsetAsync(x.data(), 0, x.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(y.data(), 0, y.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(T), stream));
    // Some metrics need scratch space; query the size once, outside the timed loop.
    worksize = raft::distance::getWorkspaceSize<DType, T, T, T>(
      x.data(), y.data(), params.m, params.n, params.k);
    workspace.resize(worksize, stream);
  }
  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      raft::distance::distance<DType, T, T, T>(handle,
                                               x.data(),
                                               y.data(),
                                               out.data(),
                                               params.m,
                                               params.n,
                                               params.k,
                                               (void*)workspace.data(),
                                               worksize,
                                               params.isRowMajor);
    });
  }
 private:
  distance_params params;
  rmm::device_uvector<T> x, y, out;
  rmm::device_uvector<char> workspace;
  size_t worksize;
}; // struct Distance
// Benchmark cases, each {m, n, k, isRowMajor}; the same set is reused for
// every metric registered via DIST_BENCH_REGISTER below.
const std::vector<distance_params> dist_input_vecs{
  {32, 16384, 16384, true}, {64, 16384, 16384, true}, {128, 16384, 16384, true},
  {256, 16384, 16384, true}, {512, 16384, 16384, true}, {1024, 16384, 16384, true},
  {16384, 32, 16384, true}, {16384, 64, 16384, true}, {16384, 128, 16384, true},
  {16384, 256, 16384, true}, {16384, 512, 16384, true}, {16384, 1024, 16384, true},
  {16384, 16384, 32, true}, {16384, 16384, 64, true}, {16384, 16384, 128, true},
  {16384, 16384, 256, true}, {16384, 16384, 512, true}, {16384, 16384, 1024, true},
  {16384, 16384, 16384, true}, {32, 16384, 16384, false}, {64, 16384, 16384, false},
  {128, 16384, 16384, false}, {256, 16384, 16384, false}, {512, 16384, 16384, false},
  {1024, 16384, 16384, false}, {16384, 32, 16384, false}, {16384, 64, 16384, false},
  {16384, 128, 16384, false}, {16384, 256, 16384, false}, {16384, 512, 16384, false},
  {16384, 1024, 16384, false}, {16384, 16384, 32, false}, {16384, 16384, 64, false},
  {16384, 16384, 128, false}, {16384, 16384, 256, false}, {16384, 16384, 512, false},
  {16384, 16384, 1024, false}, {16384, 16384, 16384, false}
};
// Registers float and double instantiations of the distance benchmark for one metric.
#define DIST_BENCH_REGISTER(Name, Metric) \
  using Name##F = distance<float, Metric>; \
  RAFT_BENCH_REGISTER(Name##F, "", dist_input_vecs); \
  using Name##D = distance<double, Metric>; \
  RAFT_BENCH_REGISTER(Name##D, "", dist_input_vecs);
} // namespace raft::bench::distance
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/distance_cosine.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace raft::bench::distance {
// Register the expanded cosine-distance benchmark (float and double).
DIST_BENCH_REGISTER(DistanceCosine, raft::distance::DistanceType::CosineExpanded);
} // namespace raft::bench::distance
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/fused_l2_nn.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/distance/fused_l2_nn.cuh>
#include <raft/linalg/norm.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::distance {
// Problem shape for one fused-L2-NN benchmark case.
struct fusedl2nn_inputs {
  int64_t m, n, k;
}; // struct fusedl2nn_inputs

// Renders the shape as "m#n#k", used as the benchmark's label.
inline auto operator<<(std::ostream& os, const fusedl2nn_inputs& shape) -> std::ostream&
{
  return os << shape.m << "#" << shape.n << "#" << shape.k;
}
// Benchmark for fusedL2NNMinReduce: for each of the m rows of x, find the
// nearest of the n rows of y under the L2 metric, fused so that the full
// m x n distance matrix is never materialized.
template <typename DataT, typename IdxT, typename OutT>
struct fusedl2nn : public fixture {
  fusedl2nn(const fusedl2nn_inputs& p)
    : params(p),
      workspace(this->handle),
      x(this->handle),
      y(this->handle),
      x_norm(this->handle),
      y_norm(this->handle),
      out(this->handle)
  {
  }
  // Allocate and fill the inputs once, outside the timed region.
  void allocate_data(const ::benchmark::State& state) override
  {
    x = raft::make_device_matrix<DataT, IdxT>(handle, params.m, params.k);
    y = raft::make_device_matrix<DataT, IdxT>(handle, params.n, params.k);
    x_norm = raft::make_device_vector<DataT, IdxT>(handle, params.m);
    y_norm = raft::make_device_vector<DataT, IdxT>(handle, params.n);
    out = raft::make_device_vector<OutT, IdxT>(handle, params.m);
    // Fixed seed for run-to-run comparability.
    raft::random::RngState rng{1234};
    raft::random::uniform(
      handle, rng, x.data_handle(), params.m * params.k, (DataT)-1.0, (DataT)1.0);
    raft::random::uniform(
      handle, rng, y.data_handle(), params.n * params.k, (DataT)-1.0, (DataT)1.0);
    // Pre-compute norms
    raft::linalg::rowNorm(x_norm.data_handle(),
                          x.data_handle(),
                          params.k,
                          params.m,
                          raft::linalg::L2Norm,
                          true,
                          stream);
    raft::linalg::rowNorm(y_norm.data_handle(),
                          y.data_handle(),
                          params.k,
                          params.n,
                          raft::linalg::L2Norm,
                          true,
                          stream);
    // Ensure all setup work has finished before timing starts.
    resource::sync_stream(handle, stream);
  }
  // Scratch buffer consumed by fusedL2NNMinReduce (m * sizeof(IdxT) bytes).
  void allocate_temp_buffers(const ::benchmark::State& state) override
  {
    workspace = raft::make_device_vector<char, IdxT>(handle, params.m * sizeof(IdxT));
  }
  void run_benchmark(::benchmark::State& state) override
  {
    // Label the run with "m#n#k" for readability of the report.
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      raft::distance::fusedL2NNMinReduce<DataT, OutT, IdxT>(out.data_handle(),
                                                            x.data_handle(),
                                                            y.data_handle(),
                                                            x_norm.data_handle(),
                                                            y_norm.data_handle(),
                                                            static_cast<IdxT>(params.m),
                                                            static_cast<IdxT>(params.n),
                                                            static_cast<IdxT>(params.k),
                                                            (void*)workspace.data_handle(),
                                                            false,
                                                            true,
                                                            stream);
    });
    // Throughput counters: 2*m*n*k flops for the distance computation.
    int64_t num_flops = 2 * params.m * params.n * params.k;
    int64_t read_elts = params.n * params.k + params.m * params.k;
    int64_t write_elts = params.m;
    state.counters["FLOP/s"] = benchmark::Counter(
      num_flops, benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1000);
    state.counters["BW Wr"] = benchmark::Counter(write_elts * sizeof(OutT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
    state.counters["BW Rd"] = benchmark::Counter(read_elts * sizeof(DataT),
                                                 benchmark::Counter::kIsIterationInvariantRate,
                                                 benchmark::Counter::OneK::kIs1000);
  }
 private:
  fusedl2nn_inputs params;
  raft::device_matrix<DataT, IdxT> x, y;
  raft::device_vector<DataT, IdxT> x_norm, y_norm;
  raft::device_vector<OutT, IdxT> out;
  raft::device_vector<char, IdxT> workspace;
}; // struct fusedl2nn
/**
 * Generates the (m, n, k) problem shapes benchmarked for fused L2 NN.
 * The largest m (10M) is only included when the index type is 64-bit.
 */
template <typename IdxT>
std::vector<fusedl2nn_inputs> getFusedL2NNInputs()
{
  std::vector<int64_t> ms{100000, 1000000};
  if constexpr (sizeof(IdxT) == 8) { ms.push_back(10000000); }
  const std::vector<int64_t> ns{100, 1000, 10000};
  const std::vector<int64_t> ks{64, 128, 256};
  std::vector<fusedl2nn_inputs> shapes;
  shapes.reserve(ms.size() * ns.size() * ks.size());
  for (auto m : ms) {
    for (auto n : ns) {
      for (auto k : ks) {
        shapes.push_back({m, n, k});
      }
    }
  }
  return shapes;
}
// Instantiate and register the benchmark for one (DataT, IdxT, OutT) combination.
// RAFT_DEPAREN strips the extra parentheses that protect comma-containing
// template arguments (the KeyValuePair cases below) from macro expansion.
#define FUSEDL2NN_BENCH(DataT, IdxT, OutT) \
  RAFT_BENCH_REGISTER((fusedl2nn<DataT, IdxT, RAFT_DEPAREN(OutT)>), "", getFusedL2NNInputs<IdxT>())
FUSEDL2NN_BENCH(float, int, float);
FUSEDL2NN_BENCH(double, int, double);
FUSEDL2NN_BENCH(float, int, (raft::KeyValuePair<int, float>));
FUSEDL2NN_BENCH(double, int, (raft::KeyValuePair<int, double>));
FUSEDL2NN_BENCH(float, int64_t, float);
FUSEDL2NN_BENCH(double, int64_t, double);
FUSEDL2NN_BENCH(float, int64_t, (raft::KeyValuePair<int64_t, float>));
FUSEDL2NN_BENCH(double, int64_t, (raft::KeyValuePair<int64_t, double>));
} // namespace raft::bench::distance
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/distance/distance_unexp_l2.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "distance_common.cuh"
namespace raft::bench::distance {
// Register unexpanded-form L2 benchmarks: squared and square-rooted variants.
DIST_BENCH_REGISTER(DistanceUnexpL2Sq, raft::distance::DistanceType::L2Unexpanded);
DIST_BENCH_REGISTER(DistanceUnexpL2Sqrt, raft::distance::DistanceType::L2SqrtUnexpanded);
} // namespace raft::bench::distance
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/distance | rapidsai_public_repos/raft/cpp/bench/prims/distance/tune_pairwise/kernel.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel.cuh"
#include <raft/distance/detail/pairwise_matrix/kernel_sm60.cuh> // pairwise_matrix_sm60_wrapper
#include <raft/linalg/contractions.cuh> // raft::linalg::Policy4x4
#include <raft/util/arch.cuh> // raft::util::arch::SM_compute_arch
namespace raft::bench::distance::tune {
// Distance op
// Only one distance operation is compiled in this translation unit to keep
// compile times low: unexpanded Minkowski (Lp) distance with p = 2.0.
using OpT = raft::distance::detail::ops::lp_unexp_distance_op<DataT, AccT, IdxT>;
constexpr float metric_arg = 2.0;
OpT distance_op{metric_arg};
// Kernel policy
constexpr int vec_len = 1;  // vector load width selecting the contraction policy
using Policy = typename raft::linalg::Policy4x4<DataT, vec_len>::Policy;
// Architecture
namespace arch = raft::util::arch;
// Accept all compute architectures, from the minimum supported up to future ones.
constexpr auto sm_compat_range = arch::SM_range(arch::SM_min(), arch::SM_future());
// Launch the single pairwise-distance kernel instantiated in this translation
// unit (op, policy, and types fixed above). Asynchronous with respect to the
// host; the grid size is chosen by the caller.
void launch_kernel(pairwise_matrix_params params, dim3 grid, cudaStream_t stream)
{
  dim3 block(Policy::Nthreads);
  int smem_size = OpT::shared_mem_size<Policy>();
  // Obtain function pointer to kernel
  auto kernel = raft::distance::detail::pairwise_matrix_kernel<Policy,
                                                               row_major,
                                                               decltype(sm_compat_range),
                                                               OpT,
                                                               IdxT,
                                                               DataT,
                                                               OutT,
                                                               FinOpT>;
  kernel<<<grid, block, smem_size, stream>>>(distance_op, params);
  RAFT_CUDA_TRY(cudaGetLastError());  // surface launch-configuration errors immediately
}
// Report the block tile sizes (Mblk x Nblk, Kblk deep) fixed by the kernel
// policy, so callers can size inputs to a whole number of tiles.
void get_block_size(int& m, int& n, int& k)
{
  m = Policy::Mblk;
  n = Policy::Nblk;
  k = Policy::Kblk;
}
// Type-erased pointer to the instantiated kernel, used for occupancy queries.
// Must name the exact same instantiation that launch_kernel launches above.
void* get_kernel_ptr()
{
  auto kernel = raft::distance::detail::pairwise_matrix_kernel<Policy,
                                                               row_major,
                                                               decltype(sm_compat_range),
                                                               OpT,
                                                               IdxT,
                                                               DataT,
                                                               OutT,
                                                               FinOpT>;
  return reinterpret_cast<void*>(kernel);
}
// Query the maximum number of active blocks per SM for this kernel, given its
// thread count and dynamic shared-memory usage.
int get_max_occupancy()
{
  void* kernel_ptr = get_kernel_ptr();
  int max_occupancy;
  int smem_size = OpT::shared_mem_size<Policy>();
  RAFT_CUDA_TRY(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
    &max_occupancy, kernel_ptr, Policy::Nthreads, smem_size));
  return max_occupancy;
}
} // namespace raft::bench::distance::tune
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/distance | rapidsai_public_repos/raft/cpp/bench/prims/distance/tune_pairwise/bench.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Tuning benchmarks.
//
// Goals:
//
// 1. Fast compile times to maintain iteration speed.
// 2. Create benchmarks that can inform the design of the kernels.
//
// Non-goals:
//
// 1. Measure every distance operation. Instead measures just one distance
// operation at the same time.
// 2. Be useful for finding performance regressions. This is handled by the
// normal benchmarks.
//
// So far, both goals are partly achieved.
//
// RE (1), COMPILE TIMES: kernel.cu is fast to compile. This file is not.
// When the internals of a pairwise distance kernel is changed, this file is not
// recompiled.
//
// RE 2, benchmarks with intent: this file contains a benchmark to check the
// maximal throughput of a kernel. Measuring other things, like performance on
// skinny or wide matrices is not yet implemented.
#include "kernel.cuh" // launch_kernel
#include <algorithm> // std::min
#include <common/benchmark.hpp> // RAFT_BENCH_REGISTER
#include <raft/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
#include <rmm/device_uvector.hpp> // rmm::device_uvector
#include <vector> // std::vector
namespace raft::bench::distance::tune {
// Max throughput benchmark.
//
// Goal: Measure the maximum distances/sec that can be computed.
//
// To achieve this, we make sure that:
//
// - Input data size is a multiple of the block tile size.
//
// - Perfect distribution of work between SMs, i.e. the number of block tiles is
// a large multiple (num_waves) of the number of blocks (#SMs * occupancy).
//
// - Multiple iterations over Kblk are executed (num_k_iters).
// Knobs for the max-throughput benchmark below.
struct throughput_param {
  int num_waves;    // how many block tiles per launched block along n
  int occupancy;    // requested blocks per SM (clamped to the achievable maximum)
  int num_k_iters;  // number of Kblk-sized iterations along k
};
const std::vector<throughput_param> throughput_params{
  // 32 waves, requested occupancy of 4, and 32 k iterations typically achieves
  // maximum throughput. No need to pick higher values.
  {32, 4, 32},
};
// Measures the maximum achievable distances/sec of the single kernel
// instantiated in kernel.cu (see the file-level comment above for rationale).
struct throughput_bench : public fixture {
  const throughput_param p;
  throughput_bench(const throughput_param& p_) : p(p_) {}
  void run_benchmark(::benchmark::State& state) override
  {
    // Get block size:
    int block_m, block_n, block_k;
    get_block_size(block_m, block_n, block_k);
    // Determine number of blocks that will be launched. This informs the size
    // of the inputs as well as the grid size.
    const int num_sms = raft::getMultiProcessorCount();
    const int max_occupancy = get_max_occupancy();
    const int occupancy = std::min(p.occupancy, max_occupancy);
    const int num_blocks = occupancy * num_sms;
    dim3 grid(num_blocks);
    // Create input sizes that are a multiple of the block tile size.
    size_t m = block_m;
    size_t n = block_n * p.num_waves * num_blocks;
    size_t k = block_k * p.num_k_iters;
    // DataT, OutT, IdxT, etc, are defined in tuned_kernel.cuh
    // NOTE(review): buffers are left uninitialized; only throughput is
    // measured and the results are never checked.
    rmm::device_uvector<DataT> x_vec(m * k, stream);
    rmm::device_uvector<DataT> y_vec(n * k, stream);
    rmm::device_uvector<DataT> x_norm_vec(m, stream);
    rmm::device_uvector<DataT> y_norm_vec(n, stream);
    rmm::device_uvector<OutT> out_vec(m * n, stream);
    auto x = x_vec.data();
    auto y = y_vec.data();
    auto x_norm = x_norm_vec.data();
    auto y_norm = y_norm_vec.data();
    auto out = out_vec.data();
    FinOpT fin_op{};
    // Create kernel parameter struct. Flip x and y if column major.
    IdxT ldx = row_major ? k : m;
    IdxT ldy = row_major ? k : n;
    IdxT ld_out = row_major ? n : m;
    // Template parameters of pairwise_matrix_params are defined in kernel.cuh
    pairwise_matrix_params kparams{
      IdxT(m), IdxT(n), IdxT(k), ldx, ldy, ld_out, x, y, x_norm, y_norm, out, fin_op, row_major};
    // Run benchmark
    loop_on_state(state, [&]() { launch_kernel(kparams, grid, stream); });
    // Report metrics. We don't report flop/s because we do not know for each
    // distance operation how many flops it costs. For L2_unexp and l1, we can
    // double this number to get the flop/s. For l2 expanded, core_ops/s should
    // equal flop/s (modulo the sqrt and subtracting from the norm).
    size_t num_core_ops = m * n * k;
    size_t read_elts = n * k + m * k;
    size_t write_elts = m * n;
    state.counters["m"] = benchmark::Counter(m);
    state.counters["n"] = benchmark::Counter(n);
    state.counters["k"] = benchmark::Counter(k);
    state.counters["occupancy"] = benchmark::Counter(occupancy);
    state.counters["# waves"] = benchmark::Counter(p.num_waves);
    state.counters["# k iters"] = benchmark::Counter(p.num_k_iters);
    state.counters["core_ops/s"] = benchmark::Counter(num_core_ops,
                                                      benchmark::Counter::kIsIterationInvariantRate,
                                                      benchmark::Counter::OneK::kIs1000);
    state.counters["BW"] = benchmark::Counter(write_elts * sizeof(OutT) + read_elts * sizeof(DataT),
                                              benchmark::Counter::kIsIterationInvariantRate,
                                              benchmark::Counter::OneK::kIs1000);
  }
};
RAFT_BENCH_REGISTER(throughput_bench, "", throughput_params);
} // namespace raft::bench::distance::tune
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/distance | rapidsai_public_repos/raft/cpp/bench/prims/distance/tune_pairwise/kernel.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft/distance/detail/distance_ops/all_ops.cuh> // lp_unexp_distance_op
#include <raft/distance/detail/pairwise_matrix/params.cuh> // pairwise_matrix_params
namespace raft::bench::distance::tune {
// Launch one specific kernel with the following template parameters
constexpr bool row_major = true;
using DataT = float;
using AccT = float;
using OutT = DataT;
using IdxT = int;
using FinOpT = raft::identity_op;
using pairwise_matrix_params =
raft::distance::detail::pairwise_matrix_params<IdxT, DataT, OutT, FinOpT>;
// Launches kernel
void launch_kernel(pairwise_matrix_params, dim3, cudaStream_t);
// Describes the block size that is decided by the policy
void get_block_size(int& m, int& n, int& k);
int get_max_occupancy();
} // namespace raft::bench::distance::tune
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <common/benchmark.hpp>
#include <raft/core/resource/device_id.hpp>
#include <raft/random/rng.cuh>
#include <raft/core/bitset.cuh>
#include <raft/neighbors/ivf_flat.cuh>
#include <raft/neighbors/ivf_pq.cuh>
#include <raft/neighbors/sample_filter.cuh>
#include <raft/spatial/knn/knn.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/host/new_delete_resource.hpp>
#include <rmm/mr/host/pinned_memory_resource.hpp>
#include <thrust/sequence.h>
#include <optional>
namespace raft::bench::spatial {
struct params {
  /** Size of the dataset. */
  size_t n_samples;
  /** Number of dimensions in the dataset. */
  size_t n_dims;
  /** The batch size -- number of KNN searches. */
  size_t n_queries;
  /** Number of nearest neighbours to find for every probe. */
  size_t k;
  /** Ratio of removed indices. */
  double removed_ratio;
};

// Renders the parameter set as "samples#dims#queries#k#removed_ratio",
// which becomes part of the benchmark's name/label.
inline auto operator<<(std::ostream& os, const params& p) -> std::ostream&
{
  return os << p.n_samples << "#" << p.n_dims << "#" << p.n_queries << "#" << p.k << "#"
            << p.removed_ratio;
}
/** How the dataset gets from host to device memory for the benchmark run. */
enum class TransferStrategy { NO_COPY, COPY_PLAIN, COPY_PINNED, MAP_PINNED, MANAGED };  // NOLINT
/** Which phases of the ANN workflow are timed. */
enum class Scope { BUILD, SEARCH, BUILD_SEARCH };  // NOLINT
/** Print the TransferStrategy name (or "UNKNOWN" for an unrecognized value). */
inline auto operator<<(std::ostream& os, const TransferStrategy& ts) -> std::ostream&
{
  const char* name = "UNKNOWN";
  switch (ts) {
    case TransferStrategy::NO_COPY: name = "NO_COPY"; break;
    case TransferStrategy::COPY_PLAIN: name = "COPY_PLAIN"; break;
    case TransferStrategy::COPY_PINNED: name = "COPY_PINNED"; break;
    case TransferStrategy::MAP_PINNED: name = "MAP_PINNED"; break;
    case TransferStrategy::MANAGED: name = "MANAGED"; break;
    default: break;
  }
  return os << name;
}
/** Print the Scope name (or "UNKNOWN" for an unrecognized value). */
inline auto operator<<(std::ostream& os, const Scope& s) -> std::ostream&
{
  const char* name = "UNKNOWN";
  switch (s) {
    case Scope::BUILD: name = "BUILD"; break;
    case Scope::SEARCH: name = "SEARCH"; break;
    case Scope::BUILD_SEARCH: name = "BUILD_SEARCH"; break;
    default: break;
  }
  return os << name;
}
/**
 * Provides either the process-wide default device memory resource or a
 * freshly-created managed (unified) memory resource, depending on the
 * benchmark's transfer strategy.
 *
 * Only the managed resource is owned (and deleted) by this wrapper; the
 * default resource is borrowed from rmm.
 */
struct device_resource {
 public:
  explicit device_resource(bool managed) : managed_(managed)
  {
    if (managed_) {
      res_ = new rmm::mr::managed_memory_resource();
    } else {
      res_ = rmm::mr::get_current_device_resource();
    }
  }

  // Non-copyable/non-movable: copying would double-delete the owned resource.
  device_resource(const device_resource&)            = delete;
  device_resource& operator=(const device_resource&) = delete;

  ~device_resource()
  {
    if (managed_) { delete res_; }
  }

  /** The memory resource to pass to device allocations. */
  [[nodiscard]] auto get() const -> rmm::mr::device_memory_resource* { return res_; }

 private:
  const bool managed_;  // whether res_ is owned by this wrapper
  rmm::mr::device_memory_resource* res_;
};
/**
 * A minimal owning host-side array of `n` uninitialized elements, optionally
 * allocated in pinned memory (needed for fast async and mapped device access).
 *
 * Owns both the allocation and the rmm host memory resource backing it.
 */
template <typename T>
struct host_uvector {
  host_uvector(size_t n, bool pinned) : n_(n)
  {
    if (pinned) {
      res_ = new rmm::mr::pinned_memory_resource();
    } else {
      res_ = new rmm::mr::new_delete_resource();
    }
    arr_ = static_cast<T*>(res_->allocate(n_ * sizeof(T)));
  }

  // Non-copyable/non-movable: copying would double-free the allocation and the resource.
  host_uvector(const host_uvector&)            = delete;
  host_uvector& operator=(const host_uvector&) = delete;

  ~host_uvector() noexcept
  {
    res_->deallocate(arr_, n_ * sizeof(T));
    delete res_;
  }

  /** Pointer to the first element (uninitialized until written). */
  auto data() -> T* { return arr_; }
  /** Number of elements. */
  [[nodiscard]] auto size() const -> size_t { return n_; }

 private:
  rmm::mr::host_memory_resource* res_;
  size_t n_;
  T* arr_;
};
/**
 * Wrapper benchmarking IVF-Flat: builds the index at construction, then exposes
 * a batched k-NN search over it.
 */
template <typename ValT, typename IdxT>
struct ivf_flat_knn {
  using dist_t = float;

  // Built in the constructor; optional so the (immutable) index can be emplaced.
  std::optional<const raft::neighbors::ivf_flat::index<ValT, IdxT>> index;
  raft::neighbors::ivf_flat::index_params index_params;
  raft::neighbors::ivf_flat::search_params search_params;
  params ps;

  /** Build the IVF-Flat index from `data` (ps.n_samples x ps.n_dims, device memory). */
  ivf_flat_knn(const raft::device_resources& handle, const params& ps, const ValT* data) : ps(ps)
  {
    index_params.n_lists = 4096;
    index_params.metric  = raft::distance::DistanceType::L2Expanded;
    index.emplace(raft::neighbors::ivf_flat::build(
      handle, index_params, data, IdxT(ps.n_samples), uint32_t(ps.n_dims)));
  }

  /** Find the ps.k nearest neighbors for each of the ps.n_queries query rows. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    raft::neighbors::ivf_flat::search(
      handle, search_params, *index, search_items, ps.n_queries, ps.k, out_idxs, out_dists);
  }
};
/**
 * Wrapper benchmarking IVF-PQ: builds the index at construction, then exposes
 * a batched k-NN search over it.
 */
template <typename ValT, typename IdxT>
struct ivf_pq_knn {
  using dist_t = float;

  // Built in the constructor; optional so the (immutable) index can be emplaced.
  std::optional<const raft::neighbors::ivf_pq::index<IdxT>> index;
  raft::neighbors::ivf_pq::index_params index_params;
  raft::neighbors::ivf_pq::search_params search_params;
  params ps;

  /** Build the IVF-PQ index from `data` (ps.n_samples x ps.n_dims, device memory). */
  ivf_pq_knn(const raft::device_resources& handle, const params& ps, const ValT* data) : ps(ps)
  {
    index_params.n_lists = 4096;
    index_params.metric  = raft::distance::DistanceType::L2Expanded;
    auto data_view = raft::make_device_matrix_view<const ValT, IdxT>(data, ps.n_samples, ps.n_dims);
    index.emplace(raft::neighbors::ivf_pq::build(handle, index_params, data_view));
  }

  /** Find the ps.k nearest neighbors for each of the ps.n_queries query rows. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    auto queries_view =
      raft::make_device_matrix_view<const ValT, uint32_t>(search_items, ps.n_queries, ps.n_dims);
    auto idxs_view = raft::make_device_matrix_view<IdxT, uint32_t>(out_idxs, ps.n_queries, ps.k);
    auto dists_view =
      raft::make_device_matrix_view<dist_t, uint32_t>(out_dists, ps.n_queries, ps.k);
    raft::neighbors::ivf_pq::search(
      handle, search_params, *index, queries_view, idxs_view, dists_view);
  }
};
/**
 * Wrapper benchmarking brute-force KNN: no index structure is built; the raw
 * dataset pointer is scanned exhaustively at search time.
 */
template <typename ValT, typename IdxT>
struct brute_force_knn {
  using dist_t = ValT;

  ValT* index;  // non-owning pointer to the dataset (there is no real index)
  params ps;

  brute_force_knn(const raft::device_resources& handle, const params& ps, const ValT* data)
    : index(const_cast<ValT*>(data)), ps(ps)
  {
  }

  /** Exhaustively find the ps.k nearest neighbors for each of the ps.n_queries rows. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    // The API accepts multiple dataset shards; here there is a single one.
    std::vector<ValT*> input{index};
    std::vector<size_t> sizes{ps.n_samples};
    raft::spatial::knn::brute_force_knn<IdxT, ValT, size_t>(handle,
                                                            input,
                                                            sizes,
                                                            ps.n_dims,
                                                            const_cast<ValT*>(search_items),
                                                            ps.n_queries,
                                                            out_idxs,
                                                            out_dists,
                                                            ps.k);
  }
};
/**
 * Wrapper benchmarking IVF-Flat search with a bitset filter: the first
 * `removed_ratio * n_samples` dataset indices are marked as removed.
 */
template <typename ValT, typename IdxT>
struct ivf_flat_filter_knn {
  using dist_t = float;

  std::optional<const raft::neighbors::ivf_flat::index<ValT, IdxT>> index;
  raft::neighbors::ivf_flat::index_params index_params;
  raft::neighbors::ivf_flat::search_params search_params;
  raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset_;  // filtered-out sample ids
  params ps;

  /** Build the index and mark the id range [0, removed_ratio * n_samples) as removed. */
  ivf_flat_filter_knn(const raft::device_resources& handle, const params& ps, const ValT* data)
    : ps(ps), removed_indices_bitset_(handle, ps.n_samples)
  {
    index_params.n_lists = 4096;
    index_params.metric  = raft::distance::DistanceType::L2Expanded;
    index.emplace(raft::neighbors::ivf_flat::build(
      handle, index_params, data, IdxT(ps.n_samples), uint32_t(ps.n_dims)));
    auto removed_indices =
      raft::make_device_vector<IdxT, int64_t>(handle, ps.removed_ratio * ps.n_samples);
    // The removed ids are the contiguous prefix 0, 1, 2, ...
    thrust::sequence(
      resource::get_thrust_policy(handle),
      thrust::device_pointer_cast(removed_indices.data_handle()),
      thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0)));
    removed_indices_bitset_.set(handle, removed_indices.view());
  }

  /** Find ps.k neighbors per query, skipping removed indices when removed_ratio > 0. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    auto queries_view =
      raft::make_device_matrix_view<const ValT, IdxT>(search_items, ps.n_queries, ps.n_dims);
    auto neighbors_view = raft::make_device_matrix_view<IdxT, IdxT>(out_idxs, ps.n_queries, ps.k);
    auto distance_view = raft::make_device_matrix_view<dist_t, IdxT>(out_dists, ps.n_queries, ps.k);
    auto filter = raft::neighbors::filtering::bitset_filter(removed_indices_bitset_.view());
    if (ps.removed_ratio > 0) {
      raft::neighbors::ivf_flat::search_with_filtering(
        handle, search_params, *index, queries_view, neighbors_view, distance_view, filter);
    } else {
      // Nothing is removed: take the unfiltered search path.
      raft::neighbors::ivf_flat::search(
        handle, search_params, *index, queries_view, neighbors_view, distance_view);
    }
  }
};
/**
 * Wrapper benchmarking IVF-PQ search with a bitset filter: the first
 * `removed_ratio * n_samples` dataset indices are marked as removed.
 */
template <typename ValT, typename IdxT>
struct ivf_pq_filter_knn {
  using dist_t = float;

  std::optional<const raft::neighbors::ivf_pq::index<IdxT>> index;
  raft::neighbors::ivf_pq::index_params index_params;
  raft::neighbors::ivf_pq::search_params search_params;
  raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset_;  // filtered-out sample ids
  params ps;

  /** Build the index and mark the id range [0, removed_ratio * n_samples) as removed. */
  ivf_pq_filter_knn(const raft::device_resources& handle, const params& ps, const ValT* data)
    : ps(ps), removed_indices_bitset_(handle, ps.n_samples)
  {
    index_params.n_lists = 4096;
    index_params.metric  = raft::distance::DistanceType::L2Expanded;
    auto data_view = raft::make_device_matrix_view<const ValT, IdxT>(data, ps.n_samples, ps.n_dims);
    index.emplace(raft::neighbors::ivf_pq::build(handle, index_params, data_view));
    auto removed_indices =
      raft::make_device_vector<IdxT, int64_t>(handle, ps.removed_ratio * ps.n_samples);
    // The removed ids are the contiguous prefix 0, 1, 2, ...
    thrust::sequence(
      resource::get_thrust_policy(handle),
      thrust::device_pointer_cast(removed_indices.data_handle()),
      thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0)));
    removed_indices_bitset_.set(handle, removed_indices.view());
  }

  /** Find ps.k neighbors per query, skipping removed indices when removed_ratio > 0. */
  void search(const raft::device_resources& handle,
              const ValT* search_items,
              dist_t* out_dists,
              IdxT* out_idxs)
  {
    search_params.n_probes = 20;
    auto queries_view =
      raft::make_device_matrix_view<const ValT, uint32_t>(search_items, ps.n_queries, ps.n_dims);
    auto neighbors_view =
      raft::make_device_matrix_view<IdxT, uint32_t>(out_idxs, ps.n_queries, ps.k);
    auto distance_view =
      raft::make_device_matrix_view<dist_t, uint32_t>(out_dists, ps.n_queries, ps.k);
    auto filter = raft::neighbors::filtering::bitset_filter(removed_indices_bitset_.view());
    if (ps.removed_ratio > 0) {
      raft::neighbors::ivf_pq::search_with_filtering(
        handle, search_params, *index, queries_view, neighbors_view, distance_view, filter);
    } else {
      // Nothing is removed: take the unfiltered search path.
      raft::neighbors::ivf_pq::search(
        handle, search_params, *index, queries_view, neighbors_view, distance_view);
    }
  }
};
/**
 * Benchmark fixture for a KNN implementation (ImplT), measuring index build
 * and/or search under a chosen host->device data transfer strategy.
 *
 * The dataset is generated once on the host (in minibatches, to bound peak
 * device memory); each benchmark iteration re-allocates the device buffer so
 * that the selected TransferStrategy is exercised every time.
 */
template <typename ValT, typename IdxT, typename ImplT>
struct knn : public fixture {
  explicit knn(const params& p, const TransferStrategy& strategy, const Scope& scope)
    : fixture(true),
      params_(p),
      strategy_(strategy),
      scope_(scope),
      // A managed (unified) memory resource is only used for the MANAGED strategy.
      dev_mem_res_(strategy == TransferStrategy::MANAGED),
      data_host_(0),
      search_items_(p.n_queries * p.n_dims, stream),
      out_dists_(p.n_queries * p.k, stream),
      out_idxs_(p.n_queries * p.k, stream)
  {
    raft::random::RngState state{42};  // fixed seed for reproducible inputs
    gen_data(state, search_items_, search_items_.size(), stream);
    try {
      size_t total_size = p.n_samples * p.n_dims;
      data_host_.resize(total_size);
      // Generate the dataset on the device in fixed-size minibatches and copy
      // each batch back to the host, keeping device memory usage bounded.
      constexpr size_t kGenMinibatchSize = 1024 * 1024 * 1024;
      rmm::device_uvector<ValT> d(std::min(kGenMinibatchSize, total_size), stream);
      for (size_t offset = 0; offset < total_size; offset += kGenMinibatchSize) {
        size_t actual_size = std::min(total_size - offset, kGenMinibatchSize);
        gen_data(state, d, actual_size, stream);
        copy(data_host_.data() + offset, d.data(), actual_size, stream);
      }
    } catch (std::bad_alloc& e) {
      // Remember the failure; run_benchmark() reports it as a skipped benchmark.
      data_does_not_fit_ = true;
    }
  }

  /**
   * Fill `vec` with `n` uniformly distributed random values: the full numeric
   * range for integral T, [-1, 1] for floating-point T.
   */
  template <typename T>
  void gen_data(raft::random::RngState& state,  // NOLINT
                rmm::device_uvector<T>& vec,
                size_t n,
                rmm::cuda_stream_view stream)
  {
    constexpr T kRangeMax = std::is_integral_v<T> ? std::numeric_limits<T>::max() : T(1);
    constexpr T kRangeMin = std::is_integral_v<T> ? std::numeric_limits<T>::min() : T(-1);
    if constexpr (std::is_integral_v<T>) {
      raft::random::uniformInt(handle, state, vec.data(), n, kRangeMin, kRangeMax);
    } else {
      raft::random::uniform(handle, state, vec.data(), n, kRangeMin, kRangeMax);
    }
  }

  void run_benchmark(::benchmark::State& state) override
  {
    // NOTE(review): there is no early return after SkipWithError; presumably the
    // benchmark loop below then runs zero iterations — confirm against the
    // google-benchmark docs.
    if (data_does_not_fit_) {
      state.SkipWithError("The data size is too big to fit into the host memory.");
    }
    if (scope_ == Scope::SEARCH && strategy_ != TransferStrategy::NO_COPY) {
      state.SkipWithError(
        "When benchmarking without index building (Scope::SEARCH), the data must be already on the "
        "device (TransferStrategy::NO_COPY)");
    }
    try {
      std::ostringstream label_stream;
      label_stream << params_ << "#" << strategy_ << "#" << scope_;
      state.SetLabel(label_stream.str());
      raft::device_resources handle(stream);
      std::optional<ImplT> index;

      if (scope_ == Scope::SEARCH) {  // also implies TransferStrategy::NO_COPY
        // Build the index once, outside the benchmark loop: only search is timed.
        rmm::device_uvector<ValT> data(data_host_.size(), stream);
        copy(data.data(), data_host_.data(), data_host_.size(), stream);
        index.emplace(handle, params_, data.data());
        stream.synchronize();
      }

      // benchmark loop
      for (auto _ : state) {
        // managed or plain device memory initialized anew every time
        rmm::device_uvector<ValT> data(data_host_.size(), stream, dev_mem_res_.get());
        ValT* data_ptr         = data.data();
        size_t allocation_size = data_host_.size() * sizeof(ValT);

        // Non-benchmarked part: using different methods to copy the data if necessary
        switch (strategy_) {
          case TransferStrategy::NO_COPY:  // copy data to GPU before starting the timer.
            copy(data_ptr, data_host_.data(), data_host_.size(), stream);
            break;
          case TransferStrategy::COPY_PINNED:
            // Pin the host buffer; the timed copy below then uses pinned transfer.
            RAFT_CUDA_TRY(
              cudaHostRegister(data_host_.data(), allocation_size, cudaHostRegisterDefault));
            break;
          case TransferStrategy::MAP_PINNED:
            // Map the pinned host buffer into the device address space and use
            // the mapped pointer directly (no explicit copy).
            RAFT_CUDA_TRY(
              cudaHostRegister(data_host_.data(), allocation_size, cudaHostRegisterMapped));
            RAFT_CUDA_TRY(cudaHostGetDevicePointer(&data_ptr, data_host_.data(), 0));
            break;
          case TransferStrategy::MANAGED:  // sic! using std::memcpy rather than cuda copy
            RAFT_CUDA_TRY(cudaMemAdvise(data_ptr,
                                        allocation_size,
                                        cudaMemAdviseSetPreferredLocation,
                                        resource::get_device_id(handle)));
            RAFT_CUDA_TRY(cudaMemAdvise(data_ptr,
                                        allocation_size,
                                        cudaMemAdviseSetAccessedBy,
                                        resource::get_device_id(handle)));
            RAFT_CUDA_TRY(cudaMemAdvise(data_ptr,
                                        allocation_size,
                                        cudaMemAdviseSetReadMostly,
                                        resource::get_device_id(handle)));
            std::memcpy(data_ptr, data_host_.data(), allocation_size);
            break;
          default: break;
        }
        flush_L2_cache();
        {
          // Timer synchronizes the stream, so all prior gpu work should be done before it sets off.
          cuda_event_timer timer(state, stream);
          switch (strategy_) {
            // Plain and pinned copies are both part of the timed region
            // (intentional fallthrough).
            case TransferStrategy::COPY_PLAIN:
            case TransferStrategy::COPY_PINNED:
              copy(data_ptr, data_host_.data(), data_host_.size(), stream);
            default: break;
          }
          if (scope_ != Scope::SEARCH) { index.emplace(handle, params_, data_ptr); }
          if (scope_ != Scope::BUILD) {
            index->search(handle, search_items_.data(), out_dists_.data(), out_idxs_.data());
          }
        }
        // Release the per-iteration index outside the timed region.
        if (scope_ != Scope::SEARCH) { index.reset(); }
        switch (strategy_) {
          case TransferStrategy::COPY_PINNED:
          case TransferStrategy::MAP_PINNED:
            RAFT_CUDA_TRY(cudaHostUnregister(data_host_.data()));
            break;
          default: break;
        }
      }
    } catch (raft::exception& e) {
      state.SkipWithError(e.what());
    } catch (std::bad_alloc& e) {
      state.SkipWithError(e.what());
    }
  }

 private:
  const params params_;
  const TransferStrategy strategy_;
  const Scope scope_;
  device_resource dev_mem_res_;             // device allocator (managed for MANAGED strategy)
  bool data_does_not_fit_ = false;          // set when the host dataset allocation failed
  std::vector<ValT> data_host_;             // the full dataset, kept on the host
  rmm::device_uvector<ValT> search_items_;  // query vectors (device)
  rmm::device_uvector<typename ImplT::dist_t> out_dists_;  // search output: distances
  rmm::device_uvector<IdxT> out_idxs_;                     // search output: neighbor ids
};
// Default cases: {n_samples, n_dims, n_queries, k, removed_ratio}.
inline const std::vector<params> kInputs{
  {2000000, 128, 1000, 32, 0}, {10000000, 128, 1000, 32, 0}, {10000, 8192, 1000, 32, 0}};

// Cases for the filtered-search benchmarks: a sweep over removed_ratio.
const std::vector<params> kInputsFilter =
  raft::util::itertools::product<params>({size_t(10000000)},  // n_samples
                                         {size_t(128)},       // n_dim
                                         {size_t(1000)},      // n_queries
                                         {size_t(255)},       // k
                                         {0.0, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64}  // removed_ratio
  );

// Transfer-strategy / scope subsets used by the registration macros below.
inline const std::vector<TransferStrategy> kAllStrategies{
  TransferStrategy::NO_COPY, TransferStrategy::MAP_PINNED, TransferStrategy::MANAGED};
inline const std::vector<TransferStrategy> kNoCopyOnly{TransferStrategy::NO_COPY};
inline const std::vector<Scope> kScopeFull{Scope::BUILD_SEARCH};
inline const std::vector<Scope> kAllScopes{Scope::BUILD_SEARCH, Scope::SEARCH, Scope::BUILD};
/**
 * Register a KNN benchmark instantiation for the given value/index types and
 * implementation, over the cartesian product of inputs, strategies and scopes.
 */
#define KNN_REGISTER(ValT, IdxT, ImplT, inputs, strats, scope)                   \
  namespace BENCHMARK_PRIVATE_NAME(knn) {                                        \
  using KNN = knn<ValT, IdxT, ImplT<ValT, IdxT>>;                                \
  RAFT_BENCH_REGISTER(KNN, #ValT "/" #IdxT "/" #ImplT, inputs, strats, scope);   \
  }
} // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/refine.cuh | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <raft_internal/neighbors/refine_helper.cuh>
#include <common/benchmark.hpp>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/device_resources.hpp>
#include <raft/distance/distance_types.hpp>
#include <raft/neighbors/detail/refine.cuh>
#include <raft/neighbors/refine.cuh>
#include <raft/random/rng.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <iostream>
#include <sstream>
using namespace raft::neighbors;
namespace raft::bench::neighbors {
/** Format RefineInputs as a "#"-separated field list (used as the benchmark label). */
template <typename IdxT>
inline auto operator<<(std::ostream& os, const RefineInputs<IdxT>& p) -> std::ostream&
{
  const char sep = '#';
  os << p.n_rows << sep << p.dim << sep << p.n_queries << sep << p.k0 << sep << p.k << sep;
  if (p.host_data) {
    os << "host";
  } else {
    os << "device";
  }
  return os;
}
/**
 * Benchmark fixture for raft::neighbors::refine: re-ranks k0 candidate
 * neighbors per query down to k, on host- or device-resident data.
 */
template <typename DataT, typename DistanceT, typename IdxT>
class RefineAnn : public fixture {
 public:
  RefineAnn(RefineInputs<IdxT> p) : data(handle_, p) {}

  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << data.p;
    state.SetLabel(label_stream.str());

    // Temporarily install a pool allocator on top of the current device memory
    // resource for the duration of the run; restored before returning.
    auto old_mr = rmm::mr::get_current_device_resource();
    rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource> pool_mr(old_mr);
    rmm::mr::set_current_device_resource(&pool_mr);

    if (data.p.host_data) {
      // Host-resident input/output buffers.
      loop_on_state(state, [this]() {
        raft::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_,
                                                              data.dataset_host.view(),
                                                              data.queries_host.view(),
                                                              data.candidates_host.view(),
                                                              data.refined_indices_host.view(),
                                                              data.refined_distances_host.view(),
                                                              data.p.metric);
      });
    } else {
      // Device-resident input/output buffers.
      loop_on_state(state, [&]() {
        raft::neighbors::refine<IdxT, DataT, DistanceT, IdxT>(handle_,
                                                              data.dataset.view(),
                                                              data.queries.view(),
                                                              data.candidates.view(),
                                                              data.refined_indices.view(),
                                                              data.refined_distances.view(),
                                                              data.p.metric);
      });
    }
    rmm::mr::set_current_device_resource(old_mr);
  }

 private:
  raft::device_resources handle_;
  RefineHelper<DataT, DistanceT, IdxT> data;  // inputs plus host & device output buffers
};
/**
 * Enumerate the refine benchmark cases: every combination of host/device data,
 * query count and dimensionality, each with two (k0, k) configurations.
 */
template <typename T>
std::vector<RefineInputs<T>> getInputs()
{
  std::vector<RefineInputs<T>> cases;
  const auto dist_metric = raft::distance::DistanceType::L2Expanded;
  constexpr T kNumRows   = 2000000;
  for (bool on_host : {true, false}) {
    for (T num_queries : {1000, 10000}) {
      for (T num_dims : {128, 512}) {
        cases.push_back(
          RefineInputs<T>{num_queries, kNumRows, num_dims, 32, 128, dist_metric, on_host});
        cases.push_back(
          RefineInputs<T>{num_queries, kNumRows, num_dims, 10, 40, dist_metric, on_host});
      }
    }
  }
  return cases;
}
} // namespace raft::bench::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/cagra_bench.cuh | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <common/benchmark.hpp>
#include <raft/neighbors/cagra.cuh>
#include <raft/neighbors/sample_filter.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/itertools.hpp>
#include <thrust/sequence.h>
#include <optional>
namespace raft::bench::neighbors {
/** Benchmark configuration for a single CAGRA search scenario. */
struct params {
  /** Size of the dataset. */
  size_t n_samples;
  /** Number of dimensions in the dataset. */
  int n_dims;
  /** The batch size -- number of KNN searches. */
  int n_queries;
  /** Number of nearest neighbours to find for every probe. */
  int k;
  /** kNN graph degree*/
  int degree;
  /** Forwarded to cagra::search_params::itopk_size. */
  int itopk_size;
  /** Forwarded to cagra::search_params::thread_block_size. */
  int block_size;
  /** Forwarded to cagra::search_params::search_width. */
  int search_width;
  /** Reported iteration count; 0 means it is derived from itopk_size / search_width. */
  int max_iterations;
  /** Ratio of removed indices. */
  double removed_ratio;
};
/**
 * Benchmark fixture for CAGRA search. The dataset, query set and a random kNN
 * graph are generated up front; the index wraps them without a build step.
 */
template <typename T, typename IdxT>
struct CagraBench : public fixture {
  explicit CagraBench(const params& ps)
    : fixture(true),
      params_(ps),
      queries_(make_device_matrix<T, int64_t>(handle, ps.n_queries, ps.n_dims)),
      dataset_(make_device_matrix<T, int64_t>(handle, ps.n_samples, ps.n_dims)),
      knn_graph_(make_device_matrix<IdxT, int64_t>(handle, ps.n_samples, ps.degree)),
      removed_indices_bitset_(handle, ps.n_samples)
  {
    // Generate random dataset and queries: full numeric range for integral T,
    // [-1, 1] for floating-point T; fixed seed for reproducibility.
    raft::random::RngState state{42};
    constexpr T kRangeMax = std::is_integral_v<T> ? std::numeric_limits<T>::max() : T(1);
    constexpr T kRangeMin = std::is_integral_v<T> ? std::numeric_limits<T>::min() : T(-1);
    if constexpr (std::is_integral_v<T>) {
      raft::random::uniformInt(
        handle, state, dataset_.data_handle(), dataset_.size(), kRangeMin, kRangeMax);
      raft::random::uniformInt(
        handle, state, queries_.data_handle(), queries_.size(), kRangeMin, kRangeMax);
    } else {
      raft::random::uniform(
        handle, state, dataset_.data_handle(), dataset_.size(), kRangeMin, kRangeMax);
      raft::random::uniform(
        handle, state, queries_.data_handle(), queries_.size(), kRangeMin, kRangeMax);
    }
    // Generate random knn graph: edges are uniform random node ids, not true neighbors.
    raft::random::uniformInt<IdxT>(
      handle, state, knn_graph_.data_handle(), knn_graph_.size(), 0, ps.n_samples - 1);
    auto metric = raft::distance::DistanceType::L2Expanded;
    // Mark the contiguous id prefix [0, removed_ratio * n_samples) as removed.
    auto removed_indices =
      raft::make_device_vector<IdxT, int64_t>(handle, ps.removed_ratio * ps.n_samples);
    thrust::sequence(
      resource::get_thrust_policy(handle),
      thrust::device_pointer_cast(removed_indices.data_handle()),
      thrust::device_pointer_cast(removed_indices.data_handle() + removed_indices.extent(0)));
    removed_indices_bitset_.set(handle, removed_indices.view());
    index_.emplace(raft::neighbors::cagra::index<T, IdxT>(
      handle, metric, make_const_mdspan(dataset_.view()), make_const_mdspan(knn_graph_.view())));
  }

  void run_benchmark(::benchmark::State& state) override
  {
    raft::neighbors::cagra::search_params search_params;
    search_params.max_queries       = 1024;
    search_params.itopk_size        = params_.itopk_size;
    search_params.team_size         = 0;
    search_params.thread_block_size = params_.block_size;
    search_params.search_width      = params_.search_width;

    auto indices   = make_device_matrix<IdxT, int64_t>(handle, params_.n_queries, params_.k);
    auto distances = make_device_matrix<float, int64_t>(handle, params_.n_queries, params_.k);
    auto ind_v     = make_device_matrix_view<IdxT, int64_t, row_major>(
      indices.data_handle(), params_.n_queries, params_.k);
    auto dist_v = make_device_matrix_view<float, int64_t, row_major>(
      distances.data_handle(), params_.n_queries, params_.k);

    auto queries_v = make_const_mdspan(queries_.view());
    if (params_.removed_ratio > 0) {
      // Filtered search path: skip ids present in the removed-indices bitset.
      auto filter = raft::neighbors::filtering::bitset_filter(removed_indices_bitset_.view());
      loop_on_state(state, [&]() {
        raft::neighbors::cagra::search_with_filtering(
          this->handle, search_params, *this->index_, queries_v, ind_v, dist_v, filter);
      });
    } else {
      loop_on_state(state, [&]() {
        raft::neighbors::cagra::search(
          this->handle, search_params, *this->index_, queries_v, ind_v, dist_v);
      });
    }

    // Report problem-size counters alongside the timings.
    double data_size  = params_.n_samples * params_.n_dims * sizeof(T);
    double graph_size = params_.n_samples * params_.degree * sizeof(IdxT);

    int iterations = params_.max_iterations;
    if (iterations == 0) {
      // see search_plan_impl::adjust_search_params()
      double r   = params_.itopk_size / static_cast<float>(params_.search_width);
      iterations = 1 + std::min(r * 1.1, r + 10);
    }
    state.counters["dataset (GiB)"] = data_size / (1 << 30);
    state.counters["graph (GiB)"]   = graph_size / (1 << 30);
    state.counters["n_rows"]        = params_.n_samples;
    state.counters["n_cols"]        = params_.n_dims;
    state.counters["degree"]        = params_.degree;
    state.counters["n_queries"]     = params_.n_queries;
    state.counters["k"]             = params_.k;
    state.counters["itopk_size"]    = params_.itopk_size;
    state.counters["block_size"]    = params_.block_size;
    state.counters["search_width"]  = params_.search_width;
    state.counters["iterations"]    = iterations;
    state.counters["removed_ratio"] = params_.removed_ratio;
  }

 private:
  const params params_;
  std::optional<const raft::neighbors::cagra::index<T, IdxT>> index_;
  raft::device_matrix<T, int64_t, row_major> queries_;
  raft::device_matrix<T, int64_t, row_major> dataset_;
  raft::device_matrix<IdxT, int64_t, row_major> knn_graph_;
  raft::core::bitset<std::uint32_t, IdxT> removed_indices_bitset_;  // filtered-out ids
};
/**
 * Enumerate CAGRA benchmark cases: a sweep over dataset dimensionality, then
 * over thread block sizes, then over removed_ratio values for filtered search.
 */
inline const std::vector<params> generate_inputs()
{
  std::vector<params> inputs =
    raft::util::itertools::product<params>({2000000ull},           // n_samples
                                           {128, 256, 512, 1024},  // dataset dim
                                           {1000},                 // n_queries
                                           {32},                   // k
                                           {64},                   // knn graph degree
                                           {64},                   // itopk_size
                                           {0},                    // block_size
                                           {1},                    // search_width
                                           {0},                    // max_iterations
                                           {0.0}                   // removed_ratio
    );
  auto inputs2 = raft::util::itertools::product<params>({2000000ull, 10000000ull},  // n_samples
                                                        {128},  // dataset dim
                                                        {1000},  // n_queries
                                                        {32},  // k
                                                        {64},  // knn graph degree
                                                        {64},  // itopk_size
                                                        {64, 128, 256, 512, 1024},  // block_size
                                                        {1},  // search_width
                                                        {0},  // max_iterations
                                                        {0.0}  // removed_ratio
  );
  inputs.insert(inputs.end(), inputs2.begin(), inputs2.end());
  inputs2 = raft::util::itertools::product<params>(
    {2000000ull, 10000000ull},                 // n_samples
    {128},                                     // dataset dim
    {1, 10, 10000},                            // n_queries
    {255},                                     // k
    {64},                                      // knn graph degree
    {300},                                     // itopk_size
    {256},                                     // block_size
    {2},                                       // search_width
    {0},                                       // max_iterations
    {0.0, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64}  // removed_ratio
  );
  inputs.insert(inputs.end(), inputs2.begin(), inputs2.end());
  return inputs;
}
// Materialized once; shared by every CAGRA benchmark registration.
const std::vector<params> kCagraInputs = generate_inputs();

/** Register a CagraBench instantiation for the given value/index types. */
#define CAGRA_REGISTER(ValT, IdxT, inputs)                 \
  namespace BENCHMARK_PRIVATE_NAME(knn) {                  \
  using AnnCagra = CagraBench<ValT, IdxT>;                 \
  RAFT_BENCH_REGISTER(AnnCagra, #ValT "/" #IdxT, inputs);  \
  }
} // namespace raft::bench::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/refine_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "refine.cuh"
#include <common/benchmark.hpp>
using namespace raft::neighbors;

namespace raft::bench::neighbors {

// Refine benchmark: uint8_t data, float distances, int64_t indices.
using refine_uint8_int64 = RefineAnn<uint8_t, float, int64_t>;
RAFT_BENCH_REGISTER(refine_uint8_int64, "", getInputs<int64_t>());

}  // namespace raft::bench::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/refine_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "refine.cuh"
#include <common/benchmark.hpp>
using namespace raft::neighbors;

namespace raft::bench::neighbors {

// Refine benchmark: float data, float distances, int64_t indices.
using refine_float_int64 = RefineAnn<float, float, int64_t>;
RAFT_BENCH_REGISTER(refine_float_int64, "", getInputs<int64_t>());

}  // namespace raft::bench::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/brute_force_float_uint32_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {

// Brute-force KNN: float data / uint32_t indices; all transfer strategies,
// build+search scope only.
KNN_REGISTER(float, uint32_t, brute_force_knn, kInputs, kAllStrategies, kScopeFull);

}  // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_pq_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {

// IVF-PQ: float data / int64_t indices; no-copy transfer, all scopes.
KNN_REGISTER(float, int64_t, ivf_pq_knn, kInputs, kNoCopyOnly, kAllScopes);

}  // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_pq_filter_float_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY  // Enable instantiation of search with filter

#include "../knn.cuh"

namespace raft::bench::spatial {

// Filtered IVF-PQ: float data / int64_t indices; removed_ratio sweep inputs.
KNN_REGISTER(float, int64_t, ivf_pq_filter_knn, kInputsFilter, kNoCopyOnly, kScopeFull);

}  // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_pq_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {

// IVF-PQ: uint8_t data / int64_t indices; no-copy transfer, all scopes.
KNN_REGISTER(uint8_t, int64_t, ivf_pq_knn, kInputs, kNoCopyOnly, kAllScopes);

}  // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/brute_force_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {

// Brute-force KNN: float data / int64_t indices; all transfer strategies,
// build+search scope only.
KNN_REGISTER(float, int64_t, brute_force_knn, kInputs, kAllStrategies, kScopeFull);

}  // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/cagra_float_uint32_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../cagra_bench.cuh"
namespace raft::bench::neighbors {
// Instantiate CAGRA benchmarks for float data with uint32_t indices over the
// kCagraInputs parameter set declared in ../cagra_bench.cuh.
CAGRA_REGISTER(float, uint32_t, kCagraInputs);
} // namespace raft::bench::neighbors
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_flat_float_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {
// Instantiate IVF-Flat kNN benchmarks for float data with int64_t indices.
// kInputs / kNoCopyOnly / kAllScopes are the parameter sets declared in ../knn.cuh.
KNN_REGISTER(float, int64_t, ivf_flat_knn, kInputs, kNoCopyOnly, kAllScopes);
} // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_flat_filter_float_int64_t.cu | /*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#undef RAFT_EXPLICIT_INSTANTIATE_ONLY // Enable instantiation of search with filter
#include "../knn.cuh"
namespace raft::bench::spatial {
// Instantiate filtered IVF-Flat kNN benchmarks for float data with int64_t indices.
// kInputsFilter / kNoCopyOnly / kScopeFull are the parameter sets declared in ../knn.cuh.
// Note: this TU #undef-s RAFT_EXPLICIT_INSTANTIATE_ONLY above to enable
// instantiation of search-with-filter.
KNN_REGISTER(float, int64_t, ivf_flat_filter_knn, kInputsFilter, kNoCopyOnly, kScopeFull);
} // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_flat_uint8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {
// Instantiate IVF-Flat kNN benchmarks for uint8_t data with int64_t indices.
// kInputs / kNoCopyOnly / kAllScopes are the parameter sets declared in ../knn.cuh.
KNN_REGISTER(uint8_t, int64_t, ivf_flat_knn, kInputs, kNoCopyOnly, kAllScopes);
} // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_pq_int8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {
// Instantiate IVF-PQ kNN benchmarks for int8_t data with int64_t indices.
// kInputs / kNoCopyOnly / kAllScopes are the parameter sets declared in ../knn.cuh.
KNN_REGISTER(int8_t, int64_t, ivf_pq_knn, kInputs, kNoCopyOnly, kAllScopes);
} // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims/neighbors | rapidsai_public_repos/raft/cpp/bench/prims/neighbors/knn/ivf_flat_int8_t_int64_t.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../knn.cuh"
namespace raft::bench::spatial {
// Instantiate IVF-Flat kNN benchmarks for int8_t data with int64_t indices.
// kInputs / kNoCopyOnly / kAllScopes are the parameter sets declared in ../knn.cuh.
KNN_REGISTER(int8_t, int64_t, ivf_flat_knn, kInputs, kNoCopyOnly, kAllScopes);
} // namespace raft::bench::spatial
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/add.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/add.cuh>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {

// Parameters for the element-wise add benchmark.
struct add_inputs {
  int len;  // number of elements in each device buffer
}; // struct add_inputs

// Benchmarks raft::linalg::add on device buffers of `len` elements of T.
template <typename T>
struct add : public fixture {
  add(const add_inputs& p) : params(p), acc(p.len, stream), addend(p.len, stream) {}

  void run_benchmark(::benchmark::State& state) override
  {
    loop_on_state(state, [this]() {
      // `acc` is passed both as the first (output) and second argument, so the
      // add is effectively in-place; `addend` supplies the other operand.
      raft::linalg::add(acc.data(), acc.data(), addend.data(), params.len, stream);
    });
  }

 private:
  add_inputs params;                   // benchmark configuration
  rmm::device_uvector<T> acc, addend;  // device operand buffers
}; // struct add

// One size at an exact 256 Mi-element count plus two slightly larger, non-round
// sizes (likely to exercise the non-vectorized tail paths).
const std::vector<add_inputs> add_input_vecs{
  {256 * 1024 * 1024}, {256 * 1024 * 1024 + 1}, {256 * 1024 * 1024 + 2}};

RAFT_BENCH_REGISTER(add<float>, "", add_input_vecs);
RAFT_BENCH_REGISTER(add<double>, "", add_input_vecs);

} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/reduce_cols_by_key.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/reduce_cols_by_key.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {

// Benchmark parameters: a rows x cols input matrix reduced into rows x keys outputs.
template <typename IdxT>
struct rcbk_params {
  IdxT rows, cols;
  IdxT keys;  // number of distinct key values (one output column group per key)
};

// Formats parameters as "rows#cols#keys" for the benchmark label.
template <typename IdxT>
inline auto operator<<(std::ostream& os, const rcbk_params<IdxT>& p) -> std::ostream&
{
  os << p.rows << "#" << p.cols << "#" << p.keys;
  return os;
}

// Benchmarks raft::linalg::reduce_cols_by_key: columns of `in` sharing the same
// key are combined into a single column of `out`.
template <typename T, typename KeyT, typename IdxT>
struct reduce_cols_by_key : public fixture {
  reduce_cols_by_key(const rcbk_params<IdxT>& p)
    : params(p), in(p.rows * p.cols, stream), out(p.rows * p.keys, stream), keys(p.cols, stream)
  {
    // Assign one random key in [0, keys) to each input column.
    raft::random::RngState rng{42};
    raft::random::uniformInt(handle, rng, keys.data(), p.cols, (KeyT)0, (KeyT)p.keys);
  }

  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      // NOTE(review): the trailing `false` presumably controls whether `out`
      // is reset to zero before accumulation -- confirm against
      // raft/linalg/reduce_cols_by_key.cuh.
      raft::linalg::reduce_cols_by_key(
        in.data(), keys.data(), out.data(), params.rows, params.cols, params.keys, stream, false);
    });
  }

 protected:
  rcbk_params<IdxT> params;        // benchmark configuration
  rmm::device_uvector<T> in, out;  // device input/output matrices
  rmm::device_uvector<KeyT> keys;  // per-column key assignment
}; // struct reduce_cols_by_key

// Sweep rows (1..1000) x cols (1k..100k) x keys (8..2048) for both index widths.
const std::vector<rcbk_params<int>> rcbk_inputs_i32 =
  raft::util::itertools::product<rcbk_params<int>>(
    {1, 10, 100, 1000}, {1000, 10000, 100000}, {8, 32, 128, 512, 2048});
const std::vector<rcbk_params<int64_t>> rcbk_inputs_i64 =
  raft::util::itertools::product<rcbk_params<int64_t>>(
    {1, 10, 100, 1000}, {1000, 10000, 100000}, {8, 32, 128, 512, 2048});

RAFT_BENCH_REGISTER((reduce_cols_by_key<float, uint32_t, int>), "", rcbk_inputs_i32);
RAFT_BENCH_REGISTER((reduce_cols_by_key<double, uint32_t, int>), "", rcbk_inputs_i32);
RAFT_BENCH_REGISTER((reduce_cols_by_key<float, uint32_t, int64_t>), "", rcbk_inputs_i64);
RAFT_BENCH_REGISTER((reduce_cols_by_key<double, uint32_t, int64_t>), "", rcbk_inputs_i64);

} // namespace raft::bench::linalg
| 0 |
rapidsai_public_repos/raft/cpp/bench/prims | rapidsai_public_repos/raft/cpp/bench/prims/linalg/normalize.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/benchmark.hpp>
#include <raft/linalg/normalize.cuh>
#include <raft/random/rng.cuh>
#include <raft/util/itertools.hpp>
#include <rmm/device_uvector.hpp>
namespace raft::bench::linalg {

// Benchmark parameters: dimensions of the rows x cols row-major input matrix.
template <typename IdxT>
struct normalize_input {
  IdxT rows, cols;
};

// Formats parameters as "rows#cols" for the benchmark label.
template <typename IdxT>
inline auto operator<<(std::ostream& os, const normalize_input<IdxT>& p) -> std::ostream&
{
  os << p.rows << "#" << p.cols;
  return os;
}

// Benchmarks raft::linalg::row_normalize (L2 norm) on a row-major matrix,
// writing normalized rows into a separate output buffer.
template <typename T, typename IdxT>
struct rowNormalize : public fixture {
  rowNormalize(const normalize_input<IdxT>& p)
    : params(p), in(p.rows * p.cols, stream), out(p.rows * p.cols, stream)
  {
    // Fill the input with uniform random values between -10 and 10.
    raft::random::RngState rng{1234};
    raft::random::uniform(handle, rng, in.data(), p.rows * p.cols, (T)-10.0, (T)10.0);
  }

  void run_benchmark(::benchmark::State& state) override
  {
    std::ostringstream label_stream;
    label_stream << params;
    state.SetLabel(label_stream.str());
    loop_on_state(state, [this]() {
      // Non-owning mdspan views over the pre-allocated device buffers.
      auto input_view = raft::make_device_matrix_view<const T, IdxT, raft::row_major>(
        in.data(), params.rows, params.cols);
      auto output_view = raft::make_device_matrix_view<T, IdxT, raft::row_major>(
        out.data(), params.rows, params.cols);
      raft::linalg::row_normalize(handle, input_view, output_view, raft::linalg::L2Norm);
    });
  }

 private:
  normalize_input<IdxT> params;    // benchmark configuration
  rmm::device_uvector<T> in, out;  // device input/output buffers
}; // struct rowNormalize

// Sweep rows (10..100k) x cols (8..16384) for both index widths.
const std::vector<normalize_input<int>> normalize_inputs_i32 =
  raft::util::itertools::product<normalize_input<int>>(
    {10, 100, 1000, 10000, 100000}, {8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384});
const std::vector<normalize_input<int64_t>> normalize_inputs_i64 =
  raft::util::itertools::product<normalize_input<int64_t>>(
    {10, 100, 1000, 10000, 100000}, {8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384});

RAFT_BENCH_REGISTER((rowNormalize<float, int>), "", normalize_inputs_i32);
RAFT_BENCH_REGISTER((rowNormalize<double, int>), "", normalize_inputs_i32);
RAFT_BENCH_REGISTER((rowNormalize<float, int64_t>), "", normalize_inputs_i64);
RAFT_BENCH_REGISTER((rowNormalize<double, int64_t>), "", normalize_inputs_i64);

} // namespace raft::bench::linalg
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.